]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-3.0-3.13.7-201403281902.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.13.7-201403281902.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b89a739..e289b9b 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 +TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56 @@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60 +ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64 @@ -92,32 +101,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68 +builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74 +clut_vga16.c
75 +common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82 +config.c
83 config.mak
84 config.mak.autogen
85 +config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92 +devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97 +dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101 +exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105 @@ -125,12 +142,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109 +gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116 +hash
117 +hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121 @@ -145,14 +165,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125 -kconfig
126 +kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133 -linux
134 +lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138 @@ -162,14 +182,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142 -media
143 mconf
144 +mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151 +mkpiggy
152 mkprep
153 mkregtable
154 mktables
155 @@ -185,6 +206,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159 +parse-events*
160 +pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164 @@ -194,6 +217,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168 +pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172 @@ -203,7 +227,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176 +randomize_layout_hash.h
177 +randomize_layout_seed.h
178 +realmode.lds
179 +realmode.relocs
180 recordmcount
181 +regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185 @@ -213,8 +242,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189 +signing_key*
190 +size_overflow_hash.h
191 sImage
192 +slabinfo
193 sm_tbl*
194 +sortextable
195 split-include
196 syscalltab.h
197 tables.c
198 @@ -224,6 +257,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202 +user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206 @@ -235,13 +269,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210 +vdsox32.lds
211 +vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218 +vmlinux.bin.bz2
219 vmlinux.lds
220 +vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224 @@ -249,9 +287,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228 +utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232 +x509*
233 zImage*
234 zconf.hash.c
235 +zconf.lex.c
236 zoffset.h
237 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
238 index b9e9bd8..bf49b92 100644
239 --- a/Documentation/kernel-parameters.txt
240 +++ b/Documentation/kernel-parameters.txt
241 @@ -1033,6 +1033,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
242 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
243 Default: 1024
244
245 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
246 + ignore grsecurity's /proc restrictions
247 +
248 +
249 hashdist= [KNL,NUMA] Large hashes allocated during boot
250 are distributed across NUMA nodes. Defaults on
251 for 64-bit NUMA, off otherwise.
252 @@ -2018,6 +2022,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
253 noexec=on: enable non-executable mappings (default)
254 noexec=off: disable non-executable mappings
255
256 + nopcid [X86-64]
257 + Disable PCID (Process-Context IDentifier) even if it
258 + is supported by the processor.
259 +
260 nosmap [X86]
261 Disable SMAP (Supervisor Mode Access Prevention)
262 even if it is supported by processor.
263 @@ -2285,6 +2293,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
264 the specified number of seconds. This is to be used if
265 your oopses keep scrolling off the screen.
266
267 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
268 + virtualization environments that don't cope well with the
269 + expand down segment used by UDEREF on X86-32 or the frequent
270 + page table updates on X86-64.
271 +
272 + pax_sanitize_slab=
273 + 0/1 to disable/enable slab object sanitization (enabled by
274 + default).
275 +
276 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
277 +
278 + pax_extra_latent_entropy
279 + Enable a very simple form of latent entropy extraction
280 + from the first 4GB of memory as the bootmem allocator
281 + passes the memory pages to the buddy allocator.
282 +
283 + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
284 + when the processor supports PCID.
285 +
286 pcbit= [HW,ISDN]
287
288 pcd. [PARIDE]
289 diff --git a/Makefile b/Makefile
290 index 9f214b4..8c9c622 100644
291 --- a/Makefile
292 +++ b/Makefile
293 @@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
294
295 HOSTCC = gcc
296 HOSTCXX = g++
297 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
298 -HOSTCXXFLAGS = -O2
299 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
300 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
301 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
302
303 # Decide whether to build built-in, modular, or both.
304 # Normally, just do built-in.
305 @@ -311,9 +312,15 @@ endif
306 # If the user is running make -s (silent mode), suppress echoing of
307 # commands
308
309 +ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
310 +ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
311 + quiet=silent_
312 +endif
313 +else # make-3.8x
314 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
315 quiet=silent_
316 endif
317 +endif
318
319 export quiet Q KBUILD_VERBOSE
320
321 @@ -417,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
322 # Rules shared between *config targets and build targets
323
324 # Basic helpers built in scripts/
325 -PHONY += scripts_basic
326 -scripts_basic:
327 +PHONY += scripts_basic gcc-plugins
328 +scripts_basic: gcc-plugins
329 $(Q)$(MAKE) $(build)=scripts/basic
330 $(Q)rm -f .tmp_quiet_recordmcount
331
332 @@ -579,6 +586,72 @@ else
333 KBUILD_CFLAGS += -O2
334 endif
335
336 +ifndef DISABLE_PAX_PLUGINS
337 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
338 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
339 +else
340 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
341 +endif
342 +ifneq ($(PLUGINCC),)
343 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
344 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
345 +endif
346 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
347 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
348 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
349 +endif
350 +ifdef CONFIG_KALLOCSTAT_PLUGIN
351 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
352 +endif
353 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
354 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
355 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
356 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
357 +endif
358 +ifdef CONFIG_GRKERNSEC_RANDSTRUCT
359 +RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
360 +ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
361 +RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
362 +endif
363 +endif
364 +ifdef CONFIG_CHECKER_PLUGIN
365 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
366 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
367 +endif
368 +endif
369 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
370 +ifdef CONFIG_PAX_SIZE_OVERFLOW
371 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
372 +endif
373 +ifdef CONFIG_PAX_LATENT_ENTROPY
374 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
375 +endif
376 +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
377 +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
378 +endif
379 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
380 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
381 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
382 +GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
383 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
384 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
385 +ifeq ($(KBUILD_EXTMOD),)
386 +gcc-plugins:
387 + $(Q)$(MAKE) $(build)=tools/gcc
388 +else
389 +gcc-plugins: ;
390 +endif
391 +else
392 +gcc-plugins:
393 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
394 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
395 +else
396 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
397 +endif
398 + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
399 +endif
400 +endif
401 +
402 include $(srctree)/arch/$(SRCARCH)/Makefile
403
404 ifdef CONFIG_READABLE_ASM
405 @@ -619,7 +692,7 @@ endif
406
407 ifdef CONFIG_DEBUG_INFO
408 KBUILD_CFLAGS += -g
409 -KBUILD_AFLAGS += -gdwarf-2
410 +KBUILD_AFLAGS += -Wa,--gdwarf-2
411 endif
412
413 ifdef CONFIG_DEBUG_INFO_REDUCED
414 @@ -754,7 +827,7 @@ export mod_sign_cmd
415
416
417 ifeq ($(KBUILD_EXTMOD),)
418 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
419 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
420
421 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
422 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
423 @@ -803,6 +876,8 @@ endif
424
425 # The actual objects are generated when descending,
426 # make sure no implicit rule kicks in
427 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
428 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
429 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
430
431 # Handle descending into subdirectories listed in $(vmlinux-dirs)
432 @@ -812,7 +887,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
433 # Error messages still appears in the original language
434
435 PHONY += $(vmlinux-dirs)
436 -$(vmlinux-dirs): prepare scripts
437 +$(vmlinux-dirs): gcc-plugins prepare scripts
438 $(Q)$(MAKE) $(build)=$@
439
440 define filechk_kernel.release
441 @@ -855,10 +930,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
442
443 archprepare: archheaders archscripts prepare1 scripts_basic
444
445 +prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
446 +prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
447 prepare0: archprepare FORCE
448 $(Q)$(MAKE) $(build)=.
449
450 # All the preparing..
451 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
452 prepare: prepare0
453
454 # Generate some files
455 @@ -966,6 +1044,8 @@ all: modules
456 # using awk while concatenating to the final file.
457
458 PHONY += modules
459 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
460 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
461 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
462 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
463 @$(kecho) ' Building modules, stage 2.';
464 @@ -981,7 +1061,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
465
466 # Target to prepare building external modules
467 PHONY += modules_prepare
468 -modules_prepare: prepare scripts
469 +modules_prepare: gcc-plugins prepare scripts
470
471 # Target to install modules
472 PHONY += modules_install
473 @@ -1047,7 +1127,8 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
474 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
475 signing_key.priv signing_key.x509 x509.genkey \
476 extra_certificates signing_key.x509.keyid \
477 - signing_key.x509.signer
478 + signing_key.x509.signer tools/gcc/size_overflow_hash.h \
479 + tools/gcc/randomize_layout_seed.h
480
481 # clean - Delete most, but leave enough to build external modules
482 #
483 @@ -1087,6 +1168,7 @@ distclean: mrproper
484 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
485 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
486 -o -name '.*.rej' \
487 + -o -name '.*.rej' -o -name '*.so' \
488 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
489 -type f -print | xargs rm -f
490
491 @@ -1248,6 +1330,8 @@ PHONY += $(module-dirs) modules
492 $(module-dirs): crmodverdir $(objtree)/Module.symvers
493 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
494
495 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
496 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
497 modules: $(module-dirs)
498 @$(kecho) ' Building modules, stage 2.';
499 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
500 @@ -1387,17 +1471,21 @@ else
501 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
502 endif
503
504 -%.s: %.c prepare scripts FORCE
505 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
506 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
507 +%.s: %.c gcc-plugins prepare scripts FORCE
508 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
509 %.i: %.c prepare scripts FORCE
510 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
511 -%.o: %.c prepare scripts FORCE
512 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
513 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
514 +%.o: %.c gcc-plugins prepare scripts FORCE
515 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
516 %.lst: %.c prepare scripts FORCE
517 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
518 -%.s: %.S prepare scripts FORCE
519 +%.s: %.S gcc-plugins prepare scripts FORCE
520 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
521 -%.o: %.S prepare scripts FORCE
522 +%.o: %.S gcc-plugins prepare scripts FORCE
523 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
524 %.symtypes: %.c prepare scripts FORCE
525 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
526 @@ -1407,11 +1495,15 @@ endif
527 $(cmd_crmodverdir)
528 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
529 $(build)=$(build-dir)
530 -%/: prepare scripts FORCE
531 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
532 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
533 +%/: gcc-plugins prepare scripts FORCE
534 $(cmd_crmodverdir)
535 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
536 $(build)=$(build-dir)
537 -%.ko: prepare scripts FORCE
538 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
539 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
540 +%.ko: gcc-plugins prepare scripts FORCE
541 $(cmd_crmodverdir)
542 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
543 $(build)=$(build-dir) $(@:.ko=.o)
544 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
545 index 78b03ef..da28a51 100644
546 --- a/arch/alpha/include/asm/atomic.h
547 +++ b/arch/alpha/include/asm/atomic.h
548 @@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
549 #define atomic_dec(v) atomic_sub(1,(v))
550 #define atomic64_dec(v) atomic64_sub(1,(v))
551
552 +#define atomic64_read_unchecked(v) atomic64_read(v)
553 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
554 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
555 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
556 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
557 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
558 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
559 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
560 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
561 +
562 #define smp_mb__before_atomic_dec() smp_mb()
563 #define smp_mb__after_atomic_dec() smp_mb()
564 #define smp_mb__before_atomic_inc() smp_mb()
565 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
566 index ad368a9..fbe0f25 100644
567 --- a/arch/alpha/include/asm/cache.h
568 +++ b/arch/alpha/include/asm/cache.h
569 @@ -4,19 +4,19 @@
570 #ifndef __ARCH_ALPHA_CACHE_H
571 #define __ARCH_ALPHA_CACHE_H
572
573 +#include <linux/const.h>
574
575 /* Bytes per L1 (data) cache line. */
576 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
577 -# define L1_CACHE_BYTES 64
578 # define L1_CACHE_SHIFT 6
579 #else
580 /* Both EV4 and EV5 are write-through, read-allocate,
581 direct-mapped, physical.
582 */
583 -# define L1_CACHE_BYTES 32
584 # define L1_CACHE_SHIFT 5
585 #endif
586
587 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
588 #define SMP_CACHE_BYTES L1_CACHE_BYTES
589
590 #endif
591 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
592 index 968d999..d36b2df 100644
593 --- a/arch/alpha/include/asm/elf.h
594 +++ b/arch/alpha/include/asm/elf.h
595 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
596
597 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
598
599 +#ifdef CONFIG_PAX_ASLR
600 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
601 +
602 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
603 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
604 +#endif
605 +
606 /* $0 is set by ld.so to a pointer to a function which might be
607 registered using atexit. This provides a mean for the dynamic
608 linker to call DT_FINI functions for shared libraries that have
609 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
610 index aab14a0..b4fa3e7 100644
611 --- a/arch/alpha/include/asm/pgalloc.h
612 +++ b/arch/alpha/include/asm/pgalloc.h
613 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
614 pgd_set(pgd, pmd);
615 }
616
617 +static inline void
618 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
619 +{
620 + pgd_populate(mm, pgd, pmd);
621 +}
622 +
623 extern pgd_t *pgd_alloc(struct mm_struct *mm);
624
625 static inline void
626 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
627 index d8f9b7e..f6222fa 100644
628 --- a/arch/alpha/include/asm/pgtable.h
629 +++ b/arch/alpha/include/asm/pgtable.h
630 @@ -102,6 +102,17 @@ struct vm_area_struct;
631 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
632 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
633 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
634 +
635 +#ifdef CONFIG_PAX_PAGEEXEC
636 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
637 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
638 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
639 +#else
640 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
641 +# define PAGE_COPY_NOEXEC PAGE_COPY
642 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
643 +#endif
644 +
645 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
646
647 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
648 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
649 index 2fd00b7..cfd5069 100644
650 --- a/arch/alpha/kernel/module.c
651 +++ b/arch/alpha/kernel/module.c
652 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
653
654 /* The small sections were sorted to the end of the segment.
655 The following should definitely cover them. */
656 - gp = (u64)me->module_core + me->core_size - 0x8000;
657 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
658 got = sechdrs[me->arch.gotsecindex].sh_addr;
659
660 for (i = 0; i < n; i++) {
661 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
662 index 1402fcc..0b1abd2 100644
663 --- a/arch/alpha/kernel/osf_sys.c
664 +++ b/arch/alpha/kernel/osf_sys.c
665 @@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
666 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
667
668 static unsigned long
669 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
670 - unsigned long limit)
671 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
672 + unsigned long limit, unsigned long flags)
673 {
674 struct vm_unmapped_area_info info;
675 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
676
677 info.flags = 0;
678 info.length = len;
679 @@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
680 info.high_limit = limit;
681 info.align_mask = 0;
682 info.align_offset = 0;
683 + info.threadstack_offset = offset;
684 return vm_unmapped_area(&info);
685 }
686
687 @@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
688 merely specific addresses, but regions of memory -- perhaps
689 this feature should be incorporated into all ports? */
690
691 +#ifdef CONFIG_PAX_RANDMMAP
692 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
693 +#endif
694 +
695 if (addr) {
696 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
697 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
698 if (addr != (unsigned long) -ENOMEM)
699 return addr;
700 }
701
702 /* Next, try allocating at TASK_UNMAPPED_BASE. */
703 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
704 - len, limit);
705 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
706 +
707 if (addr != (unsigned long) -ENOMEM)
708 return addr;
709
710 /* Finally, try allocating in low memory. */
711 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
712 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
713
714 return addr;
715 }
716 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
717 index 98838a0..b304fb4 100644
718 --- a/arch/alpha/mm/fault.c
719 +++ b/arch/alpha/mm/fault.c
720 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
721 __reload_thread(pcb);
722 }
723
724 +#ifdef CONFIG_PAX_PAGEEXEC
725 +/*
726 + * PaX: decide what to do with offenders (regs->pc = fault address)
727 + *
728 + * returns 1 when task should be killed
729 + * 2 when patched PLT trampoline was detected
730 + * 3 when unpatched PLT trampoline was detected
731 + */
732 +static int pax_handle_fetch_fault(struct pt_regs *regs)
733 +{
734 +
735 +#ifdef CONFIG_PAX_EMUPLT
736 + int err;
737 +
738 + do { /* PaX: patched PLT emulation #1 */
739 + unsigned int ldah, ldq, jmp;
740 +
741 + err = get_user(ldah, (unsigned int *)regs->pc);
742 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
743 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
744 +
745 + if (err)
746 + break;
747 +
748 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
749 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
750 + jmp == 0x6BFB0000U)
751 + {
752 + unsigned long r27, addr;
753 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
754 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
755 +
756 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
757 + err = get_user(r27, (unsigned long *)addr);
758 + if (err)
759 + break;
760 +
761 + regs->r27 = r27;
762 + regs->pc = r27;
763 + return 2;
764 + }
765 + } while (0);
766 +
767 + do { /* PaX: patched PLT emulation #2 */
768 + unsigned int ldah, lda, br;
769 +
770 + err = get_user(ldah, (unsigned int *)regs->pc);
771 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
772 + err |= get_user(br, (unsigned int *)(regs->pc+8));
773 +
774 + if (err)
775 + break;
776 +
777 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
778 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
779 + (br & 0xFFE00000U) == 0xC3E00000U)
780 + {
781 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
782 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
783 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
784 +
785 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
786 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
787 + return 2;
788 + }
789 + } while (0);
790 +
791 + do { /* PaX: unpatched PLT emulation */
792 + unsigned int br;
793 +
794 + err = get_user(br, (unsigned int *)regs->pc);
795 +
796 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
797 + unsigned int br2, ldq, nop, jmp;
798 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
799 +
800 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
801 + err = get_user(br2, (unsigned int *)addr);
802 + err |= get_user(ldq, (unsigned int *)(addr+4));
803 + err |= get_user(nop, (unsigned int *)(addr+8));
804 + err |= get_user(jmp, (unsigned int *)(addr+12));
805 + err |= get_user(resolver, (unsigned long *)(addr+16));
806 +
807 + if (err)
808 + break;
809 +
810 + if (br2 == 0xC3600000U &&
811 + ldq == 0xA77B000CU &&
812 + nop == 0x47FF041FU &&
813 + jmp == 0x6B7B0000U)
814 + {
815 + regs->r28 = regs->pc+4;
816 + regs->r27 = addr+16;
817 + regs->pc = resolver;
818 + return 3;
819 + }
820 + }
821 + } while (0);
822 +#endif
823 +
824 + return 1;
825 +}
826 +
827 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
828 +{
829 + unsigned long i;
830 +
831 + printk(KERN_ERR "PAX: bytes at PC: ");
832 + for (i = 0; i < 5; i++) {
833 + unsigned int c;
834 + if (get_user(c, (unsigned int *)pc+i))
835 + printk(KERN_CONT "???????? ");
836 + else
837 + printk(KERN_CONT "%08x ", c);
838 + }
839 + printk("\n");
840 +}
841 +#endif
842
843 /*
844 * This routine handles page faults. It determines the address,
845 @@ -133,8 +251,29 @@ retry:
846 good_area:
847 si_code = SEGV_ACCERR;
848 if (cause < 0) {
849 - if (!(vma->vm_flags & VM_EXEC))
850 + if (!(vma->vm_flags & VM_EXEC)) {
851 +
852 +#ifdef CONFIG_PAX_PAGEEXEC
853 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
854 + goto bad_area;
855 +
856 + up_read(&mm->mmap_sem);
857 + switch (pax_handle_fetch_fault(regs)) {
858 +
859 +#ifdef CONFIG_PAX_EMUPLT
860 + case 2:
861 + case 3:
862 + return;
863 +#endif
864 +
865 + }
866 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
867 + do_group_exit(SIGKILL);
868 +#else
869 goto bad_area;
870 +#endif
871 +
872 + }
873 } else if (!cause) {
874 /* Allow reads even for write-only mappings */
875 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
876 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
877 index 47085a0..f975a53 100644
878 --- a/arch/arm/Kconfig
879 +++ b/arch/arm/Kconfig
880 @@ -1830,7 +1830,7 @@ config ALIGNMENT_TRAP
881
882 config UACCESS_WITH_MEMCPY
883 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
884 - depends on MMU
885 + depends on MMU && !PAX_MEMORY_UDEREF
886 default y if CPU_FEROCEON
887 help
888 Implement faster copy_to_user and clear_user methods for CPU
889 @@ -2102,6 +2102,7 @@ config XIP_PHYS_ADDR
890 config KEXEC
891 bool "Kexec system call (EXPERIMENTAL)"
892 depends on (!SMP || PM_SLEEP_SMP)
893 + depends on !GRKERNSEC_KMEM
894 help
895 kexec is a system call that implements the ability to shutdown your
896 current kernel, and to start another kernel. It is like a reboot
897 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
898 index 62d2cb5..09d45e3 100644
899 --- a/arch/arm/include/asm/atomic.h
900 +++ b/arch/arm/include/asm/atomic.h
901 @@ -18,17 +18,35 @@
902 #include <asm/barrier.h>
903 #include <asm/cmpxchg.h>
904
905 +#ifdef CONFIG_GENERIC_ATOMIC64
906 +#include <asm-generic/atomic64.h>
907 +#endif
908 +
909 #define ATOMIC_INIT(i) { (i) }
910
911 #ifdef __KERNEL__
912
913 +#define _ASM_EXTABLE(from, to) \
914 +" .pushsection __ex_table,\"a\"\n"\
915 +" .align 3\n" \
916 +" .long " #from ", " #to"\n" \
917 +" .popsection"
918 +
919 /*
920 * On ARM, ordinary assignment (str instruction) doesn't clear the local
921 * strex/ldrex monitor on some implementations. The reason we can use it for
922 * atomic_set() is the clrex or dummy strex done on every exception return.
923 */
924 #define atomic_read(v) (*(volatile int *)&(v)->counter)
925 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
926 +{
927 + return v->counter;
928 +}
929 #define atomic_set(v,i) (((v)->counter) = (i))
930 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
931 +{
932 + v->counter = i;
933 +}
934
935 #if __LINUX_ARM_ARCH__ >= 6
936
937 @@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
938
939 prefetchw(&v->counter);
940 __asm__ __volatile__("@ atomic_add\n"
941 +"1: ldrex %1, [%3]\n"
942 +" adds %0, %1, %4\n"
943 +
944 +#ifdef CONFIG_PAX_REFCOUNT
945 +" bvc 3f\n"
946 +"2: bkpt 0xf103\n"
947 +"3:\n"
948 +#endif
949 +
950 +" strex %1, %0, [%3]\n"
951 +" teq %1, #0\n"
952 +" bne 1b"
953 +
954 +#ifdef CONFIG_PAX_REFCOUNT
955 +"\n4:\n"
956 + _ASM_EXTABLE(2b, 4b)
957 +#endif
958 +
959 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
960 + : "r" (&v->counter), "Ir" (i)
961 + : "cc");
962 +}
963 +
964 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
965 +{
966 + unsigned long tmp;
967 + int result;
968 +
969 + prefetchw(&v->counter);
970 + __asm__ __volatile__("@ atomic_add_unchecked\n"
971 "1: ldrex %0, [%3]\n"
972 " add %0, %0, %4\n"
973 " strex %1, %0, [%3]\n"
974 @@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
975 smp_mb();
976
977 __asm__ __volatile__("@ atomic_add_return\n"
978 +"1: ldrex %1, [%3]\n"
979 +" adds %0, %1, %4\n"
980 +
981 +#ifdef CONFIG_PAX_REFCOUNT
982 +" bvc 3f\n"
983 +" mov %0, %1\n"
984 +"2: bkpt 0xf103\n"
985 +"3:\n"
986 +#endif
987 +
988 +" strex %1, %0, [%3]\n"
989 +" teq %1, #0\n"
990 +" bne 1b"
991 +
992 +#ifdef CONFIG_PAX_REFCOUNT
993 +"\n4:\n"
994 + _ASM_EXTABLE(2b, 4b)
995 +#endif
996 +
997 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
998 + : "r" (&v->counter), "Ir" (i)
999 + : "cc");
1000 +
1001 + smp_mb();
1002 +
1003 + return result;
1004 +}
1005 +
1006 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1007 +{
1008 + unsigned long tmp;
1009 + int result;
1010 +
1011 + smp_mb();
1012 +
1013 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1014 "1: ldrex %0, [%3]\n"
1015 " add %0, %0, %4\n"
1016 " strex %1, %0, [%3]\n"
1017 @@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1018
1019 prefetchw(&v->counter);
1020 __asm__ __volatile__("@ atomic_sub\n"
1021 +"1: ldrex %1, [%3]\n"
1022 +" subs %0, %1, %4\n"
1023 +
1024 +#ifdef CONFIG_PAX_REFCOUNT
1025 +" bvc 3f\n"
1026 +"2: bkpt 0xf103\n"
1027 +"3:\n"
1028 +#endif
1029 +
1030 +" strex %1, %0, [%3]\n"
1031 +" teq %1, #0\n"
1032 +" bne 1b"
1033 +
1034 +#ifdef CONFIG_PAX_REFCOUNT
1035 +"\n4:\n"
1036 + _ASM_EXTABLE(2b, 4b)
1037 +#endif
1038 +
1039 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1040 + : "r" (&v->counter), "Ir" (i)
1041 + : "cc");
1042 +}
1043 +
1044 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1045 +{
1046 + unsigned long tmp;
1047 + int result;
1048 +
1049 + prefetchw(&v->counter);
1050 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
1051 "1: ldrex %0, [%3]\n"
1052 " sub %0, %0, %4\n"
1053 " strex %1, %0, [%3]\n"
1054 @@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1055 smp_mb();
1056
1057 __asm__ __volatile__("@ atomic_sub_return\n"
1058 -"1: ldrex %0, [%3]\n"
1059 -" sub %0, %0, %4\n"
1060 +"1: ldrex %1, [%3]\n"
1061 +" subs %0, %1, %4\n"
1062 +
1063 +#ifdef CONFIG_PAX_REFCOUNT
1064 +" bvc 3f\n"
1065 +" mov %0, %1\n"
1066 +"2: bkpt 0xf103\n"
1067 +"3:\n"
1068 +#endif
1069 +
1070 " strex %1, %0, [%3]\n"
1071 " teq %1, #0\n"
1072 " bne 1b"
1073 +
1074 +#ifdef CONFIG_PAX_REFCOUNT
1075 +"\n4:\n"
1076 + _ASM_EXTABLE(2b, 4b)
1077 +#endif
1078 +
1079 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1080 : "r" (&v->counter), "Ir" (i)
1081 : "cc");
1082 @@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1083 return oldval;
1084 }
1085
1086 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1087 +{
1088 + unsigned long oldval, res;
1089 +
1090 + smp_mb();
1091 +
1092 + do {
1093 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1094 + "ldrex %1, [%3]\n"
1095 + "mov %0, #0\n"
1096 + "teq %1, %4\n"
1097 + "strexeq %0, %5, [%3]\n"
1098 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1099 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1100 + : "cc");
1101 + } while (res);
1102 +
1103 + smp_mb();
1104 +
1105 + return oldval;
1106 +}
1107 +
1108 #else /* ARM_ARCH_6 */
1109
1110 #ifdef CONFIG_SMP
1111 @@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1112
1113 return val;
1114 }
1115 +
1116 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1117 +{
1118 + return atomic_add_return(i, v);
1119 +}
1120 +
1121 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1122 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1123 +{
1124 + (void) atomic_add_return(i, v);
1125 +}
1126
1127 static inline int atomic_sub_return(int i, atomic_t *v)
1128 {
1129 @@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1130 return val;
1131 }
1132 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1133 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1134 +{
1135 + (void) atomic_sub_return(i, v);
1136 +}
1137
1138 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1139 {
1140 @@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1141 return ret;
1142 }
1143
1144 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1145 +{
1146 + return atomic_cmpxchg(v, old, new);
1147 +}
1148 +
1149 #endif /* __LINUX_ARM_ARCH__ */
1150
1151 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1152 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1153 +{
1154 + return xchg(&v->counter, new);
1155 +}
1156
1157 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1158 {
1159 @@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1160 }
1161
1162 #define atomic_inc(v) atomic_add(1, v)
1163 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1164 +{
1165 + atomic_add_unchecked(1, v);
1166 +}
1167 #define atomic_dec(v) atomic_sub(1, v)
1168 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1169 +{
1170 + atomic_sub_unchecked(1, v);
1171 +}
1172
1173 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1174 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1175 +{
1176 + return atomic_add_return_unchecked(1, v) == 0;
1177 +}
1178 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1179 #define atomic_inc_return(v) (atomic_add_return(1, v))
1180 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1181 +{
1182 + return atomic_add_return_unchecked(1, v);
1183 +}
1184 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1185 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1186
1187 @@ -221,6 +410,14 @@ typedef struct {
1188 long long counter;
1189 } atomic64_t;
1190
1191 +#ifdef CONFIG_PAX_REFCOUNT
1192 +typedef struct {
1193 + long long counter;
1194 +} atomic64_unchecked_t;
1195 +#else
1196 +typedef atomic64_t atomic64_unchecked_t;
1197 +#endif
1198 +
1199 #define ATOMIC64_INIT(i) { (i) }
1200
1201 #ifdef CONFIG_ARM_LPAE
1202 @@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1203 return result;
1204 }
1205
1206 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1207 +{
1208 + long long result;
1209 +
1210 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1211 +" ldrd %0, %H0, [%1]"
1212 + : "=&r" (result)
1213 + : "r" (&v->counter), "Qo" (v->counter)
1214 + );
1215 +
1216 + return result;
1217 +}
1218 +
1219 static inline void atomic64_set(atomic64_t *v, long long i)
1220 {
1221 __asm__ __volatile__("@ atomic64_set\n"
1222 @@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1223 : "r" (&v->counter), "r" (i)
1224 );
1225 }
1226 +
1227 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1228 +{
1229 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1230 +" strd %2, %H2, [%1]"
1231 + : "=Qo" (v->counter)
1232 + : "r" (&v->counter), "r" (i)
1233 + );
1234 +}
1235 #else
1236 static inline long long atomic64_read(const atomic64_t *v)
1237 {
1238 @@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1239 return result;
1240 }
1241
1242 +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1243 +{
1244 + long long result;
1245 +
1246 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1247 +" ldrexd %0, %H0, [%1]"
1248 + : "=&r" (result)
1249 + : "r" (&v->counter), "Qo" (v->counter)
1250 + );
1251 +
1252 + return result;
1253 +}
1254 +
1255 static inline void atomic64_set(atomic64_t *v, long long i)
1256 {
1257 long long tmp;
1258 @@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1259 : "r" (&v->counter), "r" (i)
1260 : "cc");
1261 }
1262 +
1263 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1264 +{
1265 + long long tmp;
1266 +
1267 + prefetchw(&v->counter);
1268 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1269 +"1: ldrexd %0, %H0, [%2]\n"
1270 +" strexd %0, %3, %H3, [%2]\n"
1271 +" teq %0, #0\n"
1272 +" bne 1b"
1273 + : "=&r" (tmp), "=Qo" (v->counter)
1274 + : "r" (&v->counter), "r" (i)
1275 + : "cc");
1276 +}
1277 #endif
1278
1279 static inline void atomic64_add(long long i, atomic64_t *v)
1280 @@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1281 __asm__ __volatile__("@ atomic64_add\n"
1282 "1: ldrexd %0, %H0, [%3]\n"
1283 " adds %Q0, %Q0, %Q4\n"
1284 +" adcs %R0, %R0, %R4\n"
1285 +
1286 +#ifdef CONFIG_PAX_REFCOUNT
1287 +" bvc 3f\n"
1288 +"2: bkpt 0xf103\n"
1289 +"3:\n"
1290 +#endif
1291 +
1292 +" strexd %1, %0, %H0, [%3]\n"
1293 +" teq %1, #0\n"
1294 +" bne 1b"
1295 +
1296 +#ifdef CONFIG_PAX_REFCOUNT
1297 +"\n4:\n"
1298 + _ASM_EXTABLE(2b, 4b)
1299 +#endif
1300 +
1301 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1302 + : "r" (&v->counter), "r" (i)
1303 + : "cc");
1304 +}
1305 +
1306 +static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1307 +{
1308 + long long result;
1309 + unsigned long tmp;
1310 +
1311 + prefetchw(&v->counter);
1312 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1313 +"1: ldrexd %0, %H0, [%3]\n"
1314 +" adds %Q0, %Q0, %Q4\n"
1315 " adc %R0, %R0, %R4\n"
1316 " strexd %1, %0, %H0, [%3]\n"
1317 " teq %1, #0\n"
1318 @@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_add_return\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " adds %Q0, %Q0, %Q4\n"
1322 +" adcs %R0, %R0, %R4\n"
1323 +
1324 +#ifdef CONFIG_PAX_REFCOUNT
1325 +" bvc 3f\n"
1326 +" mov %0, %1\n"
1327 +" mov %H0, %H1\n"
1328 +"2: bkpt 0xf103\n"
1329 +"3:\n"
1330 +#endif
1331 +
1332 +" strexd %1, %0, %H0, [%3]\n"
1333 +" teq %1, #0\n"
1334 +" bne 1b"
1335 +
1336 +#ifdef CONFIG_PAX_REFCOUNT
1337 +"\n4:\n"
1338 + _ASM_EXTABLE(2b, 4b)
1339 +#endif
1340 +
1341 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1342 + : "r" (&v->counter), "r" (i)
1343 + : "cc");
1344 +
1345 + smp_mb();
1346 +
1347 + return result;
1348 +}
1349 +
1350 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1351 +{
1352 + long long result;
1353 + unsigned long tmp;
1354 +
1355 + smp_mb();
1356 +
1357 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1358 +"1: ldrexd %0, %H0, [%3]\n"
1359 +" adds %Q0, %Q0, %Q4\n"
1360 " adc %R0, %R0, %R4\n"
1361 " strexd %1, %0, %H0, [%3]\n"
1362 " teq %1, #0\n"
1363 @@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1364 __asm__ __volatile__("@ atomic64_sub\n"
1365 "1: ldrexd %0, %H0, [%3]\n"
1366 " subs %Q0, %Q0, %Q4\n"
1367 +" sbcs %R0, %R0, %R4\n"
1368 +
1369 +#ifdef CONFIG_PAX_REFCOUNT
1370 +" bvc 3f\n"
1371 +"2: bkpt 0xf103\n"
1372 +"3:\n"
1373 +#endif
1374 +
1375 +" strexd %1, %0, %H0, [%3]\n"
1376 +" teq %1, #0\n"
1377 +" bne 1b"
1378 +
1379 +#ifdef CONFIG_PAX_REFCOUNT
1380 +"\n4:\n"
1381 + _ASM_EXTABLE(2b, 4b)
1382 +#endif
1383 +
1384 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1385 + : "r" (&v->counter), "r" (i)
1386 + : "cc");
1387 +}
1388 +
1389 +static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1390 +{
1391 + long long result;
1392 + unsigned long tmp;
1393 +
1394 + prefetchw(&v->counter);
1395 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1396 +"1: ldrexd %0, %H0, [%3]\n"
1397 +" subs %Q0, %Q0, %Q4\n"
1398 " sbc %R0, %R0, %R4\n"
1399 " strexd %1, %0, %H0, [%3]\n"
1400 " teq %1, #0\n"
1401 @@ -344,16 +691,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1402 __asm__ __volatile__("@ atomic64_sub_return\n"
1403 "1: ldrexd %0, %H0, [%3]\n"
1404 " subs %Q0, %Q0, %Q4\n"
1405 -" sbc %R0, %R0, %R4\n"
1406 +" sbcs %R0, %R0, %R4\n"
1407 +
1408 +#ifdef CONFIG_PAX_REFCOUNT
1409 +" bvc 3f\n"
1410 +" mov %0, %1\n"
1411 +" mov %H0, %H1\n"
1412 +"2: bkpt 0xf103\n"
1413 +"3:\n"
1414 +#endif
1415 +
1416 " strexd %1, %0, %H0, [%3]\n"
1417 " teq %1, #0\n"
1418 " bne 1b"
1419 +
1420 +#ifdef CONFIG_PAX_REFCOUNT
1421 +"\n4:\n"
1422 + _ASM_EXTABLE(2b, 4b)
1423 +#endif
1424 +
1425 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1426 : "r" (&v->counter), "r" (i)
1427 : "cc");
1428
1429 - smp_mb();
1430 -
1431 return result;
1432 }
1433
1434 @@ -382,6 +742,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1435 return oldval;
1436 }
1437
1438 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1439 + long long new)
1440 +{
1441 + long long oldval;
1442 + unsigned long res;
1443 +
1444 + smp_mb();
1445 +
1446 + do {
1447 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1448 + "ldrexd %1, %H1, [%3]\n"
1449 + "mov %0, #0\n"
1450 + "teq %1, %4\n"
1451 + "teqeq %H1, %H4\n"
1452 + "strexdeq %0, %5, %H5, [%3]"
1453 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1454 + : "r" (&ptr->counter), "r" (old), "r" (new)
1455 + : "cc");
1456 + } while (res);
1457 +
1458 + smp_mb();
1459 +
1460 + return oldval;
1461 +}
1462 +
1463 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1464 {
1465 long long result;
1466 @@ -406,20 +791,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1467 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1468 {
1469 long long result;
1470 - unsigned long tmp;
1471 + u64 tmp;
1472
1473 smp_mb();
1474
1475 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1476 -"1: ldrexd %0, %H0, [%3]\n"
1477 -" subs %Q0, %Q0, #1\n"
1478 -" sbc %R0, %R0, #0\n"
1479 +"1: ldrexd %1, %H1, [%3]\n"
1480 +" subs %Q0, %Q1, #1\n"
1481 +" sbcs %R0, %R1, #0\n"
1482 +
1483 +#ifdef CONFIG_PAX_REFCOUNT
1484 +" bvc 3f\n"
1485 +" mov %Q0, %Q1\n"
1486 +" mov %R0, %R1\n"
1487 +"2: bkpt 0xf103\n"
1488 +"3:\n"
1489 +#endif
1490 +
1491 " teq %R0, #0\n"
1492 -" bmi 2f\n"
1493 +" bmi 4f\n"
1494 " strexd %1, %0, %H0, [%3]\n"
1495 " teq %1, #0\n"
1496 " bne 1b\n"
1497 -"2:"
1498 +"4:\n"
1499 +
1500 +#ifdef CONFIG_PAX_REFCOUNT
1501 + _ASM_EXTABLE(2b, 4b)
1502 +#endif
1503 +
1504 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1505 : "r" (&v->counter)
1506 : "cc");
1507 @@ -442,13 +841,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1508 " teq %0, %5\n"
1509 " teqeq %H0, %H5\n"
1510 " moveq %1, #0\n"
1511 -" beq 2f\n"
1512 +" beq 4f\n"
1513 " adds %Q0, %Q0, %Q6\n"
1514 -" adc %R0, %R0, %R6\n"
1515 +" adcs %R0, %R0, %R6\n"
1516 +
1517 +#ifdef CONFIG_PAX_REFCOUNT
1518 +" bvc 3f\n"
1519 +"2: bkpt 0xf103\n"
1520 +"3:\n"
1521 +#endif
1522 +
1523 " strexd %2, %0, %H0, [%4]\n"
1524 " teq %2, #0\n"
1525 " bne 1b\n"
1526 -"2:"
1527 +"4:\n"
1528 +
1529 +#ifdef CONFIG_PAX_REFCOUNT
1530 + _ASM_EXTABLE(2b, 4b)
1531 +#endif
1532 +
1533 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1534 : "r" (&v->counter), "r" (u), "r" (a)
1535 : "cc");
1536 @@ -461,10 +872,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1537
1538 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1539 #define atomic64_inc(v) atomic64_add(1LL, (v))
1540 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1541 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1542 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1543 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1544 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1545 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1546 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1547 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1548 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1549 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1550 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1551 index 75fe66b..ba3dee4 100644
1552 --- a/arch/arm/include/asm/cache.h
1553 +++ b/arch/arm/include/asm/cache.h
1554 @@ -4,8 +4,10 @@
1555 #ifndef __ASMARM_CACHE_H
1556 #define __ASMARM_CACHE_H
1557
1558 +#include <linux/const.h>
1559 +
1560 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1561 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1562 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1563
1564 /*
1565 * Memory returned by kmalloc() may be used for DMA, so we must make
1566 @@ -24,5 +26,6 @@
1567 #endif
1568
1569 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1570 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1571
1572 #endif
1573 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1574 index ab91ebb..2c2afeb 100644
1575 --- a/arch/arm/include/asm/cacheflush.h
1576 +++ b/arch/arm/include/asm/cacheflush.h
1577 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1578 void (*dma_unmap_area)(const void *, size_t, int);
1579
1580 void (*dma_flush_range)(const void *, const void *);
1581 -};
1582 +} __no_const;
1583
1584 /*
1585 * Select the calling method
1586 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1587 index 6dcc164..b14d917 100644
1588 --- a/arch/arm/include/asm/checksum.h
1589 +++ b/arch/arm/include/asm/checksum.h
1590 @@ -37,7 +37,19 @@ __wsum
1591 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1592
1593 __wsum
1594 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1595 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1596 +
1597 +static inline __wsum
1598 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1599 +{
1600 + __wsum ret;
1601 + pax_open_userland();
1602 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1603 + pax_close_userland();
1604 + return ret;
1605 +}
1606 +
1607 +
1608
1609 /*
1610 * Fold a partial checksum without adding pseudo headers
1611 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1612 index df2fbba..63fe3e1 100644
1613 --- a/arch/arm/include/asm/cmpxchg.h
1614 +++ b/arch/arm/include/asm/cmpxchg.h
1615 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1616
1617 #define xchg(ptr,x) \
1618 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1619 +#define xchg_unchecked(ptr,x) \
1620 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1621
1622 #include <asm-generic/cmpxchg-local.h>
1623
1624 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1625 index 6ddbe44..b5e38b1 100644
1626 --- a/arch/arm/include/asm/domain.h
1627 +++ b/arch/arm/include/asm/domain.h
1628 @@ -48,18 +48,37 @@
1629 * Domain types
1630 */
1631 #define DOMAIN_NOACCESS 0
1632 -#define DOMAIN_CLIENT 1
1633 #ifdef CONFIG_CPU_USE_DOMAINS
1634 +#define DOMAIN_USERCLIENT 1
1635 +#define DOMAIN_KERNELCLIENT 1
1636 #define DOMAIN_MANAGER 3
1637 +#define DOMAIN_VECTORS DOMAIN_USER
1638 #else
1639 +
1640 +#ifdef CONFIG_PAX_KERNEXEC
1641 #define DOMAIN_MANAGER 1
1642 +#define DOMAIN_KERNEXEC 3
1643 +#else
1644 +#define DOMAIN_MANAGER 1
1645 +#endif
1646 +
1647 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1648 +#define DOMAIN_USERCLIENT 0
1649 +#define DOMAIN_UDEREF 1
1650 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1651 +#else
1652 +#define DOMAIN_USERCLIENT 1
1653 +#define DOMAIN_VECTORS DOMAIN_USER
1654 +#endif
1655 +#define DOMAIN_KERNELCLIENT 1
1656 +
1657 #endif
1658
1659 #define domain_val(dom,type) ((type) << (2*(dom)))
1660
1661 #ifndef __ASSEMBLY__
1662
1663 -#ifdef CONFIG_CPU_USE_DOMAINS
1664 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1665 static inline void set_domain(unsigned val)
1666 {
1667 asm volatile(
1668 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1669 isb();
1670 }
1671
1672 -#define modify_domain(dom,type) \
1673 - do { \
1674 - struct thread_info *thread = current_thread_info(); \
1675 - unsigned int domain = thread->cpu_domain; \
1676 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1677 - thread->cpu_domain = domain | domain_val(dom, type); \
1678 - set_domain(thread->cpu_domain); \
1679 - } while (0)
1680 -
1681 +extern void modify_domain(unsigned int dom, unsigned int type);
1682 #else
1683 static inline void set_domain(unsigned val) { }
1684 static inline void modify_domain(unsigned dom, unsigned type) { }
1685 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1686 index f4b46d3..abc9b2b 100644
1687 --- a/arch/arm/include/asm/elf.h
1688 +++ b/arch/arm/include/asm/elf.h
1689 @@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1690 the loader. We need to make sure that it is out of the way of the program
1691 that it will "exec", and that there is sufficient room for the brk. */
1692
1693 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1694 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1695 +
1696 +#ifdef CONFIG_PAX_ASLR
1697 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1698 +
1699 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1700 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1701 +#endif
1702
1703 /* When the program starts, a1 contains a pointer to a function to be
1704 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1705 @@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1706 extern void elf_set_personality(const struct elf32_hdr *);
1707 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1708
1709 -struct mm_struct;
1710 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1711 -#define arch_randomize_brk arch_randomize_brk
1712 -
1713 #ifdef CONFIG_MMU
1714 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1715 struct linux_binprm;
1716 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1717 index de53547..52b9a28 100644
1718 --- a/arch/arm/include/asm/fncpy.h
1719 +++ b/arch/arm/include/asm/fncpy.h
1720 @@ -81,7 +81,9 @@
1721 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1722 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1723 \
1724 + pax_open_kernel(); \
1725 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1726 + pax_close_kernel(); \
1727 flush_icache_range((unsigned long)(dest_buf), \
1728 (unsigned long)(dest_buf) + (size)); \
1729 \
1730 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1731 index e42cf59..7b94b8f 100644
1732 --- a/arch/arm/include/asm/futex.h
1733 +++ b/arch/arm/include/asm/futex.h
1734 @@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1735 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1736 return -EFAULT;
1737
1738 + pax_open_userland();
1739 +
1740 smp_mb();
1741 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1742 "1: ldrex %1, [%4]\n"
1743 @@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1744 : "cc", "memory");
1745 smp_mb();
1746
1747 + pax_close_userland();
1748 +
1749 *uval = val;
1750 return ret;
1751 }
1752 @@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1753 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1754 return -EFAULT;
1755
1756 + pax_open_userland();
1757 +
1758 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1759 "1: " TUSER(ldr) " %1, [%4]\n"
1760 " teq %1, %2\n"
1761 @@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1762 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1763 : "cc", "memory");
1764
1765 + pax_close_userland();
1766 +
1767 *uval = val;
1768 return ret;
1769 }
1770 @@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1771 return -EFAULT;
1772
1773 pagefault_disable(); /* implies preempt_disable() */
1774 + pax_open_userland();
1775
1776 switch (op) {
1777 case FUTEX_OP_SET:
1778 @@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1779 ret = -ENOSYS;
1780 }
1781
1782 + pax_close_userland();
1783 pagefault_enable(); /* subsumes preempt_enable() */
1784
1785 if (!ret) {
1786 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1787 index 83eb2f7..ed77159 100644
1788 --- a/arch/arm/include/asm/kmap_types.h
1789 +++ b/arch/arm/include/asm/kmap_types.h
1790 @@ -4,6 +4,6 @@
1791 /*
1792 * This is the "bare minimum". AIO seems to require this.
1793 */
1794 -#define KM_TYPE_NR 16
1795 +#define KM_TYPE_NR 17
1796
1797 #endif
1798 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1799 index 9e614a1..3302cca 100644
1800 --- a/arch/arm/include/asm/mach/dma.h
1801 +++ b/arch/arm/include/asm/mach/dma.h
1802 @@ -22,7 +22,7 @@ struct dma_ops {
1803 int (*residue)(unsigned int, dma_t *); /* optional */
1804 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1805 const char *type;
1806 -};
1807 +} __do_const;
1808
1809 struct dma_struct {
1810 void *addr; /* single DMA address */
1811 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1812 index 2fe141f..192dc01 100644
1813 --- a/arch/arm/include/asm/mach/map.h
1814 +++ b/arch/arm/include/asm/mach/map.h
1815 @@ -27,13 +27,16 @@ struct map_desc {
1816 #define MT_MINICLEAN 6
1817 #define MT_LOW_VECTORS 7
1818 #define MT_HIGH_VECTORS 8
1819 -#define MT_MEMORY 9
1820 +#define MT_MEMORY_RWX 9
1821 #define MT_ROM 10
1822 -#define MT_MEMORY_NONCACHED 11
1823 +#define MT_MEMORY_NONCACHED_RX 11
1824 #define MT_MEMORY_DTCM 12
1825 #define MT_MEMORY_ITCM 13
1826 #define MT_MEMORY_SO 14
1827 #define MT_MEMORY_DMA_READY 15
1828 +#define MT_MEMORY_RW 16
1829 +#define MT_MEMORY_RX 17
1830 +#define MT_MEMORY_NONCACHED_RW 18
1831
1832 #ifdef CONFIG_MMU
1833 extern void iotable_init(struct map_desc *, int);
1834 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1835 index f94784f..9a09a4a 100644
1836 --- a/arch/arm/include/asm/outercache.h
1837 +++ b/arch/arm/include/asm/outercache.h
1838 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1839 #endif
1840 void (*set_debug)(unsigned long);
1841 void (*resume)(void);
1842 -};
1843 +} __no_const;
1844
1845 extern struct outer_cache_fns outer_cache;
1846
1847 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1848 index 4355f0e..cd9168e 100644
1849 --- a/arch/arm/include/asm/page.h
1850 +++ b/arch/arm/include/asm/page.h
1851 @@ -23,6 +23,7 @@
1852
1853 #else
1854
1855 +#include <linux/compiler.h>
1856 #include <asm/glue.h>
1857
1858 /*
1859 @@ -114,7 +115,7 @@ struct cpu_user_fns {
1860 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1861 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1862 unsigned long vaddr, struct vm_area_struct *vma);
1863 -};
1864 +} __no_const;
1865
1866 #ifdef MULTI_USER
1867 extern struct cpu_user_fns cpu_user;
1868 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1869 index 78a7793..e3dc06c 100644
1870 --- a/arch/arm/include/asm/pgalloc.h
1871 +++ b/arch/arm/include/asm/pgalloc.h
1872 @@ -17,6 +17,7 @@
1873 #include <asm/processor.h>
1874 #include <asm/cacheflush.h>
1875 #include <asm/tlbflush.h>
1876 +#include <asm/system_info.h>
1877
1878 #define check_pgt_cache() do { } while (0)
1879
1880 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1881 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1882 }
1883
1884 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1885 +{
1886 + pud_populate(mm, pud, pmd);
1887 +}
1888 +
1889 #else /* !CONFIG_ARM_LPAE */
1890
1891 /*
1892 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1893 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1894 #define pmd_free(mm, pmd) do { } while (0)
1895 #define pud_populate(mm,pmd,pte) BUG()
1896 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1897
1898 #endif /* CONFIG_ARM_LPAE */
1899
1900 @@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1901 __free_page(pte);
1902 }
1903
1904 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1905 +{
1906 +#ifdef CONFIG_ARM_LPAE
1907 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1908 +#else
1909 + if (addr & SECTION_SIZE)
1910 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1911 + else
1912 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913 +#endif
1914 + flush_pmd_entry(pmdp);
1915 +}
1916 +
1917 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1918 pmdval_t prot)
1919 {
1920 @@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1921 static inline void
1922 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1923 {
1924 - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1925 + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1926 }
1927 #define pmd_pgtable(pmd) pmd_page(pmd)
1928
1929 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1930 index 5cfba15..f415e1a 100644
1931 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1932 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1933 @@ -20,12 +20,15 @@
1934 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1935 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1936 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1937 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1938 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1939 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1940 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1941 +
1942 /*
1943 * - section
1944 */
1945 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1946 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1947 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1948 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1949 @@ -37,6 +40,7 @@
1950 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1951 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1952 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1953 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1954
1955 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1956 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1957 @@ -66,6 +70,7 @@
1958 * - extended small page/tiny page
1959 */
1960 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1961 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1962 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1963 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1964 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1965 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1966 index 86a659a..70e0120 100644
1967 --- a/arch/arm/include/asm/pgtable-2level.h
1968 +++ b/arch/arm/include/asm/pgtable-2level.h
1969 @@ -126,6 +126,9 @@
1970 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1971 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1972
1973 +/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1974 +#define L_PTE_PXN (_AT(pteval_t, 0))
1975 +
1976 /*
1977 * These are the memory types, defined to be compatible with
1978 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1979 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1980 index 626989f..9d67a33 100644
1981 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1982 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1983 @@ -75,6 +75,7 @@
1984 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1985 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1986 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1987 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1988 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1989
1990 /*
1991 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1992 index 1d15673..04d626a 100644
1993 --- a/arch/arm/include/asm/pgtable-3level.h
1994 +++ b/arch/arm/include/asm/pgtable-3level.h
1995 @@ -82,6 +82,7 @@
1996 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1997 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1998 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1999 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2000 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2001 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2002 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2003 @@ -95,6 +96,7 @@
2004 /*
2005 * To be used in assembly code with the upper page attributes.
2006 */
2007 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
2008 #define L_PTE_XN_HIGH (1 << (54 - 32))
2009 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2010
2011 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2012 index 1571d12..b8a9b43 100644
2013 --- a/arch/arm/include/asm/pgtable.h
2014 +++ b/arch/arm/include/asm/pgtable.h
2015 @@ -33,6 +33,9 @@
2016 #include <asm/pgtable-2level.h>
2017 #endif
2018
2019 +#define ktla_ktva(addr) (addr)
2020 +#define ktva_ktla(addr) (addr)
2021 +
2022 /*
2023 * Just any arbitrary offset to the start of the vmalloc VM area: the
2024 * current 8MB value just means that there will be a 8MB "hole" after the
2025 @@ -48,6 +51,9 @@
2026 #define LIBRARY_TEXT_START 0x0c000000
2027
2028 #ifndef __ASSEMBLY__
2029 +extern pteval_t __supported_pte_mask;
2030 +extern pmdval_t __supported_pmd_mask;
2031 +
2032 extern void __pte_error(const char *file, int line, pte_t);
2033 extern void __pmd_error(const char *file, int line, pmd_t);
2034 extern void __pgd_error(const char *file, int line, pgd_t);
2035 @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2036 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2037 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2038
2039 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
2040 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2041 +
2042 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2043 +#include <asm/domain.h>
2044 +#include <linux/thread_info.h>
2045 +#include <linux/preempt.h>
2046 +
2047 +static inline int test_domain(int domain, int domaintype)
2048 +{
2049 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2050 +}
2051 +#endif
2052 +
2053 +#ifdef CONFIG_PAX_KERNEXEC
2054 +static inline unsigned long pax_open_kernel(void) {
2055 +#ifdef CONFIG_ARM_LPAE
2056 + /* TODO */
2057 +#else
2058 + preempt_disable();
2059 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2060 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2061 +#endif
2062 + return 0;
2063 +}
2064 +
2065 +static inline unsigned long pax_close_kernel(void) {
2066 +#ifdef CONFIG_ARM_LPAE
2067 + /* TODO */
2068 +#else
2069 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2070 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
2071 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2072 + preempt_enable_no_resched();
2073 +#endif
2074 + return 0;
2075 +}
2076 +#else
2077 +static inline unsigned long pax_open_kernel(void) { return 0; }
2078 +static inline unsigned long pax_close_kernel(void) { return 0; }
2079 +#endif
2080 +
2081 /*
2082 * This is the lowest virtual address we can permit any user space
2083 * mapping to be mapped at. This is particularly important for
2084 @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2085 /*
2086 * The pgprot_* and protection_map entries will be fixed up in runtime
2087 * to include the cachable and bufferable bits based on memory policy,
2088 - * as well as any architecture dependent bits like global/ASID and SMP
2089 - * shared mapping bits.
2090 + * as well as any architecture dependent bits like global/ASID, PXN,
2091 + * and SMP shared mapping bits.
2092 */
2093 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2094
2095 @@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2096 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2097 {
2098 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2099 - L_PTE_NONE | L_PTE_VALID;
2100 + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2101 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2102 return pte;
2103 }
2104 diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2105 index c4ae171..ea0c0c2 100644
2106 --- a/arch/arm/include/asm/psci.h
2107 +++ b/arch/arm/include/asm/psci.h
2108 @@ -29,7 +29,7 @@ struct psci_operations {
2109 int (*cpu_off)(struct psci_power_state state);
2110 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2111 int (*migrate)(unsigned long cpuid);
2112 -};
2113 +} __no_const;
2114
2115 extern struct psci_operations psci_ops;
2116 extern struct smp_operations psci_smp_ops;
2117 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2118 index 22a3b9b..7f214ee 100644
2119 --- a/arch/arm/include/asm/smp.h
2120 +++ b/arch/arm/include/asm/smp.h
2121 @@ -112,7 +112,7 @@ struct smp_operations {
2122 int (*cpu_disable)(unsigned int cpu);
2123 #endif
2124 #endif
2125 -};
2126 +} __no_const;
2127
2128 /*
2129 * set platform specific SMP operations
2130 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2131 index 71a06b2..8bb9ae1 100644
2132 --- a/arch/arm/include/asm/thread_info.h
2133 +++ b/arch/arm/include/asm/thread_info.h
2134 @@ -88,9 +88,9 @@ struct thread_info {
2135 .flags = 0, \
2136 .preempt_count = INIT_PREEMPT_COUNT, \
2137 .addr_limit = KERNEL_DS, \
2138 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2139 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2140 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2141 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2142 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2143 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2144 .restart_block = { \
2145 .fn = do_no_restart_syscall, \
2146 }, \
2147 @@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2148 #define TIF_SYSCALL_AUDIT 9
2149 #define TIF_SYSCALL_TRACEPOINT 10
2150 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2151 -#define TIF_NOHZ 12 /* in adaptive nohz mode */
2152 +/* within 8 bits of TIF_SYSCALL_TRACE
2153 + * to meet flexible second operand requirements
2154 + */
2155 +#define TIF_GRSEC_SETXID 12
2156 +#define TIF_NOHZ 13 /* in adaptive nohz mode */
2157 #define TIF_USING_IWMMXT 17
2158 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2159 #define TIF_RESTORE_SIGMASK 20
2160 @@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2161 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2162 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2163 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2164 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2165
2166 /* Checks for any syscall work in entry-common.S */
2167 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2168 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2169 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2170
2171 /*
2172 * Change these and you break ASM code in entry-common.S
2173 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2174 index 72abdc5..35acac1 100644
2175 --- a/arch/arm/include/asm/uaccess.h
2176 +++ b/arch/arm/include/asm/uaccess.h
2177 @@ -18,6 +18,7 @@
2178 #include <asm/domain.h>
2179 #include <asm/unified.h>
2180 #include <asm/compiler.h>
2181 +#include <asm/pgtable.h>
2182
2183 #if __LINUX_ARM_ARCH__ < 6
2184 #include <asm-generic/uaccess-unaligned.h>
2185 @@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2186 static inline void set_fs(mm_segment_t fs)
2187 {
2188 current_thread_info()->addr_limit = fs;
2189 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2190 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2191 }
2192
2193 #define segment_eq(a,b) ((a) == (b))
2194
2195 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
2196 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2197 +
2198 +static inline void pax_open_userland(void)
2199 +{
2200 +
2201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2202 + if (segment_eq(get_fs(), USER_DS)) {
2203 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2204 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2205 + }
2206 +#endif
2207 +
2208 +}
2209 +
2210 +static inline void pax_close_userland(void)
2211 +{
2212 +
2213 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2214 + if (segment_eq(get_fs(), USER_DS)) {
2215 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2216 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2217 + }
2218 +#endif
2219 +
2220 +}
2221 +
2222 #define __addr_ok(addr) ({ \
2223 unsigned long flag; \
2224 __asm__("cmp %2, %0; movlo %0, #0" \
2225 @@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2226
2227 #define get_user(x,p) \
2228 ({ \
2229 + int __e; \
2230 might_fault(); \
2231 - __get_user_check(x,p); \
2232 + pax_open_userland(); \
2233 + __e = __get_user_check(x,p); \
2234 + pax_close_userland(); \
2235 + __e; \
2236 })
2237
2238 extern int __put_user_1(void *, unsigned int);
2239 @@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2240
2241 #define put_user(x,p) \
2242 ({ \
2243 + int __e; \
2244 might_fault(); \
2245 - __put_user_check(x,p); \
2246 + pax_open_userland(); \
2247 + __e = __put_user_check(x,p); \
2248 + pax_close_userland(); \
2249 + __e; \
2250 })
2251
2252 #else /* CONFIG_MMU */
2253 @@ -220,6 +256,7 @@ static inline void set_fs(mm_segment_t fs)
2254
2255 #endif /* CONFIG_MMU */
2256
2257 +#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2258 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2259
2260 #define user_addr_max() \
2261 @@ -237,13 +274,17 @@ static inline void set_fs(mm_segment_t fs)
2262 #define __get_user(x,ptr) \
2263 ({ \
2264 long __gu_err = 0; \
2265 + pax_open_userland(); \
2266 __get_user_err((x),(ptr),__gu_err); \
2267 + pax_close_userland(); \
2268 __gu_err; \
2269 })
2270
2271 #define __get_user_error(x,ptr,err) \
2272 ({ \
2273 + pax_open_userland(); \
2274 __get_user_err((x),(ptr),err); \
2275 + pax_close_userland(); \
2276 (void) 0; \
2277 })
2278
2279 @@ -319,13 +360,17 @@ do { \
2280 #define __put_user(x,ptr) \
2281 ({ \
2282 long __pu_err = 0; \
2283 + pax_open_userland(); \
2284 __put_user_err((x),(ptr),__pu_err); \
2285 + pax_close_userland(); \
2286 __pu_err; \
2287 })
2288
2289 #define __put_user_error(x,ptr,err) \
2290 ({ \
2291 + pax_open_userland(); \
2292 __put_user_err((x),(ptr),err); \
2293 + pax_close_userland(); \
2294 (void) 0; \
2295 })
2296
2297 @@ -425,11 +470,44 @@ do { \
2298
2299
2300 #ifdef CONFIG_MMU
2301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2305 +
2306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2307 +{
2308 + unsigned long ret;
2309 +
2310 + check_object_size(to, n, false);
2311 + pax_open_userland();
2312 + ret = ___copy_from_user(to, from, n);
2313 + pax_close_userland();
2314 + return ret;
2315 +}
2316 +
2317 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2318 +{
2319 + unsigned long ret;
2320 +
2321 + check_object_size(from, n, true);
2322 + pax_open_userland();
2323 + ret = ___copy_to_user(to, from, n);
2324 + pax_close_userland();
2325 + return ret;
2326 +}
2327 +
2328 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2329 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2330 +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2331 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2332 +
2333 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2334 +{
2335 + unsigned long ret;
2336 + pax_open_userland();
2337 + ret = ___clear_user(addr, n);
2338 + pax_close_userland();
2339 + return ret;
2340 +}
2341 +
2342 #else
2343 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2344 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2345 @@ -438,6 +516,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2346
2347 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2348 {
2349 + if ((long)n < 0)
2350 + return n;
2351 +
2352 if (access_ok(VERIFY_READ, from, n))
2353 n = __copy_from_user(to, from, n);
2354 else /* security hole - plug it */
2355 @@ -447,6 +528,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2356
2357 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2358 {
2359 + if ((long)n < 0)
2360 + return n;
2361 +
2362 if (access_ok(VERIFY_WRITE, to, n))
2363 n = __copy_to_user(to, from, n);
2364 return n;
2365 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2366 index 5af0ed1..cea83883 100644
2367 --- a/arch/arm/include/uapi/asm/ptrace.h
2368 +++ b/arch/arm/include/uapi/asm/ptrace.h
2369 @@ -92,7 +92,7 @@
2370 * ARMv7 groups of PSR bits
2371 */
2372 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2373 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2374 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2375 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2376 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2377
2378 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2379 index 1f031dd..d9b5e4a 100644
2380 --- a/arch/arm/kernel/armksyms.c
2381 +++ b/arch/arm/kernel/armksyms.c
2382 @@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2383
2384 /* networking */
2385 EXPORT_SYMBOL(csum_partial);
2386 -EXPORT_SYMBOL(csum_partial_copy_from_user);
2387 +EXPORT_SYMBOL(__csum_partial_copy_from_user);
2388 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2389 EXPORT_SYMBOL(__csum_ipv6_magic);
2390
2391 @@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2392 #ifdef CONFIG_MMU
2393 EXPORT_SYMBOL(copy_page);
2394
2395 -EXPORT_SYMBOL(__copy_from_user);
2396 -EXPORT_SYMBOL(__copy_to_user);
2397 -EXPORT_SYMBOL(__clear_user);
2398 +EXPORT_SYMBOL(___copy_from_user);
2399 +EXPORT_SYMBOL(___copy_to_user);
2400 +EXPORT_SYMBOL(___clear_user);
2401
2402 EXPORT_SYMBOL(__get_user_1);
2403 EXPORT_SYMBOL(__get_user_2);
2404 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2405 index b3fb8c9..59cfab2 100644
2406 --- a/arch/arm/kernel/entry-armv.S
2407 +++ b/arch/arm/kernel/entry-armv.S
2408 @@ -47,6 +47,87 @@
2409 9997:
2410 .endm
2411
2412 + .macro pax_enter_kernel
2413 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2414 + @ make aligned space for saved DACR
2415 + sub sp, sp, #8
2416 + @ save regs
2417 + stmdb sp!, {r1, r2}
2418 + @ read DACR from cpu_domain into r1
2419 + mov r2, sp
2420 + @ assume 8K pages, since we have to split the immediate in two
2421 + bic r2, r2, #(0x1fc0)
2422 + bic r2, r2, #(0x3f)
2423 + ldr r1, [r2, #TI_CPU_DOMAIN]
2424 + @ store old DACR on stack
2425 + str r1, [sp, #8]
2426 +#ifdef CONFIG_PAX_KERNEXEC
2427 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2428 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2429 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2430 +#endif
2431 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2432 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2433 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2434 +#endif
2435 + @ write r1 to current_thread_info()->cpu_domain
2436 + str r1, [r2, #TI_CPU_DOMAIN]
2437 + @ write r1 to DACR
2438 + mcr p15, 0, r1, c3, c0, 0
2439 + @ instruction sync
2440 + instr_sync
2441 + @ restore regs
2442 + ldmia sp!, {r1, r2}
2443 +#endif
2444 + .endm
2445 +
2446 + .macro pax_open_userland
2447 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2448 + @ save regs
2449 + stmdb sp!, {r0, r1}
2450 + @ read DACR from cpu_domain into r1
2451 + mov r0, sp
2452 + @ assume 8K pages, since we have to split the immediate in two
2453 + bic r0, r0, #(0x1fc0)
2454 + bic r0, r0, #(0x3f)
2455 + ldr r1, [r0, #TI_CPU_DOMAIN]
2456 + @ set current DOMAIN_USER to DOMAIN_CLIENT
2457 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2458 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2459 + @ write r1 to current_thread_info()->cpu_domain
2460 + str r1, [r0, #TI_CPU_DOMAIN]
2461 + @ write r1 to DACR
2462 + mcr p15, 0, r1, c3, c0, 0
2463 + @ instruction sync
2464 + instr_sync
2465 + @ restore regs
2466 + ldmia sp!, {r0, r1}
2467 +#endif
2468 + .endm
2469 +
2470 + .macro pax_close_userland
2471 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2472 + @ save regs
2473 + stmdb sp!, {r0, r1}
2474 + @ read DACR from cpu_domain into r1
2475 + mov r0, sp
2476 + @ assume 8K pages, since we have to split the immediate in two
2477 + bic r0, r0, #(0x1fc0)
2478 + bic r0, r0, #(0x3f)
2479 + ldr r1, [r0, #TI_CPU_DOMAIN]
2480 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2481 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2482 + @ write r1 to current_thread_info()->cpu_domain
2483 + str r1, [r0, #TI_CPU_DOMAIN]
2484 + @ write r1 to DACR
2485 + mcr p15, 0, r1, c3, c0, 0
2486 + @ instruction sync
2487 + instr_sync
2488 + @ restore regs
2489 + ldmia sp!, {r0, r1}
2490 +#endif
2491 + .endm
2492 +
2493 .macro pabt_helper
2494 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2495 #ifdef MULTI_PABORT
2496 @@ -89,11 +170,15 @@
2497 * Invalid mode handlers
2498 */
2499 .macro inv_entry, reason
2500 +
2501 + pax_enter_kernel
2502 +
2503 sub sp, sp, #S_FRAME_SIZE
2504 ARM( stmib sp, {r1 - lr} )
2505 THUMB( stmia sp, {r0 - r12} )
2506 THUMB( str sp, [sp, #S_SP] )
2507 THUMB( str lr, [sp, #S_LR] )
2508 +
2509 mov r1, #\reason
2510 .endm
2511
2512 @@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2513 .macro svc_entry, stack_hole=0
2514 UNWIND(.fnstart )
2515 UNWIND(.save {r0 - pc} )
2516 +
2517 + pax_enter_kernel
2518 +
2519 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2520 +
2521 #ifdef CONFIG_THUMB2_KERNEL
2522 SPFIX( str r0, [sp] ) @ temporarily saved
2523 SPFIX( mov r0, sp )
2524 @@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2525 ldmia r0, {r3 - r5}
2526 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2527 mov r6, #-1 @ "" "" "" ""
2528 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2529 + @ offset sp by 8 as done in pax_enter_kernel
2530 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2531 +#else
2532 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2533 +#endif
2534 SPFIX( addeq r2, r2, #4 )
2535 str r3, [sp, #-4]! @ save the "real" r0 copied
2536 @ from the exception stack
2537 @@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2538 .macro usr_entry
2539 UNWIND(.fnstart )
2540 UNWIND(.cantunwind ) @ don't unwind the user space
2541 +
2542 + pax_enter_kernel_user
2543 +
2544 sub sp, sp, #S_FRAME_SIZE
2545 ARM( stmib sp, {r1 - r12} )
2546 THUMB( stmia sp, {r0 - r12} )
2547 @@ -416,7 +513,9 @@ __und_usr:
2548 tst r3, #PSR_T_BIT @ Thumb mode?
2549 bne __und_usr_thumb
2550 sub r4, r2, #4 @ ARM instr at LR - 4
2551 + pax_open_userland
2552 1: ldrt r0, [r4]
2553 + pax_close_userland
2554 ARM_BE8(rev r0, r0) @ little endian instruction
2555
2556 @ r0 = 32-bit ARM instruction which caused the exception
2557 @@ -450,10 +549,14 @@ __und_usr_thumb:
2558 */
2559 .arch armv6t2
2560 #endif
2561 + pax_open_userland
2562 2: ldrht r5, [r4]
2563 + pax_close_userland
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566 + pax_open_userland
2567 3: ldrht r0, [r2]
2568 + pax_close_userland
2569 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2570 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2571 orr r0, r0, r5, lsl #16
2572 @@ -482,7 +585,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576 -4: mov pc, r9
2577 +4: pax_close_userland
2578 + mov pc, r9
2579 .popsection
2580 .pushsection __ex_table,"a"
2581 .long 1b, 4b
2582 @@ -692,7 +796,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586 -#ifdef CONFIG_CPU_USE_DOMAINS
2587 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591 @@ -701,7 +805,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595 -#ifdef CONFIG_CPU_USE_DOMAINS
2596 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
2600 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601 index a2dcafd..1048b5a 100644
2602 --- a/arch/arm/kernel/entry-common.S
2603 +++ b/arch/arm/kernel/entry-common.S
2604 @@ -10,18 +10,46 @@
2605
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608 +#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611 +#include "entry-header.S"
2612 +
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618 + @ save regs
2619 + stmdb sp!, {r1, r2}
2620 + @ read DACR from cpu_domain into r1
2621 + mov r2, sp
2622 + @ assume 8K pages, since we have to split the immediate in two
2623 + bic r2, r2, #(0x1fc0)
2624 + bic r2, r2, #(0x3f)
2625 + ldr r1, [r2, #TI_CPU_DOMAIN]
2626 +#ifdef CONFIG_PAX_KERNEXEC
2627 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630 +#endif
2631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2632 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2633 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635 +#endif
2636 + @ write r1 to current_thread_info()->cpu_domain
2637 + str r1, [r2, #TI_CPU_DOMAIN]
2638 + @ write r1 to DACR
2639 + mcr p15, 0, r1, c3, c0, 0
2640 + @ instruction sync
2641 + instr_sync
2642 + @ restore regs
2643 + ldmia sp!, {r1, r2}
2644 +#endif
2645 .endm
2646 #endif
2647
2648 -#include "entry-header.S"
2649 -
2650 -
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654 @@ -411,6 +439,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658 + /*
2659 + * do this here to avoid a performance hit of wrapping the code above
2660 + * that directly dereferences userland to parse the SWI instruction
2661 + */
2662 + pax_enter_kernel_user
2663 +
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668 index 39f89fb..d612bd9 100644
2669 --- a/arch/arm/kernel/entry-header.S
2670 +++ b/arch/arm/kernel/entry-header.S
2671 @@ -184,6 +184,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675 + .macro pax_enter_kernel_user
2676 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677 + @ save regs
2678 + stmdb sp!, {r0, r1}
2679 + @ read DACR from cpu_domain into r1
2680 + mov r0, sp
2681 + @ assume 8K pages, since we have to split the immediate in two
2682 + bic r0, r0, #(0x1fc0)
2683 + bic r0, r0, #(0x3f)
2684 + ldr r1, [r0, #TI_CPU_DOMAIN]
2685 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2686 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688 +#endif
2689 +#ifdef CONFIG_PAX_KERNEXEC
2690 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693 +#endif
2694 + @ write r1 to current_thread_info()->cpu_domain
2695 + str r1, [r0, #TI_CPU_DOMAIN]
2696 + @ write r1 to DACR
2697 + mcr p15, 0, r1, c3, c0, 0
2698 + @ instruction sync
2699 + instr_sync
2700 + @ restore regs
2701 + ldmia sp!, {r0, r1}
2702 +#endif
2703 + .endm
2704 +
2705 + .macro pax_exit_kernel
2706 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707 + @ save regs
2708 + stmdb sp!, {r0, r1}
2709 + @ read old DACR from stack into r1
2710 + ldr r1, [sp, #(8 + S_SP)]
2711 + sub r1, r1, #8
2712 + ldr r1, [r1]
2713 +
2714 + @ write r1 to current_thread_info()->cpu_domain
2715 + mov r0, sp
2716 + @ assume 8K pages, since we have to split the immediate in two
2717 + bic r0, r0, #(0x1fc0)
2718 + bic r0, r0, #(0x3f)
2719 + str r1, [r0, #TI_CPU_DOMAIN]
2720 + @ write r1 to DACR
2721 + mcr p15, 0, r1, c3, c0, 0
2722 + @ instruction sync
2723 + instr_sync
2724 + @ restore regs
2725 + ldmia sp!, {r0, r1}
2726 +#endif
2727 + .endm
2728 +
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732 @@ -203,6 +257,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736 +
2737 + pax_exit_kernel
2738 +
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6)
2741 ldr r0, [sp]
2742 @@ -266,6 +323,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746 +
2747 + pax_exit_kernel
2748 +
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751 clrex @ clear the exclusive monitor
2752 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753 index 918875d..cd5fa27 100644
2754 --- a/arch/arm/kernel/fiq.c
2755 +++ b/arch/arm/kernel/fiq.c
2756 @@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760 + pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762 + pax_close_kernel();
2763 +
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768 index 32f317e..710ae07 100644
2769 --- a/arch/arm/kernel/head.S
2770 +++ b/arch/arm/kernel/head.S
2771 @@ -52,7 +52,9 @@
2772 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2773
2774 .macro pgtbl, rd, phys
2775 - add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2776 + mov \rd, #TEXT_OFFSET
2777 + sub \rd, #PG_DIR_SIZE
2778 + add \rd, \rd, \phys
2779 .endm
2780
2781 /*
2782 @@ -436,7 +438,7 @@ __enable_mmu:
2783 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2784 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2785 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2786 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2787 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2788 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2789 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2790 #endif
2791 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2792 index 45e4781..8eac93d 100644
2793 --- a/arch/arm/kernel/module.c
2794 +++ b/arch/arm/kernel/module.c
2795 @@ -38,12 +38,39 @@
2796 #endif
2797
2798 #ifdef CONFIG_MMU
2799 -void *module_alloc(unsigned long size)
2800 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2801 {
2802 + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2803 + return NULL;
2804 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2805 - GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2806 + GFP_KERNEL, prot, NUMA_NO_NODE,
2807 __builtin_return_address(0));
2808 }
2809 +
2810 +void *module_alloc(unsigned long size)
2811 +{
2812 +
2813 +#ifdef CONFIG_PAX_KERNEXEC
2814 + return __module_alloc(size, PAGE_KERNEL);
2815 +#else
2816 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2817 +#endif
2818 +
2819 +}
2820 +
2821 +#ifdef CONFIG_PAX_KERNEXEC
2822 +void module_free_exec(struct module *mod, void *module_region)
2823 +{
2824 + module_free(mod, module_region);
2825 +}
2826 +EXPORT_SYMBOL(module_free_exec);
2827 +
2828 +void *module_alloc_exec(unsigned long size)
2829 +{
2830 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2831 +}
2832 +EXPORT_SYMBOL(module_alloc_exec);
2833 +#endif
2834 #endif
2835
2836 int
2837 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2838 index 07314af..c46655c 100644
2839 --- a/arch/arm/kernel/patch.c
2840 +++ b/arch/arm/kernel/patch.c
2841 @@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2842 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2843 int size;
2844
2845 + pax_open_kernel();
2846 if (thumb2 && __opcode_is_thumb16(insn)) {
2847 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2848 size = sizeof(u16);
2849 @@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2850 *(u32 *)addr = insn;
2851 size = sizeof(u32);
2852 }
2853 + pax_close_kernel();
2854
2855 flush_icache_range((uintptr_t)(addr),
2856 (uintptr_t)(addr) + size);
2857 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2858 index 92f7b15..7048500 100644
2859 --- a/arch/arm/kernel/process.c
2860 +++ b/arch/arm/kernel/process.c
2861 @@ -217,6 +217,7 @@ void machine_power_off(void)
2862
2863 if (pm_power_off)
2864 pm_power_off();
2865 + BUG();
2866 }
2867
2868 /*
2869 @@ -230,7 +231,7 @@ void machine_power_off(void)
2870 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2871 * to use. Implementing such co-ordination would be essentially impossible.
2872 */
2873 -void machine_restart(char *cmd)
2874 +__noreturn void machine_restart(char *cmd)
2875 {
2876 local_irq_disable();
2877 smp_send_stop();
2878 @@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2879
2880 show_regs_print_info(KERN_DEFAULT);
2881
2882 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2883 - print_symbol("LR is at %s\n", regs->ARM_lr);
2884 + printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2885 + printk("LR is at %pA\n", (void *)regs->ARM_lr);
2886 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2887 "sp : %08lx ip : %08lx fp : %08lx\n",
2888 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2889 @@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2890 return 0;
2891 }
2892
2893 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2894 -{
2895 - unsigned long range_end = mm->brk + 0x02000000;
2896 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2897 -}
2898 -
2899 #ifdef CONFIG_MMU
2900 #ifdef CONFIG_KUSER_HELPERS
2901 /*
2902 @@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2903
2904 static int __init gate_vma_init(void)
2905 {
2906 - gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2907 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2908 return 0;
2909 }
2910 arch_initcall(gate_vma_init);
2911 @@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2912
2913 const char *arch_vma_name(struct vm_area_struct *vma)
2914 {
2915 - return is_gate_vma(vma) ? "[vectors]" :
2916 - (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2917 - "[sigpage]" : NULL;
2918 + return is_gate_vma(vma) ? "[vectors]" : NULL;
2919 }
2920
2921 -static struct page *signal_page;
2922 -extern struct page *get_signal_page(void);
2923 -
2924 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2925 {
2926 struct mm_struct *mm = current->mm;
2927 - unsigned long addr;
2928 - int ret;
2929 -
2930 - if (!signal_page)
2931 - signal_page = get_signal_page();
2932 - if (!signal_page)
2933 - return -ENOMEM;
2934
2935 down_write(&mm->mmap_sem);
2936 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2937 - if (IS_ERR_VALUE(addr)) {
2938 - ret = addr;
2939 - goto up_fail;
2940 - }
2941 -
2942 - ret = install_special_mapping(mm, addr, PAGE_SIZE,
2943 - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2944 - &signal_page);
2945 -
2946 - if (ret == 0)
2947 - mm->context.sigpage = addr;
2948 -
2949 - up_fail:
2950 + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2951 up_write(&mm->mmap_sem);
2952 - return ret;
2953 + return 0;
2954 }
2955 #endif
2956 diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2957 index 4693188..4596c5e 100644
2958 --- a/arch/arm/kernel/psci.c
2959 +++ b/arch/arm/kernel/psci.c
2960 @@ -24,7 +24,7 @@
2961 #include <asm/opcodes-virt.h>
2962 #include <asm/psci.h>
2963
2964 -struct psci_operations psci_ops;
2965 +struct psci_operations psci_ops __read_only;
2966
2967 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2968
2969 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2970 index 0dd3b79..e018f64 100644
2971 --- a/arch/arm/kernel/ptrace.c
2972 +++ b/arch/arm/kernel/ptrace.c
2973 @@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2974 return current_thread_info()->syscall;
2975 }
2976
2977 +#ifdef CONFIG_GRKERNSEC_SETXID
2978 +extern void gr_delayed_cred_worker(void);
2979 +#endif
2980 +
2981 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2982 {
2983 current_thread_info()->syscall = scno;
2984
2985 +#ifdef CONFIG_GRKERNSEC_SETXID
2986 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2987 + gr_delayed_cred_worker();
2988 +#endif
2989 +
2990 /* Do the secure computing check first; failures should be fast. */
2991 if (secure_computing(scno) == -1)
2992 return -1;
2993 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2994 index 987a7f5..ab0c397 100644
2995 --- a/arch/arm/kernel/setup.c
2996 +++ b/arch/arm/kernel/setup.c
2997 @@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high);
2998 unsigned int elf_hwcap __read_mostly;
2999 EXPORT_SYMBOL(elf_hwcap);
3000
3001 +pteval_t __supported_pte_mask __read_only;
3002 +pmdval_t __supported_pmd_mask __read_only;
3003
3004 #ifdef MULTI_CPU
3005 -struct processor processor __read_mostly;
3006 +struct processor processor __read_only;
3007 #endif
3008 #ifdef MULTI_TLB
3009 -struct cpu_tlb_fns cpu_tlb __read_mostly;
3010 +struct cpu_tlb_fns cpu_tlb __read_only;
3011 #endif
3012 #ifdef MULTI_USER
3013 -struct cpu_user_fns cpu_user __read_mostly;
3014 +struct cpu_user_fns cpu_user __read_only;
3015 #endif
3016 #ifdef MULTI_CACHE
3017 -struct cpu_cache_fns cpu_cache __read_mostly;
3018 +struct cpu_cache_fns cpu_cache __read_only;
3019 #endif
3020 #ifdef CONFIG_OUTER_CACHE
3021 -struct outer_cache_fns outer_cache __read_mostly;
3022 +struct outer_cache_fns outer_cache __read_only;
3023 EXPORT_SYMBOL(outer_cache);
3024 #endif
3025
3026 @@ -247,9 +249,13 @@ static int __get_cpu_architecture(void)
3027 asm("mrc p15, 0, %0, c0, c1, 4"
3028 : "=r" (mmfr0));
3029 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3030 - (mmfr0 & 0x000000f0) >= 0x00000030)
3031 + (mmfr0 & 0x000000f0) >= 0x00000030) {
3032 cpu_arch = CPU_ARCH_ARMv7;
3033 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3034 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3035 + __supported_pte_mask |= L_PTE_PXN;
3036 + __supported_pmd_mask |= PMD_PXNTABLE;
3037 + }
3038 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3039 (mmfr0 & 0x000000f0) == 0x00000020)
3040 cpu_arch = CPU_ARCH_ARMv6;
3041 else
3042 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3043 index 04d6388..5115238 100644
3044 --- a/arch/arm/kernel/signal.c
3045 +++ b/arch/arm/kernel/signal.c
3046 @@ -23,8 +23,6 @@
3047
3048 extern const unsigned long sigreturn_codes[7];
3049
3050 -static unsigned long signal_return_offset;
3051 -
3052 #ifdef CONFIG_CRUNCH
3053 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3054 {
3055 @@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3056 * except when the MPU has protected the vectors
3057 * page from PL0
3058 */
3059 - retcode = mm->context.sigpage + signal_return_offset +
3060 - (idx << 2) + thumb;
3061 + retcode = mm->context.sigpage + (idx << 2) + thumb;
3062 } else
3063 #endif
3064 {
3065 @@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3066 } while (thread_flags & _TIF_WORK_MASK);
3067 return 0;
3068 }
3069 -
3070 -struct page *get_signal_page(void)
3071 -{
3072 - unsigned long ptr;
3073 - unsigned offset;
3074 - struct page *page;
3075 - void *addr;
3076 -
3077 - page = alloc_pages(GFP_KERNEL, 0);
3078 -
3079 - if (!page)
3080 - return NULL;
3081 -
3082 - addr = page_address(page);
3083 -
3084 - /* Give the signal return code some randomness */
3085 - offset = 0x200 + (get_random_int() & 0x7fc);
3086 - signal_return_offset = offset;
3087 -
3088 - /*
3089 - * Copy signal return handlers into the vector page, and
3090 - * set sigreturn to be a pointer to these.
3091 - */
3092 - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3093 -
3094 - ptr = (unsigned long)addr + offset;
3095 - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3096 -
3097 - return page;
3098 -}
3099 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3100 index dc894ab..f929a0d 100644
3101 --- a/arch/arm/kernel/smp.c
3102 +++ b/arch/arm/kernel/smp.c
3103 @@ -73,7 +73,7 @@ enum ipi_msg_type {
3104
3105 static DECLARE_COMPLETION(cpu_running);
3106
3107 -static struct smp_operations smp_ops;
3108 +static struct smp_operations smp_ops __read_only;
3109
3110 void __init smp_set_ops(struct smp_operations *ops)
3111 {
3112 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3113 index 4636d56..ce4ec3d 100644
3114 --- a/arch/arm/kernel/traps.c
3115 +++ b/arch/arm/kernel/traps.c
3116 @@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3117 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3118 {
3119 #ifdef CONFIG_KALLSYMS
3120 - printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3121 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3122 #else
3123 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3124 #endif
3125 @@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3126 static int die_owner = -1;
3127 static unsigned int die_nest_count;
3128
3129 +extern void gr_handle_kernel_exploit(void);
3130 +
3131 static unsigned long oops_begin(void)
3132 {
3133 int cpu;
3134 @@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3135 panic("Fatal exception in interrupt");
3136 if (panic_on_oops)
3137 panic("Fatal exception");
3138 +
3139 + gr_handle_kernel_exploit();
3140 +
3141 if (signr)
3142 do_exit(signr);
3143 }
3144 @@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3145 * The user helper at 0xffff0fe0 must be used instead.
3146 * (see entry-armv.S for details)
3147 */
3148 + pax_open_kernel();
3149 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3150 + pax_close_kernel();
3151 }
3152 return 0;
3153
3154 @@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base)
3155 kuser_init(vectors_base);
3156
3157 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3158 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3159 +
3160 +#ifndef CONFIG_PAX_MEMORY_UDEREF
3161 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3162 +#endif
3163 +
3164 #else /* ifndef CONFIG_CPU_V7M */
3165 /*
3166 * on V7-M there is no need to copy the vector table to a dedicated
3167 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3168 index 7bcee5c..e2f3249 100644
3169 --- a/arch/arm/kernel/vmlinux.lds.S
3170 +++ b/arch/arm/kernel/vmlinux.lds.S
3171 @@ -8,7 +8,11 @@
3172 #include <asm/thread_info.h>
3173 #include <asm/memory.h>
3174 #include <asm/page.h>
3175 -
3176 +
3177 +#ifdef CONFIG_PAX_KERNEXEC
3178 +#include <asm/pgtable.h>
3179 +#endif
3180 +
3181 #define PROC_INFO \
3182 . = ALIGN(4); \
3183 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3184 @@ -34,7 +38,7 @@
3185 #endif
3186
3187 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3188 - defined(CONFIG_GENERIC_BUG)
3189 + defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3190 #define ARM_EXIT_KEEP(x) x
3191 #define ARM_EXIT_DISCARD(x)
3192 #else
3193 @@ -90,6 +94,11 @@ SECTIONS
3194 _text = .;
3195 HEAD_TEXT
3196 }
3197 +
3198 +#ifdef CONFIG_PAX_KERNEXEC
3199 + . = ALIGN(1<<SECTION_SHIFT);
3200 +#endif
3201 +
3202 .text : { /* Real text segment */
3203 _stext = .; /* Text and read-only data */
3204 __exception_text_start = .;
3205 @@ -112,6 +121,8 @@ SECTIONS
3206 ARM_CPU_KEEP(PROC_INFO)
3207 }
3208
3209 + _etext = .; /* End of text section */
3210 +
3211 RO_DATA(PAGE_SIZE)
3212
3213 . = ALIGN(4);
3214 @@ -142,7 +153,9 @@ SECTIONS
3215
3216 NOTES
3217
3218 - _etext = .; /* End of text and rodata section */
3219 +#ifdef CONFIG_PAX_KERNEXEC
3220 + . = ALIGN(1<<SECTION_SHIFT);
3221 +#endif
3222
3223 #ifndef CONFIG_XIP_KERNEL
3224 . = ALIGN(PAGE_SIZE);
3225 @@ -220,6 +233,11 @@ SECTIONS
3226 . = PAGE_OFFSET + TEXT_OFFSET;
3227 #else
3228 __init_end = .;
3229 +
3230 +#ifdef CONFIG_PAX_KERNEXEC
3231 + . = ALIGN(1<<SECTION_SHIFT);
3232 +#endif
3233 +
3234 . = ALIGN(THREAD_SIZE);
3235 __data_loc = .;
3236 #endif
3237 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3238 index 2a700e0..745b980 100644
3239 --- a/arch/arm/kvm/arm.c
3240 +++ b/arch/arm/kvm/arm.c
3241 @@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
3242 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3243
3244 /* The VMID used in the VTTBR */
3245 -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3246 +static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3247 static u8 kvm_next_vmid;
3248 static DEFINE_SPINLOCK(kvm_vmid_lock);
3249
3250 @@ -397,7 +397,7 @@ void force_vm_exit(const cpumask_t *mask)
3251 */
3252 static bool need_new_vmid_gen(struct kvm *kvm)
3253 {
3254 - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3255 + return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3256 }
3257
3258 /**
3259 @@ -430,7 +430,7 @@ static void update_vttbr(struct kvm *kvm)
3260
3261 /* First user of a new VMID generation? */
3262 if (unlikely(kvm_next_vmid == 0)) {
3263 - atomic64_inc(&kvm_vmid_gen);
3264 + atomic64_inc_unchecked(&kvm_vmid_gen);
3265 kvm_next_vmid = 1;
3266
3267 /*
3268 @@ -447,7 +447,7 @@ static void update_vttbr(struct kvm *kvm)
3269 kvm_call_hyp(__kvm_flush_vm_context);
3270 }
3271
3272 - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3273 + kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3274 kvm->arch.vmid = kvm_next_vmid;
3275 kvm_next_vmid++;
3276
3277 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3278 index 14a0d98..7771a7d 100644
3279 --- a/arch/arm/lib/clear_user.S
3280 +++ b/arch/arm/lib/clear_user.S
3281 @@ -12,14 +12,14 @@
3282
3283 .text
3284
3285 -/* Prototype: int __clear_user(void *addr, size_t sz)
3286 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3287 * Purpose : clear some user memory
3288 * Params : addr - user memory address to clear
3289 * : sz - number of bytes to clear
3290 * Returns : number of bytes NOT cleared
3291 */
3292 ENTRY(__clear_user_std)
3293 -WEAK(__clear_user)
3294 +WEAK(___clear_user)
3295 stmfd sp!, {r1, lr}
3296 mov r2, #0
3297 cmp r1, #4
3298 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3299 USER( strnebt r2, [r0])
3300 mov r0, #0
3301 ldmfd sp!, {r1, pc}
3302 -ENDPROC(__clear_user)
3303 +ENDPROC(___clear_user)
3304 ENDPROC(__clear_user_std)
3305
3306 .pushsection .fixup,"ax"
3307 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3308 index 66a477a..bee61d3 100644
3309 --- a/arch/arm/lib/copy_from_user.S
3310 +++ b/arch/arm/lib/copy_from_user.S
3311 @@ -16,7 +16,7 @@
3312 /*
3313 * Prototype:
3314 *
3315 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3316 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3317 *
3318 * Purpose:
3319 *
3320 @@ -84,11 +84,11 @@
3321
3322 .text
3323
3324 -ENTRY(__copy_from_user)
3325 +ENTRY(___copy_from_user)
3326
3327 #include "copy_template.S"
3328
3329 -ENDPROC(__copy_from_user)
3330 +ENDPROC(___copy_from_user)
3331
3332 .pushsection .fixup,"ax"
3333 .align 0
3334 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3335 index 6ee2f67..d1cce76 100644
3336 --- a/arch/arm/lib/copy_page.S
3337 +++ b/arch/arm/lib/copy_page.S
3338 @@ -10,6 +10,7 @@
3339 * ASM optimised string functions
3340 */
3341 #include <linux/linkage.h>
3342 +#include <linux/const.h>
3343 #include <asm/assembler.h>
3344 #include <asm/asm-offsets.h>
3345 #include <asm/cache.h>
3346 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3347 index d066df6..df28194 100644
3348 --- a/arch/arm/lib/copy_to_user.S
3349 +++ b/arch/arm/lib/copy_to_user.S
3350 @@ -16,7 +16,7 @@
3351 /*
3352 * Prototype:
3353 *
3354 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3355 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3356 *
3357 * Purpose:
3358 *
3359 @@ -88,11 +88,11 @@
3360 .text
3361
3362 ENTRY(__copy_to_user_std)
3363 -WEAK(__copy_to_user)
3364 +WEAK(___copy_to_user)
3365
3366 #include "copy_template.S"
3367
3368 -ENDPROC(__copy_to_user)
3369 +ENDPROC(___copy_to_user)
3370 ENDPROC(__copy_to_user_std)
3371
3372 .pushsection .fixup,"ax"
3373 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3374 index 7d08b43..f7ca7ea 100644
3375 --- a/arch/arm/lib/csumpartialcopyuser.S
3376 +++ b/arch/arm/lib/csumpartialcopyuser.S
3377 @@ -57,8 +57,8 @@
3378 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3379 */
3380
3381 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3382 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3383 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3384 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3385
3386 #include "csumpartialcopygeneric.S"
3387
3388 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3389 index 5306de3..aed6d03 100644
3390 --- a/arch/arm/lib/delay.c
3391 +++ b/arch/arm/lib/delay.c
3392 @@ -28,7 +28,7 @@
3393 /*
3394 * Default to the loop-based delay implementation.
3395 */
3396 -struct arm_delay_ops arm_delay_ops = {
3397 +struct arm_delay_ops arm_delay_ops __read_only = {
3398 .delay = __loop_delay,
3399 .const_udelay = __loop_const_udelay,
3400 .udelay = __loop_udelay,
3401 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3402 index 3e58d71..029817c 100644
3403 --- a/arch/arm/lib/uaccess_with_memcpy.c
3404 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3405 @@ -136,7 +136,7 @@ out:
3406 }
3407
3408 unsigned long
3409 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3410 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3411 {
3412 /*
3413 * This test is stubbed out of the main function above to keep
3414 @@ -190,7 +190,7 @@ out:
3415 return n;
3416 }
3417
3418 -unsigned long __clear_user(void __user *addr, unsigned long n)
3419 +unsigned long ___clear_user(void __user *addr, unsigned long n)
3420 {
3421 /* See rational for this in __copy_to_user() above. */
3422 if (n < 64)
3423 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3424 index f3407a5..bd4256f 100644
3425 --- a/arch/arm/mach-kirkwood/common.c
3426 +++ b/arch/arm/mach-kirkwood/common.c
3427 @@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3428 clk_gate_ops.disable(hw);
3429 }
3430
3431 -static struct clk_ops clk_gate_fn_ops;
3432 +static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3433 +{
3434 + return clk_gate_ops.is_enabled(hw);
3435 +}
3436 +
3437 +static struct clk_ops clk_gate_fn_ops = {
3438 + .enable = clk_gate_fn_enable,
3439 + .disable = clk_gate_fn_disable,
3440 + .is_enabled = clk_gate_fn_is_enabled,
3441 +};
3442
3443 static struct clk __init *clk_register_gate_fn(struct device *dev,
3444 const char *name,
3445 @@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3446 gate_fn->fn_en = fn_en;
3447 gate_fn->fn_dis = fn_dis;
3448
3449 - /* ops is the gate ops, but with our enable/disable functions */
3450 - if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3451 - clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3452 - clk_gate_fn_ops = clk_gate_ops;
3453 - clk_gate_fn_ops.enable = clk_gate_fn_enable;
3454 - clk_gate_fn_ops.disable = clk_gate_fn_disable;
3455 - }
3456 -
3457 clk = clk_register(dev, &gate_fn->gate.hw);
3458
3459 if (IS_ERR(clk))
3460 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3461 index 827d1500..2885dc6 100644
3462 --- a/arch/arm/mach-omap2/board-n8x0.c
3463 +++ b/arch/arm/mach-omap2/board-n8x0.c
3464 @@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3465 }
3466 #endif
3467
3468 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3469 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3470 .late_init = n8x0_menelaus_late_init,
3471 };
3472
3473 diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3474 index ab43755..ccfa231 100644
3475 --- a/arch/arm/mach-omap2/gpmc.c
3476 +++ b/arch/arm/mach-omap2/gpmc.c
3477 @@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3478 };
3479
3480 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3481 -static struct irq_chip gpmc_irq_chip;
3482 static int gpmc_irq_start;
3483
3484 static struct resource gpmc_mem_root;
3485 @@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3486
3487 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3488
3489 +static struct irq_chip gpmc_irq_chip = {
3490 + .name = "gpmc",
3491 + .irq_startup = gpmc_irq_noop_ret,
3492 + .irq_enable = gpmc_irq_enable,
3493 + .irq_disable = gpmc_irq_disable,
3494 + .irq_shutdown = gpmc_irq_noop,
3495 + .irq_ack = gpmc_irq_noop,
3496 + .irq_mask = gpmc_irq_noop,
3497 + .irq_unmask = gpmc_irq_noop,
3498 +
3499 +};
3500 +
3501 static int gpmc_setup_irq(void)
3502 {
3503 int i;
3504 @@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3505 return gpmc_irq_start;
3506 }
3507
3508 - gpmc_irq_chip.name = "gpmc";
3509 - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3510 - gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3511 - gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3512 - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3513 - gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3514 - gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3515 - gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3516 -
3517 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3518 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3519
3520 diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3521 index f991016..145ebeb 100644
3522 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3523 +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524 @@ -84,7 +84,7 @@ struct cpu_pm_ops {
3525 int (*finish_suspend)(unsigned long cpu_state);
3526 void (*resume)(void);
3527 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3528 -};
3529 +} __no_const;
3530
3531 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3532 static struct powerdomain *mpuss_pd;
3533 @@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3534 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3535 {}
3536
3537 -struct cpu_pm_ops omap_pm_ops = {
3538 +static struct cpu_pm_ops omap_pm_ops __read_only = {
3539 .finish_suspend = default_finish_suspend,
3540 .resume = dummy_cpu_resume,
3541 .scu_prepare = dummy_scu_prepare,
3542 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3543 index 3664562..72f85c6 100644
3544 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3545 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3546 @@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3547 return NOTIFY_OK;
3548 }
3549
3550 -static struct notifier_block __refdata irq_hotplug_notifier = {
3551 +static struct notifier_block irq_hotplug_notifier = {
3552 .notifier_call = irq_cpu_hotplug_notify,
3553 };
3554
3555 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3556 index e0a398c..a470fa5 100644
3557 --- a/arch/arm/mach-omap2/omap_device.c
3558 +++ b/arch/arm/mach-omap2/omap_device.c
3559 @@ -508,7 +508,7 @@ void omap_device_delete(struct omap_device *od)
3560 struct platform_device __init *omap_device_build(const char *pdev_name,
3561 int pdev_id,
3562 struct omap_hwmod *oh,
3563 - void *pdata, int pdata_len)
3564 + const void *pdata, int pdata_len)
3565 {
3566 struct omap_hwmod *ohs[] = { oh };
3567
3568 @@ -536,7 +536,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3569 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3570 int pdev_id,
3571 struct omap_hwmod **ohs,
3572 - int oh_cnt, void *pdata,
3573 + int oh_cnt, const void *pdata,
3574 int pdata_len)
3575 {
3576 int ret = -ENOMEM;
3577 diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3578 index 78c02b3..c94109a 100644
3579 --- a/arch/arm/mach-omap2/omap_device.h
3580 +++ b/arch/arm/mach-omap2/omap_device.h
3581 @@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3582 /* Core code interface */
3583
3584 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3585 - struct omap_hwmod *oh, void *pdata,
3586 + struct omap_hwmod *oh, const void *pdata,
3587 int pdata_len);
3588
3589 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3590 struct omap_hwmod **oh, int oh_cnt,
3591 - void *pdata, int pdata_len);
3592 + const void *pdata, int pdata_len);
3593
3594 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3595 struct omap_hwmod **ohs, int oh_cnt);
3596 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3597 index 8a1b5e0..5f30074 100644
3598 --- a/arch/arm/mach-omap2/omap_hwmod.c
3599 +++ b/arch/arm/mach-omap2/omap_hwmod.c
3600 @@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3601 int (*init_clkdm)(struct omap_hwmod *oh);
3602 void (*update_context_lost)(struct omap_hwmod *oh);
3603 int (*get_context_lost)(struct omap_hwmod *oh);
3604 -};
3605 +} __no_const;
3606
3607 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3608 -static struct omap_hwmod_soc_ops soc_ops;
3609 +static struct omap_hwmod_soc_ops soc_ops __read_only;
3610
3611 /* omap_hwmod_list contains all registered struct omap_hwmods */
3612 static LIST_HEAD(omap_hwmod_list);
3613 diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3614 index 95fee54..cfa9cf1 100644
3615 --- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3616 +++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3617 @@ -10,6 +10,7 @@
3618
3619 #include <linux/kernel.h>
3620 #include <linux/init.h>
3621 +#include <asm/pgtable.h>
3622
3623 #include "powerdomain.h"
3624
3625 @@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3626
3627 void __init am43xx_powerdomains_init(void)
3628 {
3629 - omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3630 + pax_open_kernel();
3631 + *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632 + pax_close_kernel();
3633 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3634 pwrdm_register_pwrdms(powerdomains_am43xx);
3635 pwrdm_complete_init();
3636 diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3637 index d15c7bb..b2d1f0c 100644
3638 --- a/arch/arm/mach-omap2/wd_timer.c
3639 +++ b/arch/arm/mach-omap2/wd_timer.c
3640 @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3641 struct omap_hwmod *oh;
3642 char *oh_name = "wd_timer2";
3643 char *dev_name = "omap_wdt";
3644 - struct omap_wd_timer_platform_data pdata;
3645 + static struct omap_wd_timer_platform_data pdata = {
3646 + .read_reset_sources = prm_read_reset_sources
3647 + };
3648
3649 if (!cpu_class_is_omap2() || of_have_populated_dt())
3650 return 0;
3651 @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3652 return -EINVAL;
3653 }
3654
3655 - pdata.read_reset_sources = prm_read_reset_sources;
3656 -
3657 pdev = omap_device_build(dev_name, id, oh, &pdata,
3658 sizeof(struct omap_wd_timer_platform_data));
3659 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3660 diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3661 index b82dcae..44ee5b6 100644
3662 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3663 +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3664 @@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3665 bool entered_lp2 = false;
3666
3667 if (tegra_pending_sgi())
3668 - ACCESS_ONCE(abort_flag) = true;
3669 + ACCESS_ONCE_RW(abort_flag) = true;
3670
3671 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3672
3673 diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3674 index bdb3564..cebb96f 100644
3675 --- a/arch/arm/mach-ux500/setup.h
3676 +++ b/arch/arm/mach-ux500/setup.h
3677 @@ -39,13 +39,6 @@ extern void ux500_timer_init(void);
3678 .type = MT_DEVICE, \
3679 }
3680
3681 -#define __MEM_DEV_DESC(x, sz) { \
3682 - .virtual = IO_ADDRESS(x), \
3683 - .pfn = __phys_to_pfn(x), \
3684 - .length = sz, \
3685 - .type = MT_MEMORY, \
3686 -}
3687 -
3688 extern struct smp_operations ux500_smp_ops;
3689 extern void ux500_cpu_die(unsigned int cpu);
3690
3691 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3692 index 1f8fed9..14d7823 100644
3693 --- a/arch/arm/mm/Kconfig
3694 +++ b/arch/arm/mm/Kconfig
3695 @@ -446,7 +446,7 @@ config CPU_32v5
3696
3697 config CPU_32v6
3698 bool
3699 - select CPU_USE_DOMAINS if CPU_V6 && MMU
3700 + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3701 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3702
3703 config CPU_32v6K
3704 @@ -601,6 +601,7 @@ config CPU_CP15_MPU
3705
3706 config CPU_USE_DOMAINS
3707 bool
3708 + depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3709 help
3710 This option enables or disables the use of domain switching
3711 via the set_fs() function.
3712 @@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
3713 config KUSER_HELPERS
3714 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3715 default y
3716 + depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3717 help
3718 Warning: disabling this option may break user programs.
3719
3720 @@ -812,7 +814,7 @@ config KUSER_HELPERS
3721 See Documentation/arm/kernel_user_helpers.txt for details.
3722
3723 However, the fixed address nature of these helpers can be used
3724 - by ROP (return orientated programming) authors when creating
3725 + by ROP (Return Oriented Programming) authors when creating
3726 exploits.
3727
3728 If all of the binaries and libraries which run on your platform
3729 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3730 index 9240364..a2b8cf3 100644
3731 --- a/arch/arm/mm/alignment.c
3732 +++ b/arch/arm/mm/alignment.c
3733 @@ -212,10 +212,12 @@ union offset_union {
3734 #define __get16_unaligned_check(ins,val,addr) \
3735 do { \
3736 unsigned int err = 0, v, a = addr; \
3737 + pax_open_userland(); \
3738 __get8_unaligned_check(ins,v,a,err); \
3739 val = v << ((BE) ? 8 : 0); \
3740 __get8_unaligned_check(ins,v,a,err); \
3741 val |= v << ((BE) ? 0 : 8); \
3742 + pax_close_userland(); \
3743 if (err) \
3744 goto fault; \
3745 } while (0)
3746 @@ -229,6 +231,7 @@ union offset_union {
3747 #define __get32_unaligned_check(ins,val,addr) \
3748 do { \
3749 unsigned int err = 0, v, a = addr; \
3750 + pax_open_userland(); \
3751 __get8_unaligned_check(ins,v,a,err); \
3752 val = v << ((BE) ? 24 : 0); \
3753 __get8_unaligned_check(ins,v,a,err); \
3754 @@ -237,6 +240,7 @@ union offset_union {
3755 val |= v << ((BE) ? 8 : 16); \
3756 __get8_unaligned_check(ins,v,a,err); \
3757 val |= v << ((BE) ? 0 : 24); \
3758 + pax_close_userland(); \
3759 if (err) \
3760 goto fault; \
3761 } while (0)
3762 @@ -250,6 +254,7 @@ union offset_union {
3763 #define __put16_unaligned_check(ins,val,addr) \
3764 do { \
3765 unsigned int err = 0, v = val, a = addr; \
3766 + pax_open_userland(); \
3767 __asm__( FIRST_BYTE_16 \
3768 ARM( "1: "ins" %1, [%2], #1\n" ) \
3769 THUMB( "1: "ins" %1, [%2]\n" ) \
3770 @@ -269,6 +274,7 @@ union offset_union {
3771 " .popsection\n" \
3772 : "=r" (err), "=&r" (v), "=&r" (a) \
3773 : "0" (err), "1" (v), "2" (a)); \
3774 + pax_close_userland(); \
3775 if (err) \
3776 goto fault; \
3777 } while (0)
3778 @@ -282,6 +288,7 @@ union offset_union {
3779 #define __put32_unaligned_check(ins,val,addr) \
3780 do { \
3781 unsigned int err = 0, v = val, a = addr; \
3782 + pax_open_userland(); \
3783 __asm__( FIRST_BYTE_32 \
3784 ARM( "1: "ins" %1, [%2], #1\n" ) \
3785 THUMB( "1: "ins" %1, [%2]\n" ) \
3786 @@ -311,6 +318,7 @@ union offset_union {
3787 " .popsection\n" \
3788 : "=r" (err), "=&r" (v), "=&r" (a) \
3789 : "0" (err), "1" (v), "2" (a)); \
3790 + pax_close_userland(); \
3791 if (err) \
3792 goto fault; \
3793 } while (0)
3794 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3795 index 447da6f..77a5057 100644
3796 --- a/arch/arm/mm/cache-l2x0.c
3797 +++ b/arch/arm/mm/cache-l2x0.c
3798 @@ -45,7 +45,7 @@ struct l2x0_of_data {
3799 void (*setup)(const struct device_node *, u32 *, u32 *);
3800 void (*save)(void);
3801 struct outer_cache_fns outer_cache;
3802 -};
3803 +} __do_const;
3804
3805 static bool of_init = false;
3806
3807 diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3808 index 84e6f77..0b52f31 100644
3809 --- a/arch/arm/mm/context.c
3810 +++ b/arch/arm/mm/context.c
3811 @@ -43,7 +43,7 @@
3812 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3813
3814 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3815 -static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3816 +static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3817 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3818
3819 static DEFINE_PER_CPU(atomic64_t, active_asids);
3820 @@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
3821 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3822 {
3823 u64 asid = atomic64_read(&mm->context.id);
3824 - u64 generation = atomic64_read(&asid_generation);
3825 + u64 generation = atomic64_read_unchecked(&asid_generation);
3826
3827 if (asid != 0 && is_reserved_asid(asid)) {
3828 /*
3829 @@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3830 */
3831 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3832 if (asid == NUM_USER_ASIDS) {
3833 - generation = atomic64_add_return(ASID_FIRST_VERSION,
3834 + generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3835 &asid_generation);
3836 flush_context(cpu);
3837 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3838 @@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3839 cpu_set_reserved_ttbr0();
3840
3841 asid = atomic64_read(&mm->context.id);
3842 - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3843 + if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3844 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3845 goto switch_mm_fastpath;
3846
3847 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3848 /* Check that our ASID belongs to the current generation. */
3849 asid = atomic64_read(&mm->context.id);
3850 - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3851 + if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3852 asid = new_context(mm, cpu);
3853 atomic64_set(&mm->context.id, asid);
3854 }
3855 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3856 index eb8830a..5360ce7 100644
3857 --- a/arch/arm/mm/fault.c
3858 +++ b/arch/arm/mm/fault.c
3859 @@ -25,6 +25,7 @@
3860 #include <asm/system_misc.h>
3861 #include <asm/system_info.h>
3862 #include <asm/tlbflush.h>
3863 +#include <asm/sections.h>
3864
3865 #include "fault.h"
3866
3867 @@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3868 if (fixup_exception(regs))
3869 return;
3870
3871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3872 + if (addr < TASK_SIZE) {
3873 + if (current->signal->curr_ip)
3874 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3875 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3876 + else
3877 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3878 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3879 + }
3880 +#endif
3881 +
3882 +#ifdef CONFIG_PAX_KERNEXEC
3883 + if ((fsr & FSR_WRITE) &&
3884 + (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3885 + (MODULES_VADDR <= addr && addr < MODULES_END)))
3886 + {
3887 + if (current->signal->curr_ip)
3888 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3889 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3890 + else
3891 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3892 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3893 + }
3894 +#endif
3895 +
3896 /*
3897 * No handler, we'll have to terminate things with extreme prejudice.
3898 */
3899 @@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3900 }
3901 #endif
3902
3903 +#ifdef CONFIG_PAX_PAGEEXEC
3904 + if (fsr & FSR_LNX_PF) {
3905 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3906 + do_group_exit(SIGKILL);
3907 + }
3908 +#endif
3909 +
3910 tsk->thread.address = addr;
3911 tsk->thread.error_code = fsr;
3912 tsk->thread.trap_no = 14;
3913 @@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3914 }
3915 #endif /* CONFIG_MMU */
3916
3917 +#ifdef CONFIG_PAX_PAGEEXEC
3918 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3919 +{
3920 + long i;
3921 +
3922 + printk(KERN_ERR "PAX: bytes at PC: ");
3923 + for (i = 0; i < 20; i++) {
3924 + unsigned char c;
3925 + if (get_user(c, (__force unsigned char __user *)pc+i))
3926 + printk(KERN_CONT "?? ");
3927 + else
3928 + printk(KERN_CONT "%02x ", c);
3929 + }
3930 + printk("\n");
3931 +
3932 + printk(KERN_ERR "PAX: bytes at SP-4: ");
3933 + for (i = -1; i < 20; i++) {
3934 + unsigned long c;
3935 + if (get_user(c, (__force unsigned long __user *)sp+i))
3936 + printk(KERN_CONT "???????? ");
3937 + else
3938 + printk(KERN_CONT "%08lx ", c);
3939 + }
3940 + printk("\n");
3941 +}
3942 +#endif
3943 +
3944 /*
3945 * First Level Translation Fault Handler
3946 *
3947 @@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3948 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3949 struct siginfo info;
3950
3951 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3952 + if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3953 + if (current->signal->curr_ip)
3954 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3955 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3956 + else
3957 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3958 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3959 + goto die;
3960 + }
3961 +#endif
3962 +
3963 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3964 return;
3965
3966 +die:
3967 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3968 inf->name, fsr, addr);
3969
3970 @@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3971 ifsr_info[nr].name = name;
3972 }
3973
3974 +asmlinkage int sys_sigreturn(struct pt_regs *regs);
3975 +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3976 +
3977 asmlinkage void __exception
3978 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3979 {
3980 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3981 struct siginfo info;
3982 + unsigned long pc = instruction_pointer(regs);
3983 +
3984 + if (user_mode(regs)) {
3985 + unsigned long sigpage = current->mm->context.sigpage;
3986 +
3987 + if (sigpage <= pc && pc < sigpage + 7*4) {
3988 + if (pc < sigpage + 3*4)
3989 + sys_sigreturn(regs);
3990 + else
3991 + sys_rt_sigreturn(regs);
3992 + return;
3993 + }
3994 + if (pc == 0xffff0f60UL) {
3995 + /*
3996 + * PaX: __kuser_cmpxchg64 emulation
3997 + */
3998 + // TODO
3999 + //regs->ARM_pc = regs->ARM_lr;
4000 + //return;
4001 + }
4002 + if (pc == 0xffff0fa0UL) {
4003 + /*
4004 + * PaX: __kuser_memory_barrier emulation
4005 + */
4006 + // dmb(); implied by the exception
4007 + regs->ARM_pc = regs->ARM_lr;
4008 + return;
4009 + }
4010 + if (pc == 0xffff0fc0UL) {
4011 + /*
4012 + * PaX: __kuser_cmpxchg emulation
4013 + */
4014 + // TODO
4015 + //long new;
4016 + //int op;
4017 +
4018 + //op = FUTEX_OP_SET << 28;
4019 + //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4020 + //regs->ARM_r0 = old != new;
4021 + //regs->ARM_pc = regs->ARM_lr;
4022 + //return;
4023 + }
4024 + if (pc == 0xffff0fe0UL) {
4025 + /*
4026 + * PaX: __kuser_get_tls emulation
4027 + */
4028 + regs->ARM_r0 = current_thread_info()->tp_value[0];
4029 + regs->ARM_pc = regs->ARM_lr;
4030 + return;
4031 + }
4032 + }
4033 +
4034 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4035 + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4036 + if (current->signal->curr_ip)
4037 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4038 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4039 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4040 + else
4041 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4042 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4043 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4044 + goto die;
4045 + }
4046 +#endif
4047 +
4048 +#ifdef CONFIG_PAX_REFCOUNT
4049 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4050 + unsigned int bkpt;
4051 +
4052 + if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4053 + current->thread.error_code = ifsr;
4054 + current->thread.trap_no = 0;
4055 + pax_report_refcount_overflow(regs);
4056 + fixup_exception(regs);
4057 + return;
4058 + }
4059 + }
4060 +#endif
4061
4062 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4063 return;
4064
4065 +die:
4066 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4067 inf->name, ifsr, addr);
4068
4069 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4070 index cf08bdf..772656c 100644
4071 --- a/arch/arm/mm/fault.h
4072 +++ b/arch/arm/mm/fault.h
4073 @@ -3,6 +3,7 @@
4074
4075 /*
4076 * Fault status register encodings. We steal bit 31 for our own purposes.
4077 + * Set when the FSR value is from an instruction fault.
4078 */
4079 #define FSR_LNX_PF (1 << 31)
4080 #define FSR_WRITE (1 << 11)
4081 @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4082 }
4083 #endif
4084
4085 +/* valid for LPAE and !LPAE */
4086 +static inline int is_xn_fault(unsigned int fsr)
4087 +{
4088 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
4089 +}
4090 +
4091 +static inline int is_domain_fault(unsigned int fsr)
4092 +{
4093 + return ((fsr_fs(fsr) & 0xD) == 0x9);
4094 +}
4095 +
4096 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4097 unsigned long search_exception_table(unsigned long addr);
4098
4099 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4100 index 3e8f106..a0a1fe4 100644
4101 --- a/arch/arm/mm/init.c
4102 +++ b/arch/arm/mm/init.c
4103 @@ -30,6 +30,8 @@
4104 #include <asm/setup.h>
4105 #include <asm/tlb.h>
4106 #include <asm/fixmap.h>
4107 +#include <asm/system_info.h>
4108 +#include <asm/cp15.h>
4109
4110 #include <asm/mach/arch.h>
4111 #include <asm/mach/map.h>
4112 @@ -681,7 +683,46 @@ void free_initmem(void)
4113 {
4114 #ifdef CONFIG_HAVE_TCM
4115 extern char __tcm_start, __tcm_end;
4116 +#endif
4117
4118 +#ifdef CONFIG_PAX_KERNEXEC
4119 + unsigned long addr;
4120 + pgd_t *pgd;
4121 + pud_t *pud;
4122 + pmd_t *pmd;
4123 + int cpu_arch = cpu_architecture();
4124 + unsigned int cr = get_cr();
4125 +
4126 + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4127 + /* make pages tables, etc before .text NX */
4128 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4129 + pgd = pgd_offset_k(addr);
4130 + pud = pud_offset(pgd, addr);
4131 + pmd = pmd_offset(pud, addr);
4132 + __section_update(pmd, addr, PMD_SECT_XN);
4133 + }
4134 + /* make init NX */
4135 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4136 + pgd = pgd_offset_k(addr);
4137 + pud = pud_offset(pgd, addr);
4138 + pmd = pmd_offset(pud, addr);
4139 + __section_update(pmd, addr, PMD_SECT_XN);
4140 + }
4141 + /* make kernel code/rodata RX */
4142 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4143 + pgd = pgd_offset_k(addr);
4144 + pud = pud_offset(pgd, addr);
4145 + pmd = pmd_offset(pud, addr);
4146 +#ifdef CONFIG_ARM_LPAE
4147 + __section_update(pmd, addr, PMD_SECT_RDONLY);
4148 +#else
4149 + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4150 +#endif
4151 + }
4152 + }
4153 +#endif
4154 +
4155 +#ifdef CONFIG_HAVE_TCM
4156 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4157 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4158 #endif
4159 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4160 index f123d6e..04bf569 100644
4161 --- a/arch/arm/mm/ioremap.c
4162 +++ b/arch/arm/mm/ioremap.c
4163 @@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4164 unsigned int mtype;
4165
4166 if (cached)
4167 - mtype = MT_MEMORY;
4168 + mtype = MT_MEMORY_RX;
4169 else
4170 - mtype = MT_MEMORY_NONCACHED;
4171 + mtype = MT_MEMORY_NONCACHED_RX;
4172
4173 return __arm_ioremap_caller(phys_addr, size, mtype,
4174 __builtin_return_address(0));
4175 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4176 index 5e85ed3..b10a7ed 100644
4177 --- a/arch/arm/mm/mmap.c
4178 +++ b/arch/arm/mm/mmap.c
4179 @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4180 struct vm_area_struct *vma;
4181 int do_align = 0;
4182 int aliasing = cache_is_vipt_aliasing();
4183 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4184 struct vm_unmapped_area_info info;
4185
4186 /*
4187 @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4188 if (len > TASK_SIZE)
4189 return -ENOMEM;
4190
4191 +#ifdef CONFIG_PAX_RANDMMAP
4192 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4193 +#endif
4194 +
4195 if (addr) {
4196 if (do_align)
4197 addr = COLOUR_ALIGN(addr, pgoff);
4198 @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4199 addr = PAGE_ALIGN(addr);
4200
4201 vma = find_vma(mm, addr);
4202 - if (TASK_SIZE - len >= addr &&
4203 - (!vma || addr + len <= vma->vm_start))
4204 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4205 return addr;
4206 }
4207
4208 @@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4209 info.high_limit = TASK_SIZE;
4210 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4211 info.align_offset = pgoff << PAGE_SHIFT;
4212 + info.threadstack_offset = offset;
4213 return vm_unmapped_area(&info);
4214 }
4215
4216 @@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4217 unsigned long addr = addr0;
4218 int do_align = 0;
4219 int aliasing = cache_is_vipt_aliasing();
4220 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4221 struct vm_unmapped_area_info info;
4222
4223 /*
4224 @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4225 return addr;
4226 }
4227
4228 +#ifdef CONFIG_PAX_RANDMMAP
4229 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4230 +#endif
4231 +
4232 /* requesting a specific address */
4233 if (addr) {
4234 if (do_align)
4235 @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4236 else
4237 addr = PAGE_ALIGN(addr);
4238 vma = find_vma(mm, addr);
4239 - if (TASK_SIZE - len >= addr &&
4240 - (!vma || addr + len <= vma->vm_start))
4241 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4242 return addr;
4243 }
4244
4245 @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4246 info.high_limit = mm->mmap_base;
4247 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4248 info.align_offset = pgoff << PAGE_SHIFT;
4249 + info.threadstack_offset = offset;
4250 addr = vm_unmapped_area(&info);
4251
4252 /*
4253 @@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4254 {
4255 unsigned long random_factor = 0UL;
4256
4257 +#ifdef CONFIG_PAX_RANDMMAP
4258 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4259 +#endif
4260 +
4261 /* 8 bits of randomness in 20 address space bits */
4262 if ((current->flags & PF_RANDOMIZE) &&
4263 !(current->personality & ADDR_NO_RANDOMIZE))
4264 @@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4265
4266 if (mmap_is_legacy()) {
4267 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4268 +
4269 +#ifdef CONFIG_PAX_RANDMMAP
4270 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4271 + mm->mmap_base += mm->delta_mmap;
4272 +#endif
4273 +
4274 mm->get_unmapped_area = arch_get_unmapped_area;
4275 } else {
4276 mm->mmap_base = mmap_base(random_factor);
4277 +
4278 +#ifdef CONFIG_PAX_RANDMMAP
4279 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4280 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4281 +#endif
4282 +
4283 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4284 }
4285 }
4286 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4287 index 911d433..8580952 100644
4288 --- a/arch/arm/mm/mmu.c
4289 +++ b/arch/arm/mm/mmu.c
4290 @@ -38,6 +38,22 @@
4291 #include "mm.h"
4292 #include "tcm.h"
4293
4294 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4295 +void modify_domain(unsigned int dom, unsigned int type)
4296 +{
4297 + struct thread_info *thread = current_thread_info();
4298 + unsigned int domain = thread->cpu_domain;
4299 + /*
4300 + * DOMAIN_MANAGER might be defined to some other value,
4301 + * use the arch-defined constant
4302 + */
4303 + domain &= ~domain_val(dom, 3);
4304 + thread->cpu_domain = domain | domain_val(dom, type);
4305 + set_domain(thread->cpu_domain);
4306 +}
4307 +EXPORT_SYMBOL(modify_domain);
4308 +#endif
4309 +
4310 /*
4311 * empty_zero_page is a special page that is used for
4312 * zero-initialized data and COW.
4313 @@ -230,11 +246,19 @@ __setup("noalign", noalign_setup);
4314
4315 #endif /* ifdef CONFIG_CPU_CP15 / else */
4316
4317 -#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4318 +#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4319 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4320 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4321
4322 -static struct mem_type mem_types[] = {
4323 +#ifdef CONFIG_PAX_KERNEXEC
4324 +#define L_PTE_KERNEXEC L_PTE_RDONLY
4325 +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4326 +#else
4327 +#define L_PTE_KERNEXEC L_PTE_DIRTY
4328 +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4329 +#endif
4330 +
4331 +static struct mem_type mem_types[] __read_only = {
4332 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4333 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4334 L_PTE_SHARED,
4335 @@ -266,16 +290,16 @@ static struct mem_type mem_types[] = {
4336 [MT_UNCACHED] = {
4337 .prot_pte = PROT_PTE_DEVICE,
4338 .prot_l1 = PMD_TYPE_TABLE,
4339 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4340 + .prot_sect = PROT_SECT_DEVICE,
4341 .domain = DOMAIN_IO,
4342 },
4343 [MT_CACHECLEAN] = {
4344 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4345 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4346 .domain = DOMAIN_KERNEL,
4347 },
4348 #ifndef CONFIG_ARM_LPAE
4349 [MT_MINICLEAN] = {
4350 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4351 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4352 .domain = DOMAIN_KERNEL,
4353 },
4354 #endif
4355 @@ -283,36 +307,54 @@ static struct mem_type mem_types[] = {
4356 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4357 L_PTE_RDONLY,
4358 .prot_l1 = PMD_TYPE_TABLE,
4359 - .domain = DOMAIN_USER,
4360 + .domain = DOMAIN_VECTORS,
4361 },
4362 [MT_HIGH_VECTORS] = {
4363 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4364 L_PTE_USER | L_PTE_RDONLY,
4365 .prot_l1 = PMD_TYPE_TABLE,
4366 - .domain = DOMAIN_USER,
4367 + .domain = DOMAIN_VECTORS,
4368 },
4369 - [MT_MEMORY] = {
4370 + [MT_MEMORY_RWX] = {
4371 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4374 .domain = DOMAIN_KERNEL,
4375 },
4376 + [MT_MEMORY_RW] = {
4377 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4378 + .prot_l1 = PMD_TYPE_TABLE,
4379 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4380 + .domain = DOMAIN_KERNEL,
4381 + },
4382 + [MT_MEMORY_RX] = {
4383 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4384 + .prot_l1 = PMD_TYPE_TABLE,
4385 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4386 + .domain = DOMAIN_KERNEL,
4387 + },
4388 [MT_ROM] = {
4389 - .prot_sect = PMD_TYPE_SECT,
4390 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4391 .domain = DOMAIN_KERNEL,
4392 },
4393 - [MT_MEMORY_NONCACHED] = {
4394 + [MT_MEMORY_NONCACHED_RW] = {
4395 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4396 L_PTE_MT_BUFFERABLE,
4397 .prot_l1 = PMD_TYPE_TABLE,
4398 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4399 .domain = DOMAIN_KERNEL,
4400 },
4401 + [MT_MEMORY_NONCACHED_RX] = {
4402 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4403 + L_PTE_MT_BUFFERABLE,
4404 + .prot_l1 = PMD_TYPE_TABLE,
4405 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4406 + .domain = DOMAIN_KERNEL,
4407 + },
4408 [MT_MEMORY_DTCM] = {
4409 - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4410 - L_PTE_XN,
4411 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4412 .prot_l1 = PMD_TYPE_TABLE,
4413 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4414 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4415 .domain = DOMAIN_KERNEL,
4416 },
4417 [MT_MEMORY_ITCM] = {
4418 @@ -322,10 +364,10 @@ static struct mem_type mem_types[] = {
4419 },
4420 [MT_MEMORY_SO] = {
4421 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4422 - L_PTE_MT_UNCACHED | L_PTE_XN,
4423 + L_PTE_MT_UNCACHED,
4424 .prot_l1 = PMD_TYPE_TABLE,
4425 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4426 - PMD_SECT_UNCACHED | PMD_SECT_XN,
4427 + PMD_SECT_UNCACHED,
4428 .domain = DOMAIN_KERNEL,
4429 },
4430 [MT_MEMORY_DMA_READY] = {
4431 @@ -411,9 +453,35 @@ static void __init build_mem_type_table(void)
4432 * to prevent speculative instruction fetches.
4433 */
4434 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4435 + mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4436 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4437 + mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4439 + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4440 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4441 + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4442 +
4443 + /* Mark other regions on ARMv6+ as execute-never */
4444 +
4445 +#ifdef CONFIG_PAX_KERNEXEC
4446 + mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4447 + mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4448 + mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4449 + mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4450 +#ifndef CONFIG_ARM_LPAE
4451 + mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4452 + mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4453 +#endif
4454 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4455 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4456 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4457 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4458 + mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4459 + mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4460 +#endif
4461 +
4462 + mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4463 + mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4464 }
4465 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4466 /*
4467 @@ -475,6 +543,9 @@ static void __init build_mem_type_table(void)
4468 * from SVC mode and no access from userspace.
4469 */
4470 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471 +#ifdef CONFIG_PAX_KERNEXEC
4472 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473 +#endif
4474 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4475 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476 #endif
4477 @@ -492,11 +563,17 @@ static void __init build_mem_type_table(void)
4478 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4479 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4480 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4481 - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4482 - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4483 + mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4484 + mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4485 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4486 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4487 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4488 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4489 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4490 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4491 - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4492 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4493 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4494 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4495 + mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4496 }
4497 }
4498
4499 @@ -507,15 +584,20 @@ static void __init build_mem_type_table(void)
4500 if (cpu_arch >= CPU_ARCH_ARMv6) {
4501 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4502 /* Non-cacheable Normal is XCB = 001 */
4503 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4504 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4505 + PMD_SECT_BUFFERED;
4506 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4507 PMD_SECT_BUFFERED;
4508 } else {
4509 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4510 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4511 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4512 + PMD_SECT_TEX(1);
4513 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4514 PMD_SECT_TEX(1);
4515 }
4516 } else {
4517 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4518 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4519 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4520 }
4521
4522 #ifdef CONFIG_ARM_LPAE
4523 @@ -531,6 +613,8 @@ static void __init build_mem_type_table(void)
4524 vecs_pgprot |= PTE_EXT_AF;
4525 #endif
4526
4527 + user_pgprot |= __supported_pte_mask;
4528 +
4529 for (i = 0; i < 16; i++) {
4530 pteval_t v = pgprot_val(protection_map[i]);
4531 protection_map[i] = __pgprot(v | user_pgprot);
4532 @@ -548,10 +632,15 @@ static void __init build_mem_type_table(void)
4533
4534 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4535 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4536 - mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4537 - mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4538 + mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4539 + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4540 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4541 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4542 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4543 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4544 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4545 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4546 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4547 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4548 mem_types[MT_ROM].prot_sect |= cp->pmd;
4549
4550 switch (cp->pmd) {
4551 @@ -1193,18 +1282,15 @@ void __init arm_mm_memblock_reserve(void)
4552 * called function. This means you can't use any function or debugging
4553 * method which may touch any device, otherwise the kernel _will_ crash.
4554 */
4555 +
4556 +static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4557 +
4558 static void __init devicemaps_init(const struct machine_desc *mdesc)
4559 {
4560 struct map_desc map;
4561 unsigned long addr;
4562 - void *vectors;
4563
4564 - /*
4565 - * Allocate the vector page early.
4566 - */
4567 - vectors = early_alloc(PAGE_SIZE * 2);
4568 -
4569 - early_trap_init(vectors);
4570 + early_trap_init(&vectors);
4571
4572 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4573 pmd_clear(pmd_off_k(addr));
4574 @@ -1244,7 +1330,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4575 * location (0xffff0000). If we aren't using high-vectors, also
4576 * create a mapping at the low-vectors virtual address.
4577 */
4578 - map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4579 + map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4580 map.virtual = 0xffff0000;
4581 map.length = PAGE_SIZE;
4582 #ifdef CONFIG_KUSER_HELPERS
4583 @@ -1316,8 +1402,39 @@ static void __init map_lowmem(void)
4584 map.pfn = __phys_to_pfn(start);
4585 map.virtual = __phys_to_virt(start);
4586 map.length = end - start;
4587 - map.type = MT_MEMORY;
4588
4589 +#ifdef CONFIG_PAX_KERNEXEC
4590 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4591 + struct map_desc kernel;
4592 + struct map_desc initmap;
4593 +
4594 + /* when freeing initmem we will make this RW */
4595 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4596 + initmap.virtual = (unsigned long)__init_begin;
4597 + initmap.length = _sdata - __init_begin;
4598 + initmap.type = MT_MEMORY_RWX;
4599 + create_mapping(&initmap);
4600 +
4601 + /* when freeing initmem we will make this RX */
4602 + kernel.pfn = __phys_to_pfn(__pa(_stext));
4603 + kernel.virtual = (unsigned long)_stext;
4604 + kernel.length = __init_begin - _stext;
4605 + kernel.type = MT_MEMORY_RWX;
4606 + create_mapping(&kernel);
4607 +
4608 + if (map.virtual < (unsigned long)_stext) {
4609 + map.length = (unsigned long)_stext - map.virtual;
4610 + map.type = MT_MEMORY_RWX;
4611 + create_mapping(&map);
4612 + }
4613 +
4614 + map.pfn = __phys_to_pfn(__pa(_sdata));
4615 + map.virtual = (unsigned long)_sdata;
4616 + map.length = end - __pa(_sdata);
4617 + }
4618 +#endif
4619 +
4620 + map.type = MT_MEMORY_RW;
4621 create_mapping(&map);
4622 }
4623 }
4624 diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4625 index a5bc92d..0bb4730 100644
4626 --- a/arch/arm/plat-omap/sram.c
4627 +++ b/arch/arm/plat-omap/sram.c
4628 @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4629 * Looks like we need to preserve some bootloader code at the
4630 * beginning of SRAM for jumping to flash for reboot to work...
4631 */
4632 + pax_open_kernel();
4633 memset_io(omap_sram_base + omap_sram_skip, 0,
4634 omap_sram_size - omap_sram_skip);
4635 + pax_close_kernel();
4636 }
4637 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4638 index ce6d763..cfea917 100644
4639 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4640 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4641 @@ -47,7 +47,7 @@ struct samsung_dma_ops {
4642 int (*started)(unsigned ch);
4643 int (*flush)(unsigned ch);
4644 int (*stop)(unsigned ch);
4645 -};
4646 +} __no_const;
4647
4648 extern void *samsung_dmadev_get_ops(void);
4649 extern void *s3c_dma_get_ops(void);
4650 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4651 index 7ecc2b2..5e56c66 100644
4652 --- a/arch/arm64/include/asm/uaccess.h
4653 +++ b/arch/arm64/include/asm/uaccess.h
4654 @@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4655 flag; \
4656 })
4657
4658 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4659 #define access_ok(type, addr, size) __range_ok(addr, size)
4660
4661 /*
4662 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4663 index c3a58a1..78fbf54 100644
4664 --- a/arch/avr32/include/asm/cache.h
4665 +++ b/arch/avr32/include/asm/cache.h
4666 @@ -1,8 +1,10 @@
4667 #ifndef __ASM_AVR32_CACHE_H
4668 #define __ASM_AVR32_CACHE_H
4669
4670 +#include <linux/const.h>
4671 +
4672 #define L1_CACHE_SHIFT 5
4673 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4674 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4675
4676 /*
4677 * Memory returned by kmalloc() may be used for DMA, so we must make
4678 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4679 index d232888..87c8df1 100644
4680 --- a/arch/avr32/include/asm/elf.h
4681 +++ b/arch/avr32/include/asm/elf.h
4682 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4683 the loader. We need to make sure that it is out of the way of the program
4684 that it will "exec", and that there is sufficient room for the brk. */
4685
4686 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4687 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4688
4689 +#ifdef CONFIG_PAX_ASLR
4690 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4691 +
4692 +#define PAX_DELTA_MMAP_LEN 15
4693 +#define PAX_DELTA_STACK_LEN 15
4694 +#endif
4695
4696 /* This yields a mask that user programs can use to figure out what
4697 instruction set this CPU supports. This could be done in user space,
4698 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4699 index 479330b..53717a8 100644
4700 --- a/arch/avr32/include/asm/kmap_types.h
4701 +++ b/arch/avr32/include/asm/kmap_types.h
4702 @@ -2,9 +2,9 @@
4703 #define __ASM_AVR32_KMAP_TYPES_H
4704
4705 #ifdef CONFIG_DEBUG_HIGHMEM
4706 -# define KM_TYPE_NR 29
4707 +# define KM_TYPE_NR 30
4708 #else
4709 -# define KM_TYPE_NR 14
4710 +# define KM_TYPE_NR 15
4711 #endif
4712
4713 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4714 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4715 index 0eca933..eb78c7b 100644
4716 --- a/arch/avr32/mm/fault.c
4717 +++ b/arch/avr32/mm/fault.c
4718 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4719
4720 int exception_trace = 1;
4721
4722 +#ifdef CONFIG_PAX_PAGEEXEC
4723 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4724 +{
4725 + unsigned long i;
4726 +
4727 + printk(KERN_ERR "PAX: bytes at PC: ");
4728 + for (i = 0; i < 20; i++) {
4729 + unsigned char c;
4730 + if (get_user(c, (unsigned char *)pc+i))
4731 + printk(KERN_CONT "???????? ");
4732 + else
4733 + printk(KERN_CONT "%02x ", c);
4734 + }
4735 + printk("\n");
4736 +}
4737 +#endif
4738 +
4739 /*
4740 * This routine handles page faults. It determines the address and the
4741 * problem, and then passes it off to one of the appropriate routines.
4742 @@ -176,6 +193,16 @@ bad_area:
4743 up_read(&mm->mmap_sem);
4744
4745 if (user_mode(regs)) {
4746 +
4747 +#ifdef CONFIG_PAX_PAGEEXEC
4748 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4749 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4750 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4751 + do_group_exit(SIGKILL);
4752 + }
4753 + }
4754 +#endif
4755 +
4756 if (exception_trace && printk_ratelimit())
4757 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4758 "sp %08lx ecr %lu\n",
4759 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4760 index 568885a..f8008df 100644
4761 --- a/arch/blackfin/include/asm/cache.h
4762 +++ b/arch/blackfin/include/asm/cache.h
4763 @@ -7,6 +7,7 @@
4764 #ifndef __ARCH_BLACKFIN_CACHE_H
4765 #define __ARCH_BLACKFIN_CACHE_H
4766
4767 +#include <linux/const.h>
4768 #include <linux/linkage.h> /* for asmlinkage */
4769
4770 /*
4771 @@ -14,7 +15,7 @@
4772 * Blackfin loads 32 bytes for cache
4773 */
4774 #define L1_CACHE_SHIFT 5
4775 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4776 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4777 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4778
4779 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4780 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4781 index aea2718..3639a60 100644
4782 --- a/arch/cris/include/arch-v10/arch/cache.h
4783 +++ b/arch/cris/include/arch-v10/arch/cache.h
4784 @@ -1,8 +1,9 @@
4785 #ifndef _ASM_ARCH_CACHE_H
4786 #define _ASM_ARCH_CACHE_H
4787
4788 +#include <linux/const.h>
4789 /* Etrax 100LX have 32-byte cache-lines. */
4790 -#define L1_CACHE_BYTES 32
4791 #define L1_CACHE_SHIFT 5
4792 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4793
4794 #endif /* _ASM_ARCH_CACHE_H */
4795 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4796 index 7caf25d..ee65ac5 100644
4797 --- a/arch/cris/include/arch-v32/arch/cache.h
4798 +++ b/arch/cris/include/arch-v32/arch/cache.h
4799 @@ -1,11 +1,12 @@
4800 #ifndef _ASM_CRIS_ARCH_CACHE_H
4801 #define _ASM_CRIS_ARCH_CACHE_H
4802
4803 +#include <linux/const.h>
4804 #include <arch/hwregs/dma.h>
4805
4806 /* A cache-line is 32 bytes. */
4807 -#define L1_CACHE_BYTES 32
4808 #define L1_CACHE_SHIFT 5
4809 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4810
4811 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4812
4813 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4814 index b86329d..6709906 100644
4815 --- a/arch/frv/include/asm/atomic.h
4816 +++ b/arch/frv/include/asm/atomic.h
4817 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4818 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4819 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4820
4821 +#define atomic64_read_unchecked(v) atomic64_read(v)
4822 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4823 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4824 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4825 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4826 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4827 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4828 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4829 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4830 +
4831 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4832 {
4833 int c, old;
4834 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4835 index 2797163..c2a401d 100644
4836 --- a/arch/frv/include/asm/cache.h
4837 +++ b/arch/frv/include/asm/cache.h
4838 @@ -12,10 +12,11 @@
4839 #ifndef __ASM_CACHE_H
4840 #define __ASM_CACHE_H
4841
4842 +#include <linux/const.h>
4843
4844 /* bytes per L1 cache line */
4845 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4846 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4847 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4848
4849 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4850 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4851 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4852 index 43901f2..0d8b865 100644
4853 --- a/arch/frv/include/asm/kmap_types.h
4854 +++ b/arch/frv/include/asm/kmap_types.h
4855 @@ -2,6 +2,6 @@
4856 #ifndef _ASM_KMAP_TYPES_H
4857 #define _ASM_KMAP_TYPES_H
4858
4859 -#define KM_TYPE_NR 17
4860 +#define KM_TYPE_NR 18
4861
4862 #endif
4863 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4864 index 836f147..4cf23f5 100644
4865 --- a/arch/frv/mm/elf-fdpic.c
4866 +++ b/arch/frv/mm/elf-fdpic.c
4867 @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4868 {
4869 struct vm_area_struct *vma;
4870 struct vm_unmapped_area_info info;
4871 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4872
4873 if (len > TASK_SIZE)
4874 return -ENOMEM;
4875 @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4876 if (addr) {
4877 addr = PAGE_ALIGN(addr);
4878 vma = find_vma(current->mm, addr);
4879 - if (TASK_SIZE - len >= addr &&
4880 - (!vma || addr + len <= vma->vm_start))
4881 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4882 goto success;
4883 }
4884
4885 @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4886 info.high_limit = (current->mm->start_stack - 0x00200000);
4887 info.align_mask = 0;
4888 info.align_offset = 0;
4889 + info.threadstack_offset = offset;
4890 addr = vm_unmapped_area(&info);
4891 if (!(addr & ~PAGE_MASK))
4892 goto success;
4893 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4894 index f4ca594..adc72fd6 100644
4895 --- a/arch/hexagon/include/asm/cache.h
4896 +++ b/arch/hexagon/include/asm/cache.h
4897 @@ -21,9 +21,11 @@
4898 #ifndef __ASM_CACHE_H
4899 #define __ASM_CACHE_H
4900
4901 +#include <linux/const.h>
4902 +
4903 /* Bytes per L1 cache line */
4904 -#define L1_CACHE_SHIFT (5)
4905 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4906 +#define L1_CACHE_SHIFT 5
4907 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4908
4909 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4910 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4911 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
4912 index 4e4119b..dd7de0a 100644
4913 --- a/arch/ia64/Kconfig
4914 +++ b/arch/ia64/Kconfig
4915 @@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
4916 config KEXEC
4917 bool "kexec system call"
4918 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
4919 + depends on !GRKERNSEC_KMEM
4920 help
4921 kexec is a system call that implements the ability to shutdown your
4922 current kernel, and to start another kernel. It is like a reboot
4923 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4924 index 6e6fe18..a6ae668 100644
4925 --- a/arch/ia64/include/asm/atomic.h
4926 +++ b/arch/ia64/include/asm/atomic.h
4927 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4928 #define atomic64_inc(v) atomic64_add(1, (v))
4929 #define atomic64_dec(v) atomic64_sub(1, (v))
4930
4931 +#define atomic64_read_unchecked(v) atomic64_read(v)
4932 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4933 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4934 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4935 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4936 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4937 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4938 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4939 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4940 +
4941 /* Atomic operations are already serializing */
4942 #define smp_mb__before_atomic_dec() barrier()
4943 #define smp_mb__after_atomic_dec() barrier()
4944 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4945 index 988254a..e1ee885 100644
4946 --- a/arch/ia64/include/asm/cache.h
4947 +++ b/arch/ia64/include/asm/cache.h
4948 @@ -1,6 +1,7 @@
4949 #ifndef _ASM_IA64_CACHE_H
4950 #define _ASM_IA64_CACHE_H
4951
4952 +#include <linux/const.h>
4953
4954 /*
4955 * Copyright (C) 1998-2000 Hewlett-Packard Co
4956 @@ -9,7 +10,7 @@
4957
4958 /* Bytes per L1 (data) cache line. */
4959 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4960 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4961 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4962
4963 #ifdef CONFIG_SMP
4964 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4965 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4966 index 5a83c5c..4d7f553 100644
4967 --- a/arch/ia64/include/asm/elf.h
4968 +++ b/arch/ia64/include/asm/elf.h
4969 @@ -42,6 +42,13 @@
4970 */
4971 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4972
4973 +#ifdef CONFIG_PAX_ASLR
4974 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4975 +
4976 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4977 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4978 +#endif
4979 +
4980 #define PT_IA_64_UNWIND 0x70000001
4981
4982 /* IA-64 relocations: */
4983 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4984 index 5767cdf..7462574 100644
4985 --- a/arch/ia64/include/asm/pgalloc.h
4986 +++ b/arch/ia64/include/asm/pgalloc.h
4987 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4988 pgd_val(*pgd_entry) = __pa(pud);
4989 }
4990
4991 +static inline void
4992 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4993 +{
4994 + pgd_populate(mm, pgd_entry, pud);
4995 +}
4996 +
4997 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4998 {
4999 return quicklist_alloc(0, GFP_KERNEL, NULL);
5000 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5001 pud_val(*pud_entry) = __pa(pmd);
5002 }
5003
5004 +static inline void
5005 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5006 +{
5007 + pud_populate(mm, pud_entry, pmd);
5008 +}
5009 +
5010 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5011 {
5012 return quicklist_alloc(0, GFP_KERNEL, NULL);
5013 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5014 index 7935115..c0eca6a 100644
5015 --- a/arch/ia64/include/asm/pgtable.h
5016 +++ b/arch/ia64/include/asm/pgtable.h
5017 @@ -12,7 +12,7 @@
5018 * David Mosberger-Tang <davidm@hpl.hp.com>
5019 */
5020
5021 -
5022 +#include <linux/const.h>
5023 #include <asm/mman.h>
5024 #include <asm/page.h>
5025 #include <asm/processor.h>
5026 @@ -142,6 +142,17 @@
5027 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5028 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5029 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5030 +
5031 +#ifdef CONFIG_PAX_PAGEEXEC
5032 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5033 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5034 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5035 +#else
5036 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5037 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5038 +# define PAGE_COPY_NOEXEC PAGE_COPY
5039 +#endif
5040 +
5041 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5042 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5043 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5044 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5045 index 45698cd..e8e2dbc 100644
5046 --- a/arch/ia64/include/asm/spinlock.h
5047 +++ b/arch/ia64/include/asm/spinlock.h
5048 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5049 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5050
5051 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5052 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5053 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5054 }
5055
5056 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5057 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5058 index 449c8c0..3d4b1e9 100644
5059 --- a/arch/ia64/include/asm/uaccess.h
5060 +++ b/arch/ia64/include/asm/uaccess.h
5061 @@ -70,6 +70,7 @@
5062 && ((segment).seg == KERNEL_DS.seg \
5063 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5064 })
5065 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5066 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5067
5068 /*
5069 @@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5070 static inline unsigned long
5071 __copy_to_user (void __user *to, const void *from, unsigned long count)
5072 {
5073 + if (count > INT_MAX)
5074 + return count;
5075 +
5076 + if (!__builtin_constant_p(count))
5077 + check_object_size(from, count, true);
5078 +
5079 return __copy_user(to, (__force void __user *) from, count);
5080 }
5081
5082 static inline unsigned long
5083 __copy_from_user (void *to, const void __user *from, unsigned long count)
5084 {
5085 + if (count > INT_MAX)
5086 + return count;
5087 +
5088 + if (!__builtin_constant_p(count))
5089 + check_object_size(to, count, false);
5090 +
5091 return __copy_user((__force void __user *) to, from, count);
5092 }
5093
5094 @@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5095 ({ \
5096 void __user *__cu_to = (to); \
5097 const void *__cu_from = (from); \
5098 - long __cu_len = (n); \
5099 + unsigned long __cu_len = (n); \
5100 \
5101 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
5102 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5103 + if (!__builtin_constant_p(n)) \
5104 + check_object_size(__cu_from, __cu_len, true); \
5105 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5106 + } \
5107 __cu_len; \
5108 })
5109
5110 @@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5111 ({ \
5112 void *__cu_to = (to); \
5113 const void __user *__cu_from = (from); \
5114 - long __cu_len = (n); \
5115 + unsigned long __cu_len = (n); \
5116 \
5117 __chk_user_ptr(__cu_from); \
5118 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
5119 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5120 + if (!__builtin_constant_p(n)) \
5121 + check_object_size(__cu_to, __cu_len, false); \
5122 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5123 + } \
5124 __cu_len; \
5125 })
5126
5127 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5128 index 24603be..948052d 100644
5129 --- a/arch/ia64/kernel/module.c
5130 +++ b/arch/ia64/kernel/module.c
5131 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5132 void
5133 module_free (struct module *mod, void *module_region)
5134 {
5135 - if (mod && mod->arch.init_unw_table &&
5136 - module_region == mod->module_init) {
5137 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5138 unw_remove_unwind_table(mod->arch.init_unw_table);
5139 mod->arch.init_unw_table = NULL;
5140 }
5141 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5142 }
5143
5144 static inline int
5145 +in_init_rx (const struct module *mod, uint64_t addr)
5146 +{
5147 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5148 +}
5149 +
5150 +static inline int
5151 +in_init_rw (const struct module *mod, uint64_t addr)
5152 +{
5153 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5154 +}
5155 +
5156 +static inline int
5157 in_init (const struct module *mod, uint64_t addr)
5158 {
5159 - return addr - (uint64_t) mod->module_init < mod->init_size;
5160 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5161 +}
5162 +
5163 +static inline int
5164 +in_core_rx (const struct module *mod, uint64_t addr)
5165 +{
5166 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5167 +}
5168 +
5169 +static inline int
5170 +in_core_rw (const struct module *mod, uint64_t addr)
5171 +{
5172 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5173 }
5174
5175 static inline int
5176 in_core (const struct module *mod, uint64_t addr)
5177 {
5178 - return addr - (uint64_t) mod->module_core < mod->core_size;
5179 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5180 }
5181
5182 static inline int
5183 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5184 break;
5185
5186 case RV_BDREL:
5187 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5188 + if (in_init_rx(mod, val))
5189 + val -= (uint64_t) mod->module_init_rx;
5190 + else if (in_init_rw(mod, val))
5191 + val -= (uint64_t) mod->module_init_rw;
5192 + else if (in_core_rx(mod, val))
5193 + val -= (uint64_t) mod->module_core_rx;
5194 + else if (in_core_rw(mod, val))
5195 + val -= (uint64_t) mod->module_core_rw;
5196 break;
5197
5198 case RV_LTV:
5199 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5200 * addresses have been selected...
5201 */
5202 uint64_t gp;
5203 - if (mod->core_size > MAX_LTOFF)
5204 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5205 /*
5206 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5207 * at the end of the module.
5208 */
5209 - gp = mod->core_size - MAX_LTOFF / 2;
5210 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5211 else
5212 - gp = mod->core_size / 2;
5213 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5214 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5215 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5216 mod->arch.gp = gp;
5217 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5218 }
5219 diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5220 index ab33328..f39506c 100644
5221 --- a/arch/ia64/kernel/palinfo.c
5222 +++ b/arch/ia64/kernel/palinfo.c
5223 @@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5224 return NOTIFY_OK;
5225 }
5226
5227 -static struct notifier_block __refdata palinfo_cpu_notifier =
5228 +static struct notifier_block palinfo_cpu_notifier =
5229 {
5230 .notifier_call = palinfo_cpu_callback,
5231 .priority = 0,
5232 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5233 index 41e33f8..65180b2a 100644
5234 --- a/arch/ia64/kernel/sys_ia64.c
5235 +++ b/arch/ia64/kernel/sys_ia64.c
5236 @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5237 unsigned long align_mask = 0;
5238 struct mm_struct *mm = current->mm;
5239 struct vm_unmapped_area_info info;
5240 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5241
5242 if (len > RGN_MAP_LIMIT)
5243 return -ENOMEM;
5244 @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5245 if (REGION_NUMBER(addr) == RGN_HPAGE)
5246 addr = 0;
5247 #endif
5248 +
5249 +#ifdef CONFIG_PAX_RANDMMAP
5250 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5251 + addr = mm->free_area_cache;
5252 + else
5253 +#endif
5254 +
5255 if (!addr)
5256 addr = TASK_UNMAPPED_BASE;
5257
5258 @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5259 info.high_limit = TASK_SIZE;
5260 info.align_mask = align_mask;
5261 info.align_offset = 0;
5262 + info.threadstack_offset = offset;
5263 return vm_unmapped_area(&info);
5264 }
5265
5266 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5267 index 0ccb28f..8992469 100644
5268 --- a/arch/ia64/kernel/vmlinux.lds.S
5269 +++ b/arch/ia64/kernel/vmlinux.lds.S
5270 @@ -198,7 +198,7 @@ SECTIONS {
5271 /* Per-cpu data: */
5272 . = ALIGN(PERCPU_PAGE_SIZE);
5273 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5274 - __phys_per_cpu_start = __per_cpu_load;
5275 + __phys_per_cpu_start = per_cpu_load;
5276 /*
5277 * ensure percpu data fits
5278 * into percpu page size
5279 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5280 index 7225dad..2a7c8256 100644
5281 --- a/arch/ia64/mm/fault.c
5282 +++ b/arch/ia64/mm/fault.c
5283 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5284 return pte_present(pte);
5285 }
5286
5287 +#ifdef CONFIG_PAX_PAGEEXEC
5288 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5289 +{
5290 + unsigned long i;
5291 +
5292 + printk(KERN_ERR "PAX: bytes at PC: ");
5293 + for (i = 0; i < 8; i++) {
5294 + unsigned int c;
5295 + if (get_user(c, (unsigned int *)pc+i))
5296 + printk(KERN_CONT "???????? ");
5297 + else
5298 + printk(KERN_CONT "%08x ", c);
5299 + }
5300 + printk("\n");
5301 +}
5302 +#endif
5303 +
5304 # define VM_READ_BIT 0
5305 # define VM_WRITE_BIT 1
5306 # define VM_EXEC_BIT 2
5307 @@ -151,8 +168,21 @@ retry:
5308 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5309 goto bad_area;
5310
5311 - if ((vma->vm_flags & mask) != mask)
5312 + if ((vma->vm_flags & mask) != mask) {
5313 +
5314 +#ifdef CONFIG_PAX_PAGEEXEC
5315 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5316 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5317 + goto bad_area;
5318 +
5319 + up_read(&mm->mmap_sem);
5320 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5321 + do_group_exit(SIGKILL);
5322 + }
5323 +#endif
5324 +
5325 goto bad_area;
5326 + }
5327
5328 /*
5329 * If for any reason at all we couldn't handle the fault, make
5330 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5331 index 68232db..6ca80af 100644
5332 --- a/arch/ia64/mm/hugetlbpage.c
5333 +++ b/arch/ia64/mm/hugetlbpage.c
5334 @@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5335 unsigned long pgoff, unsigned long flags)
5336 {
5337 struct vm_unmapped_area_info info;
5338 + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5339
5340 if (len > RGN_MAP_LIMIT)
5341 return -ENOMEM;
5342 @@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5343 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5344 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5345 info.align_offset = 0;
5346 + info.threadstack_offset = offset;
5347 return vm_unmapped_area(&info);
5348 }
5349
5350 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5351 index 88504ab..cbb6c9f 100644
5352 --- a/arch/ia64/mm/init.c
5353 +++ b/arch/ia64/mm/init.c
5354 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5355 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5356 vma->vm_end = vma->vm_start + PAGE_SIZE;
5357 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5358 +
5359 +#ifdef CONFIG_PAX_PAGEEXEC
5360 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5361 + vma->vm_flags &= ~VM_EXEC;
5362 +
5363 +#ifdef CONFIG_PAX_MPROTECT
5364 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
5365 + vma->vm_flags &= ~VM_MAYEXEC;
5366 +#endif
5367 +
5368 + }
5369 +#endif
5370 +
5371 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5372 down_write(&current->mm->mmap_sem);
5373 if (insert_vm_struct(current->mm, vma)) {
5374 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5375 index 40b3ee9..8c2c112 100644
5376 --- a/arch/m32r/include/asm/cache.h
5377 +++ b/arch/m32r/include/asm/cache.h
5378 @@ -1,8 +1,10 @@
5379 #ifndef _ASM_M32R_CACHE_H
5380 #define _ASM_M32R_CACHE_H
5381
5382 +#include <linux/const.h>
5383 +
5384 /* L1 cache line size */
5385 #define L1_CACHE_SHIFT 4
5386 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5387 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5388
5389 #endif /* _ASM_M32R_CACHE_H */
5390 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5391 index 82abd15..d95ae5d 100644
5392 --- a/arch/m32r/lib/usercopy.c
5393 +++ b/arch/m32r/lib/usercopy.c
5394 @@ -14,6 +14,9 @@
5395 unsigned long
5396 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5397 {
5398 + if ((long)n < 0)
5399 + return n;
5400 +
5401 prefetch(from);
5402 if (access_ok(VERIFY_WRITE, to, n))
5403 __copy_user(to,from,n);
5404 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5405 unsigned long
5406 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5407 {
5408 + if ((long)n < 0)
5409 + return n;
5410 +
5411 prefetchw(to);
5412 if (access_ok(VERIFY_READ, from, n))
5413 __copy_user_zeroing(to,from,n);
5414 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5415 index 0395c51..5f26031 100644
5416 --- a/arch/m68k/include/asm/cache.h
5417 +++ b/arch/m68k/include/asm/cache.h
5418 @@ -4,9 +4,11 @@
5419 #ifndef __ARCH_M68K_CACHE_H
5420 #define __ARCH_M68K_CACHE_H
5421
5422 +#include <linux/const.h>
5423 +
5424 /* bytes per L1 cache line */
5425 #define L1_CACHE_SHIFT 4
5426 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5427 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5428
5429 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5430
5431 diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5432 index 0424315..defcca9 100644
5433 --- a/arch/metag/mm/hugetlbpage.c
5434 +++ b/arch/metag/mm/hugetlbpage.c
5435 @@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5436 info.high_limit = TASK_SIZE;
5437 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5438 info.align_offset = 0;
5439 + info.threadstack_offset = 0;
5440 return vm_unmapped_area(&info);
5441 }
5442
5443 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5444 index 4efe96a..60e8699 100644
5445 --- a/arch/microblaze/include/asm/cache.h
5446 +++ b/arch/microblaze/include/asm/cache.h
5447 @@ -13,11 +13,12 @@
5448 #ifndef _ASM_MICROBLAZE_CACHE_H
5449 #define _ASM_MICROBLAZE_CACHE_H
5450
5451 +#include <linux/const.h>
5452 #include <asm/registers.h>
5453
5454 #define L1_CACHE_SHIFT 5
5455 /* word-granular cache in microblaze */
5456 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5457 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5458
5459 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5460
5461 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5462 index 650de39..6982b02 100644
5463 --- a/arch/mips/Kconfig
5464 +++ b/arch/mips/Kconfig
5465 @@ -2268,6 +2268,7 @@ source "kernel/Kconfig.preempt"
5466
5467 config KEXEC
5468 bool "Kexec system call"
5469 + depends on !GRKERNSEC_KMEM
5470 help
5471 kexec is a system call that implements the ability to shutdown your
5472 current kernel, and to start another kernel. It is like a reboot
5473 diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5474 index 02f2444..506969c 100644
5475 --- a/arch/mips/cavium-octeon/dma-octeon.c
5476 +++ b/arch/mips/cavium-octeon/dma-octeon.c
5477 @@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5478 if (dma_release_from_coherent(dev, order, vaddr))
5479 return;
5480
5481 - swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5482 + swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5483 }
5484
5485 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5486 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5487 index 7eed2f2..c4e385d 100644
5488 --- a/arch/mips/include/asm/atomic.h
5489 +++ b/arch/mips/include/asm/atomic.h
5490 @@ -21,15 +21,39 @@
5491 #include <asm/cmpxchg.h>
5492 #include <asm/war.h>
5493
5494 +#ifdef CONFIG_GENERIC_ATOMIC64
5495 +#include <asm-generic/atomic64.h>
5496 +#endif
5497 +
5498 #define ATOMIC_INIT(i) { (i) }
5499
5500 +#ifdef CONFIG_64BIT
5501 +#define _ASM_EXTABLE(from, to) \
5502 +" .section __ex_table,\"a\"\n" \
5503 +" .dword " #from ", " #to"\n" \
5504 +" .previous\n"
5505 +#else
5506 +#define _ASM_EXTABLE(from, to) \
5507 +" .section __ex_table,\"a\"\n" \
5508 +" .word " #from ", " #to"\n" \
5509 +" .previous\n"
5510 +#endif
5511 +
5512 /*
5513 * atomic_read - read atomic variable
5514 * @v: pointer of type atomic_t
5515 *
5516 * Atomically reads the value of @v.
5517 */
5518 -#define atomic_read(v) (*(volatile int *)&(v)->counter)
5519 +static inline int atomic_read(const atomic_t *v)
5520 +{
5521 + return (*(volatile const int *) &v->counter);
5522 +}
5523 +
5524 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5525 +{
5526 + return (*(volatile const int *) &v->counter);
5527 +}
5528
5529 /*
5530 * atomic_set - set atomic variable
5531 @@ -38,7 +62,15 @@
5532 *
5533 * Atomically sets the value of @v to @i.
5534 */
5535 -#define atomic_set(v, i) ((v)->counter = (i))
5536 +static inline void atomic_set(atomic_t *v, int i)
5537 +{
5538 + v->counter = i;
5539 +}
5540 +
5541 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5542 +{
5543 + v->counter = i;
5544 +}
5545
5546 /*
5547 * atomic_add - add integer to atomic variable
5548 @@ -47,7 +79,67 @@
5549 *
5550 * Atomically adds @i to @v.
5551 */
5552 -static __inline__ void atomic_add(int i, atomic_t * v)
5553 +static __inline__ void atomic_add(int i, atomic_t *v)
5554 +{
5555 + int temp;
5556 +
5557 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5558 + __asm__ __volatile__(
5559 + " .set mips3 \n"
5560 + "1: ll %0, %1 # atomic_add \n"
5561 +#ifdef CONFIG_PAX_REFCOUNT
5562 + /* Exception on overflow. */
5563 + "2: add %0, %2 \n"
5564 +#else
5565 + " addu %0, %2 \n"
5566 +#endif
5567 + " sc %0, %1 \n"
5568 + " beqzl %0, 1b \n"
5569 +#ifdef CONFIG_PAX_REFCOUNT
5570 + "3: \n"
5571 + _ASM_EXTABLE(2b, 3b)
5572 +#endif
5573 + " .set mips0 \n"
5574 + : "=&r" (temp), "+m" (v->counter)
5575 + : "Ir" (i));
5576 + } else if (kernel_uses_llsc) {
5577 + __asm__ __volatile__(
5578 + " .set mips3 \n"
5579 + "1: ll %0, %1 # atomic_add \n"
5580 +#ifdef CONFIG_PAX_REFCOUNT
5581 + /* Exception on overflow. */
5582 + "2: add %0, %2 \n"
5583 +#else
5584 + " addu %0, %2 \n"
5585 +#endif
5586 + " sc %0, %1 \n"
5587 + " beqz %0, 1b \n"
5588 +#ifdef CONFIG_PAX_REFCOUNT
5589 + "3: \n"
5590 + _ASM_EXTABLE(2b, 3b)
5591 +#endif
5592 + " .set mips0 \n"
5593 + : "=&r" (temp), "+m" (v->counter)
5594 + : "Ir" (i));
5595 + } else {
5596 + unsigned long flags;
5597 +
5598 + raw_local_irq_save(flags);
5599 + __asm__ __volatile__(
5600 +#ifdef CONFIG_PAX_REFCOUNT
5601 + /* Exception on overflow. */
5602 + "1: add %0, %1 \n"
5603 + "2: \n"
5604 + _ASM_EXTABLE(1b, 2b)
5605 +#else
5606 + " addu %0, %1 \n"
5607 +#endif
5608 + : "+r" (v->counter) : "Ir" (i));
5609 + raw_local_irq_restore(flags);
5610 + }
5611 +}
5612 +
5613 +static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5614 {
5615 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5616 int temp;
5617 @@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5618 *
5619 * Atomically subtracts @i from @v.
5620 */
5621 -static __inline__ void atomic_sub(int i, atomic_t * v)
5622 +static __inline__ void atomic_sub(int i, atomic_t *v)
5623 +{
5624 + int temp;
5625 +
5626 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5627 + __asm__ __volatile__(
5628 + " .set mips3 \n"
5629 + "1: ll %0, %1 # atomic64_sub \n"
5630 +#ifdef CONFIG_PAX_REFCOUNT
5631 + /* Exception on overflow. */
5632 + "2: sub %0, %2 \n"
5633 +#else
5634 + " subu %0, %2 \n"
5635 +#endif
5636 + " sc %0, %1 \n"
5637 + " beqzl %0, 1b \n"
5638 +#ifdef CONFIG_PAX_REFCOUNT
5639 + "3: \n"
5640 + _ASM_EXTABLE(2b, 3b)
5641 +#endif
5642 + " .set mips0 \n"
5643 + : "=&r" (temp), "+m" (v->counter)
5644 + : "Ir" (i));
5645 + } else if (kernel_uses_llsc) {
5646 + __asm__ __volatile__(
5647 + " .set mips3 \n"
5648 + "1: ll %0, %1 # atomic64_sub \n"
5649 +#ifdef CONFIG_PAX_REFCOUNT
5650 + /* Exception on overflow. */
5651 + "2: sub %0, %2 \n"
5652 +#else
5653 + " subu %0, %2 \n"
5654 +#endif
5655 + " sc %0, %1 \n"
5656 + " beqz %0, 1b \n"
5657 +#ifdef CONFIG_PAX_REFCOUNT
5658 + "3: \n"
5659 + _ASM_EXTABLE(2b, 3b)
5660 +#endif
5661 + " .set mips0 \n"
5662 + : "=&r" (temp), "+m" (v->counter)
5663 + : "Ir" (i));
5664 + } else {
5665 + unsigned long flags;
5666 +
5667 + raw_local_irq_save(flags);
5668 + __asm__ __volatile__(
5669 +#ifdef CONFIG_PAX_REFCOUNT
5670 + /* Exception on overflow. */
5671 + "1: sub %0, %1 \n"
5672 + "2: \n"
5673 + _ASM_EXTABLE(1b, 2b)
5674 +#else
5675 + " subu %0, %1 \n"
5676 +#endif
5677 + : "+r" (v->counter) : "Ir" (i));
5678 + raw_local_irq_restore(flags);
5679 + }
5680 +}
5681 +
5682 +static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5683 {
5684 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5685 int temp;
5686 @@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5687 /*
5688 * Same as above, but return the result value
5689 */
5690 -static __inline__ int atomic_add_return(int i, atomic_t * v)
5691 +static __inline__ int atomic_add_return(int i, atomic_t *v)
5692 +{
5693 + int result;
5694 + int temp;
5695 +
5696 + smp_mb__before_llsc();
5697 +
5698 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5699 + __asm__ __volatile__(
5700 + " .set mips3 \n"
5701 + "1: ll %1, %2 # atomic_add_return \n"
5702 +#ifdef CONFIG_PAX_REFCOUNT
5703 + "2: add %0, %1, %3 \n"
5704 +#else
5705 + " addu %0, %1, %3 \n"
5706 +#endif
5707 + " sc %0, %2 \n"
5708 + " beqzl %0, 1b \n"
5709 +#ifdef CONFIG_PAX_REFCOUNT
5710 + " b 4f \n"
5711 + " .set noreorder \n"
5712 + "3: b 5f \n"
5713 + " move %0, %1 \n"
5714 + " .set reorder \n"
5715 + _ASM_EXTABLE(2b, 3b)
5716 +#endif
5717 + "4: addu %0, %1, %3 \n"
5718 +#ifdef CONFIG_PAX_REFCOUNT
5719 + "5: \n"
5720 +#endif
5721 + " .set mips0 \n"
5722 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5723 + : "Ir" (i));
5724 + } else if (kernel_uses_llsc) {
5725 + __asm__ __volatile__(
5726 + " .set mips3 \n"
5727 + "1: ll %1, %2 # atomic_add_return \n"
5728 +#ifdef CONFIG_PAX_REFCOUNT
5729 + "2: add %0, %1, %3 \n"
5730 +#else
5731 + " addu %0, %1, %3 \n"
5732 +#endif
5733 + " sc %0, %2 \n"
5734 + " bnez %0, 4f \n"
5735 + " b 1b \n"
5736 +#ifdef CONFIG_PAX_REFCOUNT
5737 + " .set noreorder \n"
5738 + "3: b 5f \n"
5739 + " move %0, %1 \n"
5740 + " .set reorder \n"
5741 + _ASM_EXTABLE(2b, 3b)
5742 +#endif
5743 + "4: addu %0, %1, %3 \n"
5744 +#ifdef CONFIG_PAX_REFCOUNT
5745 + "5: \n"
5746 +#endif
5747 + " .set mips0 \n"
5748 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5749 + : "Ir" (i));
5750 + } else {
5751 + unsigned long flags;
5752 +
5753 + raw_local_irq_save(flags);
5754 + __asm__ __volatile__(
5755 + " lw %0, %1 \n"
5756 +#ifdef CONFIG_PAX_REFCOUNT
5757 + /* Exception on overflow. */
5758 + "1: add %0, %2 \n"
5759 +#else
5760 + " addu %0, %2 \n"
5761 +#endif
5762 + " sw %0, %1 \n"
5763 +#ifdef CONFIG_PAX_REFCOUNT
5764 + /* Note: Dest reg is not modified on overflow */
5765 + "2: \n"
5766 + _ASM_EXTABLE(1b, 2b)
5767 +#endif
5768 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5769 + raw_local_irq_restore(flags);
5770 + }
5771 +
5772 + smp_llsc_mb();
5773 +
5774 + return result;
5775 +}
5776 +
5777 +static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5778 {
5779 int result;
5780
5781 @@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5782 return result;
5783 }
5784
5785 -static __inline__ int atomic_sub_return(int i, atomic_t * v)
5786 +static __inline__ int atomic_sub_return(int i, atomic_t *v)
5787 +{
5788 + int result;
5789 + int temp;
5790 +
5791 + smp_mb__before_llsc();
5792 +
5793 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5794 + __asm__ __volatile__(
5795 + " .set mips3 \n"
5796 + "1: ll %1, %2 # atomic_sub_return \n"
5797 +#ifdef CONFIG_PAX_REFCOUNT
5798 + "2: sub %0, %1, %3 \n"
5799 +#else
5800 + " subu %0, %1, %3 \n"
5801 +#endif
5802 + " sc %0, %2 \n"
5803 + " beqzl %0, 1b \n"
5804 +#ifdef CONFIG_PAX_REFCOUNT
5805 + " b 4f \n"
5806 + " .set noreorder \n"
5807 + "3: b 5f \n"
5808 + " move %0, %1 \n"
5809 + " .set reorder \n"
5810 + _ASM_EXTABLE(2b, 3b)
5811 +#endif
5812 + "4: subu %0, %1, %3 \n"
5813 +#ifdef CONFIG_PAX_REFCOUNT
5814 + "5: \n"
5815 +#endif
5816 + " .set mips0 \n"
5817 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5818 + : "Ir" (i), "m" (v->counter)
5819 + : "memory");
5820 + } else if (kernel_uses_llsc) {
5821 + __asm__ __volatile__(
5822 + " .set mips3 \n"
5823 + "1: ll %1, %2 # atomic_sub_return \n"
5824 +#ifdef CONFIG_PAX_REFCOUNT
5825 + "2: sub %0, %1, %3 \n"
5826 +#else
5827 + " subu %0, %1, %3 \n"
5828 +#endif
5829 + " sc %0, %2 \n"
5830 + " bnez %0, 4f \n"
5831 + " b 1b \n"
5832 +#ifdef CONFIG_PAX_REFCOUNT
5833 + " .set noreorder \n"
5834 + "3: b 5f \n"
5835 + " move %0, %1 \n"
5836 + " .set reorder \n"
5837 + _ASM_EXTABLE(2b, 3b)
5838 +#endif
5839 + "4: subu %0, %1, %3 \n"
5840 +#ifdef CONFIG_PAX_REFCOUNT
5841 + "5: \n"
5842 +#endif
5843 + " .set mips0 \n"
5844 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5845 + : "Ir" (i));
5846 + } else {
5847 + unsigned long flags;
5848 +
5849 + raw_local_irq_save(flags);
5850 + __asm__ __volatile__(
5851 + " lw %0, %1 \n"
5852 +#ifdef CONFIG_PAX_REFCOUNT
5853 + /* Exception on overflow. */
5854 + "1: sub %0, %2 \n"
5855 +#else
5856 + " subu %0, %2 \n"
5857 +#endif
5858 + " sw %0, %1 \n"
5859 +#ifdef CONFIG_PAX_REFCOUNT
5860 + /* Note: Dest reg is not modified on overflow */
5861 + "2: \n"
5862 + _ASM_EXTABLE(1b, 2b)
5863 +#endif
5864 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5865 + raw_local_irq_restore(flags);
5866 + }
5867 +
5868 + smp_llsc_mb();
5869 +
5870 + return result;
5871 +}
5872 +static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5873 {
5874 int result;
5875
5876 @@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5877 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5878 * The function returns the old value of @v minus @i.
5879 */
5880 -static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5881 +static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5882 {
5883 int result;
5884
5885 @@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5886 return result;
5887 }
5888
5889 -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5890 -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5891 +static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5892 +{
5893 + return cmpxchg(&v->counter, old, new);
5894 +}
5895 +
5896 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5897 + int new)
5898 +{
5899 + return cmpxchg(&(v->counter), old, new);
5900 +}
5901 +
5902 +static inline int atomic_xchg(atomic_t *v, int new)
5903 +{
5904 + return xchg(&v->counter, new);
5905 +}
5906 +
5907 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5908 +{
5909 + return xchg(&(v->counter), new);
5910 +}
5911
5912 /**
5913 * __atomic_add_unless - add unless the number is a given value
5914 @@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5915
5916 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5917 #define atomic_inc_return(v) atomic_add_return(1, (v))
5918 +static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5919 +{
5920 + return atomic_add_return_unchecked(1, v);
5921 +}
5922
5923 /*
5924 * atomic_sub_and_test - subtract value from variable and test result
5925 @@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5926 * other cases.
5927 */
5928 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5929 +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5930 +{
5931 + return atomic_add_return_unchecked(1, v) == 0;
5932 +}
5933
5934 /*
5935 * atomic_dec_and_test - decrement by 1 and test
5936 @@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5937 * Atomically increments @v by 1.
5938 */
5939 #define atomic_inc(v) atomic_add(1, (v))
5940 +static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5941 +{
5942 + atomic_add_unchecked(1, v);
5943 +}
5944
5945 /*
5946 * atomic_dec - decrement and test
5947 @@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5948 * Atomically decrements @v by 1.
5949 */
5950 #define atomic_dec(v) atomic_sub(1, (v))
5951 +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5952 +{
5953 + atomic_sub_unchecked(1, v);
5954 +}
5955
5956 /*
5957 * atomic_add_negative - add and test if negative
5958 @@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5959 * @v: pointer of type atomic64_t
5960 *
5961 */
5962 -#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5963 +static inline long atomic64_read(const atomic64_t *v)
5964 +{
5965 + return (*(volatile const long *) &v->counter);
5966 +}
5967 +
5968 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5969 +{
5970 + return (*(volatile const long *) &v->counter);
5971 +}
5972
5973 /*
5974 * atomic64_set - set atomic variable
5975 * @v: pointer of type atomic64_t
5976 * @i: required value
5977 */
5978 -#define atomic64_set(v, i) ((v)->counter = (i))
5979 +static inline void atomic64_set(atomic64_t *v, long i)
5980 +{
5981 + v->counter = i;
5982 +}
5983 +
5984 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5985 +{
5986 + v->counter = i;
5987 +}
5988
5989 /*
5990 * atomic64_add - add integer to atomic variable
5991 @@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5992 *
5993 * Atomically adds @i to @v.
5994 */
5995 -static __inline__ void atomic64_add(long i, atomic64_t * v)
5996 +static __inline__ void atomic64_add(long i, atomic64_t *v)
5997 +{
5998 + long temp;
5999 +
6000 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6001 + __asm__ __volatile__(
6002 + " .set mips3 \n"
6003 + "1: lld %0, %1 # atomic64_add \n"
6004 +#ifdef CONFIG_PAX_REFCOUNT
6005 + /* Exception on overflow. */
6006 + "2: dadd %0, %2 \n"
6007 +#else
6008 + " daddu %0, %2 \n"
6009 +#endif
6010 + " scd %0, %1 \n"
6011 + " beqzl %0, 1b \n"
6012 +#ifdef CONFIG_PAX_REFCOUNT
6013 + "3: \n"
6014 + _ASM_EXTABLE(2b, 3b)
6015 +#endif
6016 + " .set mips0 \n"
6017 + : "=&r" (temp), "+m" (v->counter)
6018 + : "Ir" (i));
6019 + } else if (kernel_uses_llsc) {
6020 + __asm__ __volatile__(
6021 + " .set mips3 \n"
6022 + "1: lld %0, %1 # atomic64_add \n"
6023 +#ifdef CONFIG_PAX_REFCOUNT
6024 + /* Exception on overflow. */
6025 + "2: dadd %0, %2 \n"
6026 +#else
6027 + " daddu %0, %2 \n"
6028 +#endif
6029 + " scd %0, %1 \n"
6030 + " beqz %0, 1b \n"
6031 +#ifdef CONFIG_PAX_REFCOUNT
6032 + "3: \n"
6033 + _ASM_EXTABLE(2b, 3b)
6034 +#endif
6035 + " .set mips0 \n"
6036 + : "=&r" (temp), "+m" (v->counter)
6037 + : "Ir" (i));
6038 + } else {
6039 + unsigned long flags;
6040 +
6041 + raw_local_irq_save(flags);
6042 + __asm__ __volatile__(
6043 +#ifdef CONFIG_PAX_REFCOUNT
6044 + /* Exception on overflow. */
6045 + "1: dadd %0, %1 \n"
6046 + "2: \n"
6047 + _ASM_EXTABLE(1b, 2b)
6048 +#else
6049 + " daddu %0, %1 \n"
6050 +#endif
6051 + : "+r" (v->counter) : "Ir" (i));
6052 + raw_local_irq_restore(flags);
6053 + }
6054 +}
6055 +static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6056 {
6057 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6058 long temp;
6059 @@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6060 *
6061 * Atomically subtracts @i from @v.
6062 */
6063 -static __inline__ void atomic64_sub(long i, atomic64_t * v)
6064 +static __inline__ void atomic64_sub(long i, atomic64_t *v)
6065 +{
6066 + long temp;
6067 +
6068 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6069 + __asm__ __volatile__(
6070 + " .set mips3 \n"
6071 + "1: lld %0, %1 # atomic64_sub \n"
6072 +#ifdef CONFIG_PAX_REFCOUNT
6073 + /* Exception on overflow. */
6074 + "2: dsub %0, %2 \n"
6075 +#else
6076 + " dsubu %0, %2 \n"
6077 +#endif
6078 + " scd %0, %1 \n"
6079 + " beqzl %0, 1b \n"
6080 +#ifdef CONFIG_PAX_REFCOUNT
6081 + "3: \n"
6082 + _ASM_EXTABLE(2b, 3b)
6083 +#endif
6084 + " .set mips0 \n"
6085 + : "=&r" (temp), "+m" (v->counter)
6086 + : "Ir" (i));
6087 + } else if (kernel_uses_llsc) {
6088 + __asm__ __volatile__(
6089 + " .set mips3 \n"
6090 + "1: lld %0, %1 # atomic64_sub \n"
6091 +#ifdef CONFIG_PAX_REFCOUNT
6092 + /* Exception on overflow. */
6093 + "2: dsub %0, %2 \n"
6094 +#else
6095 + " dsubu %0, %2 \n"
6096 +#endif
6097 + " scd %0, %1 \n"
6098 + " beqz %0, 1b \n"
6099 +#ifdef CONFIG_PAX_REFCOUNT
6100 + "3: \n"
6101 + _ASM_EXTABLE(2b, 3b)
6102 +#endif
6103 + " .set mips0 \n"
6104 + : "=&r" (temp), "+m" (v->counter)
6105 + : "Ir" (i));
6106 + } else {
6107 + unsigned long flags;
6108 +
6109 + raw_local_irq_save(flags);
6110 + __asm__ __volatile__(
6111 +#ifdef CONFIG_PAX_REFCOUNT
6112 + /* Exception on overflow. */
6113 + "1: dsub %0, %1 \n"
6114 + "2: \n"
6115 + _ASM_EXTABLE(1b, 2b)
6116 +#else
6117 + " dsubu %0, %1 \n"
6118 +#endif
6119 + : "+r" (v->counter) : "Ir" (i));
6120 + raw_local_irq_restore(flags);
6121 + }
6122 +}
6123 +
6124 +static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6125 {
6126 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6127 long temp;
6128 @@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6129 /*
6130 * Same as above, but return the result value
6131 */
6132 -static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6133 +static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6134 +{
6135 + long result;
6136 + long temp;
6137 +
6138 + smp_mb__before_llsc();
6139 +
6140 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6141 + __asm__ __volatile__(
6142 + " .set mips3 \n"
6143 + "1: lld %1, %2 # atomic64_add_return \n"
6144 +#ifdef CONFIG_PAX_REFCOUNT
6145 + "2: dadd %0, %1, %3 \n"
6146 +#else
6147 + " daddu %0, %1, %3 \n"
6148 +#endif
6149 + " scd %0, %2 \n"
6150 + " beqzl %0, 1b \n"
6151 +#ifdef CONFIG_PAX_REFCOUNT
6152 + " b 4f \n"
6153 + " .set noreorder \n"
6154 + "3: b 5f \n"
6155 + " move %0, %1 \n"
6156 + " .set reorder \n"
6157 + _ASM_EXTABLE(2b, 3b)
6158 +#endif
6159 + "4: daddu %0, %1, %3 \n"
6160 +#ifdef CONFIG_PAX_REFCOUNT
6161 + "5: \n"
6162 +#endif
6163 + " .set mips0 \n"
6164 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6165 + : "Ir" (i));
6166 + } else if (kernel_uses_llsc) {
6167 + __asm__ __volatile__(
6168 + " .set mips3 \n"
6169 + "1: lld %1, %2 # atomic64_add_return \n"
6170 +#ifdef CONFIG_PAX_REFCOUNT
6171 + "2: dadd %0, %1, %3 \n"
6172 +#else
6173 + " daddu %0, %1, %3 \n"
6174 +#endif
6175 + " scd %0, %2 \n"
6176 + " bnez %0, 4f \n"
6177 + " b 1b \n"
6178 +#ifdef CONFIG_PAX_REFCOUNT
6179 + " .set noreorder \n"
6180 + "3: b 5f \n"
6181 + " move %0, %1 \n"
6182 + " .set reorder \n"
6183 + _ASM_EXTABLE(2b, 3b)
6184 +#endif
6185 + "4: daddu %0, %1, %3 \n"
6186 +#ifdef CONFIG_PAX_REFCOUNT
6187 + "5: \n"
6188 +#endif
6189 + " .set mips0 \n"
6190 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6191 + : "Ir" (i), "m" (v->counter)
6192 + : "memory");
6193 + } else {
6194 + unsigned long flags;
6195 +
6196 + raw_local_irq_save(flags);
6197 + __asm__ __volatile__(
6198 + " ld %0, %1 \n"
6199 +#ifdef CONFIG_PAX_REFCOUNT
6200 + /* Exception on overflow. */
6201 + "1: dadd %0, %2 \n"
6202 +#else
6203 + " daddu %0, %2 \n"
6204 +#endif
6205 + " sd %0, %1 \n"
6206 +#ifdef CONFIG_PAX_REFCOUNT
6207 + /* Note: Dest reg is not modified on overflow */
6208 + "2: \n"
6209 + _ASM_EXTABLE(1b, 2b)
6210 +#endif
6211 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6212 + raw_local_irq_restore(flags);
6213 + }
6214 +
6215 + smp_llsc_mb();
6216 +
6217 + return result;
6218 +}
6219 +static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6220 {
6221 long result;
6222
6223 @@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6224 return result;
6225 }
6226
6227 -static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6228 +static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6229 +{
6230 + long result;
6231 + long temp;
6232 +
6233 + smp_mb__before_llsc();
6234 +
6235 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6236 + long temp;
6237 +
6238 + __asm__ __volatile__(
6239 + " .set mips3 \n"
6240 + "1: lld %1, %2 # atomic64_sub_return \n"
6241 +#ifdef CONFIG_PAX_REFCOUNT
6242 + "2: dsub %0, %1, %3 \n"
6243 +#else
6244 + " dsubu %0, %1, %3 \n"
6245 +#endif
6246 + " scd %0, %2 \n"
6247 + " beqzl %0, 1b \n"
6248 +#ifdef CONFIG_PAX_REFCOUNT
6249 + " b 4f \n"
6250 + " .set noreorder \n"
6251 + "3: b 5f \n"
6252 + " move %0, %1 \n"
6253 + " .set reorder \n"
6254 + _ASM_EXTABLE(2b, 3b)
6255 +#endif
6256 + "4: dsubu %0, %1, %3 \n"
6257 +#ifdef CONFIG_PAX_REFCOUNT
6258 + "5: \n"
6259 +#endif
6260 + " .set mips0 \n"
6261 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6262 + : "Ir" (i), "m" (v->counter)
6263 + : "memory");
6264 + } else if (kernel_uses_llsc) {
6265 + __asm__ __volatile__(
6266 + " .set mips3 \n"
6267 + "1: lld %1, %2 # atomic64_sub_return \n"
6268 +#ifdef CONFIG_PAX_REFCOUNT
6269 + "2: dsub %0, %1, %3 \n"
6270 +#else
6271 + " dsubu %0, %1, %3 \n"
6272 +#endif
6273 + " scd %0, %2 \n"
6274 + " bnez %0, 4f \n"
6275 + " b 1b \n"
6276 +#ifdef CONFIG_PAX_REFCOUNT
6277 + " .set noreorder \n"
6278 + "3: b 5f \n"
6279 + " move %0, %1 \n"
6280 + " .set reorder \n"
6281 + _ASM_EXTABLE(2b, 3b)
6282 +#endif
6283 + "4: dsubu %0, %1, %3 \n"
6284 +#ifdef CONFIG_PAX_REFCOUNT
6285 + "5: \n"
6286 +#endif
6287 + " .set mips0 \n"
6288 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6289 + : "Ir" (i), "m" (v->counter)
6290 + : "memory");
6291 + } else {
6292 + unsigned long flags;
6293 +
6294 + raw_local_irq_save(flags);
6295 + __asm__ __volatile__(
6296 + " ld %0, %1 \n"
6297 +#ifdef CONFIG_PAX_REFCOUNT
6298 + /* Exception on overflow. */
6299 + "1: dsub %0, %2 \n"
6300 +#else
6301 + " dsubu %0, %2 \n"
6302 +#endif
6303 + " sd %0, %1 \n"
6304 +#ifdef CONFIG_PAX_REFCOUNT
6305 + /* Note: Dest reg is not modified on overflow */
6306 + "2: \n"
6307 + _ASM_EXTABLE(1b, 2b)
6308 +#endif
6309 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6310 + raw_local_irq_restore(flags);
6311 + }
6312 +
6313 + smp_llsc_mb();
6314 +
6315 + return result;
6316 +}
6317 +
6318 +static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6319 {
6320 long result;
6321
6322 @@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6323 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6324 * The function returns the old value of @v minus @i.
6325 */
6326 -static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6327 +static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6328 {
6329 long result;
6330
6331 @@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6332 return result;
6333 }
6334
6335 -#define atomic64_cmpxchg(v, o, n) \
6336 - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6337 -#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6338 +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6339 +{
6340 + return cmpxchg(&v->counter, old, new);
6341 +}
6342 +
6343 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6344 + long new)
6345 +{
6346 + return cmpxchg(&(v->counter), old, new);
6347 +}
6348 +
6349 +static inline long atomic64_xchg(atomic64_t *v, long new)
6350 +{
6351 + return xchg(&v->counter, new);
6352 +}
6353 +
6354 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6355 +{
6356 + return xchg(&(v->counter), new);
6357 +}
6358
6359 /**
6360 * atomic64_add_unless - add unless the number is a given value
6361 @@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6362
6363 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6364 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6365 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6366
6367 /*
6368 * atomic64_sub_and_test - subtract value from variable and test result
6369 @@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6370 * other cases.
6371 */
6372 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6373 +#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6374
6375 /*
6376 * atomic64_dec_and_test - decrement by 1 and test
6377 @@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6378 * Atomically increments @v by 1.
6379 */
6380 #define atomic64_inc(v) atomic64_add(1, (v))
6381 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6382
6383 /*
6384 * atomic64_dec - decrement and test
6385 @@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6386 * Atomically decrements @v by 1.
6387 */
6388 #define atomic64_dec(v) atomic64_sub(1, (v))
6389 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6390
6391 /*
6392 * atomic64_add_negative - add and test if negative
6393 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6394 index b4db69f..8f3b093 100644
6395 --- a/arch/mips/include/asm/cache.h
6396 +++ b/arch/mips/include/asm/cache.h
6397 @@ -9,10 +9,11 @@
6398 #ifndef _ASM_CACHE_H
6399 #define _ASM_CACHE_H
6400
6401 +#include <linux/const.h>
6402 #include <kmalloc.h>
6403
6404 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6405 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6406 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6407
6408 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6409 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6410 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6411 index a66359e..d3d474a 100644
6412 --- a/arch/mips/include/asm/elf.h
6413 +++ b/arch/mips/include/asm/elf.h
6414 @@ -373,13 +373,16 @@ extern const char *__elf_platform;
6415 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6416 #endif
6417
6418 +#ifdef CONFIG_PAX_ASLR
6419 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6420 +
6421 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6422 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6423 +#endif
6424 +
6425 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6426 struct linux_binprm;
6427 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6428 int uses_interp);
6429
6430 -struct mm_struct;
6431 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6432 -#define arch_randomize_brk arch_randomize_brk
6433 -
6434 #endif /* _ASM_ELF_H */
6435 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6436 index c1f6afa..38cc6e9 100644
6437 --- a/arch/mips/include/asm/exec.h
6438 +++ b/arch/mips/include/asm/exec.h
6439 @@ -12,6 +12,6 @@
6440 #ifndef _ASM_EXEC_H
6441 #define _ASM_EXEC_H
6442
6443 -extern unsigned long arch_align_stack(unsigned long sp);
6444 +#define arch_align_stack(x) ((x) & ~0xfUL)
6445
6446 #endif /* _ASM_EXEC_H */
6447 diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
6448 index ce35c9a..434321c 100644
6449 --- a/arch/mips/include/asm/ftrace.h
6450 +++ b/arch/mips/include/asm/ftrace.h
6451 @@ -22,12 +22,12 @@ extern void _mcount(void);
6452 #define safe_load(load, src, dst, error) \
6453 do { \
6454 asm volatile ( \
6455 - "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
6456 - " li %[" STR(error) "], 0\n" \
6457 + "1: " load " %[dest], 0(%[source])\n" \
6458 + " li %[err], 0\n" \
6459 "2:\n" \
6460 \
6461 ".section .fixup, \"ax\"\n" \
6462 - "3: li %[" STR(error) "], 1\n" \
6463 + "3: li %[err], 1\n" \
6464 " j 2b\n" \
6465 ".previous\n" \
6466 \
6467 @@ -35,8 +35,8 @@ do { \
6468 STR(PTR) "\t1b, 3b\n\t" \
6469 ".previous\n" \
6470 \
6471 - : [dst] "=&r" (dst), [error] "=r" (error)\
6472 - : [src] "r" (src) \
6473 + : [dest] "=&r" (dst), [err] "=r" (error)\
6474 + : [source] "r" (src) \
6475 : "memory" \
6476 ); \
6477 } while (0)
6478 @@ -44,12 +44,12 @@ do { \
6479 #define safe_store(store, src, dst, error) \
6480 do { \
6481 asm volatile ( \
6482 - "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
6483 - " li %[" STR(error) "], 0\n" \
6484 + "1: " store " %[source], 0(%[dest])\n"\
6485 + " li %[err], 0\n" \
6486 "2:\n" \
6487 \
6488 ".section .fixup, \"ax\"\n" \
6489 - "3: li %[" STR(error) "], 1\n" \
6490 + "3: li %[err], 1\n" \
6491 " j 2b\n" \
6492 ".previous\n" \
6493 \
6494 @@ -57,8 +57,8 @@ do { \
6495 STR(PTR) "\t1b, 3b\n\t" \
6496 ".previous\n" \
6497 \
6498 - : [error] "=r" (error) \
6499 - : [dst] "r" (dst), [src] "r" (src)\
6500 + : [err] "=r" (error) \
6501 + : [dest] "r" (dst), [source] "r" (src)\
6502 : "memory" \
6503 ); \
6504 } while (0)
6505 diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6506 index 9e8ef59..1139d6b 100644
6507 --- a/arch/mips/include/asm/hw_irq.h
6508 +++ b/arch/mips/include/asm/hw_irq.h
6509 @@ -10,7 +10,7 @@
6510
6511 #include <linux/atomic.h>
6512
6513 -extern atomic_t irq_err_count;
6514 +extern atomic_unchecked_t irq_err_count;
6515
6516 /*
6517 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6518 diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6519 index d44622c..64990d2 100644
6520 --- a/arch/mips/include/asm/local.h
6521 +++ b/arch/mips/include/asm/local.h
6522 @@ -12,15 +12,25 @@ typedef struct
6523 atomic_long_t a;
6524 } local_t;
6525
6526 +typedef struct {
6527 + atomic_long_unchecked_t a;
6528 +} local_unchecked_t;
6529 +
6530 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6531
6532 #define local_read(l) atomic_long_read(&(l)->a)
6533 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6534 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6535 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6536
6537 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6538 +#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6539 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6540 +#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6541 #define local_inc(l) atomic_long_inc(&(l)->a)
6542 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6543 #define local_dec(l) atomic_long_dec(&(l)->a)
6544 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6545
6546 /*
6547 * Same as above, but return the result value
6548 @@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6549 return result;
6550 }
6551
6552 +static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6553 +{
6554 + unsigned long result;
6555 +
6556 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6557 + unsigned long temp;
6558 +
6559 + __asm__ __volatile__(
6560 + " .set mips3 \n"
6561 + "1:" __LL "%1, %2 # local_add_return \n"
6562 + " addu %0, %1, %3 \n"
6563 + __SC "%0, %2 \n"
6564 + " beqzl %0, 1b \n"
6565 + " addu %0, %1, %3 \n"
6566 + " .set mips0 \n"
6567 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6568 + : "Ir" (i), "m" (l->a.counter)
6569 + : "memory");
6570 + } else if (kernel_uses_llsc) {
6571 + unsigned long temp;
6572 +
6573 + __asm__ __volatile__(
6574 + " .set mips3 \n"
6575 + "1:" __LL "%1, %2 # local_add_return \n"
6576 + " addu %0, %1, %3 \n"
6577 + __SC "%0, %2 \n"
6578 + " beqz %0, 1b \n"
6579 + " addu %0, %1, %3 \n"
6580 + " .set mips0 \n"
6581 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6582 + : "Ir" (i), "m" (l->a.counter)
6583 + : "memory");
6584 + } else {
6585 + unsigned long flags;
6586 +
6587 + local_irq_save(flags);
6588 + result = l->a.counter;
6589 + result += i;
6590 + l->a.counter = result;
6591 + local_irq_restore(flags);
6592 + }
6593 +
6594 + return result;
6595 +}
6596 +
6597 static __inline__ long local_sub_return(long i, local_t * l)
6598 {
6599 unsigned long result;
6600 @@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6601
6602 #define local_cmpxchg(l, o, n) \
6603 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6604 +#define local_cmpxchg_unchecked(l, o, n) \
6605 + ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6606 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6607
6608 /**
6609 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6610 index f6be474..12ad554 100644
6611 --- a/arch/mips/include/asm/page.h
6612 +++ b/arch/mips/include/asm/page.h
6613 @@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6614 #ifdef CONFIG_CPU_MIPS32
6615 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6616 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6617 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6618 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6619 #else
6620 typedef struct { unsigned long long pte; } pte_t;
6621 #define pte_val(x) ((x).pte)
6622 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6623 index b336037..5b874cc 100644
6624 --- a/arch/mips/include/asm/pgalloc.h
6625 +++ b/arch/mips/include/asm/pgalloc.h
6626 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6627 {
6628 set_pud(pud, __pud((unsigned long)pmd));
6629 }
6630 +
6631 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6632 +{
6633 + pud_populate(mm, pud, pmd);
6634 +}
6635 #endif
6636
6637 /*
6638 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6639 index 008324d..f67c239 100644
6640 --- a/arch/mips/include/asm/pgtable.h
6641 +++ b/arch/mips/include/asm/pgtable.h
6642 @@ -20,6 +20,9 @@
6643 #include <asm/io.h>
6644 #include <asm/pgtable-bits.h>
6645
6646 +#define ktla_ktva(addr) (addr)
6647 +#define ktva_ktla(addr) (addr)
6648 +
6649 struct mm_struct;
6650 struct vm_area_struct;
6651
6652 diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6653 index 25da651..ae2a259 100644
6654 --- a/arch/mips/include/asm/smtc_proc.h
6655 +++ b/arch/mips/include/asm/smtc_proc.h
6656 @@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6657
6658 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6659
6660 -extern atomic_t smtc_fpu_recoveries;
6661 +extern atomic_unchecked_t smtc_fpu_recoveries;
6662
6663 #endif /* __ASM_SMTC_PROC_H */
6664 diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
6665 index 81c8913..81d8432 100644
6666 --- a/arch/mips/include/asm/syscall.h
6667 +++ b/arch/mips/include/asm/syscall.h
6668 @@ -29,7 +29,7 @@ static inline long syscall_get_nr(struct task_struct *task,
6669 static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
6670 struct task_struct *task, struct pt_regs *regs, unsigned int n)
6671 {
6672 - unsigned long usp = regs->regs[29];
6673 + unsigned long usp __maybe_unused = regs->regs[29];
6674
6675 switch (n) {
6676 case 0: case 1: case 2: case 3:
6677 @@ -39,14 +39,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
6678
6679 #ifdef CONFIG_32BIT
6680 case 4: case 5: case 6: case 7:
6681 - return get_user(*arg, (int *)usp + 4 * n);
6682 + return get_user(*arg, (int *)usp + n);
6683 #endif
6684
6685 #ifdef CONFIG_64BIT
6686 case 4: case 5: case 6: case 7:
6687 #ifdef CONFIG_MIPS32_O32
6688 if (test_thread_flag(TIF_32BIT_REGS))
6689 - return get_user(*arg, (int *)usp + 4 * n);
6690 + return get_user(*arg, (int *)usp + n);
6691 else
6692 #endif
6693 *arg = regs->regs[4 + n];
6694 @@ -83,11 +83,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
6695 unsigned int i, unsigned int n,
6696 unsigned long *args)
6697 {
6698 - unsigned long arg;
6699 int ret;
6700
6701 while (n--)
6702 - ret |= mips_get_syscall_arg(&arg, task, regs, i++);
6703 + ret |= mips_get_syscall_arg(args++, task, regs, i++);
6704
6705 /*
6706 * No way to communicate an error because this is a void function.
6707 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6708 index 4f58ef6..5e7081b 100644
6709 --- a/arch/mips/include/asm/thread_info.h
6710 +++ b/arch/mips/include/asm/thread_info.h
6711 @@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6712 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6713 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6714 #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
6715 +/* li takes a 32bit immediate */
6716 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6717 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6718
6719 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6720 @@ -132,13 +134,14 @@ static inline struct thread_info *current_thread_info(void)
6721 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6722 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6723 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6724 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6725
6726 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6727 - _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6728 + _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6729
6730 /* work to do in syscall_trace_leave() */
6731 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6732 - _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6733 + _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6734
6735 /* work to do on interrupt/exception return */
6736 #define _TIF_WORK_MASK \
6737 @@ -146,7 +149,7 @@ static inline struct thread_info *current_thread_info(void)
6738 /* work to do on any return to u-space */
6739 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6740 _TIF_WORK_SYSCALL_EXIT | \
6741 - _TIF_SYSCALL_TRACEPOINT)
6742 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6743
6744 /*
6745 * We stash processor id into a COP0 register to retrieve it fast
6746 diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6747 index f3fa375..3af6637 100644
6748 --- a/arch/mips/include/asm/uaccess.h
6749 +++ b/arch/mips/include/asm/uaccess.h
6750 @@ -128,6 +128,7 @@ extern u64 __ua_limit;
6751 __ok == 0; \
6752 })
6753
6754 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6755 #define access_ok(type, addr, size) \
6756 likely(__access_ok((addr), (size), __access_mask))
6757
6758 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6759 index 1188e00..41cf144 100644
6760 --- a/arch/mips/kernel/binfmt_elfn32.c
6761 +++ b/arch/mips/kernel/binfmt_elfn32.c
6762 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6763 #undef ELF_ET_DYN_BASE
6764 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6765
6766 +#ifdef CONFIG_PAX_ASLR
6767 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6768 +
6769 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6770 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6771 +#endif
6772 +
6773 #include <asm/processor.h>
6774 #include <linux/module.h>
6775 #include <linux/elfcore.h>
6776 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6777 index 202e581..689ca79 100644
6778 --- a/arch/mips/kernel/binfmt_elfo32.c
6779 +++ b/arch/mips/kernel/binfmt_elfo32.c
6780 @@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6781 #undef ELF_ET_DYN_BASE
6782 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6783
6784 +#ifdef CONFIG_PAX_ASLR
6785 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6786 +
6787 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6788 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6789 +#endif
6790 +
6791 #include <asm/processor.h>
6792
6793 /*
6794 diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
6795 index 185ba25..374ed74 100644
6796 --- a/arch/mips/kernel/ftrace.c
6797 +++ b/arch/mips/kernel/ftrace.c
6798 @@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
6799 safe_store_code(new_code1, ip, faulted);
6800 if (unlikely(faulted))
6801 return -EFAULT;
6802 - ip += 4;
6803 - safe_store_code(new_code2, ip, faulted);
6804 + safe_store_code(new_code2, ip + 4, faulted);
6805 if (unlikely(faulted))
6806 return -EFAULT;
6807 - flush_icache_range(ip, ip + 8); /* original ip + 12 */
6808 + flush_icache_range(ip, ip + 8);
6809 return 0;
6810 }
6811 #endif
6812 diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6813 index 2b91fe8..fe4f6b4 100644
6814 --- a/arch/mips/kernel/i8259.c
6815 +++ b/arch/mips/kernel/i8259.c
6816 @@ -205,7 +205,7 @@ spurious_8259A_irq:
6817 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6818 spurious_irq_mask |= irqmask;
6819 }
6820 - atomic_inc(&irq_err_count);
6821 + atomic_inc_unchecked(&irq_err_count);
6822 /*
6823 * Theoretically we do not have to handle this IRQ,
6824 * but in Linux this does not cause problems and is
6825 diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6826 index 44a1f79..2bd6aa3 100644
6827 --- a/arch/mips/kernel/irq-gt641xx.c
6828 +++ b/arch/mips/kernel/irq-gt641xx.c
6829 @@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6830 }
6831 }
6832
6833 - atomic_inc(&irq_err_count);
6834 + atomic_inc_unchecked(&irq_err_count);
6835 }
6836
6837 void __init gt641xx_irq_init(void)
6838 diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6839 index d1fea7a..45602ea 100644
6840 --- a/arch/mips/kernel/irq.c
6841 +++ b/arch/mips/kernel/irq.c
6842 @@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6843 printk("unexpected IRQ # %d\n", irq);
6844 }
6845
6846 -atomic_t irq_err_count;
6847 +atomic_unchecked_t irq_err_count;
6848
6849 int arch_show_interrupts(struct seq_file *p, int prec)
6850 {
6851 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6852 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6853 return 0;
6854 }
6855
6856 asmlinkage void spurious_interrupt(void)
6857 {
6858 - atomic_inc(&irq_err_count);
6859 + atomic_inc_unchecked(&irq_err_count);
6860 }
6861
6862 void __init init_IRQ(void)
6863 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6864 index ddc7610..8c58f17 100644
6865 --- a/arch/mips/kernel/process.c
6866 +++ b/arch/mips/kernel/process.c
6867 @@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_struct *task)
6868 out:
6869 return pc;
6870 }
6871 -
6872 -/*
6873 - * Don't forget that the stack pointer must be aligned on a 8 bytes
6874 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6875 - */
6876 -unsigned long arch_align_stack(unsigned long sp)
6877 -{
6878 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6879 - sp -= get_random_int() & ~PAGE_MASK;
6880 -
6881 - return sp & ALMASK;
6882 -}
6883 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6884 index b52e1d2..1a3ca09 100644
6885 --- a/arch/mips/kernel/ptrace.c
6886 +++ b/arch/mips/kernel/ptrace.c
6887 @@ -652,6 +652,10 @@ long arch_ptrace(struct task_struct *child, long request,
6888 return ret;
6889 }
6890
6891 +#ifdef CONFIG_GRKERNSEC_SETXID
6892 +extern void gr_delayed_cred_worker(void);
6893 +#endif
6894 +
6895 /*
6896 * Notification of system call entry/exit
6897 * - triggered by current->work.syscall_trace
6898 @@ -668,6 +672,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6899 tracehook_report_syscall_entry(regs))
6900 ret = -1;
6901
6902 +#ifdef CONFIG_GRKERNSEC_SETXID
6903 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6904 + gr_delayed_cred_worker();
6905 +#endif
6906 +
6907 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6908 trace_sys_enter(regs, regs->regs[2]);
6909
6910 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6911 index 07fc524..b9d7f28 100644
6912 --- a/arch/mips/kernel/reset.c
6913 +++ b/arch/mips/kernel/reset.c
6914 @@ -13,6 +13,7 @@
6915 #include <linux/reboot.h>
6916
6917 #include <asm/reboot.h>
6918 +#include <asm/bug.h>
6919
6920 /*
6921 * Urgs ... Too many MIPS machines to handle this in a generic way.
6922 @@ -29,16 +30,19 @@ void machine_restart(char *command)
6923 {
6924 if (_machine_restart)
6925 _machine_restart(command);
6926 + BUG();
6927 }
6928
6929 void machine_halt(void)
6930 {
6931 if (_machine_halt)
6932 _machine_halt();
6933 + BUG();
6934 }
6935
6936 void machine_power_off(void)
6937 {
6938 if (pm_power_off)
6939 pm_power_off();
6940 + BUG();
6941 }
6942 diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6943 index c10aa84..9ec2e60 100644
6944 --- a/arch/mips/kernel/smtc-proc.c
6945 +++ b/arch/mips/kernel/smtc-proc.c
6946 @@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6947
6948 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6949
6950 -atomic_t smtc_fpu_recoveries;
6951 +atomic_unchecked_t smtc_fpu_recoveries;
6952
6953 static int smtc_proc_show(struct seq_file *m, void *v)
6954 {
6955 @@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6956 for(i = 0; i < NR_CPUS; i++)
6957 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6958 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6959 - atomic_read(&smtc_fpu_recoveries));
6960 + atomic_read_unchecked(&smtc_fpu_recoveries));
6961 return 0;
6962 }
6963
6964 @@ -73,7 +73,7 @@ void init_smtc_stats(void)
6965 smtc_cpu_stats[i].selfipis = 0;
6966 }
6967
6968 - atomic_set(&smtc_fpu_recoveries, 0);
6969 + atomic_set_unchecked(&smtc_fpu_recoveries, 0);
6970
6971 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
6972 }
6973 diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
6974 index dfc1b91..11a2c07 100644
6975 --- a/arch/mips/kernel/smtc.c
6976 +++ b/arch/mips/kernel/smtc.c
6977 @@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
6978 }
6979 smtc_ipi_qdump();
6980 printk("%d Recoveries of \"stolen\" FPU\n",
6981 - atomic_read(&smtc_fpu_recoveries));
6982 + atomic_read_unchecked(&smtc_fpu_recoveries));
6983 }
6984
6985
6986 diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6987 index 84536bf..79caa4d 100644
6988 --- a/arch/mips/kernel/sync-r4k.c
6989 +++ b/arch/mips/kernel/sync-r4k.c
6990 @@ -21,8 +21,8 @@
6991 #include <asm/mipsregs.h>
6992
6993 static atomic_t count_start_flag = ATOMIC_INIT(0);
6994 -static atomic_t count_count_start = ATOMIC_INIT(0);
6995 -static atomic_t count_count_stop = ATOMIC_INIT(0);
6996 +static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6997 +static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6998 static atomic_t count_reference = ATOMIC_INIT(0);
6999
7000 #define COUNTON 100
7001 @@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
7002
7003 for (i = 0; i < NR_LOOPS; i++) {
7004 /* slaves loop on '!= 2' */
7005 - while (atomic_read(&count_count_start) != 1)
7006 + while (atomic_read_unchecked(&count_count_start) != 1)
7007 mb();
7008 - atomic_set(&count_count_stop, 0);
7009 + atomic_set_unchecked(&count_count_stop, 0);
7010 smp_wmb();
7011
7012 /* this lets the slaves write their count register */
7013 - atomic_inc(&count_count_start);
7014 + atomic_inc_unchecked(&count_count_start);
7015
7016 /*
7017 * Everyone initialises count in the last loop:
7018 @@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
7019 /*
7020 * Wait for all slaves to leave the synchronization point:
7021 */
7022 - while (atomic_read(&count_count_stop) != 1)
7023 + while (atomic_read_unchecked(&count_count_stop) != 1)
7024 mb();
7025 - atomic_set(&count_count_start, 0);
7026 + atomic_set_unchecked(&count_count_start, 0);
7027 smp_wmb();
7028 - atomic_inc(&count_count_stop);
7029 + atomic_inc_unchecked(&count_count_stop);
7030 }
7031 /* Arrange for an interrupt in a short while */
7032 write_c0_compare(read_c0_count() + COUNTON);
7033 @@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
7034 initcount = atomic_read(&count_reference);
7035
7036 for (i = 0; i < NR_LOOPS; i++) {
7037 - atomic_inc(&count_count_start);
7038 - while (atomic_read(&count_count_start) != 2)
7039 + atomic_inc_unchecked(&count_count_start);
7040 + while (atomic_read_unchecked(&count_count_start) != 2)
7041 mb();
7042
7043 /*
7044 @@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
7045 if (i == NR_LOOPS-1)
7046 write_c0_count(initcount);
7047
7048 - atomic_inc(&count_count_stop);
7049 - while (atomic_read(&count_count_stop) != 2)
7050 + atomic_inc_unchecked(&count_count_stop);
7051 + while (atomic_read_unchecked(&count_count_stop) != 2)
7052 mb();
7053 }
7054 /* Arrange for an interrupt in a short while */
7055 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7056 index f9c8746..78b64e3 100644
7057 --- a/arch/mips/kernel/traps.c
7058 +++ b/arch/mips/kernel/traps.c
7059 @@ -690,7 +690,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7060 siginfo_t info;
7061
7062 prev_state = exception_enter();
7063 - die_if_kernel("Integer overflow", regs);
7064 + if (unlikely(!user_mode(regs))) {
7065 +
7066 +#ifdef CONFIG_PAX_REFCOUNT
7067 + if (fixup_exception(regs)) {
7068 + pax_report_refcount_overflow(regs);
7069 + exception_exit(prev_state);
7070 + return;
7071 + }
7072 +#endif
7073 +
7074 + die("Integer overflow", regs);
7075 + }
7076
7077 info.si_code = FPE_INTOVF;
7078 info.si_signo = SIGFPE;
7079 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7080 index becc42b..9e43d4b 100644
7081 --- a/arch/mips/mm/fault.c
7082 +++ b/arch/mips/mm/fault.c
7083 @@ -28,6 +28,23 @@
7084 #include <asm/highmem.h> /* For VMALLOC_END */
7085 #include <linux/kdebug.h>
7086
7087 +#ifdef CONFIG_PAX_PAGEEXEC
7088 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7089 +{
7090 + unsigned long i;
7091 +
7092 + printk(KERN_ERR "PAX: bytes at PC: ");
7093 + for (i = 0; i < 5; i++) {
7094 + unsigned int c;
7095 + if (get_user(c, (unsigned int *)pc+i))
7096 + printk(KERN_CONT "???????? ");
7097 + else
7098 + printk(KERN_CONT "%08x ", c);
7099 + }
7100 + printk("\n");
7101 +}
7102 +#endif
7103 +
7104 /*
7105 * This routine handles page faults. It determines the address,
7106 * and the problem, and then passes it off to one of the appropriate
7107 @@ -199,6 +216,14 @@ bad_area:
7108 bad_area_nosemaphore:
7109 /* User mode accesses just cause a SIGSEGV */
7110 if (user_mode(regs)) {
7111 +
7112 +#ifdef CONFIG_PAX_PAGEEXEC
7113 + if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7114 + pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7115 + do_group_exit(SIGKILL);
7116 + }
7117 +#endif
7118 +
7119 tsk->thread.cp0_badvaddr = address;
7120 tsk->thread.error_code = write;
7121 #if 0
7122 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7123 index f1baadd..5472dca 100644
7124 --- a/arch/mips/mm/mmap.c
7125 +++ b/arch/mips/mm/mmap.c
7126 @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7127 struct vm_area_struct *vma;
7128 unsigned long addr = addr0;
7129 int do_color_align;
7130 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7131 struct vm_unmapped_area_info info;
7132
7133 if (unlikely(len > TASK_SIZE))
7134 @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7135 do_color_align = 1;
7136
7137 /* requesting a specific address */
7138 +
7139 +#ifdef CONFIG_PAX_RANDMMAP
7140 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7141 +#endif
7142 +
7143 if (addr) {
7144 if (do_color_align)
7145 addr = COLOUR_ALIGN(addr, pgoff);
7146 @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7147 addr = PAGE_ALIGN(addr);
7148
7149 vma = find_vma(mm, addr);
7150 - if (TASK_SIZE - len >= addr &&
7151 - (!vma || addr + len <= vma->vm_start))
7152 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7153 return addr;
7154 }
7155
7156 info.length = len;
7157 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7158 info.align_offset = pgoff << PAGE_SHIFT;
7159 + info.threadstack_offset = offset;
7160
7161 if (dir == DOWN) {
7162 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7163 @@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7164 {
7165 unsigned long random_factor = 0UL;
7166
7167 +#ifdef CONFIG_PAX_RANDMMAP
7168 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7169 +#endif
7170 +
7171 if (current->flags & PF_RANDOMIZE) {
7172 random_factor = get_random_int();
7173 random_factor = random_factor << PAGE_SHIFT;
7174 @@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7175
7176 if (mmap_is_legacy()) {
7177 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7178 +
7179 +#ifdef CONFIG_PAX_RANDMMAP
7180 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7181 + mm->mmap_base += mm->delta_mmap;
7182 +#endif
7183 +
7184 mm->get_unmapped_area = arch_get_unmapped_area;
7185 } else {
7186 mm->mmap_base = mmap_base(random_factor);
7187 +
7188 +#ifdef CONFIG_PAX_RANDMMAP
7189 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7190 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7191 +#endif
7192 +
7193 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7194 }
7195 }
7196
7197 -static inline unsigned long brk_rnd(void)
7198 -{
7199 - unsigned long rnd = get_random_int();
7200 -
7201 - rnd = rnd << PAGE_SHIFT;
7202 - /* 8MB for 32bit, 256MB for 64bit */
7203 - if (TASK_IS_32BIT_ADDR)
7204 - rnd = rnd & 0x7ffffful;
7205 - else
7206 - rnd = rnd & 0xffffffful;
7207 -
7208 - return rnd;
7209 -}
7210 -
7211 -unsigned long arch_randomize_brk(struct mm_struct *mm)
7212 -{
7213 - unsigned long base = mm->brk;
7214 - unsigned long ret;
7215 -
7216 - ret = PAGE_ALIGN(base + brk_rnd());
7217 -
7218 - if (ret < mm->brk)
7219 - return mm->brk;
7220 -
7221 - return ret;
7222 -}
7223 -
7224 int __virt_addr_valid(const volatile void *kaddr)
7225 {
7226 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7227 diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7228 index 59cccd9..f39ac2f 100644
7229 --- a/arch/mips/pci/pci-octeon.c
7230 +++ b/arch/mips/pci/pci-octeon.c
7231 @@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7232
7233
7234 static struct pci_ops octeon_pci_ops = {
7235 - octeon_read_config,
7236 - octeon_write_config,
7237 + .read = octeon_read_config,
7238 + .write = octeon_write_config,
7239 };
7240
7241 static struct resource octeon_pci_mem_resource = {
7242 diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7243 index 5e36c33..eb4a17b 100644
7244 --- a/arch/mips/pci/pcie-octeon.c
7245 +++ b/arch/mips/pci/pcie-octeon.c
7246 @@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7247 }
7248
7249 static struct pci_ops octeon_pcie0_ops = {
7250 - octeon_pcie0_read_config,
7251 - octeon_pcie0_write_config,
7252 + .read = octeon_pcie0_read_config,
7253 + .write = octeon_pcie0_write_config,
7254 };
7255
7256 static struct resource octeon_pcie0_mem_resource = {
7257 @@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7258 };
7259
7260 static struct pci_ops octeon_pcie1_ops = {
7261 - octeon_pcie1_read_config,
7262 - octeon_pcie1_write_config,
7263 + .read = octeon_pcie1_read_config,
7264 + .write = octeon_pcie1_write_config,
7265 };
7266
7267 static struct resource octeon_pcie1_mem_resource = {
7268 @@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7269 };
7270
7271 static struct pci_ops octeon_dummy_ops = {
7272 - octeon_dummy_read_config,
7273 - octeon_dummy_write_config,
7274 + .read = octeon_dummy_read_config,
7275 + .write = octeon_dummy_write_config,
7276 };
7277
7278 static struct resource octeon_dummy_mem_resource = {
7279 diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7280 index a2358b4..7cead4f 100644
7281 --- a/arch/mips/sgi-ip27/ip27-nmi.c
7282 +++ b/arch/mips/sgi-ip27/ip27-nmi.c
7283 @@ -187,9 +187,9 @@ void
7284 cont_nmi_dump(void)
7285 {
7286 #ifndef REAL_NMI_SIGNAL
7287 - static atomic_t nmied_cpus = ATOMIC_INIT(0);
7288 + static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7289
7290 - atomic_inc(&nmied_cpus);
7291 + atomic_inc_unchecked(&nmied_cpus);
7292 #endif
7293 /*
7294 * Only allow 1 cpu to proceed
7295 @@ -233,7 +233,7 @@ cont_nmi_dump(void)
7296 udelay(10000);
7297 }
7298 #else
7299 - while (atomic_read(&nmied_cpus) != num_online_cpus());
7300 + while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7301 #endif
7302
7303 /*
7304 diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7305 index a046b30..6799527 100644
7306 --- a/arch/mips/sni/rm200.c
7307 +++ b/arch/mips/sni/rm200.c
7308 @@ -270,7 +270,7 @@ spurious_8259A_irq:
7309 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7310 spurious_irq_mask |= irqmask;
7311 }
7312 - atomic_inc(&irq_err_count);
7313 + atomic_inc_unchecked(&irq_err_count);
7314 /*
7315 * Theoretically we do not have to handle this IRQ,
7316 * but in Linux this does not cause problems and is
7317 diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7318 index 41e873b..34d33a7 100644
7319 --- a/arch/mips/vr41xx/common/icu.c
7320 +++ b/arch/mips/vr41xx/common/icu.c
7321 @@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7322
7323 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7324
7325 - atomic_inc(&irq_err_count);
7326 + atomic_inc_unchecked(&irq_err_count);
7327
7328 return -1;
7329 }
7330 diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7331 index ae0e4ee..e8f0692 100644
7332 --- a/arch/mips/vr41xx/common/irq.c
7333 +++ b/arch/mips/vr41xx/common/irq.c
7334 @@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7335 irq_cascade_t *cascade;
7336
7337 if (irq >= NR_IRQS) {
7338 - atomic_inc(&irq_err_count);
7339 + atomic_inc_unchecked(&irq_err_count);
7340 return;
7341 }
7342
7343 @@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7344 ret = cascade->get_irq(irq);
7345 irq = ret;
7346 if (ret < 0)
7347 - atomic_inc(&irq_err_count);
7348 + atomic_inc_unchecked(&irq_err_count);
7349 else
7350 irq_dispatch(irq);
7351 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7352 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7353 index 967d144..db12197 100644
7354 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7355 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7356 @@ -11,12 +11,14 @@
7357 #ifndef _ASM_PROC_CACHE_H
7358 #define _ASM_PROC_CACHE_H
7359
7360 +#include <linux/const.h>
7361 +
7362 /* L1 cache */
7363
7364 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7365 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7366 -#define L1_CACHE_BYTES 16 /* bytes per entry */
7367 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7368 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7369 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7370
7371 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7372 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7373 index bcb5df2..84fabd2 100644
7374 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7375 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7376 @@ -16,13 +16,15 @@
7377 #ifndef _ASM_PROC_CACHE_H
7378 #define _ASM_PROC_CACHE_H
7379
7380 +#include <linux/const.h>
7381 +
7382 /*
7383 * L1 cache
7384 */
7385 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7386 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7387 -#define L1_CACHE_BYTES 32 /* bytes per entry */
7388 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7389 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7390 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7391
7392 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7393 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7394 index 4ce7a01..449202a 100644
7395 --- a/arch/openrisc/include/asm/cache.h
7396 +++ b/arch/openrisc/include/asm/cache.h
7397 @@ -19,11 +19,13 @@
7398 #ifndef __ASM_OPENRISC_CACHE_H
7399 #define __ASM_OPENRISC_CACHE_H
7400
7401 +#include <linux/const.h>
7402 +
7403 /* FIXME: How can we replace these with values from the CPU...
7404 * they shouldn't be hard-coded!
7405 */
7406
7407 -#define L1_CACHE_BYTES 16
7408 #define L1_CACHE_SHIFT 4
7409 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7410
7411 #endif /* __ASM_OPENRISC_CACHE_H */
7412 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7413 index 472886c..00e7df9 100644
7414 --- a/arch/parisc/include/asm/atomic.h
7415 +++ b/arch/parisc/include/asm/atomic.h
7416 @@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7417 return dec;
7418 }
7419
7420 +#define atomic64_read_unchecked(v) atomic64_read(v)
7421 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7422 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7423 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7424 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7425 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7426 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7427 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7428 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7429 +
7430 #endif /* !CONFIG_64BIT */
7431
7432
7433 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7434 index 47f11c7..3420df2 100644
7435 --- a/arch/parisc/include/asm/cache.h
7436 +++ b/arch/parisc/include/asm/cache.h
7437 @@ -5,6 +5,7 @@
7438 #ifndef __ARCH_PARISC_CACHE_H
7439 #define __ARCH_PARISC_CACHE_H
7440
7441 +#include <linux/const.h>
7442
7443 /*
7444 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7445 @@ -15,13 +16,13 @@
7446 * just ruin performance.
7447 */
7448 #ifdef CONFIG_PA20
7449 -#define L1_CACHE_BYTES 64
7450 #define L1_CACHE_SHIFT 6
7451 #else
7452 -#define L1_CACHE_BYTES 32
7453 #define L1_CACHE_SHIFT 5
7454 #endif
7455
7456 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7457 +
7458 #ifndef __ASSEMBLY__
7459
7460 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7461 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7462 index ad2b503..bdf1651 100644
7463 --- a/arch/parisc/include/asm/elf.h
7464 +++ b/arch/parisc/include/asm/elf.h
7465 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7466
7467 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7468
7469 +#ifdef CONFIG_PAX_ASLR
7470 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
7471 +
7472 +#define PAX_DELTA_MMAP_LEN 16
7473 +#define PAX_DELTA_STACK_LEN 16
7474 +#endif
7475 +
7476 /* This yields a mask that user programs can use to figure out what
7477 instruction set this CPU supports. This could be done in user space,
7478 but it's not easy, and we've already done it here. */
7479 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7480 index f213f5b..0af3e8e 100644
7481 --- a/arch/parisc/include/asm/pgalloc.h
7482 +++ b/arch/parisc/include/asm/pgalloc.h
7483 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7484 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7485 }
7486
7487 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7488 +{
7489 + pgd_populate(mm, pgd, pmd);
7490 +}
7491 +
7492 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7493 {
7494 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7495 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7496 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7497 #define pmd_free(mm, x) do { } while (0)
7498 #define pgd_populate(mm, pmd, pte) BUG()
7499 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
7500
7501 #endif
7502
7503 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7504 index 34899b5..02dd060 100644
7505 --- a/arch/parisc/include/asm/pgtable.h
7506 +++ b/arch/parisc/include/asm/pgtable.h
7507 @@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7508 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7509 #define PAGE_COPY PAGE_EXECREAD
7510 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7511 +
7512 +#ifdef CONFIG_PAX_PAGEEXEC
7513 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7514 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7515 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7516 +#else
7517 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
7518 +# define PAGE_COPY_NOEXEC PAGE_COPY
7519 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
7520 +#endif
7521 +
7522 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7523 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7524 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7525 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7526 index 4006964..fcb3cc2 100644
7527 --- a/arch/parisc/include/asm/uaccess.h
7528 +++ b/arch/parisc/include/asm/uaccess.h
7529 @@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7530 const void __user *from,
7531 unsigned long n)
7532 {
7533 - int sz = __compiletime_object_size(to);
7534 + size_t sz = __compiletime_object_size(to);
7535 int ret = -EFAULT;
7536
7537 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7538 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7539 ret = __copy_from_user(to, from, n);
7540 else
7541 copy_from_user_overflow();
7542 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7543 index 50dfafc..b9fc230 100644
7544 --- a/arch/parisc/kernel/module.c
7545 +++ b/arch/parisc/kernel/module.c
7546 @@ -98,16 +98,38 @@
7547
7548 /* three functions to determine where in the module core
7549 * or init pieces the location is */
7550 +static inline int in_init_rx(struct module *me, void *loc)
7551 +{
7552 + return (loc >= me->module_init_rx &&
7553 + loc < (me->module_init_rx + me->init_size_rx));
7554 +}
7555 +
7556 +static inline int in_init_rw(struct module *me, void *loc)
7557 +{
7558 + return (loc >= me->module_init_rw &&
7559 + loc < (me->module_init_rw + me->init_size_rw));
7560 +}
7561 +
7562 static inline int in_init(struct module *me, void *loc)
7563 {
7564 - return (loc >= me->module_init &&
7565 - loc <= (me->module_init + me->init_size));
7566 + return in_init_rx(me, loc) || in_init_rw(me, loc);
7567 +}
7568 +
7569 +static inline int in_core_rx(struct module *me, void *loc)
7570 +{
7571 + return (loc >= me->module_core_rx &&
7572 + loc < (me->module_core_rx + me->core_size_rx));
7573 +}
7574 +
7575 +static inline int in_core_rw(struct module *me, void *loc)
7576 +{
7577 + return (loc >= me->module_core_rw &&
7578 + loc < (me->module_core_rw + me->core_size_rw));
7579 }
7580
7581 static inline int in_core(struct module *me, void *loc)
7582 {
7583 - return (loc >= me->module_core &&
7584 - loc <= (me->module_core + me->core_size));
7585 + return in_core_rx(me, loc) || in_core_rw(me, loc);
7586 }
7587
7588 static inline int in_local(struct module *me, void *loc)
7589 @@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7590 }
7591
7592 /* align things a bit */
7593 - me->core_size = ALIGN(me->core_size, 16);
7594 - me->arch.got_offset = me->core_size;
7595 - me->core_size += gots * sizeof(struct got_entry);
7596 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
7597 + me->arch.got_offset = me->core_size_rw;
7598 + me->core_size_rw += gots * sizeof(struct got_entry);
7599
7600 - me->core_size = ALIGN(me->core_size, 16);
7601 - me->arch.fdesc_offset = me->core_size;
7602 - me->core_size += fdescs * sizeof(Elf_Fdesc);
7603 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
7604 + me->arch.fdesc_offset = me->core_size_rw;
7605 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7606
7607 me->arch.got_max = gots;
7608 me->arch.fdesc_max = fdescs;
7609 @@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7610
7611 BUG_ON(value == 0);
7612
7613 - got = me->module_core + me->arch.got_offset;
7614 + got = me->module_core_rw + me->arch.got_offset;
7615 for (i = 0; got[i].addr; i++)
7616 if (got[i].addr == value)
7617 goto out;
7618 @@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7619 #ifdef CONFIG_64BIT
7620 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7621 {
7622 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7623 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7624
7625 if (!value) {
7626 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7627 @@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7628
7629 /* Create new one */
7630 fdesc->addr = value;
7631 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7632 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7633 return (Elf_Addr)fdesc;
7634 }
7635 #endif /* CONFIG_64BIT */
7636 @@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7637
7638 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7639 end = table + sechdrs[me->arch.unwind_section].sh_size;
7640 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7641 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7642
7643 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7644 me->arch.unwind_section, table, end, gp);
7645 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7646 index 0d3a9d4..20a99b0 100644
7647 --- a/arch/parisc/kernel/sys_parisc.c
7648 +++ b/arch/parisc/kernel/sys_parisc.c
7649 @@ -33,9 +33,11 @@
7650 #include <linux/utsname.h>
7651 #include <linux/personality.h>
7652
7653 -static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7654 +static unsigned long get_unshared_area(unsigned long addr, unsigned long len,
7655 + unsigned long flags)
7656 {
7657 struct vm_unmapped_area_info info;
7658 + unsigned long offset = gr_rand_threadstack_offset(current->mm, NULL, flags);
7659
7660 info.flags = 0;
7661 info.length = len;
7662 @@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7663 info.high_limit = TASK_SIZE;
7664 info.align_mask = 0;
7665 info.align_offset = 0;
7666 + info.threadstack_offset = offset;
7667 return vm_unmapped_area(&info);
7668 }
7669
7670 @@ -69,9 +72,10 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
7671 }
7672
7673 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7674 - unsigned long len, unsigned long pgoff)
7675 + unsigned long len, unsigned long pgoff, unsigned long flags)
7676 {
7677 struct vm_unmapped_area_info info;
7678 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7679
7680 info.flags = 0;
7681 info.length = len;
7682 @@ -79,6 +83,7 @@ static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7683 info.high_limit = TASK_SIZE;
7684 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7685 info.align_offset = shared_align_offset(filp, pgoff);
7686 + info.threadstack_offset = offset;
7687 return vm_unmapped_area(&info);
7688 }
7689
7690 @@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7691 return -EINVAL;
7692 return addr;
7693 }
7694 - if (!addr)
7695 + if (!addr) {
7696 addr = TASK_UNMAPPED_BASE;
7697
7698 +#ifdef CONFIG_PAX_RANDMMAP
7699 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7700 + addr += current->mm->delta_mmap;
7701 +#endif
7702 +
7703 + }
7704 +
7705 if (filp || (flags & MAP_SHARED))
7706 - addr = get_shared_area(filp, addr, len, pgoff);
7707 + addr = get_shared_area(filp, addr, len, pgoff, flags);
7708 else
7709 - addr = get_unshared_area(addr, len);
7710 + addr = get_unshared_area(addr, len, flags);
7711
7712 return addr;
7713 }
7714 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7715 index 1cd1d0c..44ec918 100644
7716 --- a/arch/parisc/kernel/traps.c
7717 +++ b/arch/parisc/kernel/traps.c
7718 @@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7719
7720 down_read(&current->mm->mmap_sem);
7721 vma = find_vma(current->mm,regs->iaoq[0]);
7722 - if (vma && (regs->iaoq[0] >= vma->vm_start)
7723 - && (vma->vm_flags & VM_EXEC)) {
7724 -
7725 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7726 fault_address = regs->iaoq[0];
7727 fault_space = regs->iasq[0];
7728
7729 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7730 index 9d08c71..e2b4d20 100644
7731 --- a/arch/parisc/mm/fault.c
7732 +++ b/arch/parisc/mm/fault.c
7733 @@ -15,6 +15,7 @@
7734 #include <linux/sched.h>
7735 #include <linux/interrupt.h>
7736 #include <linux/module.h>
7737 +#include <linux/unistd.h>
7738
7739 #include <asm/uaccess.h>
7740 #include <asm/traps.h>
7741 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7742 static unsigned long
7743 parisc_acctyp(unsigned long code, unsigned int inst)
7744 {
7745 - if (code == 6 || code == 16)
7746 + if (code == 6 || code == 7 || code == 16)
7747 return VM_EXEC;
7748
7749 switch (inst & 0xf0000000) {
7750 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7751 }
7752 #endif
7753
7754 +#ifdef CONFIG_PAX_PAGEEXEC
7755 +/*
7756 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7757 + *
7758 + * returns 1 when task should be killed
7759 + * 2 when rt_sigreturn trampoline was detected
7760 + * 3 when unpatched PLT trampoline was detected
7761 + */
7762 +static int pax_handle_fetch_fault(struct pt_regs *regs)
7763 +{
7764 +
7765 +#ifdef CONFIG_PAX_EMUPLT
7766 + int err;
7767 +
7768 + do { /* PaX: unpatched PLT emulation */
7769 + unsigned int bl, depwi;
7770 +
7771 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7772 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7773 +
7774 + if (err)
7775 + break;
7776 +
7777 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7778 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7779 +
7780 + err = get_user(ldw, (unsigned int *)addr);
7781 + err |= get_user(bv, (unsigned int *)(addr+4));
7782 + err |= get_user(ldw2, (unsigned int *)(addr+8));
7783 +
7784 + if (err)
7785 + break;
7786 +
7787 + if (ldw == 0x0E801096U &&
7788 + bv == 0xEAC0C000U &&
7789 + ldw2 == 0x0E881095U)
7790 + {
7791 + unsigned int resolver, map;
7792 +
7793 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7794 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7795 + if (err)
7796 + break;
7797 +
7798 + regs->gr[20] = instruction_pointer(regs)+8;
7799 + regs->gr[21] = map;
7800 + regs->gr[22] = resolver;
7801 + regs->iaoq[0] = resolver | 3UL;
7802 + regs->iaoq[1] = regs->iaoq[0] + 4;
7803 + return 3;
7804 + }
7805 + }
7806 + } while (0);
7807 +#endif
7808 +
7809 +#ifdef CONFIG_PAX_EMUTRAMP
7810 +
7811 +#ifndef CONFIG_PAX_EMUSIGRT
7812 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7813 + return 1;
7814 +#endif
7815 +
7816 + do { /* PaX: rt_sigreturn emulation */
7817 + unsigned int ldi1, ldi2, bel, nop;
7818 +
7819 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7820 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7821 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7822 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7823 +
7824 + if (err)
7825 + break;
7826 +
7827 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7828 + ldi2 == 0x3414015AU &&
7829 + bel == 0xE4008200U &&
7830 + nop == 0x08000240U)
7831 + {
7832 + regs->gr[25] = (ldi1 & 2) >> 1;
7833 + regs->gr[20] = __NR_rt_sigreturn;
7834 + regs->gr[31] = regs->iaoq[1] + 16;
7835 + regs->sr[0] = regs->iasq[1];
7836 + regs->iaoq[0] = 0x100UL;
7837 + regs->iaoq[1] = regs->iaoq[0] + 4;
7838 + regs->iasq[0] = regs->sr[2];
7839 + regs->iasq[1] = regs->sr[2];
7840 + return 2;
7841 + }
7842 + } while (0);
7843 +#endif
7844 +
7845 + return 1;
7846 +}
7847 +
7848 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7849 +{
7850 + unsigned long i;
7851 +
7852 + printk(KERN_ERR "PAX: bytes at PC: ");
7853 + for (i = 0; i < 5; i++) {
7854 + unsigned int c;
7855 + if (get_user(c, (unsigned int *)pc+i))
7856 + printk(KERN_CONT "???????? ");
7857 + else
7858 + printk(KERN_CONT "%08x ", c);
7859 + }
7860 + printk("\n");
7861 +}
7862 +#endif
7863 +
7864 int fixup_exception(struct pt_regs *regs)
7865 {
7866 const struct exception_table_entry *fix;
7867 @@ -210,8 +321,33 @@ retry:
7868
7869 good_area:
7870
7871 - if ((vma->vm_flags & acc_type) != acc_type)
7872 + if ((vma->vm_flags & acc_type) != acc_type) {
7873 +
7874 +#ifdef CONFIG_PAX_PAGEEXEC
7875 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7876 + (address & ~3UL) == instruction_pointer(regs))
7877 + {
7878 + up_read(&mm->mmap_sem);
7879 + switch (pax_handle_fetch_fault(regs)) {
7880 +
7881 +#ifdef CONFIG_PAX_EMUPLT
7882 + case 3:
7883 + return;
7884 +#endif
7885 +
7886 +#ifdef CONFIG_PAX_EMUTRAMP
7887 + case 2:
7888 + return;
7889 +#endif
7890 +
7891 + }
7892 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7893 + do_group_exit(SIGKILL);
7894 + }
7895 +#endif
7896 +
7897 goto bad_area;
7898 + }
7899
7900 /*
7901 * If for any reason at all we couldn't handle the fault, make
7902 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7903 index b44b52c..4cd253c 100644
7904 --- a/arch/powerpc/Kconfig
7905 +++ b/arch/powerpc/Kconfig
7906 @@ -382,6 +382,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
7907 config KEXEC
7908 bool "kexec system call"
7909 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7910 + depends on !GRKERNSEC_KMEM
7911 help
7912 kexec is a system call that implements the ability to shutdown your
7913 current kernel, and to start another kernel. It is like a reboot
7914 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7915 index e3b1d41..8e81edf 100644
7916 --- a/arch/powerpc/include/asm/atomic.h
7917 +++ b/arch/powerpc/include/asm/atomic.h
7918 @@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7919 return t1;
7920 }
7921
7922 +#define atomic64_read_unchecked(v) atomic64_read(v)
7923 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7924 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7925 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7926 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7927 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7928 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7929 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7930 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7931 +
7932 #endif /* __powerpc64__ */
7933
7934 #endif /* __KERNEL__ */
7935 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7936 index 9e495c9..b6878e5 100644
7937 --- a/arch/powerpc/include/asm/cache.h
7938 +++ b/arch/powerpc/include/asm/cache.h
7939 @@ -3,6 +3,7 @@
7940
7941 #ifdef __KERNEL__
7942
7943 +#include <linux/const.h>
7944
7945 /* bytes per L1 cache line */
7946 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7947 @@ -22,7 +23,7 @@
7948 #define L1_CACHE_SHIFT 7
7949 #endif
7950
7951 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7952 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7953
7954 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7955
7956 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7957 index 935b5e7..7001d2d 100644
7958 --- a/arch/powerpc/include/asm/elf.h
7959 +++ b/arch/powerpc/include/asm/elf.h
7960 @@ -28,8 +28,19 @@
7961 the loader. We need to make sure that it is out of the way of the program
7962 that it will "exec", and that there is sufficient room for the brk. */
7963
7964 -extern unsigned long randomize_et_dyn(unsigned long base);
7965 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7966 +#define ELF_ET_DYN_BASE (0x20000000)
7967 +
7968 +#ifdef CONFIG_PAX_ASLR
7969 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7970 +
7971 +#ifdef __powerpc64__
7972 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7973 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7974 +#else
7975 +#define PAX_DELTA_MMAP_LEN 15
7976 +#define PAX_DELTA_STACK_LEN 15
7977 +#endif
7978 +#endif
7979
7980 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
7981
7982 @@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7983 (0x7ff >> (PAGE_SHIFT - 12)) : \
7984 (0x3ffff >> (PAGE_SHIFT - 12)))
7985
7986 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7987 -#define arch_randomize_brk arch_randomize_brk
7988 -
7989 -
7990 #ifdef CONFIG_SPU_BASE
7991 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7992 #define NT_SPU 1
7993 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7994 index 8196e9c..d83a9f3 100644
7995 --- a/arch/powerpc/include/asm/exec.h
7996 +++ b/arch/powerpc/include/asm/exec.h
7997 @@ -4,6 +4,6 @@
7998 #ifndef _ASM_POWERPC_EXEC_H
7999 #define _ASM_POWERPC_EXEC_H
8000
8001 -extern unsigned long arch_align_stack(unsigned long sp);
8002 +#define arch_align_stack(x) ((x) & ~0xfUL)
8003
8004 #endif /* _ASM_POWERPC_EXEC_H */
8005 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8006 index 5acabbd..7ea14fa 100644
8007 --- a/arch/powerpc/include/asm/kmap_types.h
8008 +++ b/arch/powerpc/include/asm/kmap_types.h
8009 @@ -10,7 +10,7 @@
8010 * 2 of the License, or (at your option) any later version.
8011 */
8012
8013 -#define KM_TYPE_NR 16
8014 +#define KM_TYPE_NR 17
8015
8016 #endif /* __KERNEL__ */
8017 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8018 diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8019 index b8da913..60b608a 100644
8020 --- a/arch/powerpc/include/asm/local.h
8021 +++ b/arch/powerpc/include/asm/local.h
8022 @@ -9,15 +9,26 @@ typedef struct
8023 atomic_long_t a;
8024 } local_t;
8025
8026 +typedef struct
8027 +{
8028 + atomic_long_unchecked_t a;
8029 +} local_unchecked_t;
8030 +
8031 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8032
8033 #define local_read(l) atomic_long_read(&(l)->a)
8034 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8035 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8036 +#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8037
8038 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8039 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8040 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8041 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8042 #define local_inc(l) atomic_long_inc(&(l)->a)
8043 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8044 #define local_dec(l) atomic_long_dec(&(l)->a)
8045 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8046
8047 static __inline__ long local_add_return(long a, local_t *l)
8048 {
8049 @@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8050
8051 return t;
8052 }
8053 +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8054
8055 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8056
8057 @@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8058
8059 return t;
8060 }
8061 +#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8062
8063 static __inline__ long local_inc_return(local_t *l)
8064 {
8065 @@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8066
8067 #define local_cmpxchg(l, o, n) \
8068 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8069 +#define local_cmpxchg_unchecked(l, o, n) \
8070 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
8071 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8072
8073 /**
8074 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8075 index 8565c25..2865190 100644
8076 --- a/arch/powerpc/include/asm/mman.h
8077 +++ b/arch/powerpc/include/asm/mman.h
8078 @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8079 }
8080 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8081
8082 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8083 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8084 {
8085 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8086 }
8087 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8088 index 32e4e21..62afb12 100644
8089 --- a/arch/powerpc/include/asm/page.h
8090 +++ b/arch/powerpc/include/asm/page.h
8091 @@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8092 * and needs to be executable. This means the whole heap ends
8093 * up being executable.
8094 */
8095 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8096 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8097 +#define VM_DATA_DEFAULT_FLAGS32 \
8098 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8099 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8100
8101 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8102 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8103 @@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8104 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8105 #endif
8106
8107 +#define ktla_ktva(addr) (addr)
8108 +#define ktva_ktla(addr) (addr)
8109 +
8110 #ifndef CONFIG_PPC_BOOK3S_64
8111 /*
8112 * Use the top bit of the higher-level page table entries to indicate whether
8113 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8114 index 88693ce..ac6f9ab 100644
8115 --- a/arch/powerpc/include/asm/page_64.h
8116 +++ b/arch/powerpc/include/asm/page_64.h
8117 @@ -153,15 +153,18 @@ do { \
8118 * stack by default, so in the absence of a PT_GNU_STACK program header
8119 * we turn execute permission off.
8120 */
8121 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8122 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8123 +#define VM_STACK_DEFAULT_FLAGS32 \
8124 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8125 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8126
8127 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8128 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8129
8130 +#ifndef CONFIG_PAX_PAGEEXEC
8131 #define VM_STACK_DEFAULT_FLAGS \
8132 (is_32bit_task() ? \
8133 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8134 +#endif
8135
8136 #include <asm-generic/getorder.h>
8137
8138 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8139 index 4b0be20..c15a27d 100644
8140 --- a/arch/powerpc/include/asm/pgalloc-64.h
8141 +++ b/arch/powerpc/include/asm/pgalloc-64.h
8142 @@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8143 #ifndef CONFIG_PPC_64K_PAGES
8144
8145 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8146 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8147
8148 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8149 {
8150 @@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8151 pud_set(pud, (unsigned long)pmd);
8152 }
8153
8154 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8155 +{
8156 + pud_populate(mm, pud, pmd);
8157 +}
8158 +
8159 #define pmd_populate(mm, pmd, pte_page) \
8160 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8161 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8162 @@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8163 #endif
8164
8165 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8166 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8167
8168 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8169 pte_t *pte)
8170 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8171 index 7d6eacf..14c0240 100644
8172 --- a/arch/powerpc/include/asm/pgtable.h
8173 +++ b/arch/powerpc/include/asm/pgtable.h
8174 @@ -2,6 +2,7 @@
8175 #define _ASM_POWERPC_PGTABLE_H
8176 #ifdef __KERNEL__
8177
8178 +#include <linux/const.h>
8179 #ifndef __ASSEMBLY__
8180 #include <asm/processor.h> /* For TASK_SIZE */
8181 #include <asm/mmu.h>
8182 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8183 index 4aad413..85d86bf 100644
8184 --- a/arch/powerpc/include/asm/pte-hash32.h
8185 +++ b/arch/powerpc/include/asm/pte-hash32.h
8186 @@ -21,6 +21,7 @@
8187 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8188 #define _PAGE_USER 0x004 /* usermode access allowed */
8189 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8190 +#define _PAGE_EXEC _PAGE_GUARDED
8191 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8192 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8193 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8194 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8195 index fa8388e..f985549 100644
8196 --- a/arch/powerpc/include/asm/reg.h
8197 +++ b/arch/powerpc/include/asm/reg.h
8198 @@ -239,6 +239,7 @@
8199 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8200 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8201 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8202 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8203 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8204 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8205 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8206 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8207 index 084e080..9415a3d 100644
8208 --- a/arch/powerpc/include/asm/smp.h
8209 +++ b/arch/powerpc/include/asm/smp.h
8210 @@ -51,7 +51,7 @@ struct smp_ops_t {
8211 int (*cpu_disable)(void);
8212 void (*cpu_die)(unsigned int nr);
8213 int (*cpu_bootable)(unsigned int nr);
8214 -};
8215 +} __no_const;
8216
8217 extern void smp_send_debugger_break(void);
8218 extern void start_secondary_resume(void);
8219 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8220 index 9854c56..7517190 100644
8221 --- a/arch/powerpc/include/asm/thread_info.h
8222 +++ b/arch/powerpc/include/asm/thread_info.h
8223 @@ -91,7 +91,6 @@ static inline struct thread_info *current_thread_info(void)
8224 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
8225 TIF_NEED_RESCHED */
8226 #define TIF_32BIT 4 /* 32 bit binary */
8227 -#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
8228 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
8229 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
8230 #define TIF_SINGLESTEP 8 /* singlestepping active */
8231 @@ -108,6 +107,9 @@ static inline struct thread_info *current_thread_info(void)
8232 #if defined(CONFIG_PPC64)
8233 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8234 #endif
8235 +#define TIF_PERFMON_WORK 19 /* work for pfm_handle_work() */
8236 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
8237 +#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
8238
8239 /* as above, but as bit values */
8240 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8241 @@ -127,9 +129,10 @@ static inline struct thread_info *current_thread_info(void)
8242 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8243 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8244 #define _TIF_NOHZ (1<<TIF_NOHZ)
8245 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8246 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8247 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8248 - _TIF_NOHZ)
8249 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
8250
8251 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8252 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
8253 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8254 index 9485b43..3bd3c16 100644
8255 --- a/arch/powerpc/include/asm/uaccess.h
8256 +++ b/arch/powerpc/include/asm/uaccess.h
8257 @@ -58,6 +58,7 @@
8258
8259 #endif
8260
8261 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8262 #define access_ok(type, addr, size) \
8263 (__chk_user_ptr(addr), \
8264 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8265 @@ -318,52 +319,6 @@ do { \
8266 extern unsigned long __copy_tofrom_user(void __user *to,
8267 const void __user *from, unsigned long size);
8268
8269 -#ifndef __powerpc64__
8270 -
8271 -static inline unsigned long copy_from_user(void *to,
8272 - const void __user *from, unsigned long n)
8273 -{
8274 - unsigned long over;
8275 -
8276 - if (access_ok(VERIFY_READ, from, n))
8277 - return __copy_tofrom_user((__force void __user *)to, from, n);
8278 - if ((unsigned long)from < TASK_SIZE) {
8279 - over = (unsigned long)from + n - TASK_SIZE;
8280 - return __copy_tofrom_user((__force void __user *)to, from,
8281 - n - over) + over;
8282 - }
8283 - return n;
8284 -}
8285 -
8286 -static inline unsigned long copy_to_user(void __user *to,
8287 - const void *from, unsigned long n)
8288 -{
8289 - unsigned long over;
8290 -
8291 - if (access_ok(VERIFY_WRITE, to, n))
8292 - return __copy_tofrom_user(to, (__force void __user *)from, n);
8293 - if ((unsigned long)to < TASK_SIZE) {
8294 - over = (unsigned long)to + n - TASK_SIZE;
8295 - return __copy_tofrom_user(to, (__force void __user *)from,
8296 - n - over) + over;
8297 - }
8298 - return n;
8299 -}
8300 -
8301 -#else /* __powerpc64__ */
8302 -
8303 -#define __copy_in_user(to, from, size) \
8304 - __copy_tofrom_user((to), (from), (size))
8305 -
8306 -extern unsigned long copy_from_user(void *to, const void __user *from,
8307 - unsigned long n);
8308 -extern unsigned long copy_to_user(void __user *to, const void *from,
8309 - unsigned long n);
8310 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
8311 - unsigned long n);
8312 -
8313 -#endif /* __powerpc64__ */
8314 -
8315 static inline unsigned long __copy_from_user_inatomic(void *to,
8316 const void __user *from, unsigned long n)
8317 {
8318 @@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8319 if (ret == 0)
8320 return 0;
8321 }
8322 +
8323 + if (!__builtin_constant_p(n))
8324 + check_object_size(to, n, false);
8325 +
8326 return __copy_tofrom_user((__force void __user *)to, from, n);
8327 }
8328
8329 @@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8330 if (ret == 0)
8331 return 0;
8332 }
8333 +
8334 + if (!__builtin_constant_p(n))
8335 + check_object_size(from, n, true);
8336 +
8337 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8338 }
8339
8340 @@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8341 return __copy_to_user_inatomic(to, from, size);
8342 }
8343
8344 +#ifndef __powerpc64__
8345 +
8346 +static inline unsigned long __must_check copy_from_user(void *to,
8347 + const void __user *from, unsigned long n)
8348 +{
8349 + unsigned long over;
8350 +
8351 + if ((long)n < 0)
8352 + return n;
8353 +
8354 + if (access_ok(VERIFY_READ, from, n)) {
8355 + if (!__builtin_constant_p(n))
8356 + check_object_size(to, n, false);
8357 + return __copy_tofrom_user((__force void __user *)to, from, n);
8358 + }
8359 + if ((unsigned long)from < TASK_SIZE) {
8360 + over = (unsigned long)from + n - TASK_SIZE;
8361 + if (!__builtin_constant_p(n - over))
8362 + check_object_size(to, n - over, false);
8363 + return __copy_tofrom_user((__force void __user *)to, from,
8364 + n - over) + over;
8365 + }
8366 + return n;
8367 +}
8368 +
8369 +static inline unsigned long __must_check copy_to_user(void __user *to,
8370 + const void *from, unsigned long n)
8371 +{
8372 + unsigned long over;
8373 +
8374 + if ((long)n < 0)
8375 + return n;
8376 +
8377 + if (access_ok(VERIFY_WRITE, to, n)) {
8378 + if (!__builtin_constant_p(n))
8379 + check_object_size(from, n, true);
8380 + return __copy_tofrom_user(to, (__force void __user *)from, n);
8381 + }
8382 + if ((unsigned long)to < TASK_SIZE) {
8383 + over = (unsigned long)to + n - TASK_SIZE;
8384 + if (!__builtin_constant_p(n))
8385 + check_object_size(from, n - over, true);
8386 + return __copy_tofrom_user(to, (__force void __user *)from,
8387 + n - over) + over;
8388 + }
8389 + return n;
8390 +}
8391 +
8392 +#else /* __powerpc64__ */
8393 +
8394 +#define __copy_in_user(to, from, size) \
8395 + __copy_tofrom_user((to), (from), (size))
8396 +
8397 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8398 +{
8399 + if ((long)n < 0 || n > INT_MAX)
8400 + return n;
8401 +
8402 + if (!__builtin_constant_p(n))
8403 + check_object_size(to, n, false);
8404 +
8405 + if (likely(access_ok(VERIFY_READ, from, n)))
8406 + n = __copy_from_user(to, from, n);
8407 + else
8408 + memset(to, 0, n);
8409 + return n;
8410 +}
8411 +
8412 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8413 +{
8414 + if ((long)n < 0 || n > INT_MAX)
8415 + return n;
8416 +
8417 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
8418 + if (!__builtin_constant_p(n))
8419 + check_object_size(from, n, true);
8420 + n = __copy_to_user(to, from, n);
8421 + }
8422 + return n;
8423 +}
8424 +
8425 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
8426 + unsigned long n);
8427 +
8428 +#endif /* __powerpc64__ */
8429 +
8430 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8431
8432 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8433 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8434 index 445cb6e..4f80c5d 100644
8435 --- a/arch/powerpc/kernel/Makefile
8436 +++ b/arch/powerpc/kernel/Makefile
8437 @@ -26,6 +26,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8438 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8439 endif
8440
8441 +CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8442 +
8443 obj-y := cputable.o ptrace.o syscalls.o \
8444 irq.o align.o signal_32.o pmc.o vdso.o \
8445 process.o systbl.o idle.o \
8446 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8447 index e775156..af2d1c0 100644
8448 --- a/arch/powerpc/kernel/exceptions-64e.S
8449 +++ b/arch/powerpc/kernel/exceptions-64e.S
8450 @@ -759,6 +759,7 @@ storage_fault_common:
8451 std r14,_DAR(r1)
8452 std r15,_DSISR(r1)
8453 addi r3,r1,STACK_FRAME_OVERHEAD
8454 + bl .save_nvgprs
8455 mr r4,r14
8456 mr r5,r15
8457 ld r14,PACA_EXGEN+EX_R14(r13)
8458 @@ -767,8 +768,7 @@ storage_fault_common:
8459 cmpdi r3,0
8460 bne- 1f
8461 b .ret_from_except_lite
8462 -1: bl .save_nvgprs
8463 - mr r5,r3
8464 +1: mr r5,r3
8465 addi r3,r1,STACK_FRAME_OVERHEAD
8466 ld r4,_DAR(r1)
8467 bl .bad_page_fault
8468 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8469 index 9f905e4..1d6b3d2 100644
8470 --- a/arch/powerpc/kernel/exceptions-64s.S
8471 +++ b/arch/powerpc/kernel/exceptions-64s.S
8472 @@ -1390,10 +1390,10 @@ handle_page_fault:
8473 11: ld r4,_DAR(r1)
8474 ld r5,_DSISR(r1)
8475 addi r3,r1,STACK_FRAME_OVERHEAD
8476 + bl .save_nvgprs
8477 bl .do_page_fault
8478 cmpdi r3,0
8479 beq+ 12f
8480 - bl .save_nvgprs
8481 mr r5,r3
8482 addi r3,r1,STACK_FRAME_OVERHEAD
8483 lwz r4,_DAR(r1)
8484 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8485 index 6cff040..74ac5d1 100644
8486 --- a/arch/powerpc/kernel/module_32.c
8487 +++ b/arch/powerpc/kernel/module_32.c
8488 @@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8489 me->arch.core_plt_section = i;
8490 }
8491 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8492 - printk("Module doesn't contain .plt or .init.plt sections.\n");
8493 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8494 return -ENOEXEC;
8495 }
8496
8497 @@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
8498
8499 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8500 /* Init, or core PLT? */
8501 - if (location >= mod->module_core
8502 - && location < mod->module_core + mod->core_size)
8503 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8504 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8505 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8506 - else
8507 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8508 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8509 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8510 + else {
8511 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8512 + return ~0UL;
8513 + }
8514
8515 /* Find this entry, or if that fails, the next avail. entry */
8516 while (entry->jump[0]) {
8517 @@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8518 }
8519 #ifdef CONFIG_DYNAMIC_FTRACE
8520 module->arch.tramp =
8521 - do_plt_call(module->module_core,
8522 + do_plt_call(module->module_core_rx,
8523 (unsigned long)ftrace_caller,
8524 sechdrs, module);
8525 #endif
8526 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8527 index ea2f6a3..dbb2be3 100644
8528 --- a/arch/powerpc/kernel/process.c
8529 +++ b/arch/powerpc/kernel/process.c
8530 @@ -888,8 +888,8 @@ void show_regs(struct pt_regs * regs)
8531 * Lookup NIP late so we have the best change of getting the
8532 * above info out without failing
8533 */
8534 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8535 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8536 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8537 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8538 #endif
8539 show_stack(current, (unsigned long *) regs->gpr[1]);
8540 if (!user_mode(regs))
8541 @@ -1385,10 +1385,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8542 newsp = stack[0];
8543 ip = stack[STACK_FRAME_LR_SAVE];
8544 if (!firstframe || ip != lr) {
8545 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8546 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8547 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8548 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8549 - printk(" (%pS)",
8550 + printk(" (%pA)",
8551 (void *)current->ret_stack[curr_frame].ret);
8552 curr_frame--;
8553 }
8554 @@ -1408,7 +1408,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8555 struct pt_regs *regs = (struct pt_regs *)
8556 (sp + STACK_FRAME_OVERHEAD);
8557 lr = regs->link;
8558 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
8559 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
8560 regs->trap, (void *)regs->nip, (void *)lr);
8561 firstframe = 1;
8562 }
8563 @@ -1444,58 +1444,3 @@ void notrace __ppc64_runlatch_off(void)
8564 mtspr(SPRN_CTRLT, ctrl);
8565 }
8566 #endif /* CONFIG_PPC64 */
8567 -
8568 -unsigned long arch_align_stack(unsigned long sp)
8569 -{
8570 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8571 - sp -= get_random_int() & ~PAGE_MASK;
8572 - return sp & ~0xf;
8573 -}
8574 -
8575 -static inline unsigned long brk_rnd(void)
8576 -{
8577 - unsigned long rnd = 0;
8578 -
8579 - /* 8MB for 32bit, 1GB for 64bit */
8580 - if (is_32bit_task())
8581 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8582 - else
8583 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8584 -
8585 - return rnd << PAGE_SHIFT;
8586 -}
8587 -
8588 -unsigned long arch_randomize_brk(struct mm_struct *mm)
8589 -{
8590 - unsigned long base = mm->brk;
8591 - unsigned long ret;
8592 -
8593 -#ifdef CONFIG_PPC_STD_MMU_64
8594 - /*
8595 - * If we are using 1TB segments and we are allowed to randomise
8596 - * the heap, we can put it above 1TB so it is backed by a 1TB
8597 - * segment. Otherwise the heap will be in the bottom 1TB
8598 - * which always uses 256MB segments and this may result in a
8599 - * performance penalty.
8600 - */
8601 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8602 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8603 -#endif
8604 -
8605 - ret = PAGE_ALIGN(base + brk_rnd());
8606 -
8607 - if (ret < mm->brk)
8608 - return mm->brk;
8609 -
8610 - return ret;
8611 -}
8612 -
8613 -unsigned long randomize_et_dyn(unsigned long base)
8614 -{
8615 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8616 -
8617 - if (ret < base)
8618 - return base;
8619 -
8620 - return ret;
8621 -}
8622 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8623 index 2e3d2bf..35df241 100644
8624 --- a/arch/powerpc/kernel/ptrace.c
8625 +++ b/arch/powerpc/kernel/ptrace.c
8626 @@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
8627 return ret;
8628 }
8629
8630 +#ifdef CONFIG_GRKERNSEC_SETXID
8631 +extern void gr_delayed_cred_worker(void);
8632 +#endif
8633 +
8634 /*
8635 * We must return the syscall number to actually look up in the table.
8636 * This can be -1L to skip running any syscall at all.
8637 @@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8638
8639 secure_computing_strict(regs->gpr[0]);
8640
8641 +#ifdef CONFIG_GRKERNSEC_SETXID
8642 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8643 + gr_delayed_cred_worker();
8644 +#endif
8645 +
8646 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8647 tracehook_report_syscall_entry(regs))
8648 /*
8649 @@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8650 {
8651 int step;
8652
8653 +#ifdef CONFIG_GRKERNSEC_SETXID
8654 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8655 + gr_delayed_cred_worker();
8656 +#endif
8657 +
8658 audit_syscall_exit(regs);
8659
8660 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8661 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8662 index 68027bf..b26fd31 100644
8663 --- a/arch/powerpc/kernel/signal_32.c
8664 +++ b/arch/powerpc/kernel/signal_32.c
8665 @@ -1004,7 +1004,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8666 /* Save user registers on the stack */
8667 frame = &rt_sf->uc.uc_mcontext;
8668 addr = frame;
8669 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8670 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8671 sigret = 0;
8672 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8673 } else {
8674 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8675 index 448245f..b9bae83 100644
8676 --- a/arch/powerpc/kernel/signal_64.c
8677 +++ b/arch/powerpc/kernel/signal_64.c
8678 @@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8679 #endif
8680
8681 /* Set up to return from userspace. */
8682 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8683 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8684 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8685 } else {
8686 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8687 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8688 index 907a472..4ba206f 100644
8689 --- a/arch/powerpc/kernel/traps.c
8690 +++ b/arch/powerpc/kernel/traps.c
8691 @@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8692 return flags;
8693 }
8694
8695 +extern void gr_handle_kernel_exploit(void);
8696 +
8697 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8698 int signr)
8699 {
8700 @@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8701 panic("Fatal exception in interrupt");
8702 if (panic_on_oops)
8703 panic("Fatal exception");
8704 +
8705 + gr_handle_kernel_exploit();
8706 +
8707 do_exit(signr);
8708 }
8709
8710 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8711 index 094e45c..d82b848 100644
8712 --- a/arch/powerpc/kernel/vdso.c
8713 +++ b/arch/powerpc/kernel/vdso.c
8714 @@ -35,6 +35,7 @@
8715 #include <asm/vdso.h>
8716 #include <asm/vdso_datapage.h>
8717 #include <asm/setup.h>
8718 +#include <asm/mman.h>
8719
8720 #undef DEBUG
8721
8722 @@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8723 vdso_base = VDSO32_MBASE;
8724 #endif
8725
8726 - current->mm->context.vdso_base = 0;
8727 + current->mm->context.vdso_base = ~0UL;
8728
8729 /* vDSO has a problem and was disabled, just don't "enable" it for the
8730 * process
8731 @@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8732 vdso_base = get_unmapped_area(NULL, vdso_base,
8733 (vdso_pages << PAGE_SHIFT) +
8734 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8735 - 0, 0);
8736 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
8737 if (IS_ERR_VALUE(vdso_base)) {
8738 rc = vdso_base;
8739 goto fail_mmapsem;
8740 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
8741 index 9ae9768..87c3448 100644
8742 --- a/arch/powerpc/kvm/powerpc.c
8743 +++ b/arch/powerpc/kvm/powerpc.c
8744 @@ -1141,7 +1141,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
8745 }
8746 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
8747
8748 -int kvm_arch_init(void *opaque)
8749 +int kvm_arch_init(const void *opaque)
8750 {
8751 return 0;
8752 }
8753 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8754 index 5eea6f3..5d10396 100644
8755 --- a/arch/powerpc/lib/usercopy_64.c
8756 +++ b/arch/powerpc/lib/usercopy_64.c
8757 @@ -9,22 +9,6 @@
8758 #include <linux/module.h>
8759 #include <asm/uaccess.h>
8760
8761 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8762 -{
8763 - if (likely(access_ok(VERIFY_READ, from, n)))
8764 - n = __copy_from_user(to, from, n);
8765 - else
8766 - memset(to, 0, n);
8767 - return n;
8768 -}
8769 -
8770 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8771 -{
8772 - if (likely(access_ok(VERIFY_WRITE, to, n)))
8773 - n = __copy_to_user(to, from, n);
8774 - return n;
8775 -}
8776 -
8777 unsigned long copy_in_user(void __user *to, const void __user *from,
8778 unsigned long n)
8779 {
8780 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8781 return n;
8782 }
8783
8784 -EXPORT_SYMBOL(copy_from_user);
8785 -EXPORT_SYMBOL(copy_to_user);
8786 EXPORT_SYMBOL(copy_in_user);
8787
8788 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8789 index 51ab9e7..7d3c78b 100644
8790 --- a/arch/powerpc/mm/fault.c
8791 +++ b/arch/powerpc/mm/fault.c
8792 @@ -33,6 +33,10 @@
8793 #include <linux/magic.h>
8794 #include <linux/ratelimit.h>
8795 #include <linux/context_tracking.h>
8796 +#include <linux/slab.h>
8797 +#include <linux/pagemap.h>
8798 +#include <linux/compiler.h>
8799 +#include <linux/unistd.h>
8800
8801 #include <asm/firmware.h>
8802 #include <asm/page.h>
8803 @@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8804 }
8805 #endif
8806
8807 +#ifdef CONFIG_PAX_PAGEEXEC
8808 +/*
8809 + * PaX: decide what to do with offenders (regs->nip = fault address)
8810 + *
8811 + * returns 1 when task should be killed
8812 + */
8813 +static int pax_handle_fetch_fault(struct pt_regs *regs)
8814 +{
8815 + return 1;
8816 +}
8817 +
8818 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8819 +{
8820 + unsigned long i;
8821 +
8822 + printk(KERN_ERR "PAX: bytes at PC: ");
8823 + for (i = 0; i < 5; i++) {
8824 + unsigned int c;
8825 + if (get_user(c, (unsigned int __user *)pc+i))
8826 + printk(KERN_CONT "???????? ");
8827 + else
8828 + printk(KERN_CONT "%08x ", c);
8829 + }
8830 + printk("\n");
8831 +}
8832 +#endif
8833 +
8834 /*
8835 * Check whether the instruction at regs->nip is a store using
8836 * an update addressing form which will update r1.
8837 @@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8838 * indicate errors in DSISR but can validly be set in SRR1.
8839 */
8840 if (trap == 0x400)
8841 - error_code &= 0x48200000;
8842 + error_code &= 0x58200000;
8843 else
8844 is_write = error_code & DSISR_ISSTORE;
8845 #else
8846 @@ -378,7 +409,7 @@ good_area:
8847 * "undefined". Of those that can be set, this is the only
8848 * one which seems bad.
8849 */
8850 - if (error_code & 0x10000000)
8851 + if (error_code & DSISR_GUARDED)
8852 /* Guarded storage error. */
8853 goto bad_area;
8854 #endif /* CONFIG_8xx */
8855 @@ -393,7 +424,7 @@ good_area:
8856 * processors use the same I/D cache coherency mechanism
8857 * as embedded.
8858 */
8859 - if (error_code & DSISR_PROTFAULT)
8860 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8861 goto bad_area;
8862 #endif /* CONFIG_PPC_STD_MMU */
8863
8864 @@ -483,6 +514,23 @@ bad_area:
8865 bad_area_nosemaphore:
8866 /* User mode accesses cause a SIGSEGV */
8867 if (user_mode(regs)) {
8868 +
8869 +#ifdef CONFIG_PAX_PAGEEXEC
8870 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8871 +#ifdef CONFIG_PPC_STD_MMU
8872 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8873 +#else
8874 + if (is_exec && regs->nip == address) {
8875 +#endif
8876 + switch (pax_handle_fetch_fault(regs)) {
8877 + }
8878 +
8879 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8880 + do_group_exit(SIGKILL);
8881 + }
8882 + }
8883 +#endif
8884 +
8885 _exception(SIGSEGV, regs, code, address);
8886 goto bail;
8887 }
8888 diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8889 index cb8bdbe..cde4bc7 100644
8890 --- a/arch/powerpc/mm/mmap.c
8891 +++ b/arch/powerpc/mm/mmap.c
8892 @@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
8893 return sysctl_legacy_va_layout;
8894 }
8895
8896 -static unsigned long mmap_rnd(void)
8897 +static unsigned long mmap_rnd(struct mm_struct *mm)
8898 {
8899 unsigned long rnd = 0;
8900
8901 +#ifdef CONFIG_PAX_RANDMMAP
8902 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8903 +#endif
8904 +
8905 if (current->flags & PF_RANDOMIZE) {
8906 /* 8MB for 32bit, 1GB for 64bit */
8907 if (is_32bit_task())
8908 @@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
8909 return rnd << PAGE_SHIFT;
8910 }
8911
8912 -static inline unsigned long mmap_base(void)
8913 +static inline unsigned long mmap_base(struct mm_struct *mm)
8914 {
8915 unsigned long gap = rlimit(RLIMIT_STACK);
8916
8917 @@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
8918 else if (gap > MAX_GAP)
8919 gap = MAX_GAP;
8920
8921 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
8922 + return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
8923 }
8924
8925 /*
8926 @@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8927 */
8928 if (mmap_is_legacy()) {
8929 mm->mmap_base = TASK_UNMAPPED_BASE;
8930 +
8931 +#ifdef CONFIG_PAX_RANDMMAP
8932 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8933 + mm->mmap_base += mm->delta_mmap;
8934 +#endif
8935 +
8936 mm->get_unmapped_area = arch_get_unmapped_area;
8937 } else {
8938 - mm->mmap_base = mmap_base();
8939 + mm->mmap_base = mmap_base(mm);
8940 +
8941 +#ifdef CONFIG_PAX_RANDMMAP
8942 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8943 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8944 +#endif
8945 +
8946 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8947 }
8948 }
8949 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8950 index 7ce9cf3..a964087 100644
8951 --- a/arch/powerpc/mm/slice.c
8952 +++ b/arch/powerpc/mm/slice.c
8953 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8954 if ((mm->task_size - len) < addr)
8955 return 0;
8956 vma = find_vma(mm, addr);
8957 - return (!vma || (addr + len) <= vma->vm_start);
8958 + return check_heap_stack_gap(vma, addr, len, 0);
8959 }
8960
8961 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8962 @@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8963 info.align_offset = 0;
8964
8965 addr = TASK_UNMAPPED_BASE;
8966 +
8967 +#ifdef CONFIG_PAX_RANDMMAP
8968 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8969 + addr += mm->delta_mmap;
8970 +#endif
8971 +
8972 while (addr < TASK_SIZE) {
8973 info.low_limit = addr;
8974 if (!slice_scan_available(addr, available, 1, &addr))
8975 @@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8976 if (fixed && addr > (mm->task_size - len))
8977 return -EINVAL;
8978
8979 +#ifdef CONFIG_PAX_RANDMMAP
8980 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8981 + addr = 0;
8982 +#endif
8983 +
8984 /* If hint, make sure it matches our alignment restrictions */
8985 if (!fixed && addr) {
8986 addr = _ALIGN_UP(addr, 1ul << pshift);
8987 diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
8988 index 4278acf..67fd0e6 100644
8989 --- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
8990 +++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
8991 @@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
8992 }
8993
8994 static struct pci_ops scc_pciex_pci_ops = {
8995 - scc_pciex_read_config,
8996 - scc_pciex_write_config,
8997 + .read = scc_pciex_read_config,
8998 + .write = scc_pciex_write_config,
8999 };
9000
9001 static void pciex_clear_intr_all(unsigned int __iomem *base)
9002 diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9003 index 9098692..3d54cd1 100644
9004 --- a/arch/powerpc/platforms/cell/spufs/file.c
9005 +++ b/arch/powerpc/platforms/cell/spufs/file.c
9006 @@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9007 return VM_FAULT_NOPAGE;
9008 }
9009
9010 -static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9011 +static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9012 unsigned long address,
9013 - void *buf, int len, int write)
9014 + void *buf, size_t len, int write)
9015 {
9016 struct spu_context *ctx = vma->vm_file->private_data;
9017 unsigned long offset = address - vma->vm_start;
9018 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9019 index fa9aaf7..3f5d836 100644
9020 --- a/arch/s390/include/asm/atomic.h
9021 +++ b/arch/s390/include/asm/atomic.h
9022 @@ -398,6 +398,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9023 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9024 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9025
9026 +#define atomic64_read_unchecked(v) atomic64_read(v)
9027 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9028 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9029 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9030 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9031 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
9032 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9033 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
9034 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9035 +
9036 #define smp_mb__before_atomic_dec() smp_mb()
9037 #define smp_mb__after_atomic_dec() smp_mb()
9038 #define smp_mb__before_atomic_inc() smp_mb()
9039 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9040 index 4d7ccac..d03d0ad 100644
9041 --- a/arch/s390/include/asm/cache.h
9042 +++ b/arch/s390/include/asm/cache.h
9043 @@ -9,8 +9,10 @@
9044 #ifndef __ARCH_S390_CACHE_H
9045 #define __ARCH_S390_CACHE_H
9046
9047 -#define L1_CACHE_BYTES 256
9048 +#include <linux/const.h>
9049 +
9050 #define L1_CACHE_SHIFT 8
9051 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9052 #define NET_SKB_PAD 32
9053
9054 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9055 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9056 index 78f4f87..598ce39 100644
9057 --- a/arch/s390/include/asm/elf.h
9058 +++ b/arch/s390/include/asm/elf.h
9059 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9060 the loader. We need to make sure that it is out of the way of the program
9061 that it will "exec", and that there is sufficient room for the brk. */
9062
9063 -extern unsigned long randomize_et_dyn(unsigned long base);
9064 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9065 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9066 +
9067 +#ifdef CONFIG_PAX_ASLR
9068 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9069 +
9070 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9071 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9072 +#endif
9073
9074 /* This yields a mask that user programs can use to figure out what
9075 instruction set this CPU supports. */
9076 @@ -222,9 +228,6 @@ struct linux_binprm;
9077 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9078 int arch_setup_additional_pages(struct linux_binprm *, int);
9079
9080 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9081 -#define arch_randomize_brk arch_randomize_brk
9082 -
9083 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9084
9085 #endif
9086 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9087 index c4a93d6..4d2a9b4 100644
9088 --- a/arch/s390/include/asm/exec.h
9089 +++ b/arch/s390/include/asm/exec.h
9090 @@ -7,6 +7,6 @@
9091 #ifndef __ASM_EXEC_H
9092 #define __ASM_EXEC_H
9093
9094 -extern unsigned long arch_align_stack(unsigned long sp);
9095 +#define arch_align_stack(x) ((x) & ~0xfUL)
9096
9097 #endif /* __ASM_EXEC_H */
9098 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9099 index 79330af..254cf37 100644
9100 --- a/arch/s390/include/asm/uaccess.h
9101 +++ b/arch/s390/include/asm/uaccess.h
9102 @@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9103 __range_ok((unsigned long)(addr), (size)); \
9104 })
9105
9106 +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9107 #define access_ok(type, addr, size) __access_ok(addr, size)
9108
9109 /*
9110 @@ -245,6 +246,10 @@ static inline unsigned long __must_check
9111 copy_to_user(void __user *to, const void *from, unsigned long n)
9112 {
9113 might_fault();
9114 +
9115 + if ((long)n < 0)
9116 + return n;
9117 +
9118 return __copy_to_user(to, from, n);
9119 }
9120
9121 @@ -268,6 +273,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
9122 static inline unsigned long __must_check
9123 __copy_from_user(void *to, const void __user *from, unsigned long n)
9124 {
9125 + if ((long)n < 0)
9126 + return n;
9127 +
9128 return uaccess.copy_from_user(n, from, to);
9129 }
9130
9131 @@ -296,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9132 static inline unsigned long __must_check
9133 copy_from_user(void *to, const void __user *from, unsigned long n)
9134 {
9135 - unsigned int sz = __compiletime_object_size(to);
9136 + size_t sz = __compiletime_object_size(to);
9137
9138 might_fault();
9139 - if (unlikely(sz != -1 && sz < n)) {
9140 +
9141 + if ((long)n < 0)
9142 + return n;
9143 +
9144 + if (unlikely(sz != (size_t)-1 && sz < n)) {
9145 copy_from_user_overflow();
9146 return n;
9147 }
9148 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9149 index b89b591..fd9609d 100644
9150 --- a/arch/s390/kernel/module.c
9151 +++ b/arch/s390/kernel/module.c
9152 @@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9153
9154 /* Increase core size by size of got & plt and set start
9155 offsets for got and plt. */
9156 - me->core_size = ALIGN(me->core_size, 4);
9157 - me->arch.got_offset = me->core_size;
9158 - me->core_size += me->arch.got_size;
9159 - me->arch.plt_offset = me->core_size;
9160 - me->core_size += me->arch.plt_size;
9161 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
9162 + me->arch.got_offset = me->core_size_rw;
9163 + me->core_size_rw += me->arch.got_size;
9164 + me->arch.plt_offset = me->core_size_rx;
9165 + me->core_size_rx += me->arch.plt_size;
9166 return 0;
9167 }
9168
9169 @@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9170 if (info->got_initialized == 0) {
9171 Elf_Addr *gotent;
9172
9173 - gotent = me->module_core + me->arch.got_offset +
9174 + gotent = me->module_core_rw + me->arch.got_offset +
9175 info->got_offset;
9176 *gotent = val;
9177 info->got_initialized = 1;
9178 @@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9179 rc = apply_rela_bits(loc, val, 0, 64, 0);
9180 else if (r_type == R_390_GOTENT ||
9181 r_type == R_390_GOTPLTENT) {
9182 - val += (Elf_Addr) me->module_core - loc;
9183 + val += (Elf_Addr) me->module_core_rw - loc;
9184 rc = apply_rela_bits(loc, val, 1, 32, 1);
9185 }
9186 break;
9187 @@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9188 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9189 if (info->plt_initialized == 0) {
9190 unsigned int *ip;
9191 - ip = me->module_core + me->arch.plt_offset +
9192 + ip = me->module_core_rx + me->arch.plt_offset +
9193 info->plt_offset;
9194 #ifndef CONFIG_64BIT
9195 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9196 @@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9197 val - loc + 0xffffUL < 0x1ffffeUL) ||
9198 (r_type == R_390_PLT32DBL &&
9199 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9200 - val = (Elf_Addr) me->module_core +
9201 + val = (Elf_Addr) me->module_core_rx +
9202 me->arch.plt_offset +
9203 info->plt_offset;
9204 val += rela->r_addend - loc;
9205 @@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9206 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9207 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9208 val = val + rela->r_addend -
9209 - ((Elf_Addr) me->module_core + me->arch.got_offset);
9210 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9211 if (r_type == R_390_GOTOFF16)
9212 rc = apply_rela_bits(loc, val, 0, 16, 0);
9213 else if (r_type == R_390_GOTOFF32)
9214 @@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9215 break;
9216 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9217 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9218 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
9219 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9220 rela->r_addend - loc;
9221 if (r_type == R_390_GOTPC)
9222 rc = apply_rela_bits(loc, val, 1, 32, 0);
9223 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9224 index 7ed0d4e..1dfc145 100644
9225 --- a/arch/s390/kernel/process.c
9226 +++ b/arch/s390/kernel/process.c
9227 @@ -242,39 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9228 }
9229 return 0;
9230 }
9231 -
9232 -unsigned long arch_align_stack(unsigned long sp)
9233 -{
9234 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9235 - sp -= get_random_int() & ~PAGE_MASK;
9236 - return sp & ~0xf;
9237 -}
9238 -
9239 -static inline unsigned long brk_rnd(void)
9240 -{
9241 - /* 8MB for 32bit, 1GB for 64bit */
9242 - if (is_32bit_task())
9243 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9244 - else
9245 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9246 -}
9247 -
9248 -unsigned long arch_randomize_brk(struct mm_struct *mm)
9249 -{
9250 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
9251 -
9252 - if (ret < mm->brk)
9253 - return mm->brk;
9254 - return ret;
9255 -}
9256 -
9257 -unsigned long randomize_et_dyn(unsigned long base)
9258 -{
9259 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9260 -
9261 - if (!(current->flags & PF_RANDOMIZE))
9262 - return base;
9263 - if (ret < base)
9264 - return base;
9265 - return ret;
9266 -}
9267 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9268 index 9b436c2..54fbf0a 100644
9269 --- a/arch/s390/mm/mmap.c
9270 +++ b/arch/s390/mm/mmap.c
9271 @@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9272 */
9273 if (mmap_is_legacy()) {
9274 mm->mmap_base = mmap_base_legacy();
9275 +
9276 +#ifdef CONFIG_PAX_RANDMMAP
9277 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9278 + mm->mmap_base += mm->delta_mmap;
9279 +#endif
9280 +
9281 mm->get_unmapped_area = arch_get_unmapped_area;
9282 } else {
9283 mm->mmap_base = mmap_base();
9284 +
9285 +#ifdef CONFIG_PAX_RANDMMAP
9286 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9287 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9288 +#endif
9289 +
9290 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9291 }
9292 }
9293 @@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9294 */
9295 if (mmap_is_legacy()) {
9296 mm->mmap_base = mmap_base_legacy();
9297 +
9298 +#ifdef CONFIG_PAX_RANDMMAP
9299 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9300 + mm->mmap_base += mm->delta_mmap;
9301 +#endif
9302 +
9303 mm->get_unmapped_area = s390_get_unmapped_area;
9304 } else {
9305 mm->mmap_base = mmap_base();
9306 +
9307 +#ifdef CONFIG_PAX_RANDMMAP
9308 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9309 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9310 +#endif
9311 +
9312 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9313 }
9314 }
9315 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9316 index ae3d59f..f65f075 100644
9317 --- a/arch/score/include/asm/cache.h
9318 +++ b/arch/score/include/asm/cache.h
9319 @@ -1,7 +1,9 @@
9320 #ifndef _ASM_SCORE_CACHE_H
9321 #define _ASM_SCORE_CACHE_H
9322
9323 +#include <linux/const.h>
9324 +
9325 #define L1_CACHE_SHIFT 4
9326 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9327 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9328
9329 #endif /* _ASM_SCORE_CACHE_H */
9330 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9331 index f9f3cd5..58ff438 100644
9332 --- a/arch/score/include/asm/exec.h
9333 +++ b/arch/score/include/asm/exec.h
9334 @@ -1,6 +1,6 @@
9335 #ifndef _ASM_SCORE_EXEC_H
9336 #define _ASM_SCORE_EXEC_H
9337
9338 -extern unsigned long arch_align_stack(unsigned long sp);
9339 +#define arch_align_stack(x) (x)
9340
9341 #endif /* _ASM_SCORE_EXEC_H */
9342 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9343 index a1519ad3..e8ac1ff 100644
9344 --- a/arch/score/kernel/process.c
9345 +++ b/arch/score/kernel/process.c
9346 @@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9347
9348 return task_pt_regs(task)->cp0_epc;
9349 }
9350 -
9351 -unsigned long arch_align_stack(unsigned long sp)
9352 -{
9353 - return sp;
9354 -}
9355 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9356 index ef9e555..331bd29 100644
9357 --- a/arch/sh/include/asm/cache.h
9358 +++ b/arch/sh/include/asm/cache.h
9359 @@ -9,10 +9,11 @@
9360 #define __ASM_SH_CACHE_H
9361 #ifdef __KERNEL__
9362
9363 +#include <linux/const.h>
9364 #include <linux/init.h>
9365 #include <cpu/cache.h>
9366
9367 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9368 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9369
9370 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9371
9372 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9373 index 6777177..cb5e44f 100644
9374 --- a/arch/sh/mm/mmap.c
9375 +++ b/arch/sh/mm/mmap.c
9376 @@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9377 struct mm_struct *mm = current->mm;
9378 struct vm_area_struct *vma;
9379 int do_colour_align;
9380 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9381 struct vm_unmapped_area_info info;
9382
9383 if (flags & MAP_FIXED) {
9384 @@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9385 if (filp || (flags & MAP_SHARED))
9386 do_colour_align = 1;
9387
9388 +#ifdef CONFIG_PAX_RANDMMAP
9389 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9390 +#endif
9391 +
9392 if (addr) {
9393 if (do_colour_align)
9394 addr = COLOUR_ALIGN(addr, pgoff);
9395 @@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9396 addr = PAGE_ALIGN(addr);
9397
9398 vma = find_vma(mm, addr);
9399 - if (TASK_SIZE - len >= addr &&
9400 - (!vma || addr + len <= vma->vm_start))
9401 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9402 return addr;
9403 }
9404
9405 info.flags = 0;
9406 info.length = len;
9407 - info.low_limit = TASK_UNMAPPED_BASE;
9408 + info.low_limit = mm->mmap_base;
9409 info.high_limit = TASK_SIZE;
9410 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9411 info.align_offset = pgoff << PAGE_SHIFT;
9412 @@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9413 struct mm_struct *mm = current->mm;
9414 unsigned long addr = addr0;
9415 int do_colour_align;
9416 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9417 struct vm_unmapped_area_info info;
9418
9419 if (flags & MAP_FIXED) {
9420 @@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9421 if (filp || (flags & MAP_SHARED))
9422 do_colour_align = 1;
9423
9424 +#ifdef CONFIG_PAX_RANDMMAP
9425 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9426 +#endif
9427 +
9428 /* requesting a specific address */
9429 if (addr) {
9430 if (do_colour_align)
9431 @@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9432 addr = PAGE_ALIGN(addr);
9433
9434 vma = find_vma(mm, addr);
9435 - if (TASK_SIZE - len >= addr &&
9436 - (!vma || addr + len <= vma->vm_start))
9437 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9438 return addr;
9439 }
9440
9441 @@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9442 VM_BUG_ON(addr != -ENOMEM);
9443 info.flags = 0;
9444 info.low_limit = TASK_UNMAPPED_BASE;
9445 +
9446 +#ifdef CONFIG_PAX_RANDMMAP
9447 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9448 + info.low_limit += mm->delta_mmap;
9449 +#endif
9450 +
9451 info.high_limit = TASK_SIZE;
9452 addr = vm_unmapped_area(&info);
9453 }
9454 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9455 index be56a24..443328f 100644
9456 --- a/arch/sparc/include/asm/atomic_64.h
9457 +++ b/arch/sparc/include/asm/atomic_64.h
9458 @@ -14,18 +14,40 @@
9459 #define ATOMIC64_INIT(i) { (i) }
9460
9461 #define atomic_read(v) (*(volatile int *)&(v)->counter)
9462 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9463 +{
9464 + return v->counter;
9465 +}
9466 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
9467 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9468 +{
9469 + return v->counter;
9470 +}
9471
9472 #define atomic_set(v, i) (((v)->counter) = i)
9473 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9474 +{
9475 + v->counter = i;
9476 +}
9477 #define atomic64_set(v, i) (((v)->counter) = i)
9478 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9479 +{
9480 + v->counter = i;
9481 +}
9482
9483 extern void atomic_add(int, atomic_t *);
9484 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
9485 extern void atomic64_add(long, atomic64_t *);
9486 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
9487 extern void atomic_sub(int, atomic_t *);
9488 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
9489 extern void atomic64_sub(long, atomic64_t *);
9490 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
9491
9492 extern int atomic_add_ret(int, atomic_t *);
9493 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
9494 extern long atomic64_add_ret(long, atomic64_t *);
9495 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
9496 extern int atomic_sub_ret(int, atomic_t *);
9497 extern long atomic64_sub_ret(long, atomic64_t *);
9498
9499 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9500 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
9501
9502 #define atomic_inc_return(v) atomic_add_ret(1, v)
9503 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9504 +{
9505 + return atomic_add_ret_unchecked(1, v);
9506 +}
9507 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
9508 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9509 +{
9510 + return atomic64_add_ret_unchecked(1, v);
9511 +}
9512
9513 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
9514 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
9515
9516 #define atomic_add_return(i, v) atomic_add_ret(i, v)
9517 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9518 +{
9519 + return atomic_add_ret_unchecked(i, v);
9520 +}
9521 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
9522 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9523 +{
9524 + return atomic64_add_ret_unchecked(i, v);
9525 +}
9526
9527 /*
9528 * atomic_inc_and_test - increment and test
9529 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9530 * other cases.
9531 */
9532 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9533 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9534 +{
9535 + return atomic_inc_return_unchecked(v) == 0;
9536 +}
9537 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9538
9539 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9540 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9541 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9542
9543 #define atomic_inc(v) atomic_add(1, v)
9544 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9545 +{
9546 + atomic_add_unchecked(1, v);
9547 +}
9548 #define atomic64_inc(v) atomic64_add(1, v)
9549 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9550 +{
9551 + atomic64_add_unchecked(1, v);
9552 +}
9553
9554 #define atomic_dec(v) atomic_sub(1, v)
9555 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9556 +{
9557 + atomic_sub_unchecked(1, v);
9558 +}
9559 #define atomic64_dec(v) atomic64_sub(1, v)
9560 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9561 +{
9562 + atomic64_sub_unchecked(1, v);
9563 +}
9564
9565 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9566 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9567
9568 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9569 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9570 +{
9571 + return cmpxchg(&v->counter, old, new);
9572 +}
9573 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9574 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9575 +{
9576 + return xchg(&v->counter, new);
9577 +}
9578
9579 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9580 {
9581 - int c, old;
9582 + int c, old, new;
9583 c = atomic_read(v);
9584 for (;;) {
9585 - if (unlikely(c == (u)))
9586 + if (unlikely(c == u))
9587 break;
9588 - old = atomic_cmpxchg((v), c, c + (a));
9589 +
9590 + asm volatile("addcc %2, %0, %0\n"
9591 +
9592 +#ifdef CONFIG_PAX_REFCOUNT
9593 + "tvs %%icc, 6\n"
9594 +#endif
9595 +
9596 + : "=r" (new)
9597 + : "0" (c), "ir" (a)
9598 + : "cc");
9599 +
9600 + old = atomic_cmpxchg(v, c, new);
9601 if (likely(old == c))
9602 break;
9603 c = old;
9604 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9605 #define atomic64_cmpxchg(v, o, n) \
9606 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9607 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9608 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9609 +{
9610 + return xchg(&v->counter, new);
9611 +}
9612
9613 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9614 {
9615 - long c, old;
9616 + long c, old, new;
9617 c = atomic64_read(v);
9618 for (;;) {
9619 - if (unlikely(c == (u)))
9620 + if (unlikely(c == u))
9621 break;
9622 - old = atomic64_cmpxchg((v), c, c + (a));
9623 +
9624 + asm volatile("addcc %2, %0, %0\n"
9625 +
9626 +#ifdef CONFIG_PAX_REFCOUNT
9627 + "tvs %%xcc, 6\n"
9628 +#endif
9629 +
9630 + : "=r" (new)
9631 + : "0" (c), "ir" (a)
9632 + : "cc");
9633 +
9634 + old = atomic64_cmpxchg(v, c, new);
9635 if (likely(old == c))
9636 break;
9637 c = old;
9638 }
9639 - return c != (u);
9640 + return c != u;
9641 }
9642
9643 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9644 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9645 index 5bb6991..5c2132e 100644
9646 --- a/arch/sparc/include/asm/cache.h
9647 +++ b/arch/sparc/include/asm/cache.h
9648 @@ -7,10 +7,12 @@
9649 #ifndef _SPARC_CACHE_H
9650 #define _SPARC_CACHE_H
9651
9652 +#include <linux/const.h>
9653 +
9654 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9655
9656 #define L1_CACHE_SHIFT 5
9657 -#define L1_CACHE_BYTES 32
9658 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9659
9660 #ifdef CONFIG_SPARC32
9661 #define SMP_CACHE_BYTES_SHIFT 5
9662 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9663 index a24e41f..47677ff 100644
9664 --- a/arch/sparc/include/asm/elf_32.h
9665 +++ b/arch/sparc/include/asm/elf_32.h
9666 @@ -114,6 +114,13 @@ typedef struct {
9667
9668 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9669
9670 +#ifdef CONFIG_PAX_ASLR
9671 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
9672 +
9673 +#define PAX_DELTA_MMAP_LEN 16
9674 +#define PAX_DELTA_STACK_LEN 16
9675 +#endif
9676 +
9677 /* This yields a mask that user programs can use to figure out what
9678 instruction set this cpu supports. This can NOT be done in userspace
9679 on Sparc. */
9680 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9681 index 370ca1e..d4f4a98 100644
9682 --- a/arch/sparc/include/asm/elf_64.h
9683 +++ b/arch/sparc/include/asm/elf_64.h
9684 @@ -189,6 +189,13 @@ typedef struct {
9685 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9686 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9687
9688 +#ifdef CONFIG_PAX_ASLR
9689 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9690 +
9691 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9692 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9693 +#endif
9694 +
9695 extern unsigned long sparc64_elf_hwcap;
9696 #define ELF_HWCAP sparc64_elf_hwcap
9697
9698 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9699 index 9b1c36d..209298b 100644
9700 --- a/arch/sparc/include/asm/pgalloc_32.h
9701 +++ b/arch/sparc/include/asm/pgalloc_32.h
9702 @@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9703 }
9704
9705 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9706 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9707
9708 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9709 unsigned long address)
9710 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9711 index bcfe063..b333142 100644
9712 --- a/arch/sparc/include/asm/pgalloc_64.h
9713 +++ b/arch/sparc/include/asm/pgalloc_64.h
9714 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9715 }
9716
9717 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9718 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9719
9720 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9721 {
9722 diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
9723 index 59ba6f6..4518128 100644
9724 --- a/arch/sparc/include/asm/pgtable.h
9725 +++ b/arch/sparc/include/asm/pgtable.h
9726 @@ -5,4 +5,8 @@
9727 #else
9728 #include <asm/pgtable_32.h>
9729 #endif
9730 +
9731 +#define ktla_ktva(addr) (addr)
9732 +#define ktva_ktla(addr) (addr)
9733 +
9734 #endif
9735 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9736 index 502f632..da1917f 100644
9737 --- a/arch/sparc/include/asm/pgtable_32.h
9738 +++ b/arch/sparc/include/asm/pgtable_32.h
9739 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9740 #define PAGE_SHARED SRMMU_PAGE_SHARED
9741 #define PAGE_COPY SRMMU_PAGE_COPY
9742 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9743 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9744 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9745 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9746 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9747
9748 /* Top-level page directory - dummy used by init-mm.
9749 @@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9750
9751 /* xwr */
9752 #define __P000 PAGE_NONE
9753 -#define __P001 PAGE_READONLY
9754 -#define __P010 PAGE_COPY
9755 -#define __P011 PAGE_COPY
9756 +#define __P001 PAGE_READONLY_NOEXEC
9757 +#define __P010 PAGE_COPY_NOEXEC
9758 +#define __P011 PAGE_COPY_NOEXEC
9759 #define __P100 PAGE_READONLY
9760 #define __P101 PAGE_READONLY
9761 #define __P110 PAGE_COPY
9762 #define __P111 PAGE_COPY
9763
9764 #define __S000 PAGE_NONE
9765 -#define __S001 PAGE_READONLY
9766 -#define __S010 PAGE_SHARED
9767 -#define __S011 PAGE_SHARED
9768 +#define __S001 PAGE_READONLY_NOEXEC
9769 +#define __S010 PAGE_SHARED_NOEXEC
9770 +#define __S011 PAGE_SHARED_NOEXEC
9771 #define __S100 PAGE_READONLY
9772 #define __S101 PAGE_READONLY
9773 #define __S110 PAGE_SHARED
9774 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9775 index 79da178..c2eede8 100644
9776 --- a/arch/sparc/include/asm/pgtsrmmu.h
9777 +++ b/arch/sparc/include/asm/pgtsrmmu.h
9778 @@ -115,6 +115,11 @@
9779 SRMMU_EXEC | SRMMU_REF)
9780 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9781 SRMMU_EXEC | SRMMU_REF)
9782 +
9783 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9784 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9785 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9786 +
9787 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9788 SRMMU_DIRTY | SRMMU_REF)
9789
9790 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9791 index 9689176..63c18ea 100644
9792 --- a/arch/sparc/include/asm/spinlock_64.h
9793 +++ b/arch/sparc/include/asm/spinlock_64.h
9794 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9795
9796 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9797
9798 -static void inline arch_read_lock(arch_rwlock_t *lock)
9799 +static inline void arch_read_lock(arch_rwlock_t *lock)
9800 {
9801 unsigned long tmp1, tmp2;
9802
9803 __asm__ __volatile__ (
9804 "1: ldsw [%2], %0\n"
9805 " brlz,pn %0, 2f\n"
9806 -"4: add %0, 1, %1\n"
9807 +"4: addcc %0, 1, %1\n"
9808 +
9809 +#ifdef CONFIG_PAX_REFCOUNT
9810 +" tvs %%icc, 6\n"
9811 +#endif
9812 +
9813 " cas [%2], %0, %1\n"
9814 " cmp %0, %1\n"
9815 " bne,pn %%icc, 1b\n"
9816 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9817 " .previous"
9818 : "=&r" (tmp1), "=&r" (tmp2)
9819 : "r" (lock)
9820 - : "memory");
9821 + : "memory", "cc");
9822 }
9823
9824 -static int inline arch_read_trylock(arch_rwlock_t *lock)
9825 +static inline int arch_read_trylock(arch_rwlock_t *lock)
9826 {
9827 int tmp1, tmp2;
9828
9829 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9830 "1: ldsw [%2], %0\n"
9831 " brlz,a,pn %0, 2f\n"
9832 " mov 0, %0\n"
9833 -" add %0, 1, %1\n"
9834 +" addcc %0, 1, %1\n"
9835 +
9836 +#ifdef CONFIG_PAX_REFCOUNT
9837 +" tvs %%icc, 6\n"
9838 +#endif
9839 +
9840 " cas [%2], %0, %1\n"
9841 " cmp %0, %1\n"
9842 " bne,pn %%icc, 1b\n"
9843 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9844 return tmp1;
9845 }
9846
9847 -static void inline arch_read_unlock(arch_rwlock_t *lock)
9848 +static inline void arch_read_unlock(arch_rwlock_t *lock)
9849 {
9850 unsigned long tmp1, tmp2;
9851
9852 __asm__ __volatile__(
9853 "1: lduw [%2], %0\n"
9854 -" sub %0, 1, %1\n"
9855 +" subcc %0, 1, %1\n"
9856 +
9857 +#ifdef CONFIG_PAX_REFCOUNT
9858 +" tvs %%icc, 6\n"
9859 +#endif
9860 +
9861 " cas [%2], %0, %1\n"
9862 " cmp %0, %1\n"
9863 " bne,pn %%xcc, 1b\n"
9864 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9865 : "memory");
9866 }
9867
9868 -static void inline arch_write_lock(arch_rwlock_t *lock)
9869 +static inline void arch_write_lock(arch_rwlock_t *lock)
9870 {
9871 unsigned long mask, tmp1, tmp2;
9872
9873 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9874 : "memory");
9875 }
9876
9877 -static void inline arch_write_unlock(arch_rwlock_t *lock)
9878 +static inline void arch_write_unlock(arch_rwlock_t *lock)
9879 {
9880 __asm__ __volatile__(
9881 " stw %%g0, [%0]"
9882 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9883 : "memory");
9884 }
9885
9886 -static int inline arch_write_trylock(arch_rwlock_t *lock)
9887 +static inline int arch_write_trylock(arch_rwlock_t *lock)
9888 {
9889 unsigned long mask, tmp1, tmp2, result;
9890
9891 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9892 index 96efa7a..16858bf 100644
9893 --- a/arch/sparc/include/asm/thread_info_32.h
9894 +++ b/arch/sparc/include/asm/thread_info_32.h
9895 @@ -49,6 +49,8 @@ struct thread_info {
9896 unsigned long w_saved;
9897
9898 struct restart_block restart_block;
9899 +
9900 + unsigned long lowest_stack;
9901 };
9902
9903 /*
9904 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9905 index a5f01ac..703b554 100644
9906 --- a/arch/sparc/include/asm/thread_info_64.h
9907 +++ b/arch/sparc/include/asm/thread_info_64.h
9908 @@ -63,6 +63,8 @@ struct thread_info {
9909 struct pt_regs *kern_una_regs;
9910 unsigned int kern_una_insn;
9911
9912 + unsigned long lowest_stack;
9913 +
9914 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9915 };
9916
9917 @@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
9918 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
9919 /* flag bit 4 is available */
9920 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9921 -/* flag bit 6 is available */
9922 +#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9923 #define TIF_32BIT 7 /* 32-bit binary */
9924 #define TIF_NOHZ 8 /* in adaptive nohz mode */
9925 #define TIF_SECCOMP 9 /* secure computing */
9926 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9927 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9928 +
9929 /* NOTE: Thread flags >= 12 should be ones we have no interest
9930 * in using in assembly, else we can't use the mask as
9931 * an immediate value in instructions such as andcc.
9932 @@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9933 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9934 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9935 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9936 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9937
9938 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9939 _TIF_DO_NOTIFY_RESUME_MASK | \
9940 _TIF_NEED_RESCHED)
9941 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9942
9943 +#define _TIF_WORK_SYSCALL \
9944 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9945 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
9946 +
9947 +
9948 /*
9949 * Thread-synchronous status.
9950 *
9951 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9952 index 0167d26..767bb0c 100644
9953 --- a/arch/sparc/include/asm/uaccess.h
9954 +++ b/arch/sparc/include/asm/uaccess.h
9955 @@ -1,5 +1,6 @@
9956 #ifndef ___ASM_SPARC_UACCESS_H
9957 #define ___ASM_SPARC_UACCESS_H
9958 +
9959 #if defined(__sparc__) && defined(__arch64__)
9960 #include <asm/uaccess_64.h>
9961 #else
9962 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9963 index 53a28dd..50c38c3 100644
9964 --- a/arch/sparc/include/asm/uaccess_32.h
9965 +++ b/arch/sparc/include/asm/uaccess_32.h
9966 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9967
9968 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9969 {
9970 - if (n && __access_ok((unsigned long) to, n))
9971 + if ((long)n < 0)
9972 + return n;
9973 +
9974 + if (n && __access_ok((unsigned long) to, n)) {
9975 + if (!__builtin_constant_p(n))
9976 + check_object_size(from, n, true);
9977 return __copy_user(to, (__force void __user *) from, n);
9978 - else
9979 + } else
9980 return n;
9981 }
9982
9983 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9984 {
9985 + if ((long)n < 0)
9986 + return n;
9987 +
9988 + if (!__builtin_constant_p(n))
9989 + check_object_size(from, n, true);
9990 +
9991 return __copy_user(to, (__force void __user *) from, n);
9992 }
9993
9994 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9995 {
9996 - if (n && __access_ok((unsigned long) from, n))
9997 + if ((long)n < 0)
9998 + return n;
9999 +
10000 + if (n && __access_ok((unsigned long) from, n)) {
10001 + if (!__builtin_constant_p(n))
10002 + check_object_size(to, n, false);
10003 return __copy_user((__force void __user *) to, from, n);
10004 - else
10005 + } else
10006 return n;
10007 }
10008
10009 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10010 {
10011 + if ((long)n < 0)
10012 + return n;
10013 +
10014 return __copy_user((__force void __user *) to, from, n);
10015 }
10016
10017 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10018 index ad7e178..c9e7423 100644
10019 --- a/arch/sparc/include/asm/uaccess_64.h
10020 +++ b/arch/sparc/include/asm/uaccess_64.h
10021 @@ -10,6 +10,7 @@
10022 #include <linux/compiler.h>
10023 #include <linux/string.h>
10024 #include <linux/thread_info.h>
10025 +#include <linux/kernel.h>
10026 #include <asm/asi.h>
10027 #include <asm/spitfire.h>
10028 #include <asm-generic/uaccess-unaligned.h>
10029 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
10030 static inline unsigned long __must_check
10031 copy_from_user(void *to, const void __user *from, unsigned long size)
10032 {
10033 - unsigned long ret = ___copy_from_user(to, from, size);
10034 + unsigned long ret;
10035
10036 + if ((long)size < 0 || size > INT_MAX)
10037 + return size;
10038 +
10039 + if (!__builtin_constant_p(size))
10040 + check_object_size(to, size, false);
10041 +
10042 + ret = ___copy_from_user(to, from, size);
10043 if (unlikely(ret))
10044 ret = copy_from_user_fixup(to, from, size);
10045
10046 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
10047 static inline unsigned long __must_check
10048 copy_to_user(void __user *to, const void *from, unsigned long size)
10049 {
10050 - unsigned long ret = ___copy_to_user(to, from, size);
10051 + unsigned long ret;
10052
10053 + if ((long)size < 0 || size > INT_MAX)
10054 + return size;
10055 +
10056 + if (!__builtin_constant_p(size))
10057 + check_object_size(from, size, true);
10058 +
10059 + ret = ___copy_to_user(to, from, size);
10060 if (unlikely(ret))
10061 ret = copy_to_user_fixup(to, from, size);
10062 return ret;
10063 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10064 index d15cc17..d0ae796 100644
10065 --- a/arch/sparc/kernel/Makefile
10066 +++ b/arch/sparc/kernel/Makefile
10067 @@ -4,7 +4,7 @@
10068 #
10069
10070 asflags-y := -ansi
10071 -ccflags-y := -Werror
10072 +#ccflags-y := -Werror
10073
10074 extra-y := head_$(BITS).o
10075
10076 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10077 index fdd819d..5af08c8 100644
10078 --- a/arch/sparc/kernel/process_32.c
10079 +++ b/arch/sparc/kernel/process_32.c
10080 @@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
10081
10082 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10083 r->psr, r->pc, r->npc, r->y, print_tainted());
10084 - printk("PC: <%pS>\n", (void *) r->pc);
10085 + printk("PC: <%pA>\n", (void *) r->pc);
10086 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10087 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10088 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10089 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10090 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10091 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10092 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10093 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10094
10095 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10096 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10097 @@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10098 rw = (struct reg_window32 *) fp;
10099 pc = rw->ins[7];
10100 printk("[%08lx : ", pc);
10101 - printk("%pS ] ", (void *) pc);
10102 + printk("%pA ] ", (void *) pc);
10103 fp = rw->ins[6];
10104 } while (++count < 16);
10105 printk("\n");
10106 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10107 index 32a280e..84fc6a9 100644
10108 --- a/arch/sparc/kernel/process_64.c
10109 +++ b/arch/sparc/kernel/process_64.c
10110 @@ -159,7 +159,7 @@ static void show_regwindow(struct pt_regs *regs)
10111 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10112 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10113 if (regs->tstate & TSTATE_PRIV)
10114 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10115 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10116 }
10117
10118 void show_regs(struct pt_regs *regs)
10119 @@ -168,7 +168,7 @@ void show_regs(struct pt_regs *regs)
10120
10121 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10122 regs->tpc, regs->tnpc, regs->y, print_tainted());
10123 - printk("TPC: <%pS>\n", (void *) regs->tpc);
10124 + printk("TPC: <%pA>\n", (void *) regs->tpc);
10125 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10126 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10127 regs->u_regs[3]);
10128 @@ -181,7 +181,7 @@ void show_regs(struct pt_regs *regs)
10129 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10130 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10131 regs->u_regs[15]);
10132 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10133 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10134 show_regwindow(regs);
10135 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10136 }
10137 @@ -270,7 +270,7 @@ void arch_trigger_all_cpu_backtrace(void)
10138 ((tp && tp->task) ? tp->task->pid : -1));
10139
10140 if (gp->tstate & TSTATE_PRIV) {
10141 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10142 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10143 (void *) gp->tpc,
10144 (void *) gp->o7,
10145 (void *) gp->i7,
10146 diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10147 index 79cc0d1..ec62734 100644
10148 --- a/arch/sparc/kernel/prom_common.c
10149 +++ b/arch/sparc/kernel/prom_common.c
10150 @@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10151
10152 unsigned int prom_early_allocated __initdata;
10153
10154 -static struct of_pdt_ops prom_sparc_ops __initdata = {
10155 +static struct of_pdt_ops prom_sparc_ops __initconst = {
10156 .nextprop = prom_common_nextprop,
10157 .getproplen = prom_getproplen,
10158 .getproperty = prom_getproperty,
10159 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10160 index c13c9f2..d572c34 100644
10161 --- a/arch/sparc/kernel/ptrace_64.c
10162 +++ b/arch/sparc/kernel/ptrace_64.c
10163 @@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10164 return ret;
10165 }
10166
10167 +#ifdef CONFIG_GRKERNSEC_SETXID
10168 +extern void gr_delayed_cred_worker(void);
10169 +#endif
10170 +
10171 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10172 {
10173 int ret = 0;
10174 @@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10175 if (test_thread_flag(TIF_NOHZ))
10176 user_exit();
10177
10178 +#ifdef CONFIG_GRKERNSEC_SETXID
10179 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10180 + gr_delayed_cred_worker();
10181 +#endif
10182 +
10183 if (test_thread_flag(TIF_SYSCALL_TRACE))
10184 ret = tracehook_report_syscall_entry(regs);
10185
10186 @@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10187 if (test_thread_flag(TIF_NOHZ))
10188 user_exit();
10189
10190 +#ifdef CONFIG_GRKERNSEC_SETXID
10191 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10192 + gr_delayed_cred_worker();
10193 +#endif
10194 +
10195 audit_syscall_exit(regs);
10196
10197 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10198 diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10199 index b085311..6f885f7 100644
10200 --- a/arch/sparc/kernel/smp_64.c
10201 +++ b/arch/sparc/kernel/smp_64.c
10202 @@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
10203 extern unsigned long xcall_flush_dcache_page_spitfire;
10204
10205 #ifdef CONFIG_DEBUG_DCFLUSH
10206 -extern atomic_t dcpage_flushes;
10207 -extern atomic_t dcpage_flushes_xcall;
10208 +extern atomic_unchecked_t dcpage_flushes;
10209 +extern atomic_unchecked_t dcpage_flushes_xcall;
10210 #endif
10211
10212 static inline void __local_flush_dcache_page(struct page *page)
10213 @@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10214 return;
10215
10216 #ifdef CONFIG_DEBUG_DCFLUSH
10217 - atomic_inc(&dcpage_flushes);
10218 + atomic_inc_unchecked(&dcpage_flushes);
10219 #endif
10220
10221 this_cpu = get_cpu();
10222 @@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10223 xcall_deliver(data0, __pa(pg_addr),
10224 (u64) pg_addr, cpumask_of(cpu));
10225 #ifdef CONFIG_DEBUG_DCFLUSH
10226 - atomic_inc(&dcpage_flushes_xcall);
10227 + atomic_inc_unchecked(&dcpage_flushes_xcall);
10228 #endif
10229 }
10230 }
10231 @@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10232 preempt_disable();
10233
10234 #ifdef CONFIG_DEBUG_DCFLUSH
10235 - atomic_inc(&dcpage_flushes);
10236 + atomic_inc_unchecked(&dcpage_flushes);
10237 #endif
10238 data0 = 0;
10239 pg_addr = page_address(page);
10240 @@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10241 xcall_deliver(data0, __pa(pg_addr),
10242 (u64) pg_addr, cpu_online_mask);
10243 #ifdef CONFIG_DEBUG_DCFLUSH
10244 - atomic_inc(&dcpage_flushes_xcall);
10245 + atomic_inc_unchecked(&dcpage_flushes_xcall);
10246 #endif
10247 }
10248 __local_flush_dcache_page(page);
10249 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10250 index 3a8d184..49498a8 100644
10251 --- a/arch/sparc/kernel/sys_sparc_32.c
10252 +++ b/arch/sparc/kernel/sys_sparc_32.c
10253 @@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10254 if (len > TASK_SIZE - PAGE_SIZE)
10255 return -ENOMEM;
10256 if (!addr)
10257 - addr = TASK_UNMAPPED_BASE;
10258 + addr = current->mm->mmap_base;
10259
10260 info.flags = 0;
10261 info.length = len;
10262 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10263 index beb0b5a..5a153f7 100644
10264 --- a/arch/sparc/kernel/sys_sparc_64.c
10265 +++ b/arch/sparc/kernel/sys_sparc_64.c
10266 @@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10267 struct vm_area_struct * vma;
10268 unsigned long task_size = TASK_SIZE;
10269 int do_color_align;
10270 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10271 struct vm_unmapped_area_info info;
10272
10273 if (flags & MAP_FIXED) {
10274 /* We do not accept a shared mapping if it would violate
10275 * cache aliasing constraints.
10276 */
10277 - if ((flags & MAP_SHARED) &&
10278 + if ((filp || (flags & MAP_SHARED)) &&
10279 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10280 return -EINVAL;
10281 return addr;
10282 @@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10283 if (filp || (flags & MAP_SHARED))
10284 do_color_align = 1;
10285
10286 +#ifdef CONFIG_PAX_RANDMMAP
10287 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10288 +#endif
10289 +
10290 if (addr) {
10291 if (do_color_align)
10292 addr = COLOR_ALIGN(addr, pgoff);
10293 @@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10294 addr = PAGE_ALIGN(addr);
10295
10296 vma = find_vma(mm, addr);
10297 - if (task_size - len >= addr &&
10298 - (!vma || addr + len <= vma->vm_start))
10299 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10300 return addr;
10301 }
10302
10303 info.flags = 0;
10304 info.length = len;
10305 - info.low_limit = TASK_UNMAPPED_BASE;
10306 + info.low_limit = mm->mmap_base;
10307 info.high_limit = min(task_size, VA_EXCLUDE_START);
10308 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10309 info.align_offset = pgoff << PAGE_SHIFT;
10310 + info.threadstack_offset = offset;
10311 addr = vm_unmapped_area(&info);
10312
10313 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10314 VM_BUG_ON(addr != -ENOMEM);
10315 info.low_limit = VA_EXCLUDE_END;
10316 +
10317 +#ifdef CONFIG_PAX_RANDMMAP
10318 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10319 + info.low_limit += mm->delta_mmap;
10320 +#endif
10321 +
10322 info.high_limit = task_size;
10323 addr = vm_unmapped_area(&info);
10324 }
10325 @@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10326 unsigned long task_size = STACK_TOP32;
10327 unsigned long addr = addr0;
10328 int do_color_align;
10329 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10330 struct vm_unmapped_area_info info;
10331
10332 /* This should only ever run for 32-bit processes. */
10333 @@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10334 /* We do not accept a shared mapping if it would violate
10335 * cache aliasing constraints.
10336 */
10337 - if ((flags & MAP_SHARED) &&
10338 + if ((filp || (flags & MAP_SHARED)) &&
10339 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10340 return -EINVAL;
10341 return addr;
10342 @@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10343 if (filp || (flags & MAP_SHARED))
10344 do_color_align = 1;
10345
10346 +#ifdef CONFIG_PAX_RANDMMAP
10347 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10348 +#endif
10349 +
10350 /* requesting a specific address */
10351 if (addr) {
10352 if (do_color_align)
10353 @@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10354 addr = PAGE_ALIGN(addr);
10355
10356 vma = find_vma(mm, addr);
10357 - if (task_size - len >= addr &&
10358 - (!vma || addr + len <= vma->vm_start))
10359 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10360 return addr;
10361 }
10362
10363 @@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10364 info.high_limit = mm->mmap_base;
10365 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10366 info.align_offset = pgoff << PAGE_SHIFT;
10367 + info.threadstack_offset = offset;
10368 addr = vm_unmapped_area(&info);
10369
10370 /*
10371 @@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10372 VM_BUG_ON(addr != -ENOMEM);
10373 info.flags = 0;
10374 info.low_limit = TASK_UNMAPPED_BASE;
10375 +
10376 +#ifdef CONFIG_PAX_RANDMMAP
10377 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10378 + info.low_limit += mm->delta_mmap;
10379 +#endif
10380 +
10381 info.high_limit = STACK_TOP32;
10382 addr = vm_unmapped_area(&info);
10383 }
10384 @@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10385 EXPORT_SYMBOL(get_fb_unmapped_area);
10386
10387 /* Essentially the same as PowerPC. */
10388 -static unsigned long mmap_rnd(void)
10389 +static unsigned long mmap_rnd(struct mm_struct *mm)
10390 {
10391 unsigned long rnd = 0UL;
10392
10393 +#ifdef CONFIG_PAX_RANDMMAP
10394 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10395 +#endif
10396 +
10397 if (current->flags & PF_RANDOMIZE) {
10398 unsigned long val = get_random_int();
10399 if (test_thread_flag(TIF_32BIT))
10400 @@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
10401
10402 void arch_pick_mmap_layout(struct mm_struct *mm)
10403 {
10404 - unsigned long random_factor = mmap_rnd();
10405 + unsigned long random_factor = mmap_rnd(mm);
10406 unsigned long gap;
10407
10408 /*
10409 @@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10410 gap == RLIM_INFINITY ||
10411 sysctl_legacy_va_layout) {
10412 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10413 +
10414 +#ifdef CONFIG_PAX_RANDMMAP
10415 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10416 + mm->mmap_base += mm->delta_mmap;
10417 +#endif
10418 +
10419 mm->get_unmapped_area = arch_get_unmapped_area;
10420 } else {
10421 /* We know it's 32-bit */
10422 @@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10423 gap = (task_size / 6 * 5);
10424
10425 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10426 +
10427 +#ifdef CONFIG_PAX_RANDMMAP
10428 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10429 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10430 +#endif
10431 +
10432 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10433 }
10434 }
10435 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10436 index 87729ff..d87fb1f 100644
10437 --- a/arch/sparc/kernel/syscalls.S
10438 +++ b/arch/sparc/kernel/syscalls.S
10439 @@ -52,7 +52,7 @@ sys32_rt_sigreturn:
10440 #endif
10441 .align 32
10442 1: ldx [%g6 + TI_FLAGS], %l5
10443 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10444 + andcc %l5, _TIF_WORK_SYSCALL, %g0
10445 be,pt %icc, rtrap
10446 nop
10447 call syscall_trace_leave
10448 @@ -184,12 +184,13 @@ linux_sparc_syscall32:
10449
10450 srl %i3, 0, %o3 ! IEU0
10451 srl %i2, 0, %o2 ! IEU0 Group
10452 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10453 + andcc %l0, _TIF_WORK_SYSCALL, %g0
10454 bne,pn %icc, linux_syscall_trace32 ! CTI
10455 mov %i0, %l5 ! IEU1
10456 5: call %l7 ! CTI Group brk forced
10457 srl %i5, 0, %o5 ! IEU1
10458 - ba,a,pt %xcc, 3f
10459 + ba,pt %xcc, 3f
10460 + sra %o0, 0, %o0
10461
10462 /* Linux native system calls enter here... */
10463 .align 32
10464 @@ -207,7 +208,7 @@ linux_sparc_syscall:
10465
10466 mov %i3, %o3 ! IEU1
10467 mov %i4, %o4 ! IEU0 Group
10468 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10469 + andcc %l0, _TIF_WORK_SYSCALL, %g0
10470 bne,pn %icc, linux_syscall_trace ! CTI Group
10471 mov %i0, %l5 ! IEU0
10472 2: call %l7 ! CTI Group brk forced
10473 @@ -217,13 +218,12 @@ linux_sparc_syscall:
10474 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
10475 ret_sys_call:
10476 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
10477 - sra %o0, 0, %o0
10478 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
10479 sllx %g2, 32, %g2
10480
10481 cmp %o0, -ERESTART_RESTARTBLOCK
10482 bgeu,pn %xcc, 1f
10483 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10484 + andcc %l0, _TIF_WORK_SYSCALL, %g0
10485 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10486
10487 2:
10488 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10489 index 6629829..036032d 100644
10490 --- a/arch/sparc/kernel/traps_32.c
10491 +++ b/arch/sparc/kernel/traps_32.c
10492 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10493 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10494 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10495
10496 +extern void gr_handle_kernel_exploit(void);
10497 +
10498 void die_if_kernel(char *str, struct pt_regs *regs)
10499 {
10500 static int die_counter;
10501 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10502 count++ < 30 &&
10503 (((unsigned long) rw) >= PAGE_OFFSET) &&
10504 !(((unsigned long) rw) & 0x7)) {
10505 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
10506 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
10507 (void *) rw->ins[7]);
10508 rw = (struct reg_window32 *)rw->ins[6];
10509 }
10510 }
10511 printk("Instruction DUMP:");
10512 instruction_dump ((unsigned long *) regs->pc);
10513 - if(regs->psr & PSR_PS)
10514 + if(regs->psr & PSR_PS) {
10515 + gr_handle_kernel_exploit();
10516 do_exit(SIGKILL);
10517 + }
10518 do_exit(SIGSEGV);
10519 }
10520
10521 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10522 index 4ced92f..965eeed 100644
10523 --- a/arch/sparc/kernel/traps_64.c
10524 +++ b/arch/sparc/kernel/traps_64.c
10525 @@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10526 i + 1,
10527 p->trapstack[i].tstate, p->trapstack[i].tpc,
10528 p->trapstack[i].tnpc, p->trapstack[i].tt);
10529 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10530 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10531 }
10532 }
10533
10534 @@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10535
10536 lvl -= 0x100;
10537 if (regs->tstate & TSTATE_PRIV) {
10538 +
10539 +#ifdef CONFIG_PAX_REFCOUNT
10540 + if (lvl == 6)
10541 + pax_report_refcount_overflow(regs);
10542 +#endif
10543 +
10544 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10545 die_if_kernel(buffer, regs);
10546 }
10547 @@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10548 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10549 {
10550 char buffer[32];
10551 -
10552 +
10553 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10554 0, lvl, SIGTRAP) == NOTIFY_STOP)
10555 return;
10556
10557 +#ifdef CONFIG_PAX_REFCOUNT
10558 + if (lvl == 6)
10559 + pax_report_refcount_overflow(regs);
10560 +#endif
10561 +
10562 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10563
10564 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10565 @@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10566 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10567 printk("%s" "ERROR(%d): ",
10568 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10569 - printk("TPC<%pS>\n", (void *) regs->tpc);
10570 + printk("TPC<%pA>\n", (void *) regs->tpc);
10571 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10572 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10573 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10574 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10575 smp_processor_id(),
10576 (type & 0x1) ? 'I' : 'D',
10577 regs->tpc);
10578 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10579 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10580 panic("Irrecoverable Cheetah+ parity error.");
10581 }
10582
10583 @@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10584 smp_processor_id(),
10585 (type & 0x1) ? 'I' : 'D',
10586 regs->tpc);
10587 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10588 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10589 }
10590
10591 struct sun4v_error_entry {
10592 @@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
10593 /*0x38*/u64 reserved_5;
10594 };
10595
10596 -static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10597 -static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10598 +static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10599 +static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10600
10601 static const char *sun4v_err_type_to_str(u8 type)
10602 {
10603 @@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10604 }
10605
10606 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10607 - int cpu, const char *pfx, atomic_t *ocnt)
10608 + int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10609 {
10610 u64 *raw_ptr = (u64 *) ent;
10611 u32 attrs;
10612 @@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10613
10614 show_regs(regs);
10615
10616 - if ((cnt = atomic_read(ocnt)) != 0) {
10617 - atomic_set(ocnt, 0);
10618 + if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10619 + atomic_set_unchecked(ocnt, 0);
10620 wmb();
10621 printk("%s: Queue overflowed %d times.\n",
10622 pfx, cnt);
10623 @@ -2046,7 +2057,7 @@ out:
10624 */
10625 void sun4v_resum_overflow(struct pt_regs *regs)
10626 {
10627 - atomic_inc(&sun4v_resum_oflow_cnt);
10628 + atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10629 }
10630
10631 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10632 @@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10633 /* XXX Actually even this can make not that much sense. Perhaps
10634 * XXX we should just pull the plug and panic directly from here?
10635 */
10636 - atomic_inc(&sun4v_nonresum_oflow_cnt);
10637 + atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10638 }
10639
10640 unsigned long sun4v_err_itlb_vaddr;
10641 @@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10642
10643 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10644 regs->tpc, tl);
10645 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10646 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10647 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10648 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10649 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10650 (void *) regs->u_regs[UREG_I7]);
10651 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10652 "pte[%lx] error[%lx]\n",
10653 @@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10654
10655 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10656 regs->tpc, tl);
10657 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10658 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10659 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10660 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10661 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10662 (void *) regs->u_regs[UREG_I7]);
10663 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10664 "pte[%lx] error[%lx]\n",
10665 @@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10666 fp = (unsigned long)sf->fp + STACK_BIAS;
10667 }
10668
10669 - printk(" [%016lx] %pS\n", pc, (void *) pc);
10670 + printk(" [%016lx] %pA\n", pc, (void *) pc);
10671 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10672 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10673 int index = tsk->curr_ret_stack;
10674 if (tsk->ret_stack && index >= graph) {
10675 pc = tsk->ret_stack[index - graph].ret;
10676 - printk(" [%016lx] %pS\n", pc, (void *) pc);
10677 + printk(" [%016lx] %pA\n", pc, (void *) pc);
10678 graph++;
10679 }
10680 }
10681 @@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10682 return (struct reg_window *) (fp + STACK_BIAS);
10683 }
10684
10685 +extern void gr_handle_kernel_exploit(void);
10686 +
10687 void die_if_kernel(char *str, struct pt_regs *regs)
10688 {
10689 static int die_counter;
10690 @@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10691 while (rw &&
10692 count++ < 30 &&
10693 kstack_valid(tp, (unsigned long) rw)) {
10694 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
10695 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
10696 (void *) rw->ins[7]);
10697
10698 rw = kernel_stack_up(rw);
10699 @@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10700 }
10701 user_instruction_dump ((unsigned int __user *) regs->tpc);
10702 }
10703 - if (regs->tstate & TSTATE_PRIV)
10704 + if (regs->tstate & TSTATE_PRIV) {
10705 + gr_handle_kernel_exploit();
10706 do_exit(SIGKILL);
10707 + }
10708 do_exit(SIGSEGV);
10709 }
10710 EXPORT_SYMBOL(die_if_kernel);
10711 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10712 index 3c1a7cb..73e1923 100644
10713 --- a/arch/sparc/kernel/unaligned_64.c
10714 +++ b/arch/sparc/kernel/unaligned_64.c
10715 @@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
10716 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10717
10718 if (__ratelimit(&ratelimit)) {
10719 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
10720 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
10721 regs->tpc, (void *) regs->tpc);
10722 }
10723 }
10724 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10725 index dbe119b..089c7c1 100644
10726 --- a/arch/sparc/lib/Makefile
10727 +++ b/arch/sparc/lib/Makefile
10728 @@ -2,7 +2,7 @@
10729 #
10730
10731 asflags-y := -ansi -DST_DIV0=0x02
10732 -ccflags-y := -Werror
10733 +#ccflags-y := -Werror
10734
10735 lib-$(CONFIG_SPARC32) += ashrdi3.o
10736 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10737 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10738 index 85c233d..68500e0 100644
10739 --- a/arch/sparc/lib/atomic_64.S
10740 +++ b/arch/sparc/lib/atomic_64.S
10741 @@ -17,7 +17,12 @@
10742 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10743 BACKOFF_SETUP(%o2)
10744 1: lduw [%o1], %g1
10745 - add %g1, %o0, %g7
10746 + addcc %g1, %o0, %g7
10747 +
10748 +#ifdef CONFIG_PAX_REFCOUNT
10749 + tvs %icc, 6
10750 +#endif
10751 +
10752 cas [%o1], %g1, %g7
10753 cmp %g1, %g7
10754 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10755 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10756 2: BACKOFF_SPIN(%o2, %o3, 1b)
10757 ENDPROC(atomic_add)
10758
10759 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10760 + BACKOFF_SETUP(%o2)
10761 +1: lduw [%o1], %g1
10762 + add %g1, %o0, %g7
10763 + cas [%o1], %g1, %g7
10764 + cmp %g1, %g7
10765 + bne,pn %icc, 2f
10766 + nop
10767 + retl
10768 + nop
10769 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10770 +ENDPROC(atomic_add_unchecked)
10771 +
10772 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10773 BACKOFF_SETUP(%o2)
10774 1: lduw [%o1], %g1
10775 - sub %g1, %o0, %g7
10776 + subcc %g1, %o0, %g7
10777 +
10778 +#ifdef CONFIG_PAX_REFCOUNT
10779 + tvs %icc, 6
10780 +#endif
10781 +
10782 cas [%o1], %g1, %g7
10783 cmp %g1, %g7
10784 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10785 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10786 2: BACKOFF_SPIN(%o2, %o3, 1b)
10787 ENDPROC(atomic_sub)
10788
10789 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10790 + BACKOFF_SETUP(%o2)
10791 +1: lduw [%o1], %g1
10792 + sub %g1, %o0, %g7
10793 + cas [%o1], %g1, %g7
10794 + cmp %g1, %g7
10795 + bne,pn %icc, 2f
10796 + nop
10797 + retl
10798 + nop
10799 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10800 +ENDPROC(atomic_sub_unchecked)
10801 +
10802 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10803 BACKOFF_SETUP(%o2)
10804 1: lduw [%o1], %g1
10805 - add %g1, %o0, %g7
10806 + addcc %g1, %o0, %g7
10807 +
10808 +#ifdef CONFIG_PAX_REFCOUNT
10809 + tvs %icc, 6
10810 +#endif
10811 +
10812 cas [%o1], %g1, %g7
10813 cmp %g1, %g7
10814 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10815 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10816 2: BACKOFF_SPIN(%o2, %o3, 1b)
10817 ENDPROC(atomic_add_ret)
10818
10819 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10820 + BACKOFF_SETUP(%o2)
10821 +1: lduw [%o1], %g1
10822 + addcc %g1, %o0, %g7
10823 + cas [%o1], %g1, %g7
10824 + cmp %g1, %g7
10825 + bne,pn %icc, 2f
10826 + add %g7, %o0, %g7
10827 + sra %g7, 0, %o0
10828 + retl
10829 + nop
10830 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10831 +ENDPROC(atomic_add_ret_unchecked)
10832 +
10833 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10834 BACKOFF_SETUP(%o2)
10835 1: lduw [%o1], %g1
10836 - sub %g1, %o0, %g7
10837 + subcc %g1, %o0, %g7
10838 +
10839 +#ifdef CONFIG_PAX_REFCOUNT
10840 + tvs %icc, 6
10841 +#endif
10842 +
10843 cas [%o1], %g1, %g7
10844 cmp %g1, %g7
10845 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10846 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10847 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10848 BACKOFF_SETUP(%o2)
10849 1: ldx [%o1], %g1
10850 - add %g1, %o0, %g7
10851 + addcc %g1, %o0, %g7
10852 +
10853 +#ifdef CONFIG_PAX_REFCOUNT
10854 + tvs %xcc, 6
10855 +#endif
10856 +
10857 casx [%o1], %g1, %g7
10858 cmp %g1, %g7
10859 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10860 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10861 2: BACKOFF_SPIN(%o2, %o3, 1b)
10862 ENDPROC(atomic64_add)
10863
10864 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10865 + BACKOFF_SETUP(%o2)
10866 +1: ldx [%o1], %g1
10867 + addcc %g1, %o0, %g7
10868 + casx [%o1], %g1, %g7
10869 + cmp %g1, %g7
10870 + bne,pn %xcc, 2f
10871 + nop
10872 + retl
10873 + nop
10874 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10875 +ENDPROC(atomic64_add_unchecked)
10876 +
10877 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10878 BACKOFF_SETUP(%o2)
10879 1: ldx [%o1], %g1
10880 - sub %g1, %o0, %g7
10881 + subcc %g1, %o0, %g7
10882 +
10883 +#ifdef CONFIG_PAX_REFCOUNT
10884 + tvs %xcc, 6
10885 +#endif
10886 +
10887 casx [%o1], %g1, %g7
10888 cmp %g1, %g7
10889 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10890 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10891 2: BACKOFF_SPIN(%o2, %o3, 1b)
10892 ENDPROC(atomic64_sub)
10893
10894 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10895 + BACKOFF_SETUP(%o2)
10896 +1: ldx [%o1], %g1
10897 + subcc %g1, %o0, %g7
10898 + casx [%o1], %g1, %g7
10899 + cmp %g1, %g7
10900 + bne,pn %xcc, 2f
10901 + nop
10902 + retl
10903 + nop
10904 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10905 +ENDPROC(atomic64_sub_unchecked)
10906 +
10907 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10908 BACKOFF_SETUP(%o2)
10909 1: ldx [%o1], %g1
10910 - add %g1, %o0, %g7
10911 + addcc %g1, %o0, %g7
10912 +
10913 +#ifdef CONFIG_PAX_REFCOUNT
10914 + tvs %xcc, 6
10915 +#endif
10916 +
10917 casx [%o1], %g1, %g7
10918 cmp %g1, %g7
10919 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10920 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10921 2: BACKOFF_SPIN(%o2, %o3, 1b)
10922 ENDPROC(atomic64_add_ret)
10923
10924 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10925 + BACKOFF_SETUP(%o2)
10926 +1: ldx [%o1], %g1
10927 + addcc %g1, %o0, %g7
10928 + casx [%o1], %g1, %g7
10929 + cmp %g1, %g7
10930 + bne,pn %xcc, 2f
10931 + add %g7, %o0, %g7
10932 + mov %g7, %o0
10933 + retl
10934 + nop
10935 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10936 +ENDPROC(atomic64_add_ret_unchecked)
10937 +
10938 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10939 BACKOFF_SETUP(%o2)
10940 1: ldx [%o1], %g1
10941 - sub %g1, %o0, %g7
10942 + subcc %g1, %o0, %g7
10943 +
10944 +#ifdef CONFIG_PAX_REFCOUNT
10945 + tvs %xcc, 6
10946 +#endif
10947 +
10948 casx [%o1], %g1, %g7
10949 cmp %g1, %g7
10950 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10951 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10952 index 323335b..ed85ea2 100644
10953 --- a/arch/sparc/lib/ksyms.c
10954 +++ b/arch/sparc/lib/ksyms.c
10955 @@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
10956
10957 /* Atomic counter implementation. */
10958 EXPORT_SYMBOL(atomic_add);
10959 +EXPORT_SYMBOL(atomic_add_unchecked);
10960 EXPORT_SYMBOL(atomic_add_ret);
10961 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
10962 EXPORT_SYMBOL(atomic_sub);
10963 +EXPORT_SYMBOL(atomic_sub_unchecked);
10964 EXPORT_SYMBOL(atomic_sub_ret);
10965 EXPORT_SYMBOL(atomic64_add);
10966 +EXPORT_SYMBOL(atomic64_add_unchecked);
10967 EXPORT_SYMBOL(atomic64_add_ret);
10968 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10969 EXPORT_SYMBOL(atomic64_sub);
10970 +EXPORT_SYMBOL(atomic64_sub_unchecked);
10971 EXPORT_SYMBOL(atomic64_sub_ret);
10972 EXPORT_SYMBOL(atomic64_dec_if_positive);
10973
10974 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10975 index 30c3ecc..736f015 100644
10976 --- a/arch/sparc/mm/Makefile
10977 +++ b/arch/sparc/mm/Makefile
10978 @@ -2,7 +2,7 @@
10979 #
10980
10981 asflags-y := -ansi
10982 -ccflags-y := -Werror
10983 +#ccflags-y := -Werror
10984
10985 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10986 obj-y += fault_$(BITS).o
10987 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10988 index 59dbd46..1dd7f5e 100644
10989 --- a/arch/sparc/mm/fault_32.c
10990 +++ b/arch/sparc/mm/fault_32.c
10991 @@ -21,6 +21,9 @@
10992 #include <linux/perf_event.h>
10993 #include <linux/interrupt.h>
10994 #include <linux/kdebug.h>
10995 +#include <linux/slab.h>
10996 +#include <linux/pagemap.h>
10997 +#include <linux/compiler.h>
10998
10999 #include <asm/page.h>
11000 #include <asm/pgtable.h>
11001 @@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11002 return safe_compute_effective_address(regs, insn);
11003 }
11004
11005 +#ifdef CONFIG_PAX_PAGEEXEC
11006 +#ifdef CONFIG_PAX_DLRESOLVE
11007 +static void pax_emuplt_close(struct vm_area_struct *vma)
11008 +{
11009 + vma->vm_mm->call_dl_resolve = 0UL;
11010 +}
11011 +
11012 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11013 +{
11014 + unsigned int *kaddr;
11015 +
11016 + vmf->page = alloc_page(GFP_HIGHUSER);
11017 + if (!vmf->page)
11018 + return VM_FAULT_OOM;
11019 +
11020 + kaddr = kmap(vmf->page);
11021 + memset(kaddr, 0, PAGE_SIZE);
11022 + kaddr[0] = 0x9DE3BFA8U; /* save */
11023 + flush_dcache_page(vmf->page);
11024 + kunmap(vmf->page);
11025 + return VM_FAULT_MAJOR;
11026 +}
11027 +
11028 +static const struct vm_operations_struct pax_vm_ops = {
11029 + .close = pax_emuplt_close,
11030 + .fault = pax_emuplt_fault
11031 +};
11032 +
11033 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11034 +{
11035 + int ret;
11036 +
11037 + INIT_LIST_HEAD(&vma->anon_vma_chain);
11038 + vma->vm_mm = current->mm;
11039 + vma->vm_start = addr;
11040 + vma->vm_end = addr + PAGE_SIZE;
11041 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11042 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11043 + vma->vm_ops = &pax_vm_ops;
11044 +
11045 + ret = insert_vm_struct(current->mm, vma);
11046 + if (ret)
11047 + return ret;
11048 +
11049 + ++current->mm->total_vm;
11050 + return 0;
11051 +}
11052 +#endif
11053 +
11054 +/*
11055 + * PaX: decide what to do with offenders (regs->pc = fault address)
11056 + *
11057 + * returns 1 when task should be killed
11058 + * 2 when patched PLT trampoline was detected
11059 + * 3 when unpatched PLT trampoline was detected
11060 + */
11061 +static int pax_handle_fetch_fault(struct pt_regs *regs)
11062 +{
11063 +
11064 +#ifdef CONFIG_PAX_EMUPLT
11065 + int err;
11066 +
11067 + do { /* PaX: patched PLT emulation #1 */
11068 + unsigned int sethi1, sethi2, jmpl;
11069 +
11070 + err = get_user(sethi1, (unsigned int *)regs->pc);
11071 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11072 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11073 +
11074 + if (err)
11075 + break;
11076 +
11077 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11078 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
11079 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
11080 + {
11081 + unsigned int addr;
11082 +
11083 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11084 + addr = regs->u_regs[UREG_G1];
11085 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11086 + regs->pc = addr;
11087 + regs->npc = addr+4;
11088 + return 2;
11089 + }
11090 + } while (0);
11091 +
11092 + do { /* PaX: patched PLT emulation #2 */
11093 + unsigned int ba;
11094 +
11095 + err = get_user(ba, (unsigned int *)regs->pc);
11096 +
11097 + if (err)
11098 + break;
11099 +
11100 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11101 + unsigned int addr;
11102 +
11103 + if ((ba & 0xFFC00000U) == 0x30800000U)
11104 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11105 + else
11106 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11107 + regs->pc = addr;
11108 + regs->npc = addr+4;
11109 + return 2;
11110 + }
11111 + } while (0);
11112 +
11113 + do { /* PaX: patched PLT emulation #3 */
11114 + unsigned int sethi, bajmpl, nop;
11115 +
11116 + err = get_user(sethi, (unsigned int *)regs->pc);
11117 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11118 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
11119 +
11120 + if (err)
11121 + break;
11122 +
11123 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11124 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11125 + nop == 0x01000000U)
11126 + {
11127 + unsigned int addr;
11128 +
11129 + addr = (sethi & 0x003FFFFFU) << 10;
11130 + regs->u_regs[UREG_G1] = addr;
11131 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11132 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11133 + else
11134 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11135 + regs->pc = addr;
11136 + regs->npc = addr+4;
11137 + return 2;
11138 + }
11139 + } while (0);
11140 +
11141 + do { /* PaX: unpatched PLT emulation step 1 */
11142 + unsigned int sethi, ba, nop;
11143 +
11144 + err = get_user(sethi, (unsigned int *)regs->pc);
11145 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
11146 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
11147 +
11148 + if (err)
11149 + break;
11150 +
11151 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11152 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11153 + nop == 0x01000000U)
11154 + {
11155 + unsigned int addr, save, call;
11156 +
11157 + if ((ba & 0xFFC00000U) == 0x30800000U)
11158 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11159 + else
11160 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11161 +
11162 + err = get_user(save, (unsigned int *)addr);
11163 + err |= get_user(call, (unsigned int *)(addr+4));
11164 + err |= get_user(nop, (unsigned int *)(addr+8));
11165 + if (err)
11166 + break;
11167 +
11168 +#ifdef CONFIG_PAX_DLRESOLVE
11169 + if (save == 0x9DE3BFA8U &&
11170 + (call & 0xC0000000U) == 0x40000000U &&
11171 + nop == 0x01000000U)
11172 + {
11173 + struct vm_area_struct *vma;
11174 + unsigned long call_dl_resolve;
11175 +
11176 + down_read(&current->mm->mmap_sem);
11177 + call_dl_resolve = current->mm->call_dl_resolve;
11178 + up_read(&current->mm->mmap_sem);
11179 + if (likely(call_dl_resolve))
11180 + goto emulate;
11181 +
11182 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11183 +
11184 + down_write(&current->mm->mmap_sem);
11185 + if (current->mm->call_dl_resolve) {
11186 + call_dl_resolve = current->mm->call_dl_resolve;
11187 + up_write(&current->mm->mmap_sem);
11188 + if (vma)
11189 + kmem_cache_free(vm_area_cachep, vma);
11190 + goto emulate;
11191 + }
11192 +
11193 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11194 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11195 + up_write(&current->mm->mmap_sem);
11196 + if (vma)
11197 + kmem_cache_free(vm_area_cachep, vma);
11198 + return 1;
11199 + }
11200 +
11201 + if (pax_insert_vma(vma, call_dl_resolve)) {
11202 + up_write(&current->mm->mmap_sem);
11203 + kmem_cache_free(vm_area_cachep, vma);
11204 + return 1;
11205 + }
11206 +
11207 + current->mm->call_dl_resolve = call_dl_resolve;
11208 + up_write(&current->mm->mmap_sem);
11209 +
11210 +emulate:
11211 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11212 + regs->pc = call_dl_resolve;
11213 + regs->npc = addr+4;
11214 + return 3;
11215 + }
11216 +#endif
11217 +
11218 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11219 + if ((save & 0xFFC00000U) == 0x05000000U &&
11220 + (call & 0xFFFFE000U) == 0x85C0A000U &&
11221 + nop == 0x01000000U)
11222 + {
11223 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11224 + regs->u_regs[UREG_G2] = addr + 4;
11225 + addr = (save & 0x003FFFFFU) << 10;
11226 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11227 + regs->pc = addr;
11228 + regs->npc = addr+4;
11229 + return 3;
11230 + }
11231 + }
11232 + } while (0);
11233 +
11234 + do { /* PaX: unpatched PLT emulation step 2 */
11235 + unsigned int save, call, nop;
11236 +
11237 + err = get_user(save, (unsigned int *)(regs->pc-4));
11238 + err |= get_user(call, (unsigned int *)regs->pc);
11239 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
11240 + if (err)
11241 + break;
11242 +
11243 + if (save == 0x9DE3BFA8U &&
11244 + (call & 0xC0000000U) == 0x40000000U &&
11245 + nop == 0x01000000U)
11246 + {
11247 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11248 +
11249 + regs->u_regs[UREG_RETPC] = regs->pc;
11250 + regs->pc = dl_resolve;
11251 + regs->npc = dl_resolve+4;
11252 + return 3;
11253 + }
11254 + } while (0);
11255 +#endif
11256 +
11257 + return 1;
11258 +}
11259 +
11260 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11261 +{
11262 + unsigned long i;
11263 +
11264 + printk(KERN_ERR "PAX: bytes at PC: ");
11265 + for (i = 0; i < 8; i++) {
11266 + unsigned int c;
11267 + if (get_user(c, (unsigned int *)pc+i))
11268 + printk(KERN_CONT "???????? ");
11269 + else
11270 + printk(KERN_CONT "%08x ", c);
11271 + }
11272 + printk("\n");
11273 +}
11274 +#endif
11275 +
11276 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11277 int text_fault)
11278 {
11279 @@ -229,6 +503,24 @@ good_area:
11280 if (!(vma->vm_flags & VM_WRITE))
11281 goto bad_area;
11282 } else {
11283 +
11284 +#ifdef CONFIG_PAX_PAGEEXEC
11285 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11286 + up_read(&mm->mmap_sem);
11287 + switch (pax_handle_fetch_fault(regs)) {
11288 +
11289 +#ifdef CONFIG_PAX_EMUPLT
11290 + case 2:
11291 + case 3:
11292 + return;
11293 +#endif
11294 +
11295 + }
11296 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11297 + do_group_exit(SIGKILL);
11298 + }
11299 +#endif
11300 +
11301 /* Allow reads even for write-only mappings */
11302 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11303 goto bad_area;
11304 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11305 index 69bb818..6ca35c8 100644
11306 --- a/arch/sparc/mm/fault_64.c
11307 +++ b/arch/sparc/mm/fault_64.c
11308 @@ -22,6 +22,9 @@
11309 #include <linux/kdebug.h>
11310 #include <linux/percpu.h>
11311 #include <linux/context_tracking.h>
11312 +#include <linux/slab.h>
11313 +#include <linux/pagemap.h>
11314 +#include <linux/compiler.h>
11315
11316 #include <asm/page.h>
11317 #include <asm/pgtable.h>
11318 @@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11319 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11320 regs->tpc);
11321 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11322 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11323 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11324 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11325 dump_stack();
11326 unhandled_fault(regs->tpc, current, regs);
11327 @@ -271,6 +274,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
11328 show_regs(regs);
11329 }
11330
11331 +#ifdef CONFIG_PAX_PAGEEXEC
11332 +#ifdef CONFIG_PAX_DLRESOLVE
11333 +static void pax_emuplt_close(struct vm_area_struct *vma)
11334 +{
11335 + vma->vm_mm->call_dl_resolve = 0UL;
11336 +}
11337 +
11338 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11339 +{
11340 + unsigned int *kaddr;
11341 +
11342 + vmf->page = alloc_page(GFP_HIGHUSER);
11343 + if (!vmf->page)
11344 + return VM_FAULT_OOM;
11345 +
11346 + kaddr = kmap(vmf->page);
11347 + memset(kaddr, 0, PAGE_SIZE);
11348 + kaddr[0] = 0x9DE3BFA8U; /* save */
11349 + flush_dcache_page(vmf->page);
11350 + kunmap(vmf->page);
11351 + return VM_FAULT_MAJOR;
11352 +}
11353 +
11354 +static const struct vm_operations_struct pax_vm_ops = {
11355 + .close = pax_emuplt_close,
11356 + .fault = pax_emuplt_fault
11357 +};
11358 +
11359 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11360 +{
11361 + int ret;
11362 +
11363 + INIT_LIST_HEAD(&vma->anon_vma_chain);
11364 + vma->vm_mm = current->mm;
11365 + vma->vm_start = addr;
11366 + vma->vm_end = addr + PAGE_SIZE;
11367 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11368 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11369 + vma->vm_ops = &pax_vm_ops;
11370 +
11371 + ret = insert_vm_struct(current->mm, vma);
11372 + if (ret)
11373 + return ret;
11374 +
11375 + ++current->mm->total_vm;
11376 + return 0;
11377 +}
11378 +#endif
11379 +
11380 +/*
11381 + * PaX: decide what to do with offenders (regs->tpc = fault address)
11382 + *
11383 + * returns 1 when task should be killed
11384 + * 2 when patched PLT trampoline was detected
11385 + * 3 when unpatched PLT trampoline was detected
11386 + */
11387 +static int pax_handle_fetch_fault(struct pt_regs *regs)
11388 +{
11389 +
11390 +#ifdef CONFIG_PAX_EMUPLT
11391 + int err;
11392 +
11393 + do { /* PaX: patched PLT emulation #1 */
11394 + unsigned int sethi1, sethi2, jmpl;
11395 +
11396 + err = get_user(sethi1, (unsigned int *)regs->tpc);
11397 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11398 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11399 +
11400 + if (err)
11401 + break;
11402 +
11403 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11404 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
11405 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
11406 + {
11407 + unsigned long addr;
11408 +
11409 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11410 + addr = regs->u_regs[UREG_G1];
11411 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11412 +
11413 + if (test_thread_flag(TIF_32BIT))
11414 + addr &= 0xFFFFFFFFUL;
11415 +
11416 + regs->tpc = addr;
11417 + regs->tnpc = addr+4;
11418 + return 2;
11419 + }
11420 + } while (0);
11421 +
11422 + do { /* PaX: patched PLT emulation #2 */
11423 + unsigned int ba;
11424 +
11425 + err = get_user(ba, (unsigned int *)regs->tpc);
11426 +
11427 + if (err)
11428 + break;
11429 +
11430 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11431 + unsigned long addr;
11432 +
11433 + if ((ba & 0xFFC00000U) == 0x30800000U)
11434 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11435 + else
11436 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11437 +
11438 + if (test_thread_flag(TIF_32BIT))
11439 + addr &= 0xFFFFFFFFUL;
11440 +
11441 + regs->tpc = addr;
11442 + regs->tnpc = addr+4;
11443 + return 2;
11444 + }
11445 + } while (0);
11446 +
11447 + do { /* PaX: patched PLT emulation #3 */
11448 + unsigned int sethi, bajmpl, nop;
11449 +
11450 + err = get_user(sethi, (unsigned int *)regs->tpc);
11451 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11452 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11453 +
11454 + if (err)
11455 + break;
11456 +
11457 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11458 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11459 + nop == 0x01000000U)
11460 + {
11461 + unsigned long addr;
11462 +
11463 + addr = (sethi & 0x003FFFFFU) << 10;
11464 + regs->u_regs[UREG_G1] = addr;
11465 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11466 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11467 + else
11468 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11469 +
11470 + if (test_thread_flag(TIF_32BIT))
11471 + addr &= 0xFFFFFFFFUL;
11472 +
11473 + regs->tpc = addr;
11474 + regs->tnpc = addr+4;
11475 + return 2;
11476 + }
11477 + } while (0);
11478 +
11479 + do { /* PaX: patched PLT emulation #4 */
11480 + unsigned int sethi, mov1, call, mov2;
11481 +
11482 + err = get_user(sethi, (unsigned int *)regs->tpc);
11483 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11484 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
11485 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11486 +
11487 + if (err)
11488 + break;
11489 +
11490 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11491 + mov1 == 0x8210000FU &&
11492 + (call & 0xC0000000U) == 0x40000000U &&
11493 + mov2 == 0x9E100001U)
11494 + {
11495 + unsigned long addr;
11496 +
11497 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11498 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11499 +
11500 + if (test_thread_flag(TIF_32BIT))
11501 + addr &= 0xFFFFFFFFUL;
11502 +
11503 + regs->tpc = addr;
11504 + regs->tnpc = addr+4;
11505 + return 2;
11506 + }
11507 + } while (0);
11508 +
11509 + do { /* PaX: patched PLT emulation #5 */
11510 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11511 +
11512 + err = get_user(sethi, (unsigned int *)regs->tpc);
11513 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11514 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11515 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11516 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11517 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11518 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11519 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11520 +
11521 + if (err)
11522 + break;
11523 +
11524 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11525 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
11526 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11527 + (or1 & 0xFFFFE000U) == 0x82106000U &&
11528 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
11529 + sllx == 0x83287020U &&
11530 + jmpl == 0x81C04005U &&
11531 + nop == 0x01000000U)
11532 + {
11533 + unsigned long addr;
11534 +
11535 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11536 + regs->u_regs[UREG_G1] <<= 32;
11537 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11538 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11539 + regs->tpc = addr;
11540 + regs->tnpc = addr+4;
11541 + return 2;
11542 + }
11543 + } while (0);
11544 +
11545 + do { /* PaX: patched PLT emulation #6 */
11546 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11547 +
11548 + err = get_user(sethi, (unsigned int *)regs->tpc);
11549 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11550 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11551 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11552 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
11553 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11554 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11555 +
11556 + if (err)
11557 + break;
11558 +
11559 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11560 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
11561 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11562 + sllx == 0x83287020U &&
11563 + (or & 0xFFFFE000U) == 0x8A116000U &&
11564 + jmpl == 0x81C04005U &&
11565 + nop == 0x01000000U)
11566 + {
11567 + unsigned long addr;
11568 +
11569 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11570 + regs->u_regs[UREG_G1] <<= 32;
11571 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11572 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11573 + regs->tpc = addr;
11574 + regs->tnpc = addr+4;
11575 + return 2;
11576 + }
11577 + } while (0);
11578 +
11579 + do { /* PaX: unpatched PLT emulation step 1 */
11580 + unsigned int sethi, ba, nop;
11581 +
11582 + err = get_user(sethi, (unsigned int *)regs->tpc);
11583 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11584 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11585 +
11586 + if (err)
11587 + break;
11588 +
11589 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11590 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11591 + nop == 0x01000000U)
11592 + {
11593 + unsigned long addr;
11594 + unsigned int save, call;
11595 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11596 +
11597 + if ((ba & 0xFFC00000U) == 0x30800000U)
11598 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11599 + else
11600 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11601 +
11602 + if (test_thread_flag(TIF_32BIT))
11603 + addr &= 0xFFFFFFFFUL;
11604 +
11605 + err = get_user(save, (unsigned int *)addr);
11606 + err |= get_user(call, (unsigned int *)(addr+4));
11607 + err |= get_user(nop, (unsigned int *)(addr+8));
11608 + if (err)
11609 + break;
11610 +
11611 +#ifdef CONFIG_PAX_DLRESOLVE
11612 + if (save == 0x9DE3BFA8U &&
11613 + (call & 0xC0000000U) == 0x40000000U &&
11614 + nop == 0x01000000U)
11615 + {
11616 + struct vm_area_struct *vma;
11617 + unsigned long call_dl_resolve;
11618 +
11619 + down_read(&current->mm->mmap_sem);
11620 + call_dl_resolve = current->mm->call_dl_resolve;
11621 + up_read(&current->mm->mmap_sem);
11622 + if (likely(call_dl_resolve))
11623 + goto emulate;
11624 +
11625 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11626 +
11627 + down_write(&current->mm->mmap_sem);
11628 + if (current->mm->call_dl_resolve) {
11629 + call_dl_resolve = current->mm->call_dl_resolve;
11630 + up_write(&current->mm->mmap_sem);
11631 + if (vma)
11632 + kmem_cache_free(vm_area_cachep, vma);
11633 + goto emulate;
11634 + }
11635 +
11636 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11637 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11638 + up_write(&current->mm->mmap_sem);
11639 + if (vma)
11640 + kmem_cache_free(vm_area_cachep, vma);
11641 + return 1;
11642 + }
11643 +
11644 + if (pax_insert_vma(vma, call_dl_resolve)) {
11645 + up_write(&current->mm->mmap_sem);
11646 + kmem_cache_free(vm_area_cachep, vma);
11647 + return 1;
11648 + }
11649 +
11650 + current->mm->call_dl_resolve = call_dl_resolve;
11651 + up_write(&current->mm->mmap_sem);
11652 +
11653 +emulate:
11654 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11655 + regs->tpc = call_dl_resolve;
11656 + regs->tnpc = addr+4;
11657 + return 3;
11658 + }
11659 +#endif
11660 +
11661 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11662 + if ((save & 0xFFC00000U) == 0x05000000U &&
11663 + (call & 0xFFFFE000U) == 0x85C0A000U &&
11664 + nop == 0x01000000U)
11665 + {
11666 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11667 + regs->u_regs[UREG_G2] = addr + 4;
11668 + addr = (save & 0x003FFFFFU) << 10;
11669 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11670 +
11671 + if (test_thread_flag(TIF_32BIT))
11672 + addr &= 0xFFFFFFFFUL;
11673 +
11674 + regs->tpc = addr;
11675 + regs->tnpc = addr+4;
11676 + return 3;
11677 + }
11678 +
11679 + /* PaX: 64-bit PLT stub */
11680 + err = get_user(sethi1, (unsigned int *)addr);
11681 + err |= get_user(sethi2, (unsigned int *)(addr+4));
11682 + err |= get_user(or1, (unsigned int *)(addr+8));
11683 + err |= get_user(or2, (unsigned int *)(addr+12));
11684 + err |= get_user(sllx, (unsigned int *)(addr+16));
11685 + err |= get_user(add, (unsigned int *)(addr+20));
11686 + err |= get_user(jmpl, (unsigned int *)(addr+24));
11687 + err |= get_user(nop, (unsigned int *)(addr+28));
11688 + if (err)
11689 + break;
11690 +
11691 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11692 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11693 + (or1 & 0xFFFFE000U) == 0x88112000U &&
11694 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
11695 + sllx == 0x89293020U &&
11696 + add == 0x8A010005U &&
11697 + jmpl == 0x89C14000U &&
11698 + nop == 0x01000000U)
11699 + {
11700 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11701 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11702 + regs->u_regs[UREG_G4] <<= 32;
11703 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11704 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11705 + regs->u_regs[UREG_G4] = addr + 24;
11706 + addr = regs->u_regs[UREG_G5];
11707 + regs->tpc = addr;
11708 + regs->tnpc = addr+4;
11709 + return 3;
11710 + }
11711 + }
11712 + } while (0);
11713 +
11714 +#ifdef CONFIG_PAX_DLRESOLVE
11715 + do { /* PaX: unpatched PLT emulation step 2 */
11716 + unsigned int save, call, nop;
11717 +
11718 + err = get_user(save, (unsigned int *)(regs->tpc-4));
11719 + err |= get_user(call, (unsigned int *)regs->tpc);
11720 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11721 + if (err)
11722 + break;
11723 +
11724 + if (save == 0x9DE3BFA8U &&
11725 + (call & 0xC0000000U) == 0x40000000U &&
11726 + nop == 0x01000000U)
11727 + {
11728 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11729 +
11730 + if (test_thread_flag(TIF_32BIT))
11731 + dl_resolve &= 0xFFFFFFFFUL;
11732 +
11733 + regs->u_regs[UREG_RETPC] = regs->tpc;
11734 + regs->tpc = dl_resolve;
11735 + regs->tnpc = dl_resolve+4;
11736 + return 3;
11737 + }
11738 + } while (0);
11739 +#endif
11740 +
11741 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
11742 + unsigned int sethi, ba, nop;
11743 +
11744 + err = get_user(sethi, (unsigned int *)regs->tpc);
11745 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11746 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11747 +
11748 + if (err)
11749 + break;
11750 +
11751 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11752 + (ba & 0xFFF00000U) == 0x30600000U &&
11753 + nop == 0x01000000U)
11754 + {
11755 + unsigned long addr;
11756 +
11757 + addr = (sethi & 0x003FFFFFU) << 10;
11758 + regs->u_regs[UREG_G1] = addr;
11759 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11760 +
11761 + if (test_thread_flag(TIF_32BIT))
11762 + addr &= 0xFFFFFFFFUL;
11763 +
11764 + regs->tpc = addr;
11765 + regs->tnpc = addr+4;
11766 + return 2;
11767 + }
11768 + } while (0);
11769 +
11770 +#endif
11771 +
11772 + return 1;
11773 +}
11774 +
11775 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11776 +{
11777 + unsigned long i;
11778 +
11779 + printk(KERN_ERR "PAX: bytes at PC: ");
11780 + for (i = 0; i < 8; i++) {
11781 + unsigned int c;
11782 + if (get_user(c, (unsigned int *)pc+i))
11783 + printk(KERN_CONT "???????? ");
11784 + else
11785 + printk(KERN_CONT "%08x ", c);
11786 + }
11787 + printk("\n");
11788 +}
11789 +#endif
11790 +
11791 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11792 {
11793 enum ctx_state prev_state = exception_enter();
11794 @@ -344,6 +807,29 @@ retry:
11795 if (!vma)
11796 goto bad_area;
11797
11798 +#ifdef CONFIG_PAX_PAGEEXEC
11799 + /* PaX: detect ITLB misses on non-exec pages */
11800 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11801 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11802 + {
11803 + if (address != regs->tpc)
11804 + goto good_area;
11805 +
11806 + up_read(&mm->mmap_sem);
11807 + switch (pax_handle_fetch_fault(regs)) {
11808 +
11809 +#ifdef CONFIG_PAX_EMUPLT
11810 + case 2:
11811 + case 3:
11812 + return;
11813 +#endif
11814 +
11815 + }
11816 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11817 + do_group_exit(SIGKILL);
11818 + }
11819 +#endif
11820 +
11821 /* Pure DTLB misses do not tell us whether the fault causing
11822 * load/store/atomic was a write or not, it only says that there
11823 * was no match. So in such a case we (carefully) read the
11824 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11825 index 3096317..a7b7654 100644
11826 --- a/arch/sparc/mm/hugetlbpage.c
11827 +++ b/arch/sparc/mm/hugetlbpage.c
11828 @@ -26,7 +26,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11829 unsigned long addr,
11830 unsigned long len,
11831 unsigned long pgoff,
11832 - unsigned long flags)
11833 + unsigned long flags,
11834 + unsigned long offset)
11835 {
11836 unsigned long task_size = TASK_SIZE;
11837 struct vm_unmapped_area_info info;
11838 @@ -36,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11839
11840 info.flags = 0;
11841 info.length = len;
11842 - info.low_limit = TASK_UNMAPPED_BASE;
11843 + info.low_limit = mm->mmap_base;
11844 info.high_limit = min(task_size, VA_EXCLUDE_START);
11845 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11846 info.align_offset = 0;
11847 + info.threadstack_offset = offset;
11848 addr = vm_unmapped_area(&info);
11849
11850 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11851 VM_BUG_ON(addr != -ENOMEM);
11852 info.low_limit = VA_EXCLUDE_END;
11853 +
11854 +#ifdef CONFIG_PAX_RANDMMAP
11855 + if (mm->pax_flags & MF_PAX_RANDMMAP)
11856 + info.low_limit += mm->delta_mmap;
11857 +#endif
11858 +
11859 info.high_limit = task_size;
11860 addr = vm_unmapped_area(&info);
11861 }
11862 @@ -56,7 +64,8 @@ static unsigned long
11863 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11864 const unsigned long len,
11865 const unsigned long pgoff,
11866 - const unsigned long flags)
11867 + const unsigned long flags,
11868 + const unsigned long offset)
11869 {
11870 struct mm_struct *mm = current->mm;
11871 unsigned long addr = addr0;
11872 @@ -71,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11873 info.high_limit = mm->mmap_base;
11874 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11875 info.align_offset = 0;
11876 + info.threadstack_offset = offset;
11877 addr = vm_unmapped_area(&info);
11878
11879 /*
11880 @@ -83,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11881 VM_BUG_ON(addr != -ENOMEM);
11882 info.flags = 0;
11883 info.low_limit = TASK_UNMAPPED_BASE;
11884 +
11885 +#ifdef CONFIG_PAX_RANDMMAP
11886 + if (mm->pax_flags & MF_PAX_RANDMMAP)
11887 + info.low_limit += mm->delta_mmap;
11888 +#endif
11889 +
11890 info.high_limit = STACK_TOP32;
11891 addr = vm_unmapped_area(&info);
11892 }
11893 @@ -97,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11894 struct mm_struct *mm = current->mm;
11895 struct vm_area_struct *vma;
11896 unsigned long task_size = TASK_SIZE;
11897 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11898
11899 if (test_thread_flag(TIF_32BIT))
11900 task_size = STACK_TOP32;
11901 @@ -112,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11902 return addr;
11903 }
11904
11905 +#ifdef CONFIG_PAX_RANDMMAP
11906 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11907 +#endif
11908 +
11909 if (addr) {
11910 addr = ALIGN(addr, HPAGE_SIZE);
11911 vma = find_vma(mm, addr);
11912 - if (task_size - len >= addr &&
11913 - (!vma || addr + len <= vma->vm_start))
11914 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11915 return addr;
11916 }
11917 if (mm->get_unmapped_area == arch_get_unmapped_area)
11918 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11919 - pgoff, flags);
11920 + pgoff, flags, offset);
11921 else
11922 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11923 - pgoff, flags);
11924 + pgoff, flags, offset);
11925 }
11926
11927 pte_t *huge_pte_alloc(struct mm_struct *mm,
11928 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
11929 index 5322e53..f820c5e 100644
11930 --- a/arch/sparc/mm/init_64.c
11931 +++ b/arch/sparc/mm/init_64.c
11932 @@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
11933 int num_kernel_image_mappings;
11934
11935 #ifdef CONFIG_DEBUG_DCFLUSH
11936 -atomic_t dcpage_flushes = ATOMIC_INIT(0);
11937 +atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
11938 #ifdef CONFIG_SMP
11939 -atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11940 +atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11941 #endif
11942 #endif
11943
11944 @@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
11945 {
11946 BUG_ON(tlb_type == hypervisor);
11947 #ifdef CONFIG_DEBUG_DCFLUSH
11948 - atomic_inc(&dcpage_flushes);
11949 + atomic_inc_unchecked(&dcpage_flushes);
11950 #endif
11951
11952 #ifdef DCACHE_ALIASING_POSSIBLE
11953 @@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
11954
11955 #ifdef CONFIG_DEBUG_DCFLUSH
11956 seq_printf(m, "DCPageFlushes\t: %d\n",
11957 - atomic_read(&dcpage_flushes));
11958 + atomic_read_unchecked(&dcpage_flushes));
11959 #ifdef CONFIG_SMP
11960 seq_printf(m, "DCPageFlushesXC\t: %d\n",
11961 - atomic_read(&dcpage_flushes_xcall));
11962 + atomic_read_unchecked(&dcpage_flushes_xcall));
11963 #endif /* CONFIG_SMP */
11964 #endif /* CONFIG_DEBUG_DCFLUSH */
11965 }
11966 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
11967 index b3692ce..e4517c9 100644
11968 --- a/arch/tile/Kconfig
11969 +++ b/arch/tile/Kconfig
11970 @@ -184,6 +184,7 @@ source "kernel/Kconfig.hz"
11971
11972 config KEXEC
11973 bool "kexec system call"
11974 + depends on !GRKERNSEC_KMEM
11975 ---help---
11976 kexec is a system call that implements the ability to shutdown your
11977 current kernel, and to start another kernel. It is like a reboot
11978 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11979 index ad220ee..2f537b3 100644
11980 --- a/arch/tile/include/asm/atomic_64.h
11981 +++ b/arch/tile/include/asm/atomic_64.h
11982 @@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11983
11984 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11985
11986 +#define atomic64_read_unchecked(v) atomic64_read(v)
11987 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11988 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11989 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11990 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11991 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
11992 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11993 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
11994 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11995 +
11996 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11997 #define smp_mb__before_atomic_dec() smp_mb()
11998 #define smp_mb__after_atomic_dec() smp_mb()
11999 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12000 index 6160761..00cac88 100644
12001 --- a/arch/tile/include/asm/cache.h
12002 +++ b/arch/tile/include/asm/cache.h
12003 @@ -15,11 +15,12 @@
12004 #ifndef _ASM_TILE_CACHE_H
12005 #define _ASM_TILE_CACHE_H
12006
12007 +#include <linux/const.h>
12008 #include <arch/chip.h>
12009
12010 /* bytes per L1 data cache line */
12011 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12012 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12013 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12014
12015 /* bytes per L2 cache line */
12016 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12017 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12018 index b6cde32..c0cb736 100644
12019 --- a/arch/tile/include/asm/uaccess.h
12020 +++ b/arch/tile/include/asm/uaccess.h
12021 @@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12022 const void __user *from,
12023 unsigned long n)
12024 {
12025 - int sz = __compiletime_object_size(to);
12026 + size_t sz = __compiletime_object_size(to);
12027
12028 - if (likely(sz == -1 || sz >= n))
12029 + if (likely(sz == (size_t)-1 || sz >= n))
12030 n = _copy_from_user(to, from, n);
12031 else
12032 copy_from_user_overflow();
12033 diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12034 index 0cb3bba..7338b2d 100644
12035 --- a/arch/tile/mm/hugetlbpage.c
12036 +++ b/arch/tile/mm/hugetlbpage.c
12037 @@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12038 info.high_limit = TASK_SIZE;
12039 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12040 info.align_offset = 0;
12041 + info.threadstack_offset = 0;
12042 return vm_unmapped_area(&info);
12043 }
12044
12045 @@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12046 info.high_limit = current->mm->mmap_base;
12047 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12048 info.align_offset = 0;
12049 + info.threadstack_offset = 0;
12050 addr = vm_unmapped_area(&info);
12051
12052 /*
12053 diff --git a/arch/um/Makefile b/arch/um/Makefile
12054 index 36e658a..71a5c5a 100644
12055 --- a/arch/um/Makefile
12056 +++ b/arch/um/Makefile
12057 @@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12058 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12059 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12060
12061 +ifdef CONSTIFY_PLUGIN
12062 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12063 +endif
12064 +
12065 #This will adjust *FLAGS accordingly to the platform.
12066 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12067
12068 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12069 index 19e1bdd..3665b77 100644
12070 --- a/arch/um/include/asm/cache.h
12071 +++ b/arch/um/include/asm/cache.h
12072 @@ -1,6 +1,7 @@
12073 #ifndef __UM_CACHE_H
12074 #define __UM_CACHE_H
12075
12076 +#include <linux/const.h>
12077
12078 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12079 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12080 @@ -12,6 +13,6 @@
12081 # define L1_CACHE_SHIFT 5
12082 #endif
12083
12084 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12085 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12086
12087 #endif
12088 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12089 index 2e0a6b1..a64d0f5 100644
12090 --- a/arch/um/include/asm/kmap_types.h
12091 +++ b/arch/um/include/asm/kmap_types.h
12092 @@ -8,6 +8,6 @@
12093
12094 /* No more #include "asm/arch/kmap_types.h" ! */
12095
12096 -#define KM_TYPE_NR 14
12097 +#define KM_TYPE_NR 15
12098
12099 #endif
12100 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12101 index 5ff53d9..5850cdf 100644
12102 --- a/arch/um/include/asm/page.h
12103 +++ b/arch/um/include/asm/page.h
12104 @@ -14,6 +14,9 @@
12105 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12106 #define PAGE_MASK (~(PAGE_SIZE-1))
12107
12108 +#define ktla_ktva(addr) (addr)
12109 +#define ktva_ktla(addr) (addr)
12110 +
12111 #ifndef __ASSEMBLY__
12112
12113 struct page;
12114 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12115 index 0032f92..cd151e0 100644
12116 --- a/arch/um/include/asm/pgtable-3level.h
12117 +++ b/arch/um/include/asm/pgtable-3level.h
12118 @@ -58,6 +58,7 @@
12119 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12120 #define pud_populate(mm, pud, pmd) \
12121 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12122 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12123
12124 #ifdef CONFIG_64BIT
12125 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12126 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12127 index eecc414..48adb87 100644
12128 --- a/arch/um/kernel/process.c
12129 +++ b/arch/um/kernel/process.c
12130 @@ -356,22 +356,6 @@ int singlestepping(void * t)
12131 return 2;
12132 }
12133
12134 -/*
12135 - * Only x86 and x86_64 have an arch_align_stack().
12136 - * All other arches have "#define arch_align_stack(x) (x)"
12137 - * in their asm/system.h
12138 - * As this is included in UML from asm-um/system-generic.h,
12139 - * we can use it to behave as the subarch does.
12140 - */
12141 -#ifndef arch_align_stack
12142 -unsigned long arch_align_stack(unsigned long sp)
12143 -{
12144 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12145 - sp -= get_random_int() % 8192;
12146 - return sp & ~0xf;
12147 -}
12148 -#endif
12149 -
12150 unsigned long get_wchan(struct task_struct *p)
12151 {
12152 unsigned long stack_page, sp, ip;
12153 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12154 index ad8f795..2c7eec6 100644
12155 --- a/arch/unicore32/include/asm/cache.h
12156 +++ b/arch/unicore32/include/asm/cache.h
12157 @@ -12,8 +12,10 @@
12158 #ifndef __UNICORE_CACHE_H__
12159 #define __UNICORE_CACHE_H__
12160
12161 -#define L1_CACHE_SHIFT (5)
12162 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12163 +#include <linux/const.h>
12164 +
12165 +#define L1_CACHE_SHIFT 5
12166 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12167
12168 /*
12169 * Memory returned by kmalloc() may be used for DMA, so we must make
12170 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12171 index 0952ecd..9cf578c 100644
12172 --- a/arch/x86/Kconfig
12173 +++ b/arch/x86/Kconfig
12174 @@ -249,7 +249,7 @@ config X86_HT
12175
12176 config X86_32_LAZY_GS
12177 def_bool y
12178 - depends on X86_32 && !CC_STACKPROTECTOR
12179 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12180
12181 config ARCH_HWEIGHT_CFLAGS
12182 string
12183 @@ -602,6 +602,7 @@ config SCHED_OMIT_FRAME_POINTER
12184
12185 menuconfig HYPERVISOR_GUEST
12186 bool "Linux guest support"
12187 + depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12188 ---help---
12189 Say Y here to enable options for running Linux under various hyper-
12190 visors. This option enables basic hypervisor detection and platform
12191 @@ -1127,7 +1128,7 @@ choice
12192
12193 config NOHIGHMEM
12194 bool "off"
12195 - depends on !X86_NUMAQ
12196 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12197 ---help---
12198 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12199 However, the address space of 32-bit x86 processors is only 4
12200 @@ -1164,7 +1165,7 @@ config NOHIGHMEM
12201
12202 config HIGHMEM4G
12203 bool "4GB"
12204 - depends on !X86_NUMAQ
12205 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12206 ---help---
12207 Select this if you have a 32-bit processor and between 1 and 4
12208 gigabytes of physical RAM.
12209 @@ -1217,7 +1218,7 @@ config PAGE_OFFSET
12210 hex
12211 default 0xB0000000 if VMSPLIT_3G_OPT
12212 default 0x80000000 if VMSPLIT_2G
12213 - default 0x78000000 if VMSPLIT_2G_OPT
12214 + default 0x70000000 if VMSPLIT_2G_OPT
12215 default 0x40000000 if VMSPLIT_1G
12216 default 0xC0000000
12217 depends on X86_32
12218 @@ -1619,6 +1620,7 @@ config SECCOMP
12219
12220 config CC_STACKPROTECTOR
12221 bool "Enable -fstack-protector buffer overflow detection"
12222 + depends on X86_64 || !PAX_MEMORY_UDEREF
12223 ---help---
12224 This option turns on the -fstack-protector GCC feature. This
12225 feature puts, at the beginning of functions, a canary value on
12226 @@ -1637,6 +1639,7 @@ source kernel/Kconfig.hz
12227
12228 config KEXEC
12229 bool "kexec system call"
12230 + depends on !GRKERNSEC_KMEM
12231 ---help---
12232 kexec is a system call that implements the ability to shutdown your
12233 current kernel, and to start another kernel. It is like a reboot
12234 @@ -1738,6 +1741,8 @@ config X86_NEED_RELOCS
12235 config PHYSICAL_ALIGN
12236 hex "Alignment value to which kernel should be aligned"
12237 default "0x1000000"
12238 + range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12239 + range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12240 range 0x2000 0x1000000 if X86_32
12241 range 0x200000 0x1000000 if X86_64
12242 ---help---
12243 @@ -1817,9 +1822,10 @@ config DEBUG_HOTPLUG_CPU0
12244 If unsure, say N.
12245
12246 config COMPAT_VDSO
12247 - def_bool y
12248 + def_bool n
12249 prompt "Compat VDSO support"
12250 depends on X86_32 || IA32_EMULATION
12251 + depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12252 ---help---
12253 Map the 32-bit VDSO to the predictable old-style address too.
12254
12255 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12256 index c026cca..14657ae 100644
12257 --- a/arch/x86/Kconfig.cpu
12258 +++ b/arch/x86/Kconfig.cpu
12259 @@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12260
12261 config X86_F00F_BUG
12262 def_bool y
12263 - depends on M586MMX || M586TSC || M586 || M486
12264 + depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12265
12266 config X86_INVD_BUG
12267 def_bool y
12268 @@ -327,7 +327,7 @@ config X86_INVD_BUG
12269
12270 config X86_ALIGNMENT_16
12271 def_bool y
12272 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12273 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12274
12275 config X86_INTEL_USERCOPY
12276 def_bool y
12277 @@ -373,7 +373,7 @@ config X86_CMPXCHG64
12278 # generates cmov.
12279 config X86_CMOV
12280 def_bool y
12281 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12282 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12283
12284 config X86_MINIMUM_CPU_FAMILY
12285 int
12286 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12287 index 0f3621e..282f24b 100644
12288 --- a/arch/x86/Kconfig.debug
12289 +++ b/arch/x86/Kconfig.debug
12290 @@ -84,7 +84,7 @@ config X86_PTDUMP
12291 config DEBUG_RODATA
12292 bool "Write protect kernel read-only data structures"
12293 default y
12294 - depends on DEBUG_KERNEL
12295 + depends on DEBUG_KERNEL && BROKEN
12296 ---help---
12297 Mark the kernel read-only data as write-protected in the pagetables,
12298 in order to catch accidental (and incorrect) writes to such const
12299 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
12300
12301 config DEBUG_SET_MODULE_RONX
12302 bool "Set loadable kernel module data as NX and text as RO"
12303 - depends on MODULES
12304 + depends on MODULES && BROKEN
12305 ---help---
12306 This option helps catch unintended modifications to loadable
12307 kernel module's text and read-only data. It also prevents execution
12308 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12309 index 57d0215..b4373fb 100644
12310 --- a/arch/x86/Makefile
12311 +++ b/arch/x86/Makefile
12312 @@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
12313 # CPU-specific tuning. Anything which can be shared with UML should go here.
12314 include $(srctree)/arch/x86/Makefile_32.cpu
12315 KBUILD_CFLAGS += $(cflags-y)
12316 -
12317 - # temporary until string.h is fixed
12318 - KBUILD_CFLAGS += -ffreestanding
12319 else
12320 BITS := 64
12321 UTS_MACHINE := x86_64
12322 CHECKFLAGS += -D__x86_64__ -m64
12323
12324 + biarch := $(call cc-option,-m64)
12325 KBUILD_AFLAGS += -m64
12326 KBUILD_CFLAGS += -m64
12327
12328 @@ -89,6 +87,9 @@ else
12329 KBUILD_CFLAGS += -maccumulate-outgoing-args
12330 endif
12331
12332 +# temporary until string.h is fixed
12333 +KBUILD_CFLAGS += -ffreestanding
12334 +
12335 ifdef CONFIG_CC_STACKPROTECTOR
12336 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12337 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
12338 @@ -247,3 +248,12 @@ define archhelp
12339 echo ' FDINITRD=file initrd for the booted kernel'
12340 echo ' kvmconfig - Enable additional options for guest kernel support'
12341 endef
12342 +
12343 +define OLD_LD
12344 +
12345 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12346 +*** Please upgrade your binutils to 2.18 or newer
12347 +endef
12348 +
12349 +archprepare:
12350 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12351 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12352 index d9c1195..a26ca0d 100644
12353 --- a/arch/x86/boot/Makefile
12354 +++ b/arch/x86/boot/Makefile
12355 @@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
12356 $(call cc-option, -fno-unit-at-a-time)) \
12357 $(call cc-option, -fno-stack-protector) \
12358 $(call cc-option, -mpreferred-stack-boundary=2)
12359 +ifdef CONSTIFY_PLUGIN
12360 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12361 +endif
12362 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12363 GCOV_PROFILE := n
12364
12365 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12366 index 878e4b9..20537ab 100644
12367 --- a/arch/x86/boot/bitops.h
12368 +++ b/arch/x86/boot/bitops.h
12369 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12370 u8 v;
12371 const u32 *p = (const u32 *)addr;
12372
12373 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12374 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12375 return v;
12376 }
12377
12378 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12379
12380 static inline void set_bit(int nr, void *addr)
12381 {
12382 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12383 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12384 }
12385
12386 #endif /* BOOT_BITOPS_H */
12387 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12388 index ef72bae..353a184 100644
12389 --- a/arch/x86/boot/boot.h
12390 +++ b/arch/x86/boot/boot.h
12391 @@ -85,7 +85,7 @@ static inline void io_delay(void)
12392 static inline u16 ds(void)
12393 {
12394 u16 seg;
12395 - asm("movw %%ds,%0" : "=rm" (seg));
12396 + asm volatile("movw %%ds,%0" : "=rm" (seg));
12397 return seg;
12398 }
12399
12400 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
12401 static inline int memcmp(const void *s1, const void *s2, size_t len)
12402 {
12403 u8 diff;
12404 - asm("repe; cmpsb; setnz %0"
12405 + asm volatile("repe; cmpsb; setnz %0"
12406 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
12407 return diff;
12408 }
12409 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12410 index c8a6792..2402765 100644
12411 --- a/arch/x86/boot/compressed/Makefile
12412 +++ b/arch/x86/boot/compressed/Makefile
12413 @@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
12414 KBUILD_CFLAGS += -mno-mmx -mno-sse
12415 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12416 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12417 +ifdef CONSTIFY_PLUGIN
12418 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12419 +endif
12420
12421 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12422 GCOV_PROFILE := n
12423 diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12424 index a53440e..c3dbf1e 100644
12425 --- a/arch/x86/boot/compressed/efi_stub_32.S
12426 +++ b/arch/x86/boot/compressed/efi_stub_32.S
12427 @@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12428 * parameter 2, ..., param n. To make things easy, we save the return
12429 * address of efi_call_phys in a global variable.
12430 */
12431 - popl %ecx
12432 - movl %ecx, saved_return_addr(%edx)
12433 - /* get the function pointer into ECX*/
12434 - popl %ecx
12435 - movl %ecx, efi_rt_function_ptr(%edx)
12436 + popl saved_return_addr(%edx)
12437 + popl efi_rt_function_ptr(%edx)
12438
12439 /*
12440 * 3. Call the physical function.
12441 */
12442 - call *%ecx
12443 + call *efi_rt_function_ptr(%edx)
12444
12445 /*
12446 * 4. Balance the stack. And because EAX contain the return value,
12447 @@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12448 1: popl %edx
12449 subl $1b, %edx
12450
12451 - movl efi_rt_function_ptr(%edx), %ecx
12452 - pushl %ecx
12453 + pushl efi_rt_function_ptr(%edx)
12454
12455 /*
12456 * 10. Push the saved return address onto the stack and return.
12457 */
12458 - movl saved_return_addr(%edx), %ecx
12459 - pushl %ecx
12460 - ret
12461 + jmpl *saved_return_addr(%edx)
12462 ENDPROC(efi_call_phys)
12463 .previous
12464
12465 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12466 index 5d6f689..9d06730 100644
12467 --- a/arch/x86/boot/compressed/head_32.S
12468 +++ b/arch/x86/boot/compressed/head_32.S
12469 @@ -118,7 +118,7 @@ preferred_addr:
12470 notl %eax
12471 andl %eax, %ebx
12472 #else
12473 - movl $LOAD_PHYSICAL_ADDR, %ebx
12474 + movl $____LOAD_PHYSICAL_ADDR, %ebx
12475 #endif
12476
12477 /* Target address to relocate to for decompression */
12478 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12479 index c337422..2c5be72 100644
12480 --- a/arch/x86/boot/compressed/head_64.S
12481 +++ b/arch/x86/boot/compressed/head_64.S
12482 @@ -95,7 +95,7 @@ ENTRY(startup_32)
12483 notl %eax
12484 andl %eax, %ebx
12485 #else
12486 - movl $LOAD_PHYSICAL_ADDR, %ebx
12487 + movl $____LOAD_PHYSICAL_ADDR, %ebx
12488 #endif
12489
12490 /* Target address to relocate to for decompression */
12491 @@ -270,7 +270,7 @@ preferred_addr:
12492 notq %rax
12493 andq %rax, %rbp
12494 #else
12495 - movq $LOAD_PHYSICAL_ADDR, %rbp
12496 + movq $____LOAD_PHYSICAL_ADDR, %rbp
12497 #endif
12498
12499 /* Target address to relocate to for decompression */
12500 @@ -362,8 +362,8 @@ gdt:
12501 .long gdt
12502 .word 0
12503 .quad 0x0000000000000000 /* NULL descriptor */
12504 - .quad 0x00af9a000000ffff /* __KERNEL_CS */
12505 - .quad 0x00cf92000000ffff /* __KERNEL_DS */
12506 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
12507 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
12508 .quad 0x0080890000000000 /* TS descriptor */
12509 .quad 0x0000000000000000 /* TS continued */
12510 gdt_end:
12511 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12512 index 434f077..b6b4b38 100644
12513 --- a/arch/x86/boot/compressed/misc.c
12514 +++ b/arch/x86/boot/compressed/misc.c
12515 @@ -283,7 +283,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12516 * Calculate the delta between where vmlinux was linked to load
12517 * and where it was actually loaded.
12518 */
12519 - delta = min_addr - LOAD_PHYSICAL_ADDR;
12520 + delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12521 if (!delta) {
12522 debug_putstr("No relocation needed... ");
12523 return;
12524 @@ -380,7 +380,7 @@ static void parse_elf(void *output)
12525 case PT_LOAD:
12526 #ifdef CONFIG_RELOCATABLE
12527 dest = output;
12528 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12529 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12530 #else
12531 dest = (void *)(phdr->p_paddr);
12532 #endif
12533 @@ -432,7 +432,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
12534 error("Destination address too large");
12535 #endif
12536 #ifndef CONFIG_RELOCATABLE
12537 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12538 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12539 error("Wrong destination address");
12540 #endif
12541
12542 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12543 index 4d3ff03..e4972ff 100644
12544 --- a/arch/x86/boot/cpucheck.c
12545 +++ b/arch/x86/boot/cpucheck.c
12546 @@ -74,7 +74,7 @@ static int has_fpu(void)
12547 u16 fcw = -1, fsw = -1;
12548 u32 cr0;
12549
12550 - asm("movl %%cr0,%0" : "=r" (cr0));
12551 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
12552 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
12553 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
12554 asm volatile("movl %0,%%cr0" : : "r" (cr0));
12555 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
12556 {
12557 u32 f0, f1;
12558
12559 - asm("pushfl ; "
12560 + asm volatile("pushfl ; "
12561 "pushfl ; "
12562 "popl %0 ; "
12563 "movl %0,%1 ; "
12564 @@ -115,7 +115,7 @@ static void get_flags(void)
12565 set_bit(X86_FEATURE_FPU, cpu.flags);
12566
12567 if (has_eflag(X86_EFLAGS_ID)) {
12568 - asm("cpuid"
12569 + asm volatile("cpuid"
12570 : "=a" (max_intel_level),
12571 "=b" (cpu_vendor[0]),
12572 "=d" (cpu_vendor[1]),
12573 @@ -124,7 +124,7 @@ static void get_flags(void)
12574
12575 if (max_intel_level >= 0x00000001 &&
12576 max_intel_level <= 0x0000ffff) {
12577 - asm("cpuid"
12578 + asm volatile("cpuid"
12579 : "=a" (tfms),
12580 "=c" (cpu.flags[4]),
12581 "=d" (cpu.flags[0])
12582 @@ -136,7 +136,7 @@ static void get_flags(void)
12583 cpu.model += ((tfms >> 16) & 0xf) << 4;
12584 }
12585
12586 - asm("cpuid"
12587 + asm volatile("cpuid"
12588 : "=a" (max_amd_level)
12589 : "a" (0x80000000)
12590 : "ebx", "ecx", "edx");
12591 @@ -144,7 +144,7 @@ static void get_flags(void)
12592 if (max_amd_level >= 0x80000001 &&
12593 max_amd_level <= 0x8000ffff) {
12594 u32 eax = 0x80000001;
12595 - asm("cpuid"
12596 + asm volatile("cpuid"
12597 : "+a" (eax),
12598 "=c" (cpu.flags[6]),
12599 "=d" (cpu.flags[1])
12600 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12601 u32 ecx = MSR_K7_HWCR;
12602 u32 eax, edx;
12603
12604 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12605 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12606 eax &= ~(1 << 15);
12607 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12608 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12609
12610 get_flags(); /* Make sure it really did something */
12611 err = check_flags();
12612 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12613 u32 ecx = MSR_VIA_FCR;
12614 u32 eax, edx;
12615
12616 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12617 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12618 eax |= (1<<1)|(1<<7);
12619 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12620 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12621
12622 set_bit(X86_FEATURE_CX8, cpu.flags);
12623 err = check_flags();
12624 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12625 u32 eax, edx;
12626 u32 level = 1;
12627
12628 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12629 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12630 - asm("cpuid"
12631 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12632 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12633 + asm volatile("cpuid"
12634 : "+a" (level), "=d" (cpu.flags[0])
12635 : : "ecx", "ebx");
12636 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12637 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12638
12639 err = check_flags();
12640 }
12641 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12642 index 9ec06a1..2c25e79 100644
12643 --- a/arch/x86/boot/header.S
12644 +++ b/arch/x86/boot/header.S
12645 @@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12646 # single linked list of
12647 # struct setup_data
12648
12649 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12650 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12651
12652 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12653 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12654 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12655 +#else
12656 #define VO_INIT_SIZE (VO__end - VO__text)
12657 +#endif
12658 #if ZO_INIT_SIZE > VO_INIT_SIZE
12659 #define INIT_SIZE ZO_INIT_SIZE
12660 #else
12661 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12662 index db75d07..8e6d0af 100644
12663 --- a/arch/x86/boot/memory.c
12664 +++ b/arch/x86/boot/memory.c
12665 @@ -19,7 +19,7 @@
12666
12667 static int detect_memory_e820(void)
12668 {
12669 - int count = 0;
12670 + unsigned int count = 0;
12671 struct biosregs ireg, oreg;
12672 struct e820entry *desc = boot_params.e820_map;
12673 static struct e820entry buf; /* static so it is zeroed */
12674 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12675 index 11e8c6e..fdbb1ed 100644
12676 --- a/arch/x86/boot/video-vesa.c
12677 +++ b/arch/x86/boot/video-vesa.c
12678 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
12679
12680 boot_params.screen_info.vesapm_seg = oreg.es;
12681 boot_params.screen_info.vesapm_off = oreg.di;
12682 + boot_params.screen_info.vesapm_size = oreg.cx;
12683 }
12684
12685 /*
12686 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12687 index 43eda28..5ab5fdb 100644
12688 --- a/arch/x86/boot/video.c
12689 +++ b/arch/x86/boot/video.c
12690 @@ -96,7 +96,7 @@ static void store_mode_params(void)
12691 static unsigned int get_entry(void)
12692 {
12693 char entry_buf[4];
12694 - int i, len = 0;
12695 + unsigned int i, len = 0;
12696 int key;
12697 unsigned int v;
12698
12699 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12700 index 9105655..41779c1 100644
12701 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
12702 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12703 @@ -8,6 +8,8 @@
12704 * including this sentence is retained in full.
12705 */
12706
12707 +#include <asm/alternative-asm.h>
12708 +
12709 .extern crypto_ft_tab
12710 .extern crypto_it_tab
12711 .extern crypto_fl_tab
12712 @@ -70,6 +72,8 @@
12713 je B192; \
12714 leaq 32(r9),r9;
12715
12716 +#define ret pax_force_retaddr; ret
12717 +
12718 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12719 movq r1,r2; \
12720 movq r3,r4; \
12721 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12722 index 477e9d7..c92c7d8 100644
12723 --- a/arch/x86/crypto/aesni-intel_asm.S
12724 +++ b/arch/x86/crypto/aesni-intel_asm.S
12725 @@ -31,6 +31,7 @@
12726
12727 #include <linux/linkage.h>
12728 #include <asm/inst.h>
12729 +#include <asm/alternative-asm.h>
12730
12731 #ifdef __x86_64__
12732 .data
12733 @@ -205,7 +206,7 @@ enc: .octa 0x2
12734 * num_initial_blocks = b mod 4
12735 * encrypt the initial num_initial_blocks blocks and apply ghash on
12736 * the ciphertext
12737 -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12738 +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12739 * are clobbered
12740 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12741 */
12742 @@ -214,8 +215,8 @@ enc: .octa 0x2
12743 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12744 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12745 mov arg7, %r10 # %r10 = AAD
12746 - mov arg8, %r12 # %r12 = aadLen
12747 - mov %r12, %r11
12748 + mov arg8, %r15 # %r15 = aadLen
12749 + mov %r15, %r11
12750 pxor %xmm\i, %xmm\i
12751 _get_AAD_loop\num_initial_blocks\operation:
12752 movd (%r10), \TMP1
12753 @@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12754 psrldq $4, %xmm\i
12755 pxor \TMP1, %xmm\i
12756 add $4, %r10
12757 - sub $4, %r12
12758 + sub $4, %r15
12759 jne _get_AAD_loop\num_initial_blocks\operation
12760 cmp $16, %r11
12761 je _get_AAD_loop2_done\num_initial_blocks\operation
12762 - mov $16, %r12
12763 + mov $16, %r15
12764 _get_AAD_loop2\num_initial_blocks\operation:
12765 psrldq $4, %xmm\i
12766 - sub $4, %r12
12767 - cmp %r11, %r12
12768 + sub $4, %r15
12769 + cmp %r11, %r15
12770 jne _get_AAD_loop2\num_initial_blocks\operation
12771 _get_AAD_loop2_done\num_initial_blocks\operation:
12772 movdqa SHUF_MASK(%rip), %xmm14
12773 @@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
12774 * num_initial_blocks = b mod 4
12775 * encrypt the initial num_initial_blocks blocks and apply ghash on
12776 * the ciphertext
12777 -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12778 +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12779 * are clobbered
12780 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12781 */
12782 @@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
12783 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12784 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12785 mov arg7, %r10 # %r10 = AAD
12786 - mov arg8, %r12 # %r12 = aadLen
12787 - mov %r12, %r11
12788 + mov arg8, %r15 # %r15 = aadLen
12789 + mov %r15, %r11
12790 pxor %xmm\i, %xmm\i
12791 _get_AAD_loop\num_initial_blocks\operation:
12792 movd (%r10), \TMP1
12793 @@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12794 psrldq $4, %xmm\i
12795 pxor \TMP1, %xmm\i
12796 add $4, %r10
12797 - sub $4, %r12
12798 + sub $4, %r15
12799 jne _get_AAD_loop\num_initial_blocks\operation
12800 cmp $16, %r11
12801 je _get_AAD_loop2_done\num_initial_blocks\operation
12802 - mov $16, %r12
12803 + mov $16, %r15
12804 _get_AAD_loop2\num_initial_blocks\operation:
12805 psrldq $4, %xmm\i
12806 - sub $4, %r12
12807 - cmp %r11, %r12
12808 + sub $4, %r15
12809 + cmp %r11, %r15
12810 jne _get_AAD_loop2\num_initial_blocks\operation
12811 _get_AAD_loop2_done\num_initial_blocks\operation:
12812 movdqa SHUF_MASK(%rip), %xmm14
12813 @@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
12814 *
12815 *****************************************************************************/
12816 ENTRY(aesni_gcm_dec)
12817 - push %r12
12818 + push %r15
12819 push %r13
12820 push %r14
12821 mov %rsp, %r14
12822 @@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
12823 */
12824 sub $VARIABLE_OFFSET, %rsp
12825 and $~63, %rsp # align rsp to 64 bytes
12826 - mov %arg6, %r12
12827 - movdqu (%r12), %xmm13 # %xmm13 = HashKey
12828 + mov %arg6, %r15
12829 + movdqu (%r15), %xmm13 # %xmm13 = HashKey
12830 movdqa SHUF_MASK(%rip), %xmm2
12831 PSHUFB_XMM %xmm2, %xmm13
12832
12833 @@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
12834 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
12835 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
12836 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
12837 - mov %r13, %r12
12838 - and $(3<<4), %r12
12839 + mov %r13, %r15
12840 + and $(3<<4), %r15
12841 jz _initial_num_blocks_is_0_decrypt
12842 - cmp $(2<<4), %r12
12843 + cmp $(2<<4), %r15
12844 jb _initial_num_blocks_is_1_decrypt
12845 je _initial_num_blocks_is_2_decrypt
12846 _initial_num_blocks_is_3_decrypt:
12847 @@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
12848 sub $16, %r11
12849 add %r13, %r11
12850 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
12851 - lea SHIFT_MASK+16(%rip), %r12
12852 - sub %r13, %r12
12853 + lea SHIFT_MASK+16(%rip), %r15
12854 + sub %r13, %r15
12855 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
12856 # (%r13 is the number of bytes in plaintext mod 16)
12857 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12858 + movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12859 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
12860
12861 movdqa %xmm1, %xmm2
12862 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
12863 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12864 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12865 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
12866 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
12867 pand %xmm1, %xmm2
12868 @@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
12869 sub $1, %r13
12870 jne _less_than_8_bytes_left_decrypt
12871 _multiple_of_16_bytes_decrypt:
12872 - mov arg8, %r12 # %r13 = aadLen (number of bytes)
12873 - shl $3, %r12 # convert into number of bits
12874 - movd %r12d, %xmm15 # len(A) in %xmm15
12875 + mov arg8, %r15 # %r13 = aadLen (number of bytes)
12876 + shl $3, %r15 # convert into number of bits
12877 + movd %r15d, %xmm15 # len(A) in %xmm15
12878 shl $3, %arg4 # len(C) in bits (*128)
12879 MOVQ_R64_XMM %arg4, %xmm1
12880 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12881 @@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
12882 mov %r14, %rsp
12883 pop %r14
12884 pop %r13
12885 - pop %r12
12886 + pop %r15
12887 + pax_force_retaddr
12888 ret
12889 ENDPROC(aesni_gcm_dec)
12890
12891 @@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
12892 * poly = x^128 + x^127 + x^126 + x^121 + 1
12893 ***************************************************************************/
12894 ENTRY(aesni_gcm_enc)
12895 - push %r12
12896 + push %r15
12897 push %r13
12898 push %r14
12899 mov %rsp, %r14
12900 @@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
12901 #
12902 sub $VARIABLE_OFFSET, %rsp
12903 and $~63, %rsp
12904 - mov %arg6, %r12
12905 - movdqu (%r12), %xmm13
12906 + mov %arg6, %r15
12907 + movdqu (%r15), %xmm13
12908 movdqa SHUF_MASK(%rip), %xmm2
12909 PSHUFB_XMM %xmm2, %xmm13
12910
12911 @@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
12912 movdqa %xmm13, HashKey(%rsp)
12913 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
12914 and $-16, %r13
12915 - mov %r13, %r12
12916 + mov %r13, %r15
12917
12918 # Encrypt first few blocks
12919
12920 - and $(3<<4), %r12
12921 + and $(3<<4), %r15
12922 jz _initial_num_blocks_is_0_encrypt
12923 - cmp $(2<<4), %r12
12924 + cmp $(2<<4), %r15
12925 jb _initial_num_blocks_is_1_encrypt
12926 je _initial_num_blocks_is_2_encrypt
12927 _initial_num_blocks_is_3_encrypt:
12928 @@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
12929 sub $16, %r11
12930 add %r13, %r11
12931 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
12932 - lea SHIFT_MASK+16(%rip), %r12
12933 - sub %r13, %r12
12934 + lea SHIFT_MASK+16(%rip), %r15
12935 + sub %r13, %r15
12936 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
12937 # (%r13 is the number of bytes in plaintext mod 16)
12938 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12939 + movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12940 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
12941 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
12942 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12943 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12944 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
12945 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
12946 movdqa SHUF_MASK(%rip), %xmm10
12947 @@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
12948 sub $1, %r13
12949 jne _less_than_8_bytes_left_encrypt
12950 _multiple_of_16_bytes_encrypt:
12951 - mov arg8, %r12 # %r12 = addLen (number of bytes)
12952 - shl $3, %r12
12953 - movd %r12d, %xmm15 # len(A) in %xmm15
12954 + mov arg8, %r15 # %r15 = addLen (number of bytes)
12955 + shl $3, %r15
12956 + movd %r15d, %xmm15 # len(A) in %xmm15
12957 shl $3, %arg4 # len(C) in bits (*128)
12958 MOVQ_R64_XMM %arg4, %xmm1
12959 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12960 @@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
12961 mov %r14, %rsp
12962 pop %r14
12963 pop %r13
12964 - pop %r12
12965 + pop %r15
12966 + pax_force_retaddr
12967 ret
12968 ENDPROC(aesni_gcm_enc)
12969
12970 @@ -1722,6 +1725,7 @@ _key_expansion_256a:
12971 pxor %xmm1, %xmm0
12972 movaps %xmm0, (TKEYP)
12973 add $0x10, TKEYP
12974 + pax_force_retaddr
12975 ret
12976 ENDPROC(_key_expansion_128)
12977 ENDPROC(_key_expansion_256a)
12978 @@ -1748,6 +1752,7 @@ _key_expansion_192a:
12979 shufps $0b01001110, %xmm2, %xmm1
12980 movaps %xmm1, 0x10(TKEYP)
12981 add $0x20, TKEYP
12982 + pax_force_retaddr
12983 ret
12984 ENDPROC(_key_expansion_192a)
12985
12986 @@ -1768,6 +1773,7 @@ _key_expansion_192b:
12987
12988 movaps %xmm0, (TKEYP)
12989 add $0x10, TKEYP
12990 + pax_force_retaddr
12991 ret
12992 ENDPROC(_key_expansion_192b)
12993
12994 @@ -1781,6 +1787,7 @@ _key_expansion_256b:
12995 pxor %xmm1, %xmm2
12996 movaps %xmm2, (TKEYP)
12997 add $0x10, TKEYP
12998 + pax_force_retaddr
12999 ret
13000 ENDPROC(_key_expansion_256b)
13001
13002 @@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13003 #ifndef __x86_64__
13004 popl KEYP
13005 #endif
13006 + pax_force_retaddr
13007 ret
13008 ENDPROC(aesni_set_key)
13009
13010 @@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13011 popl KLEN
13012 popl KEYP
13013 #endif
13014 + pax_force_retaddr
13015 ret
13016 ENDPROC(aesni_enc)
13017
13018 @@ -1974,6 +1983,7 @@ _aesni_enc1:
13019 AESENC KEY STATE
13020 movaps 0x70(TKEYP), KEY
13021 AESENCLAST KEY STATE
13022 + pax_force_retaddr
13023 ret
13024 ENDPROC(_aesni_enc1)
13025
13026 @@ -2083,6 +2093,7 @@ _aesni_enc4:
13027 AESENCLAST KEY STATE2
13028 AESENCLAST KEY STATE3
13029 AESENCLAST KEY STATE4
13030 + pax_force_retaddr
13031 ret
13032 ENDPROC(_aesni_enc4)
13033
13034 @@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13035 popl KLEN
13036 popl KEYP
13037 #endif
13038 + pax_force_retaddr
13039 ret
13040 ENDPROC(aesni_dec)
13041
13042 @@ -2164,6 +2176,7 @@ _aesni_dec1:
13043 AESDEC KEY STATE
13044 movaps 0x70(TKEYP), KEY
13045 AESDECLAST KEY STATE
13046 + pax_force_retaddr
13047 ret
13048 ENDPROC(_aesni_dec1)
13049
13050 @@ -2273,6 +2286,7 @@ _aesni_dec4:
13051 AESDECLAST KEY STATE2
13052 AESDECLAST KEY STATE3
13053 AESDECLAST KEY STATE4
13054 + pax_force_retaddr
13055 ret
13056 ENDPROC(_aesni_dec4)
13057
13058 @@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13059 popl KEYP
13060 popl LEN
13061 #endif
13062 + pax_force_retaddr
13063 ret
13064 ENDPROC(aesni_ecb_enc)
13065
13066 @@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13067 popl KEYP
13068 popl LEN
13069 #endif
13070 + pax_force_retaddr
13071 ret
13072 ENDPROC(aesni_ecb_dec)
13073
13074 @@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13075 popl LEN
13076 popl IVP
13077 #endif
13078 + pax_force_retaddr
13079 ret
13080 ENDPROC(aesni_cbc_enc)
13081
13082 @@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13083 popl LEN
13084 popl IVP
13085 #endif
13086 + pax_force_retaddr
13087 ret
13088 ENDPROC(aesni_cbc_dec)
13089
13090 @@ -2550,6 +2568,7 @@ _aesni_inc_init:
13091 mov $1, TCTR_LOW
13092 MOVQ_R64_XMM TCTR_LOW INC
13093 MOVQ_R64_XMM CTR TCTR_LOW
13094 + pax_force_retaddr
13095 ret
13096 ENDPROC(_aesni_inc_init)
13097
13098 @@ -2579,6 +2598,7 @@ _aesni_inc:
13099 .Linc_low:
13100 movaps CTR, IV
13101 PSHUFB_XMM BSWAP_MASK IV
13102 + pax_force_retaddr
13103 ret
13104 ENDPROC(_aesni_inc)
13105
13106 @@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13107 .Lctr_enc_ret:
13108 movups IV, (IVP)
13109 .Lctr_enc_just_ret:
13110 + pax_force_retaddr
13111 ret
13112 ENDPROC(aesni_ctr_enc)
13113
13114 @@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13115 pxor INC, STATE4
13116 movdqu STATE4, 0x70(OUTP)
13117
13118 + pax_force_retaddr
13119 ret
13120 ENDPROC(aesni_xts_crypt8)
13121
13122 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13123 index 246c670..466e2d6 100644
13124 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13125 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13126 @@ -21,6 +21,7 @@
13127 */
13128
13129 #include <linux/linkage.h>
13130 +#include <asm/alternative-asm.h>
13131
13132 .file "blowfish-x86_64-asm.S"
13133 .text
13134 @@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13135 jnz .L__enc_xor;
13136
13137 write_block();
13138 + pax_force_retaddr
13139 ret;
13140 .L__enc_xor:
13141 xor_block();
13142 + pax_force_retaddr
13143 ret;
13144 ENDPROC(__blowfish_enc_blk)
13145
13146 @@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13147
13148 movq %r11, %rbp;
13149
13150 + pax_force_retaddr
13151 ret;
13152 ENDPROC(blowfish_dec_blk)
13153
13154 @@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13155
13156 popq %rbx;
13157 popq %rbp;
13158 + pax_force_retaddr
13159 ret;
13160
13161 .L__enc_xor4:
13162 @@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13163
13164 popq %rbx;
13165 popq %rbp;
13166 + pax_force_retaddr
13167 ret;
13168 ENDPROC(__blowfish_enc_blk_4way)
13169
13170 @@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13171 popq %rbx;
13172 popq %rbp;
13173
13174 + pax_force_retaddr
13175 ret;
13176 ENDPROC(blowfish_dec_blk_4way)
13177 diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13178 index ce71f92..1dce7ec 100644
13179 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13180 +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13181 @@ -16,6 +16,7 @@
13182 */
13183
13184 #include <linux/linkage.h>
13185 +#include <asm/alternative-asm.h>
13186
13187 #define CAMELLIA_TABLE_BYTE_LEN 272
13188
13189 @@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13190 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13191 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13192 %rcx, (%r9));
13193 + pax_force_retaddr
13194 ret;
13195 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13196
13197 @@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13198 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13199 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13200 %rax, (%r9));
13201 + pax_force_retaddr
13202 ret;
13203 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13204
13205 @@ -780,6 +783,7 @@ __camellia_enc_blk16:
13206 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13207 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13208
13209 + pax_force_retaddr
13210 ret;
13211
13212 .align 8
13213 @@ -865,6 +869,7 @@ __camellia_dec_blk16:
13214 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13215 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13216
13217 + pax_force_retaddr
13218 ret;
13219
13220 .align 8
13221 @@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13222 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13223 %xmm8, %rsi);
13224
13225 + pax_force_retaddr
13226 ret;
13227 ENDPROC(camellia_ecb_enc_16way)
13228
13229 @@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13230 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13231 %xmm8, %rsi);
13232
13233 + pax_force_retaddr
13234 ret;
13235 ENDPROC(camellia_ecb_dec_16way)
13236
13237 @@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13238 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13239 %xmm8, %rsi);
13240
13241 + pax_force_retaddr
13242 ret;
13243 ENDPROC(camellia_cbc_dec_16way)
13244
13245 @@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13246 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13247 %xmm8, %rsi);
13248
13249 + pax_force_retaddr
13250 ret;
13251 ENDPROC(camellia_ctr_16way)
13252
13253 @@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13254 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13255 %xmm8, %rsi);
13256
13257 + pax_force_retaddr
13258 ret;
13259 ENDPROC(camellia_xts_crypt_16way)
13260
13261 diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13262 index 0e0b886..5a3123c 100644
13263 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13264 +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13265 @@ -11,6 +11,7 @@
13266 */
13267
13268 #include <linux/linkage.h>
13269 +#include <asm/alternative-asm.h>
13270
13271 #define CAMELLIA_TABLE_BYTE_LEN 272
13272
13273 @@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13274 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13275 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13276 %rcx, (%r9));
13277 + pax_force_retaddr
13278 ret;
13279 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13280
13281 @@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13282 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13283 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13284 %rax, (%r9));
13285 + pax_force_retaddr
13286 ret;
13287 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13288
13289 @@ -820,6 +823,7 @@ __camellia_enc_blk32:
13290 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13291 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13292
13293 + pax_force_retaddr
13294 ret;
13295
13296 .align 8
13297 @@ -905,6 +909,7 @@ __camellia_dec_blk32:
13298 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13299 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13300
13301 + pax_force_retaddr
13302 ret;
13303
13304 .align 8
13305 @@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13306
13307 vzeroupper;
13308
13309 + pax_force_retaddr
13310 ret;
13311 ENDPROC(camellia_ecb_enc_32way)
13312
13313 @@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13314
13315 vzeroupper;
13316
13317 + pax_force_retaddr
13318 ret;
13319 ENDPROC(camellia_ecb_dec_32way)
13320
13321 @@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13322
13323 vzeroupper;
13324
13325 + pax_force_retaddr
13326 ret;
13327 ENDPROC(camellia_cbc_dec_32way)
13328
13329 @@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13330
13331 vzeroupper;
13332
13333 + pax_force_retaddr
13334 ret;
13335 ENDPROC(camellia_ctr_32way)
13336
13337 @@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13338
13339 vzeroupper;
13340
13341 + pax_force_retaddr
13342 ret;
13343 ENDPROC(camellia_xts_crypt_32way)
13344
13345 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13346 index 310319c..db3d7b5 100644
13347 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13348 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13349 @@ -21,6 +21,7 @@
13350 */
13351
13352 #include <linux/linkage.h>
13353 +#include <asm/alternative-asm.h>
13354
13355 .file "camellia-x86_64-asm_64.S"
13356 .text
13357 @@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13358 enc_outunpack(mov, RT1);
13359
13360 movq RRBP, %rbp;
13361 + pax_force_retaddr
13362 ret;
13363
13364 .L__enc_xor:
13365 enc_outunpack(xor, RT1);
13366
13367 movq RRBP, %rbp;
13368 + pax_force_retaddr
13369 ret;
13370 ENDPROC(__camellia_enc_blk)
13371
13372 @@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13373 dec_outunpack();
13374
13375 movq RRBP, %rbp;
13376 + pax_force_retaddr
13377 ret;
13378 ENDPROC(camellia_dec_blk)
13379
13380 @@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13381
13382 movq RRBP, %rbp;
13383 popq %rbx;
13384 + pax_force_retaddr
13385 ret;
13386
13387 .L__enc2_xor:
13388 @@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13389
13390 movq RRBP, %rbp;
13391 popq %rbx;
13392 + pax_force_retaddr
13393 ret;
13394 ENDPROC(__camellia_enc_blk_2way)
13395
13396 @@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13397
13398 movq RRBP, %rbp;
13399 movq RXOR, %rbx;
13400 + pax_force_retaddr
13401 ret;
13402 ENDPROC(camellia_dec_blk_2way)
13403 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13404 index c35fd5d..2d8c7db 100644
13405 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13406 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13407 @@ -24,6 +24,7 @@
13408 */
13409
13410 #include <linux/linkage.h>
13411 +#include <asm/alternative-asm.h>
13412
13413 .file "cast5-avx-x86_64-asm_64.S"
13414
13415 @@ -281,6 +282,7 @@ __cast5_enc_blk16:
13416 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13417 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13418
13419 + pax_force_retaddr
13420 ret;
13421 ENDPROC(__cast5_enc_blk16)
13422
13423 @@ -352,6 +354,7 @@ __cast5_dec_blk16:
13424 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13425 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13426
13427 + pax_force_retaddr
13428 ret;
13429
13430 .L__skip_dec:
13431 @@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13432 vmovdqu RR4, (6*4*4)(%r11);
13433 vmovdqu RL4, (7*4*4)(%r11);
13434
13435 + pax_force_retaddr
13436 ret;
13437 ENDPROC(cast5_ecb_enc_16way)
13438
13439 @@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13440 vmovdqu RR4, (6*4*4)(%r11);
13441 vmovdqu RL4, (7*4*4)(%r11);
13442
13443 + pax_force_retaddr
13444 ret;
13445 ENDPROC(cast5_ecb_dec_16way)
13446
13447 @@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13448 * %rdx: src
13449 */
13450
13451 - pushq %r12;
13452 + pushq %r14;
13453
13454 movq %rsi, %r11;
13455 - movq %rdx, %r12;
13456 + movq %rdx, %r14;
13457
13458 vmovdqu (0*16)(%rdx), RL1;
13459 vmovdqu (1*16)(%rdx), RR1;
13460 @@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13461 call __cast5_dec_blk16;
13462
13463 /* xor with src */
13464 - vmovq (%r12), RX;
13465 + vmovq (%r14), RX;
13466 vpshufd $0x4f, RX, RX;
13467 vpxor RX, RR1, RR1;
13468 - vpxor 0*16+8(%r12), RL1, RL1;
13469 - vpxor 1*16+8(%r12), RR2, RR2;
13470 - vpxor 2*16+8(%r12), RL2, RL2;
13471 - vpxor 3*16+8(%r12), RR3, RR3;
13472 - vpxor 4*16+8(%r12), RL3, RL3;
13473 - vpxor 5*16+8(%r12), RR4, RR4;
13474 - vpxor 6*16+8(%r12), RL4, RL4;
13475 + vpxor 0*16+8(%r14), RL1, RL1;
13476 + vpxor 1*16+8(%r14), RR2, RR2;
13477 + vpxor 2*16+8(%r14), RL2, RL2;
13478 + vpxor 3*16+8(%r14), RR3, RR3;
13479 + vpxor 4*16+8(%r14), RL3, RL3;
13480 + vpxor 5*16+8(%r14), RR4, RR4;
13481 + vpxor 6*16+8(%r14), RL4, RL4;
13482
13483 vmovdqu RR1, (0*16)(%r11);
13484 vmovdqu RL1, (1*16)(%r11);
13485 @@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13486 vmovdqu RR4, (6*16)(%r11);
13487 vmovdqu RL4, (7*16)(%r11);
13488
13489 - popq %r12;
13490 + popq %r14;
13491
13492 + pax_force_retaddr
13493 ret;
13494 ENDPROC(cast5_cbc_dec_16way)
13495
13496 @@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13497 * %rcx: iv (big endian, 64bit)
13498 */
13499
13500 - pushq %r12;
13501 + pushq %r14;
13502
13503 movq %rsi, %r11;
13504 - movq %rdx, %r12;
13505 + movq %rdx, %r14;
13506
13507 vpcmpeqd RTMP, RTMP, RTMP;
13508 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13509 @@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13510 call __cast5_enc_blk16;
13511
13512 /* dst = src ^ iv */
13513 - vpxor (0*16)(%r12), RR1, RR1;
13514 - vpxor (1*16)(%r12), RL1, RL1;
13515 - vpxor (2*16)(%r12), RR2, RR2;
13516 - vpxor (3*16)(%r12), RL2, RL2;
13517 - vpxor (4*16)(%r12), RR3, RR3;
13518 - vpxor (5*16)(%r12), RL3, RL3;
13519 - vpxor (6*16)(%r12), RR4, RR4;
13520 - vpxor (7*16)(%r12), RL4, RL4;
13521 + vpxor (0*16)(%r14), RR1, RR1;
13522 + vpxor (1*16)(%r14), RL1, RL1;
13523 + vpxor (2*16)(%r14), RR2, RR2;
13524 + vpxor (3*16)(%r14), RL2, RL2;
13525 + vpxor (4*16)(%r14), RR3, RR3;
13526 + vpxor (5*16)(%r14), RL3, RL3;
13527 + vpxor (6*16)(%r14), RR4, RR4;
13528 + vpxor (7*16)(%r14), RL4, RL4;
13529 vmovdqu RR1, (0*16)(%r11);
13530 vmovdqu RL1, (1*16)(%r11);
13531 vmovdqu RR2, (2*16)(%r11);
13532 @@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13533 vmovdqu RR4, (6*16)(%r11);
13534 vmovdqu RL4, (7*16)(%r11);
13535
13536 - popq %r12;
13537 + popq %r14;
13538
13539 + pax_force_retaddr
13540 ret;
13541 ENDPROC(cast5_ctr_16way)
13542 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13543 index e3531f8..e123f35 100644
13544 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13545 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13546 @@ -24,6 +24,7 @@
13547 */
13548
13549 #include <linux/linkage.h>
13550 +#include <asm/alternative-asm.h>
13551 #include "glue_helper-asm-avx.S"
13552
13553 .file "cast6-avx-x86_64-asm_64.S"
13554 @@ -295,6 +296,7 @@ __cast6_enc_blk8:
13555 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13556 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13557
13558 + pax_force_retaddr
13559 ret;
13560 ENDPROC(__cast6_enc_blk8)
13561
13562 @@ -340,6 +342,7 @@ __cast6_dec_blk8:
13563 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13564 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13565
13566 + pax_force_retaddr
13567 ret;
13568 ENDPROC(__cast6_dec_blk8)
13569
13570 @@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13571
13572 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13573
13574 + pax_force_retaddr
13575 ret;
13576 ENDPROC(cast6_ecb_enc_8way)
13577
13578 @@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13579
13580 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13581
13582 + pax_force_retaddr
13583 ret;
13584 ENDPROC(cast6_ecb_dec_8way)
13585
13586 @@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13587 * %rdx: src
13588 */
13589
13590 - pushq %r12;
13591 + pushq %r14;
13592
13593 movq %rsi, %r11;
13594 - movq %rdx, %r12;
13595 + movq %rdx, %r14;
13596
13597 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13598
13599 call __cast6_dec_blk8;
13600
13601 - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13602 + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13603
13604 - popq %r12;
13605 + popq %r14;
13606
13607 + pax_force_retaddr
13608 ret;
13609 ENDPROC(cast6_cbc_dec_8way)
13610
13611 @@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13612 * %rcx: iv (little endian, 128bit)
13613 */
13614
13615 - pushq %r12;
13616 + pushq %r14;
13617
13618 movq %rsi, %r11;
13619 - movq %rdx, %r12;
13620 + movq %rdx, %r14;
13621
13622 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13623 RD2, RX, RKR, RKM);
13624
13625 call __cast6_enc_blk8;
13626
13627 - store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13628 + store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13629
13630 - popq %r12;
13631 + popq %r14;
13632
13633 + pax_force_retaddr
13634 ret;
13635 ENDPROC(cast6_ctr_8way)
13636
13637 @@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13638 /* dst <= regs xor IVs(in dst) */
13639 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13640
13641 + pax_force_retaddr
13642 ret;
13643 ENDPROC(cast6_xts_enc_8way)
13644
13645 @@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13646 /* dst <= regs xor IVs(in dst) */
13647 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13648
13649 + pax_force_retaddr
13650 ret;
13651 ENDPROC(cast6_xts_dec_8way)
13652 diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13653 index dbc4339..de6e120 100644
13654 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13655 +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13656 @@ -45,6 +45,7 @@
13657
13658 #include <asm/inst.h>
13659 #include <linux/linkage.h>
13660 +#include <asm/alternative-asm.h>
13661
13662 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13663
13664 @@ -312,6 +313,7 @@ do_return:
13665 popq %rsi
13666 popq %rdi
13667 popq %rbx
13668 + pax_force_retaddr
13669 ret
13670
13671 ################################################################
13672 diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13673 index 586f41a..d02851e 100644
13674 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13675 +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13676 @@ -18,6 +18,7 @@
13677
13678 #include <linux/linkage.h>
13679 #include <asm/inst.h>
13680 +#include <asm/alternative-asm.h>
13681
13682 .data
13683
13684 @@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
13685 psrlq $1, T2
13686 pxor T2, T1
13687 pxor T1, DATA
13688 + pax_force_retaddr
13689 ret
13690 ENDPROC(__clmul_gf128mul_ble)
13691
13692 @@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
13693 call __clmul_gf128mul_ble
13694 PSHUFB_XMM BSWAP DATA
13695 movups DATA, (%rdi)
13696 + pax_force_retaddr
13697 ret
13698 ENDPROC(clmul_ghash_mul)
13699
13700 @@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
13701 PSHUFB_XMM BSWAP DATA
13702 movups DATA, (%rdi)
13703 .Lupdate_just_ret:
13704 + pax_force_retaddr
13705 ret
13706 ENDPROC(clmul_ghash_update)
13707
13708 @@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
13709 pand .Lpoly, %xmm1
13710 pxor %xmm1, %xmm0
13711 movups %xmm0, (%rdi)
13712 + pax_force_retaddr
13713 ret
13714 ENDPROC(clmul_ghash_setkey)
13715 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13716 index 9279e0b..c4b3d2c 100644
13717 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13718 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13719 @@ -1,4 +1,5 @@
13720 #include <linux/linkage.h>
13721 +#include <asm/alternative-asm.h>
13722
13723 # enter salsa20_encrypt_bytes
13724 ENTRY(salsa20_encrypt_bytes)
13725 @@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13726 add %r11,%rsp
13727 mov %rdi,%rax
13728 mov %rsi,%rdx
13729 + pax_force_retaddr
13730 ret
13731 # bytesatleast65:
13732 ._bytesatleast65:
13733 @@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13734 add %r11,%rsp
13735 mov %rdi,%rax
13736 mov %rsi,%rdx
13737 + pax_force_retaddr
13738 ret
13739 ENDPROC(salsa20_keysetup)
13740
13741 @@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13742 add %r11,%rsp
13743 mov %rdi,%rax
13744 mov %rsi,%rdx
13745 + pax_force_retaddr
13746 ret
13747 ENDPROC(salsa20_ivsetup)
13748 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13749 index 2f202f4..d9164d6 100644
13750 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13751 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13752 @@ -24,6 +24,7 @@
13753 */
13754
13755 #include <linux/linkage.h>
13756 +#include <asm/alternative-asm.h>
13757 #include "glue_helper-asm-avx.S"
13758
13759 .file "serpent-avx-x86_64-asm_64.S"
13760 @@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
13761 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13762 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13763
13764 + pax_force_retaddr
13765 ret;
13766 ENDPROC(__serpent_enc_blk8_avx)
13767
13768 @@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
13769 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13770 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13771
13772 + pax_force_retaddr
13773 ret;
13774 ENDPROC(__serpent_dec_blk8_avx)
13775
13776 @@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
13777
13778 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13779
13780 + pax_force_retaddr
13781 ret;
13782 ENDPROC(serpent_ecb_enc_8way_avx)
13783
13784 @@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
13785
13786 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13787
13788 + pax_force_retaddr
13789 ret;
13790 ENDPROC(serpent_ecb_dec_8way_avx)
13791
13792 @@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
13793
13794 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13795
13796 + pax_force_retaddr
13797 ret;
13798 ENDPROC(serpent_cbc_dec_8way_avx)
13799
13800 @@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
13801
13802 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13803
13804 + pax_force_retaddr
13805 ret;
13806 ENDPROC(serpent_ctr_8way_avx)
13807
13808 @@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
13809 /* dst <= regs xor IVs(in dst) */
13810 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13811
13812 + pax_force_retaddr
13813 ret;
13814 ENDPROC(serpent_xts_enc_8way_avx)
13815
13816 @@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
13817 /* dst <= regs xor IVs(in dst) */
13818 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13819
13820 + pax_force_retaddr
13821 ret;
13822 ENDPROC(serpent_xts_dec_8way_avx)
13823 diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
13824 index b222085..abd483c 100644
13825 --- a/arch/x86/crypto/serpent-avx2-asm_64.S
13826 +++ b/arch/x86/crypto/serpent-avx2-asm_64.S
13827 @@ -15,6 +15,7 @@
13828 */
13829
13830 #include <linux/linkage.h>
13831 +#include <asm/alternative-asm.h>
13832 #include "glue_helper-asm-avx2.S"
13833
13834 .file "serpent-avx2-asm_64.S"
13835 @@ -610,6 +611,7 @@ __serpent_enc_blk16:
13836 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13837 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13838
13839 + pax_force_retaddr
13840 ret;
13841 ENDPROC(__serpent_enc_blk16)
13842
13843 @@ -664,6 +666,7 @@ __serpent_dec_blk16:
13844 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13845 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13846
13847 + pax_force_retaddr
13848 ret;
13849 ENDPROC(__serpent_dec_blk16)
13850
13851 @@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
13852
13853 vzeroupper;
13854
13855 + pax_force_retaddr
13856 ret;
13857 ENDPROC(serpent_ecb_enc_16way)
13858
13859 @@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
13860
13861 vzeroupper;
13862
13863 + pax_force_retaddr
13864 ret;
13865 ENDPROC(serpent_ecb_dec_16way)
13866
13867 @@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
13868
13869 vzeroupper;
13870
13871 + pax_force_retaddr
13872 ret;
13873 ENDPROC(serpent_cbc_dec_16way)
13874
13875 @@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
13876
13877 vzeroupper;
13878
13879 + pax_force_retaddr
13880 ret;
13881 ENDPROC(serpent_ctr_16way)
13882
13883 @@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
13884
13885 vzeroupper;
13886
13887 + pax_force_retaddr
13888 ret;
13889 ENDPROC(serpent_xts_enc_16way)
13890
13891 @@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
13892
13893 vzeroupper;
13894
13895 + pax_force_retaddr
13896 ret;
13897 ENDPROC(serpent_xts_dec_16way)
13898 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13899 index acc066c..1559cc4 100644
13900 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13901 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13902 @@ -25,6 +25,7 @@
13903 */
13904
13905 #include <linux/linkage.h>
13906 +#include <asm/alternative-asm.h>
13907
13908 .file "serpent-sse2-x86_64-asm_64.S"
13909 .text
13910 @@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
13911 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13912 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13913
13914 + pax_force_retaddr
13915 ret;
13916
13917 .L__enc_xor8:
13918 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13919 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13920
13921 + pax_force_retaddr
13922 ret;
13923 ENDPROC(__serpent_enc_blk_8way)
13924
13925 @@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
13926 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13927 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13928
13929 + pax_force_retaddr
13930 ret;
13931 ENDPROC(serpent_dec_blk_8way)
13932 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
13933 index a410950..9dfe7ad 100644
13934 --- a/arch/x86/crypto/sha1_ssse3_asm.S
13935 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
13936 @@ -29,6 +29,7 @@
13937 */
13938
13939 #include <linux/linkage.h>
13940 +#include <asm/alternative-asm.h>
13941
13942 #define CTX %rdi // arg1
13943 #define BUF %rsi // arg2
13944 @@ -75,9 +76,9 @@
13945
13946 push %rbx
13947 push %rbp
13948 - push %r12
13949 + push %r14
13950
13951 - mov %rsp, %r12
13952 + mov %rsp, %r14
13953 sub $64, %rsp # allocate workspace
13954 and $~15, %rsp # align stack
13955
13956 @@ -99,11 +100,12 @@
13957 xor %rax, %rax
13958 rep stosq
13959
13960 - mov %r12, %rsp # deallocate workspace
13961 + mov %r14, %rsp # deallocate workspace
13962
13963 - pop %r12
13964 + pop %r14
13965 pop %rbp
13966 pop %rbx
13967 + pax_force_retaddr
13968 ret
13969
13970 ENDPROC(\name)
13971 diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
13972 index 642f156..51a513c 100644
13973 --- a/arch/x86/crypto/sha256-avx-asm.S
13974 +++ b/arch/x86/crypto/sha256-avx-asm.S
13975 @@ -49,6 +49,7 @@
13976
13977 #ifdef CONFIG_AS_AVX
13978 #include <linux/linkage.h>
13979 +#include <asm/alternative-asm.h>
13980
13981 ## assume buffers not aligned
13982 #define VMOVDQ vmovdqu
13983 @@ -460,6 +461,7 @@ done_hash:
13984 popq %r13
13985 popq %rbp
13986 popq %rbx
13987 + pax_force_retaddr
13988 ret
13989 ENDPROC(sha256_transform_avx)
13990
13991 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
13992 index 9e86944..3795e6a 100644
13993 --- a/arch/x86/crypto/sha256-avx2-asm.S
13994 +++ b/arch/x86/crypto/sha256-avx2-asm.S
13995 @@ -50,6 +50,7 @@
13996
13997 #ifdef CONFIG_AS_AVX2
13998 #include <linux/linkage.h>
13999 +#include <asm/alternative-asm.h>
14000
14001 ## assume buffers not aligned
14002 #define VMOVDQ vmovdqu
14003 @@ -720,6 +721,7 @@ done_hash:
14004 popq %r12
14005 popq %rbp
14006 popq %rbx
14007 + pax_force_retaddr
14008 ret
14009 ENDPROC(sha256_transform_rorx)
14010
14011 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14012 index f833b74..8c62a9e 100644
14013 --- a/arch/x86/crypto/sha256-ssse3-asm.S
14014 +++ b/arch/x86/crypto/sha256-ssse3-asm.S
14015 @@ -47,6 +47,7 @@
14016 ########################################################################
14017
14018 #include <linux/linkage.h>
14019 +#include <asm/alternative-asm.h>
14020
14021 ## assume buffers not aligned
14022 #define MOVDQ movdqu
14023 @@ -471,6 +472,7 @@ done_hash:
14024 popq %rbp
14025 popq %rbx
14026
14027 + pax_force_retaddr
14028 ret
14029 ENDPROC(sha256_transform_ssse3)
14030
14031 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14032 index 974dde9..a823ff9 100644
14033 --- a/arch/x86/crypto/sha512-avx-asm.S
14034 +++ b/arch/x86/crypto/sha512-avx-asm.S
14035 @@ -49,6 +49,7 @@
14036
14037 #ifdef CONFIG_AS_AVX
14038 #include <linux/linkage.h>
14039 +#include <asm/alternative-asm.h>
14040
14041 .text
14042
14043 @@ -364,6 +365,7 @@ updateblock:
14044 mov frame_RSPSAVE(%rsp), %rsp
14045
14046 nowork:
14047 + pax_force_retaddr
14048 ret
14049 ENDPROC(sha512_transform_avx)
14050
14051 diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14052 index 568b961..ed20c37 100644
14053 --- a/arch/x86/crypto/sha512-avx2-asm.S
14054 +++ b/arch/x86/crypto/sha512-avx2-asm.S
14055 @@ -51,6 +51,7 @@
14056
14057 #ifdef CONFIG_AS_AVX2
14058 #include <linux/linkage.h>
14059 +#include <asm/alternative-asm.h>
14060
14061 .text
14062
14063 @@ -678,6 +679,7 @@ done_hash:
14064
14065 # Restore Stack Pointer
14066 mov frame_RSPSAVE(%rsp), %rsp
14067 + pax_force_retaddr
14068 ret
14069 ENDPROC(sha512_transform_rorx)
14070
14071 diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14072 index fb56855..6edd768 100644
14073 --- a/arch/x86/crypto/sha512-ssse3-asm.S
14074 +++ b/arch/x86/crypto/sha512-ssse3-asm.S
14075 @@ -48,6 +48,7 @@
14076 ########################################################################
14077
14078 #include <linux/linkage.h>
14079 +#include <asm/alternative-asm.h>
14080
14081 .text
14082
14083 @@ -363,6 +364,7 @@ updateblock:
14084 mov frame_RSPSAVE(%rsp), %rsp
14085
14086 nowork:
14087 + pax_force_retaddr
14088 ret
14089 ENDPROC(sha512_transform_ssse3)
14090
14091 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14092 index 0505813..b067311 100644
14093 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14094 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14095 @@ -24,6 +24,7 @@
14096 */
14097
14098 #include <linux/linkage.h>
14099 +#include <asm/alternative-asm.h>
14100 #include "glue_helper-asm-avx.S"
14101
14102 .file "twofish-avx-x86_64-asm_64.S"
14103 @@ -284,6 +285,7 @@ __twofish_enc_blk8:
14104 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14105 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14106
14107 + pax_force_retaddr
14108 ret;
14109 ENDPROC(__twofish_enc_blk8)
14110
14111 @@ -324,6 +326,7 @@ __twofish_dec_blk8:
14112 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14113 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14114
14115 + pax_force_retaddr
14116 ret;
14117 ENDPROC(__twofish_dec_blk8)
14118
14119 @@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14120
14121 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14122
14123 + pax_force_retaddr
14124 ret;
14125 ENDPROC(twofish_ecb_enc_8way)
14126
14127 @@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14128
14129 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14130
14131 + pax_force_retaddr
14132 ret;
14133 ENDPROC(twofish_ecb_dec_8way)
14134
14135 @@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14136 * %rdx: src
14137 */
14138
14139 - pushq %r12;
14140 + pushq %r14;
14141
14142 movq %rsi, %r11;
14143 - movq %rdx, %r12;
14144 + movq %rdx, %r14;
14145
14146 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14147
14148 call __twofish_dec_blk8;
14149
14150 - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14151 + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14152
14153 - popq %r12;
14154 + popq %r14;
14155
14156 + pax_force_retaddr
14157 ret;
14158 ENDPROC(twofish_cbc_dec_8way)
14159
14160 @@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14161 * %rcx: iv (little endian, 128bit)
14162 */
14163
14164 - pushq %r12;
14165 + pushq %r14;
14166
14167 movq %rsi, %r11;
14168 - movq %rdx, %r12;
14169 + movq %rdx, %r14;
14170
14171 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14172 RD2, RX0, RX1, RY0);
14173
14174 call __twofish_enc_blk8;
14175
14176 - store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14177 + store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14178
14179 - popq %r12;
14180 + popq %r14;
14181
14182 + pax_force_retaddr
14183 ret;
14184 ENDPROC(twofish_ctr_8way)
14185
14186 @@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14187 /* dst <= regs xor IVs(in dst) */
14188 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14189
14190 + pax_force_retaddr
14191 ret;
14192 ENDPROC(twofish_xts_enc_8way)
14193
14194 @@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14195 /* dst <= regs xor IVs(in dst) */
14196 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14197
14198 + pax_force_retaddr
14199 ret;
14200 ENDPROC(twofish_xts_dec_8way)
14201 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14202 index 1c3b7ce..02f578d 100644
14203 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14204 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14205 @@ -21,6 +21,7 @@
14206 */
14207
14208 #include <linux/linkage.h>
14209 +#include <asm/alternative-asm.h>
14210
14211 .file "twofish-x86_64-asm-3way.S"
14212 .text
14213 @@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14214 popq %r13;
14215 popq %r14;
14216 popq %r15;
14217 + pax_force_retaddr
14218 ret;
14219
14220 .L__enc_xor3:
14221 @@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14222 popq %r13;
14223 popq %r14;
14224 popq %r15;
14225 + pax_force_retaddr
14226 ret;
14227 ENDPROC(__twofish_enc_blk_3way)
14228
14229 @@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14230 popq %r13;
14231 popq %r14;
14232 popq %r15;
14233 + pax_force_retaddr
14234 ret;
14235 ENDPROC(twofish_dec_blk_3way)
14236 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14237 index a039d21..524b8b2 100644
14238 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14239 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14240 @@ -22,6 +22,7 @@
14241
14242 #include <linux/linkage.h>
14243 #include <asm/asm-offsets.h>
14244 +#include <asm/alternative-asm.h>
14245
14246 #define a_offset 0
14247 #define b_offset 4
14248 @@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14249
14250 popq R1
14251 movq $1,%rax
14252 + pax_force_retaddr
14253 ret
14254 ENDPROC(twofish_enc_blk)
14255
14256 @@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14257
14258 popq R1
14259 movq $1,%rax
14260 + pax_force_retaddr
14261 ret
14262 ENDPROC(twofish_dec_blk)
14263 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14264 index d21ff89..6da8e6e 100644
14265 --- a/arch/x86/ia32/ia32_aout.c
14266 +++ b/arch/x86/ia32/ia32_aout.c
14267 @@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14268 unsigned long dump_start, dump_size;
14269 struct user32 dump;
14270
14271 + memset(&dump, 0, sizeof(dump));
14272 +
14273 fs = get_fs();
14274 set_fs(KERNEL_DS);
14275 has_dumped = 1;
14276 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14277 index 2206757..85cbcfa 100644
14278 --- a/arch/x86/ia32/ia32_signal.c
14279 +++ b/arch/x86/ia32/ia32_signal.c
14280 @@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14281 if (__get_user(set.sig[0], &frame->sc.oldmask)
14282 || (_COMPAT_NSIG_WORDS > 1
14283 && __copy_from_user((((char *) &set.sig) + 4),
14284 - &frame->extramask,
14285 + frame->extramask,
14286 sizeof(frame->extramask))))
14287 goto badframe;
14288
14289 @@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14290 sp -= frame_size;
14291 /* Align the stack pointer according to the i386 ABI,
14292 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14293 - sp = ((sp + 4) & -16ul) - 4;
14294 + sp = ((sp - 12) & -16ul) - 4;
14295 return (void __user *) sp;
14296 }
14297
14298 @@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14299 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
14300 sigreturn);
14301 else
14302 - restorer = &frame->retcode;
14303 + restorer = frame->retcode;
14304 }
14305
14306 put_user_try {
14307 @@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14308 * These are actually not used anymore, but left because some
14309 * gdb versions depend on them as a marker.
14310 */
14311 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14312 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14313 } put_user_catch(err);
14314
14315 if (err)
14316 @@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14317 0xb8,
14318 __NR_ia32_rt_sigreturn,
14319 0x80cd,
14320 - 0,
14321 + 0
14322 };
14323
14324 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14325 @@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14326
14327 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14328 restorer = ksig->ka.sa.sa_restorer;
14329 + else if (current->mm->context.vdso)
14330 + /* Return stub is in 32bit vsyscall page */
14331 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14332 else
14333 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
14334 - rt_sigreturn);
14335 + restorer = frame->retcode;
14336 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14337
14338 /*
14339 * Not actually used anymore, but left because some gdb
14340 * versions need it.
14341 */
14342 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14343 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14344 } put_user_catch(err);
14345
14346 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14347 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14348 index 4299eb0..c0687a7 100644
14349 --- a/arch/x86/ia32/ia32entry.S
14350 +++ b/arch/x86/ia32/ia32entry.S
14351 @@ -15,8 +15,10 @@
14352 #include <asm/irqflags.h>
14353 #include <asm/asm.h>
14354 #include <asm/smap.h>
14355 +#include <asm/pgtable.h>
14356 #include <linux/linkage.h>
14357 #include <linux/err.h>
14358 +#include <asm/alternative-asm.h>
14359
14360 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14361 #include <linux/elf-em.h>
14362 @@ -62,12 +64,12 @@
14363 */
14364 .macro LOAD_ARGS32 offset, _r9=0
14365 .if \_r9
14366 - movl \offset+16(%rsp),%r9d
14367 + movl \offset+R9(%rsp),%r9d
14368 .endif
14369 - movl \offset+40(%rsp),%ecx
14370 - movl \offset+48(%rsp),%edx
14371 - movl \offset+56(%rsp),%esi
14372 - movl \offset+64(%rsp),%edi
14373 + movl \offset+RCX(%rsp),%ecx
14374 + movl \offset+RDX(%rsp),%edx
14375 + movl \offset+RSI(%rsp),%esi
14376 + movl \offset+RDI(%rsp),%edi
14377 movl %eax,%eax /* zero extension */
14378 .endm
14379
14380 @@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14381 ENDPROC(native_irq_enable_sysexit)
14382 #endif
14383
14384 + .macro pax_enter_kernel_user
14385 + pax_set_fptr_mask
14386 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14387 + call pax_enter_kernel_user
14388 +#endif
14389 + .endm
14390 +
14391 + .macro pax_exit_kernel_user
14392 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14393 + call pax_exit_kernel_user
14394 +#endif
14395 +#ifdef CONFIG_PAX_RANDKSTACK
14396 + pushq %rax
14397 + pushq %r11
14398 + call pax_randomize_kstack
14399 + popq %r11
14400 + popq %rax
14401 +#endif
14402 + .endm
14403 +
14404 + .macro pax_erase_kstack
14405 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14406 + call pax_erase_kstack
14407 +#endif
14408 + .endm
14409 +
14410 /*
14411 * 32bit SYSENTER instruction entry.
14412 *
14413 @@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14414 CFI_REGISTER rsp,rbp
14415 SWAPGS_UNSAFE_STACK
14416 movq PER_CPU_VAR(kernel_stack), %rsp
14417 - addq $(KERNEL_STACK_OFFSET),%rsp
14418 - /*
14419 - * No need to follow this irqs on/off section: the syscall
14420 - * disabled irqs, here we enable it straight after entry:
14421 - */
14422 - ENABLE_INTERRUPTS(CLBR_NONE)
14423 movl %ebp,%ebp /* zero extension */
14424 pushq_cfi $__USER32_DS
14425 /*CFI_REL_OFFSET ss,0*/
14426 @@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14427 CFI_REL_OFFSET rsp,0
14428 pushfq_cfi
14429 /*CFI_REL_OFFSET rflags,0*/
14430 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14431 - CFI_REGISTER rip,r10
14432 + orl $X86_EFLAGS_IF,(%rsp)
14433 + GET_THREAD_INFO(%r11)
14434 + movl TI_sysenter_return(%r11), %r11d
14435 + CFI_REGISTER rip,r11
14436 pushq_cfi $__USER32_CS
14437 /*CFI_REL_OFFSET cs,0*/
14438 movl %eax, %eax
14439 - pushq_cfi %r10
14440 + pushq_cfi %r11
14441 CFI_REL_OFFSET rip,0
14442 pushq_cfi %rax
14443 cld
14444 SAVE_ARGS 0,1,0
14445 + pax_enter_kernel_user
14446 +
14447 +#ifdef CONFIG_PAX_RANDKSTACK
14448 + pax_erase_kstack
14449 +#endif
14450 +
14451 + /*
14452 + * No need to follow this irqs on/off section: the syscall
14453 + * disabled irqs, here we enable it straight after entry:
14454 + */
14455 + ENABLE_INTERRUPTS(CLBR_NONE)
14456 /* no need to do an access_ok check here because rbp has been
14457 32bit zero extended */
14458 +
14459 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14460 + addq pax_user_shadow_base,%rbp
14461 + ASM_PAX_OPEN_USERLAND
14462 +#endif
14463 +
14464 ASM_STAC
14465 1: movl (%rbp),%ebp
14466 _ASM_EXTABLE(1b,ia32_badarg)
14467 ASM_CLAC
14468 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14469 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14470 +
14471 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14472 + ASM_PAX_CLOSE_USERLAND
14473 +#endif
14474 +
14475 + GET_THREAD_INFO(%r11)
14476 + orl $TS_COMPAT,TI_status(%r11)
14477 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14478 CFI_REMEMBER_STATE
14479 jnz sysenter_tracesys
14480 cmpq $(IA32_NR_syscalls-1),%rax
14481 @@ -162,15 +209,18 @@ sysenter_do_call:
14482 sysenter_dispatch:
14483 call *ia32_sys_call_table(,%rax,8)
14484 movq %rax,RAX-ARGOFFSET(%rsp)
14485 + GET_THREAD_INFO(%r11)
14486 DISABLE_INTERRUPTS(CLBR_NONE)
14487 TRACE_IRQS_OFF
14488 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14489 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14490 jnz sysexit_audit
14491 sysexit_from_sys_call:
14492 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14493 + pax_exit_kernel_user
14494 + pax_erase_kstack
14495 + andl $~TS_COMPAT,TI_status(%r11)
14496 /* clear IF, that popfq doesn't enable interrupts early */
14497 - andl $~0x200,EFLAGS-R11(%rsp)
14498 - movl RIP-R11(%rsp),%edx /* User %eip */
14499 + andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14500 + movl RIP(%rsp),%edx /* User %eip */
14501 CFI_REGISTER rip,rdx
14502 RESTORE_ARGS 0,24,0,0,0,0
14503 xorq %r8,%r8
14504 @@ -193,6 +243,9 @@ sysexit_from_sys_call:
14505 movl %eax,%esi /* 2nd arg: syscall number */
14506 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
14507 call __audit_syscall_entry
14508 +
14509 + pax_erase_kstack
14510 +
14511 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14512 cmpq $(IA32_NR_syscalls-1),%rax
14513 ja ia32_badsys
14514 @@ -204,7 +257,7 @@ sysexit_from_sys_call:
14515 .endm
14516
14517 .macro auditsys_exit exit
14518 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14519 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14520 jnz ia32_ret_from_sys_call
14521 TRACE_IRQS_ON
14522 ENABLE_INTERRUPTS(CLBR_NONE)
14523 @@ -215,11 +268,12 @@ sysexit_from_sys_call:
14524 1: setbe %al /* 1 if error, 0 if not */
14525 movzbl %al,%edi /* zero-extend that into %edi */
14526 call __audit_syscall_exit
14527 + GET_THREAD_INFO(%r11)
14528 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14529 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14530 DISABLE_INTERRUPTS(CLBR_NONE)
14531 TRACE_IRQS_OFF
14532 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14533 + testl %edi,TI_flags(%r11)
14534 jz \exit
14535 CLEAR_RREGS -ARGOFFSET
14536 jmp int_with_check
14537 @@ -237,7 +291,7 @@ sysexit_audit:
14538
14539 sysenter_tracesys:
14540 #ifdef CONFIG_AUDITSYSCALL
14541 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14542 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14543 jz sysenter_auditsys
14544 #endif
14545 SAVE_REST
14546 @@ -249,6 +303,9 @@ sysenter_tracesys:
14547 RESTORE_REST
14548 cmpq $(IA32_NR_syscalls-1),%rax
14549 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14550 +
14551 + pax_erase_kstack
14552 +
14553 jmp sysenter_do_call
14554 CFI_ENDPROC
14555 ENDPROC(ia32_sysenter_target)
14556 @@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14557 ENTRY(ia32_cstar_target)
14558 CFI_STARTPROC32 simple
14559 CFI_SIGNAL_FRAME
14560 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14561 + CFI_DEF_CFA rsp,0
14562 CFI_REGISTER rip,rcx
14563 /*CFI_REGISTER rflags,r11*/
14564 SWAPGS_UNSAFE_STACK
14565 movl %esp,%r8d
14566 CFI_REGISTER rsp,r8
14567 movq PER_CPU_VAR(kernel_stack),%rsp
14568 + SAVE_ARGS 8*6,0,0
14569 + pax_enter_kernel_user
14570 +
14571 +#ifdef CONFIG_PAX_RANDKSTACK
14572 + pax_erase_kstack
14573 +#endif
14574 +
14575 /*
14576 * No need to follow this irqs on/off section: the syscall
14577 * disabled irqs and here we enable it straight after entry:
14578 */
14579 ENABLE_INTERRUPTS(CLBR_NONE)
14580 - SAVE_ARGS 8,0,0
14581 movl %eax,%eax /* zero extension */
14582 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14583 movq %rcx,RIP-ARGOFFSET(%rsp)
14584 @@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
14585 /* no need to do an access_ok check here because r8 has been
14586 32bit zero extended */
14587 /* hardware stack frame is complete now */
14588 +
14589 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14590 + ASM_PAX_OPEN_USERLAND
14591 + movq pax_user_shadow_base,%r8
14592 + addq RSP-ARGOFFSET(%rsp),%r8
14593 +#endif
14594 +
14595 ASM_STAC
14596 1: movl (%r8),%r9d
14597 _ASM_EXTABLE(1b,ia32_badarg)
14598 ASM_CLAC
14599 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14600 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14601 +
14602 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14603 + ASM_PAX_CLOSE_USERLAND
14604 +#endif
14605 +
14606 + GET_THREAD_INFO(%r11)
14607 + orl $TS_COMPAT,TI_status(%r11)
14608 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14609 CFI_REMEMBER_STATE
14610 jnz cstar_tracesys
14611 cmpq $IA32_NR_syscalls-1,%rax
14612 @@ -319,13 +395,16 @@ cstar_do_call:
14613 cstar_dispatch:
14614 call *ia32_sys_call_table(,%rax,8)
14615 movq %rax,RAX-ARGOFFSET(%rsp)
14616 + GET_THREAD_INFO(%r11)
14617 DISABLE_INTERRUPTS(CLBR_NONE)
14618 TRACE_IRQS_OFF
14619 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14620 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14621 jnz sysretl_audit
14622 sysretl_from_sys_call:
14623 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14624 - RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14625 + pax_exit_kernel_user
14626 + pax_erase_kstack
14627 + andl $~TS_COMPAT,TI_status(%r11)
14628 + RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14629 movl RIP-ARGOFFSET(%rsp),%ecx
14630 CFI_REGISTER rip,rcx
14631 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14632 @@ -352,7 +431,7 @@ sysretl_audit:
14633
14634 cstar_tracesys:
14635 #ifdef CONFIG_AUDITSYSCALL
14636 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14637 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14638 jz cstar_auditsys
14639 #endif
14640 xchgl %r9d,%ebp
14641 @@ -366,11 +445,19 @@ cstar_tracesys:
14642 xchgl %ebp,%r9d
14643 cmpq $(IA32_NR_syscalls-1),%rax
14644 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14645 +
14646 + pax_erase_kstack
14647 +
14648 jmp cstar_do_call
14649 END(ia32_cstar_target)
14650
14651 ia32_badarg:
14652 ASM_CLAC
14653 +
14654 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14655 + ASM_PAX_CLOSE_USERLAND
14656 +#endif
14657 +
14658 movq $-EFAULT,%rax
14659 jmp ia32_sysret
14660 CFI_ENDPROC
14661 @@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
14662 CFI_REL_OFFSET rip,RIP-RIP
14663 PARAVIRT_ADJUST_EXCEPTION_FRAME
14664 SWAPGS
14665 - /*
14666 - * No need to follow this irqs on/off section: the syscall
14667 - * disabled irqs and here we enable it straight after entry:
14668 - */
14669 - ENABLE_INTERRUPTS(CLBR_NONE)
14670 movl %eax,%eax
14671 pushq_cfi %rax
14672 cld
14673 /* note the registers are not zero extended to the sf.
14674 this could be a problem. */
14675 SAVE_ARGS 0,1,0
14676 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14677 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14678 + pax_enter_kernel_user
14679 +
14680 +#ifdef CONFIG_PAX_RANDKSTACK
14681 + pax_erase_kstack
14682 +#endif
14683 +
14684 + /*
14685 + * No need to follow this irqs on/off section: the syscall
14686 + * disabled irqs and here we enable it straight after entry:
14687 + */
14688 + ENABLE_INTERRUPTS(CLBR_NONE)
14689 + GET_THREAD_INFO(%r11)
14690 + orl $TS_COMPAT,TI_status(%r11)
14691 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14692 jnz ia32_tracesys
14693 cmpq $(IA32_NR_syscalls-1),%rax
14694 ja ia32_badsys
14695 @@ -442,6 +536,9 @@ ia32_tracesys:
14696 RESTORE_REST
14697 cmpq $(IA32_NR_syscalls-1),%rax
14698 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14699 +
14700 + pax_erase_kstack
14701 +
14702 jmp ia32_do_call
14703 END(ia32_syscall)
14704
14705 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14706 index 8e0ceec..af13504 100644
14707 --- a/arch/x86/ia32/sys_ia32.c
14708 +++ b/arch/x86/ia32/sys_ia32.c
14709 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14710 */
14711 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14712 {
14713 - typeof(ubuf->st_uid) uid = 0;
14714 - typeof(ubuf->st_gid) gid = 0;
14715 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
14716 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
14717 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14718 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14719 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
14720 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14721 index 372231c..51b537d 100644
14722 --- a/arch/x86/include/asm/alternative-asm.h
14723 +++ b/arch/x86/include/asm/alternative-asm.h
14724 @@ -18,6 +18,45 @@
14725 .endm
14726 #endif
14727
14728 +#ifdef KERNEXEC_PLUGIN
14729 + .macro pax_force_retaddr_bts rip=0
14730 + btsq $63,\rip(%rsp)
14731 + .endm
14732 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14733 + .macro pax_force_retaddr rip=0, reload=0
14734 + btsq $63,\rip(%rsp)
14735 + .endm
14736 + .macro pax_force_fptr ptr
14737 + btsq $63,\ptr
14738 + .endm
14739 + .macro pax_set_fptr_mask
14740 + .endm
14741 +#endif
14742 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14743 + .macro pax_force_retaddr rip=0, reload=0
14744 + .if \reload
14745 + pax_set_fptr_mask
14746 + .endif
14747 + orq %r12,\rip(%rsp)
14748 + .endm
14749 + .macro pax_force_fptr ptr
14750 + orq %r12,\ptr
14751 + .endm
14752 + .macro pax_set_fptr_mask
14753 + movabs $0x8000000000000000,%r12
14754 + .endm
14755 +#endif
14756 +#else
14757 + .macro pax_force_retaddr rip=0, reload=0
14758 + .endm
14759 + .macro pax_force_fptr ptr
14760 + .endm
14761 + .macro pax_force_retaddr_bts rip=0
14762 + .endm
14763 + .macro pax_set_fptr_mask
14764 + .endm
14765 +#endif
14766 +
14767 .macro altinstruction_entry orig alt feature orig_len alt_len
14768 .long \orig - .
14769 .long \alt - .
14770 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
14771 index 0a3f9c9..c9d081d 100644
14772 --- a/arch/x86/include/asm/alternative.h
14773 +++ b/arch/x86/include/asm/alternative.h
14774 @@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14775 ".pushsection .discard,\"aw\",@progbits\n" \
14776 DISCARD_ENTRY(1) \
14777 ".popsection\n" \
14778 - ".pushsection .altinstr_replacement, \"ax\"\n" \
14779 + ".pushsection .altinstr_replacement, \"a\"\n" \
14780 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
14781 ".popsection"
14782
14783 @@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14784 DISCARD_ENTRY(1) \
14785 DISCARD_ENTRY(2) \
14786 ".popsection\n" \
14787 - ".pushsection .altinstr_replacement, \"ax\"\n" \
14788 + ".pushsection .altinstr_replacement, \"a\"\n" \
14789 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
14790 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
14791 ".popsection"
14792 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
14793 index 1d2091a..f5074c1 100644
14794 --- a/arch/x86/include/asm/apic.h
14795 +++ b/arch/x86/include/asm/apic.h
14796 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
14797
14798 #ifdef CONFIG_X86_LOCAL_APIC
14799
14800 -extern unsigned int apic_verbosity;
14801 +extern int apic_verbosity;
14802 extern int local_apic_timer_c2_ok;
14803
14804 extern int disable_apic;
14805 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
14806 index 20370c6..a2eb9b0 100644
14807 --- a/arch/x86/include/asm/apm.h
14808 +++ b/arch/x86/include/asm/apm.h
14809 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
14810 __asm__ __volatile__(APM_DO_ZERO_SEGS
14811 "pushl %%edi\n\t"
14812 "pushl %%ebp\n\t"
14813 - "lcall *%%cs:apm_bios_entry\n\t"
14814 + "lcall *%%ss:apm_bios_entry\n\t"
14815 "setc %%al\n\t"
14816 "popl %%ebp\n\t"
14817 "popl %%edi\n\t"
14818 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
14819 __asm__ __volatile__(APM_DO_ZERO_SEGS
14820 "pushl %%edi\n\t"
14821 "pushl %%ebp\n\t"
14822 - "lcall *%%cs:apm_bios_entry\n\t"
14823 + "lcall *%%ss:apm_bios_entry\n\t"
14824 "setc %%bl\n\t"
14825 "popl %%ebp\n\t"
14826 "popl %%edi\n\t"
14827 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
14828 index b17f4f4..9620151 100644
14829 --- a/arch/x86/include/asm/atomic.h
14830 +++ b/arch/x86/include/asm/atomic.h
14831 @@ -23,7 +23,18 @@
14832 */
14833 static inline int atomic_read(const atomic_t *v)
14834 {
14835 - return (*(volatile int *)&(v)->counter);
14836 + return (*(volatile const int *)&(v)->counter);
14837 +}
14838 +
14839 +/**
14840 + * atomic_read_unchecked - read atomic variable
14841 + * @v: pointer of type atomic_unchecked_t
14842 + *
14843 + * Atomically reads the value of @v.
14844 + */
14845 +static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
14846 +{
14847 + return (*(volatile const int *)&(v)->counter);
14848 }
14849
14850 /**
14851 @@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i)
14852 }
14853
14854 /**
14855 + * atomic_set_unchecked - set atomic variable
14856 + * @v: pointer of type atomic_unchecked_t
14857 + * @i: required value
14858 + *
14859 + * Atomically sets the value of @v to @i.
14860 + */
14861 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
14862 +{
14863 + v->counter = i;
14864 +}
14865 +
14866 +/**
14867 * atomic_add - add integer to atomic variable
14868 * @i: integer value to add
14869 * @v: pointer of type atomic_t
14870 @@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i)
14871 */
14872 static inline void atomic_add(int i, atomic_t *v)
14873 {
14874 - asm volatile(LOCK_PREFIX "addl %1,%0"
14875 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
14876 +
14877 +#ifdef CONFIG_PAX_REFCOUNT
14878 + "jno 0f\n"
14879 + LOCK_PREFIX "subl %1,%0\n"
14880 + "int $4\n0:\n"
14881 + _ASM_EXTABLE(0b, 0b)
14882 +#endif
14883 +
14884 + : "+m" (v->counter)
14885 + : "ir" (i));
14886 +}
14887 +
14888 +/**
14889 + * atomic_add_unchecked - add integer to atomic variable
14890 + * @i: integer value to add
14891 + * @v: pointer of type atomic_unchecked_t
14892 + *
14893 + * Atomically adds @i to @v.
14894 + */
14895 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
14896 +{
14897 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
14898 : "+m" (v->counter)
14899 : "ir" (i));
14900 }
14901 @@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v)
14902 */
14903 static inline void atomic_sub(int i, atomic_t *v)
14904 {
14905 - asm volatile(LOCK_PREFIX "subl %1,%0"
14906 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
14907 +
14908 +#ifdef CONFIG_PAX_REFCOUNT
14909 + "jno 0f\n"
14910 + LOCK_PREFIX "addl %1,%0\n"
14911 + "int $4\n0:\n"
14912 + _ASM_EXTABLE(0b, 0b)
14913 +#endif
14914 +
14915 + : "+m" (v->counter)
14916 + : "ir" (i));
14917 +}
14918 +
14919 +/**
14920 + * atomic_sub_unchecked - subtract integer from atomic variable
14921 + * @i: integer value to subtract
14922 + * @v: pointer of type atomic_unchecked_t
14923 + *
14924 + * Atomically subtracts @i from @v.
14925 + */
14926 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
14927 +{
14928 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
14929 : "+m" (v->counter)
14930 : "ir" (i));
14931 }
14932 @@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v)
14933 */
14934 static inline int atomic_sub_and_test(int i, atomic_t *v)
14935 {
14936 - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
14937 + GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
14938 }
14939
14940 /**
14941 @@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14942 */
14943 static inline void atomic_inc(atomic_t *v)
14944 {
14945 - asm volatile(LOCK_PREFIX "incl %0"
14946 + asm volatile(LOCK_PREFIX "incl %0\n"
14947 +
14948 +#ifdef CONFIG_PAX_REFCOUNT
14949 + "jno 0f\n"
14950 + LOCK_PREFIX "decl %0\n"
14951 + "int $4\n0:\n"
14952 + _ASM_EXTABLE(0b, 0b)
14953 +#endif
14954 +
14955 + : "+m" (v->counter));
14956 +}
14957 +
14958 +/**
14959 + * atomic_inc_unchecked - increment atomic variable
14960 + * @v: pointer of type atomic_unchecked_t
14961 + *
14962 + * Atomically increments @v by 1.
14963 + */
14964 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
14965 +{
14966 + asm volatile(LOCK_PREFIX "incl %0\n"
14967 : "+m" (v->counter));
14968 }
14969
14970 @@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v)
14971 */
14972 static inline void atomic_dec(atomic_t *v)
14973 {
14974 - asm volatile(LOCK_PREFIX "decl %0"
14975 + asm volatile(LOCK_PREFIX "decl %0\n"
14976 +
14977 +#ifdef CONFIG_PAX_REFCOUNT
14978 + "jno 0f\n"
14979 + LOCK_PREFIX "incl %0\n"
14980 + "int $4\n0:\n"
14981 + _ASM_EXTABLE(0b, 0b)
14982 +#endif
14983 +
14984 + : "+m" (v->counter));
14985 +}
14986 +
14987 +/**
14988 + * atomic_dec_unchecked - decrement atomic variable
14989 + * @v: pointer of type atomic_unchecked_t
14990 + *
14991 + * Atomically decrements @v by 1.
14992 + */
14993 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
14994 +{
14995 + asm volatile(LOCK_PREFIX "decl %0\n"
14996 : "+m" (v->counter));
14997 }
14998
14999 @@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v)
15000 */
15001 static inline int atomic_dec_and_test(atomic_t *v)
15002 {
15003 - GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15004 + GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15005 }
15006
15007 /**
15008 @@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15009 */
15010 static inline int atomic_inc_and_test(atomic_t *v)
15011 {
15012 - GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15013 + GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15014 +}
15015 +
15016 +/**
15017 + * atomic_inc_and_test_unchecked - increment and test
15018 + * @v: pointer of type atomic_unchecked_t
15019 + *
15020 + * Atomically increments @v by 1
15021 + * and returns true if the result is zero, or false for all
15022 + * other cases.
15023 + */
15024 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15025 +{
15026 + GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15027 }
15028
15029 /**
15030 @@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15031 */
15032 static inline int atomic_add_negative(int i, atomic_t *v)
15033 {
15034 - GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15035 + GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15036 }
15037
15038 /**
15039 @@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15040 */
15041 static inline int atomic_add_return(int i, atomic_t *v)
15042 {
15043 + return i + xadd_check_overflow(&v->counter, i);
15044 +}
15045 +
15046 +/**
15047 + * atomic_add_return_unchecked - add integer and return
15048 + * @i: integer value to add
15049 + * @v: pointer of type atomic_unchecked_t
15050 + *
15051 + * Atomically adds @i to @v and returns @i + @v
15052 + */
15053 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15054 +{
15055 return i + xadd(&v->counter, i);
15056 }
15057
15058 @@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15059 }
15060
15061 #define atomic_inc_return(v) (atomic_add_return(1, v))
15062 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15063 +{
15064 + return atomic_add_return_unchecked(1, v);
15065 +}
15066 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15067
15068 -static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15069 +static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15070 +{
15071 + return cmpxchg(&v->counter, old, new);
15072 +}
15073 +
15074 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15075 {
15076 return cmpxchg(&v->counter, old, new);
15077 }
15078 @@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15079 return xchg(&v->counter, new);
15080 }
15081
15082 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15083 +{
15084 + return xchg(&v->counter, new);
15085 +}
15086 +
15087 /**
15088 * __atomic_add_unless - add unless the number is already a given value
15089 * @v: pointer of type atomic_t
15090 @@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15091 * Atomically adds @a to @v, so long as @v was not already @u.
15092 * Returns the old value of @v.
15093 */
15094 -static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15095 +static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15096 {
15097 - int c, old;
15098 + int c, old, new;
15099 c = atomic_read(v);
15100 for (;;) {
15101 - if (unlikely(c == (u)))
15102 + if (unlikely(c == u))
15103 break;
15104 - old = atomic_cmpxchg((v), c, c + (a));
15105 +
15106 + asm volatile("addl %2,%0\n"
15107 +
15108 +#ifdef CONFIG_PAX_REFCOUNT
15109 + "jno 0f\n"
15110 + "subl %2,%0\n"
15111 + "int $4\n0:\n"
15112 + _ASM_EXTABLE(0b, 0b)
15113 +#endif
15114 +
15115 + : "=r" (new)
15116 + : "0" (c), "ir" (a));
15117 +
15118 + old = atomic_cmpxchg(v, c, new);
15119 if (likely(old == c))
15120 break;
15121 c = old;
15122 @@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15123 }
15124
15125 /**
15126 + * atomic_inc_not_zero_hint - increment if not null
15127 + * @v: pointer of type atomic_t
15128 + * @hint: probable value of the atomic before the increment
15129 + *
15130 + * This version of atomic_inc_not_zero() gives a hint of probable
15131 + * value of the atomic. This helps processor to not read the memory
15132 + * before doing the atomic read/modify/write cycle, lowering
15133 + * number of bus transactions on some arches.
15134 + *
15135 + * Returns: 0 if increment was not done, 1 otherwise.
15136 + */
15137 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15138 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15139 +{
15140 + int val, c = hint, new;
15141 +
15142 + /* sanity test, should be removed by compiler if hint is a constant */
15143 + if (!hint)
15144 + return __atomic_add_unless(v, 1, 0);
15145 +
15146 + do {
15147 + asm volatile("incl %0\n"
15148 +
15149 +#ifdef CONFIG_PAX_REFCOUNT
15150 + "jno 0f\n"
15151 + "decl %0\n"
15152 + "int $4\n0:\n"
15153 + _ASM_EXTABLE(0b, 0b)
15154 +#endif
15155 +
15156 + : "=r" (new)
15157 + : "0" (c));
15158 +
15159 + val = atomic_cmpxchg(v, c, new);
15160 + if (val == c)
15161 + return 1;
15162 + c = val;
15163 + } while (c);
15164 +
15165 + return 0;
15166 +}
15167 +
15168 +/**
15169 * atomic_inc_short - increment of a short integer
15170 * @v: pointer to type int
15171 *
15172 @@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15173 #endif
15174
15175 /* These are x86-specific, used by some header files */
15176 -#define atomic_clear_mask(mask, addr) \
15177 - asm volatile(LOCK_PREFIX "andl %0,%1" \
15178 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
15179 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15180 +{
15181 + asm volatile(LOCK_PREFIX "andl %1,%0"
15182 + : "+m" (v->counter)
15183 + : "r" (~(mask))
15184 + : "memory");
15185 +}
15186
15187 -#define atomic_set_mask(mask, addr) \
15188 - asm volatile(LOCK_PREFIX "orl %0,%1" \
15189 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15190 - : "memory")
15191 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15192 +{
15193 + asm volatile(LOCK_PREFIX "andl %1,%0"
15194 + : "+m" (v->counter)
15195 + : "r" (~(mask))
15196 + : "memory");
15197 +}
15198 +
15199 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15200 +{
15201 + asm volatile(LOCK_PREFIX "orl %1,%0"
15202 + : "+m" (v->counter)
15203 + : "r" (mask)
15204 + : "memory");
15205 +}
15206 +
15207 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15208 +{
15209 + asm volatile(LOCK_PREFIX "orl %1,%0"
15210 + : "+m" (v->counter)
15211 + : "r" (mask)
15212 + : "memory");
15213 +}
15214
15215 /* Atomic operations are already serializing on x86 */
15216 #define smp_mb__before_atomic_dec() barrier()
15217 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15218 index b154de7..bf18a5a 100644
15219 --- a/arch/x86/include/asm/atomic64_32.h
15220 +++ b/arch/x86/include/asm/atomic64_32.h
15221 @@ -12,6 +12,14 @@ typedef struct {
15222 u64 __aligned(8) counter;
15223 } atomic64_t;
15224
15225 +#ifdef CONFIG_PAX_REFCOUNT
15226 +typedef struct {
15227 + u64 __aligned(8) counter;
15228 +} atomic64_unchecked_t;
15229 +#else
15230 +typedef atomic64_t atomic64_unchecked_t;
15231 +#endif
15232 +
15233 #define ATOMIC64_INIT(val) { (val) }
15234
15235 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15236 @@ -37,21 +45,31 @@ typedef struct {
15237 ATOMIC64_DECL_ONE(sym##_386)
15238
15239 ATOMIC64_DECL_ONE(add_386);
15240 +ATOMIC64_DECL_ONE(add_unchecked_386);
15241 ATOMIC64_DECL_ONE(sub_386);
15242 +ATOMIC64_DECL_ONE(sub_unchecked_386);
15243 ATOMIC64_DECL_ONE(inc_386);
15244 +ATOMIC64_DECL_ONE(inc_unchecked_386);
15245 ATOMIC64_DECL_ONE(dec_386);
15246 +ATOMIC64_DECL_ONE(dec_unchecked_386);
15247 #endif
15248
15249 #define alternative_atomic64(f, out, in...) \
15250 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15251
15252 ATOMIC64_DECL(read);
15253 +ATOMIC64_DECL(read_unchecked);
15254 ATOMIC64_DECL(set);
15255 +ATOMIC64_DECL(set_unchecked);
15256 ATOMIC64_DECL(xchg);
15257 ATOMIC64_DECL(add_return);
15258 +ATOMIC64_DECL(add_return_unchecked);
15259 ATOMIC64_DECL(sub_return);
15260 +ATOMIC64_DECL(sub_return_unchecked);
15261 ATOMIC64_DECL(inc_return);
15262 +ATOMIC64_DECL(inc_return_unchecked);
15263 ATOMIC64_DECL(dec_return);
15264 +ATOMIC64_DECL(dec_return_unchecked);
15265 ATOMIC64_DECL(dec_if_positive);
15266 ATOMIC64_DECL(inc_not_zero);
15267 ATOMIC64_DECL(add_unless);
15268 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15269 }
15270
15271 /**
15272 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15273 + * @p: pointer to type atomic64_unchecked_t
15274 + * @o: expected value
15275 + * @n: new value
15276 + *
15277 + * Atomically sets @v to @n if it was equal to @o and returns
15278 + * the old value.
15279 + */
15280 +
15281 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15282 +{
15283 + return cmpxchg64(&v->counter, o, n);
15284 +}
15285 +
15286 +/**
15287 * atomic64_xchg - xchg atomic64 variable
15288 * @v: pointer to type atomic64_t
15289 * @n: value to assign
15290 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15291 }
15292
15293 /**
15294 + * atomic64_set_unchecked - set atomic64 variable
15295 + * @v: pointer to type atomic64_unchecked_t
15296 + * @n: value to assign
15297 + *
15298 + * Atomically sets the value of @v to @n.
15299 + */
15300 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15301 +{
15302 + unsigned high = (unsigned)(i >> 32);
15303 + unsigned low = (unsigned)i;
15304 + alternative_atomic64(set, /* no output */,
15305 + "S" (v), "b" (low), "c" (high)
15306 + : "eax", "edx", "memory");
15307 +}
15308 +
15309 +/**
15310 * atomic64_read - read atomic64 variable
15311 * @v: pointer to type atomic64_t
15312 *
15313 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15314 }
15315
15316 /**
15317 + * atomic64_read_unchecked - read atomic64 variable
15318 + * @v: pointer to type atomic64_unchecked_t
15319 + *
15320 + * Atomically reads the value of @v and returns it.
15321 + */
15322 +static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15323 +{
15324 + long long r;
15325 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15326 + return r;
15327 + }
15328 +
15329 +/**
15330 * atomic64_add_return - add and return
15331 * @i: integer value to add
15332 * @v: pointer to type atomic64_t
15333 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15334 return i;
15335 }
15336
15337 +/**
15338 + * atomic64_add_return_unchecked - add and return
15339 + * @i: integer value to add
15340 + * @v: pointer to type atomic64_unchecked_t
15341 + *
15342 + * Atomically adds @i to @v and returns @i + *@v
15343 + */
15344 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15345 +{
15346 + alternative_atomic64(add_return_unchecked,
15347 + ASM_OUTPUT2("+A" (i), "+c" (v)),
15348 + ASM_NO_INPUT_CLOBBER("memory"));
15349 + return i;
15350 +}
15351 +
15352 /*
15353 * Other variants with different arithmetic operators:
15354 */
15355 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15356 return a;
15357 }
15358
15359 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15360 +{
15361 + long long a;
15362 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
15363 + "S" (v) : "memory", "ecx");
15364 + return a;
15365 +}
15366 +
15367 static inline long long atomic64_dec_return(atomic64_t *v)
15368 {
15369 long long a;
15370 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15371 }
15372
15373 /**
15374 + * atomic64_add_unchecked - add integer to atomic64 variable
15375 + * @i: integer value to add
15376 + * @v: pointer to type atomic64_unchecked_t
15377 + *
15378 + * Atomically adds @i to @v.
15379 + */
15380 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15381 +{
15382 + __alternative_atomic64(add_unchecked, add_return_unchecked,
15383 + ASM_OUTPUT2("+A" (i), "+c" (v)),
15384 + ASM_NO_INPUT_CLOBBER("memory"));
15385 + return i;
15386 +}
15387 +
15388 +/**
15389 * atomic64_sub - subtract the atomic64 variable
15390 * @i: integer value to subtract
15391 * @v: pointer to type atomic64_t
15392 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15393 index 46e9052..ae45136 100644
15394 --- a/arch/x86/include/asm/atomic64_64.h
15395 +++ b/arch/x86/include/asm/atomic64_64.h
15396 @@ -18,7 +18,19 @@
15397 */
15398 static inline long atomic64_read(const atomic64_t *v)
15399 {
15400 - return (*(volatile long *)&(v)->counter);
15401 + return (*(volatile const long *)&(v)->counter);
15402 +}
15403 +
15404 +/**
15405 + * atomic64_read_unchecked - read atomic64 variable
15406 + * @v: pointer of type atomic64_unchecked_t
15407 + *
15408 + * Atomically reads the value of @v.
15409 + * Doesn't imply a read memory barrier.
15410 + */
15411 +static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15412 +{
15413 + return (*(volatile const long *)&(v)->counter);
15414 }
15415
15416 /**
15417 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15418 }
15419
15420 /**
15421 + * atomic64_set_unchecked - set atomic64 variable
15422 + * @v: pointer to type atomic64_unchecked_t
15423 + * @i: required value
15424 + *
15425 + * Atomically sets the value of @v to @i.
15426 + */
15427 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15428 +{
15429 + v->counter = i;
15430 +}
15431 +
15432 +/**
15433 * atomic64_add - add integer to atomic64 variable
15434 * @i: integer value to add
15435 * @v: pointer to type atomic64_t
15436 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15437 */
15438 static inline void atomic64_add(long i, atomic64_t *v)
15439 {
15440 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
15441 +
15442 +#ifdef CONFIG_PAX_REFCOUNT
15443 + "jno 0f\n"
15444 + LOCK_PREFIX "subq %1,%0\n"
15445 + "int $4\n0:\n"
15446 + _ASM_EXTABLE(0b, 0b)
15447 +#endif
15448 +
15449 + : "=m" (v->counter)
15450 + : "er" (i), "m" (v->counter));
15451 +}
15452 +
15453 +/**
15454 + * atomic64_add_unchecked - add integer to atomic64 variable
15455 + * @i: integer value to add
15456 + * @v: pointer to type atomic64_unchecked_t
15457 + *
15458 + * Atomically adds @i to @v.
15459 + */
15460 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15461 +{
15462 asm volatile(LOCK_PREFIX "addq %1,%0"
15463 : "=m" (v->counter)
15464 : "er" (i), "m" (v->counter));
15465 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15466 */
15467 static inline void atomic64_sub(long i, atomic64_t *v)
15468 {
15469 - asm volatile(LOCK_PREFIX "subq %1,%0"
15470 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
15471 +
15472 +#ifdef CONFIG_PAX_REFCOUNT
15473 + "jno 0f\n"
15474 + LOCK_PREFIX "addq %1,%0\n"
15475 + "int $4\n0:\n"
15476 + _ASM_EXTABLE(0b, 0b)
15477 +#endif
15478 +
15479 + : "=m" (v->counter)
15480 + : "er" (i), "m" (v->counter));
15481 +}
15482 +
15483 +/**
15484 + * atomic64_sub_unchecked - subtract the atomic64 variable
15485 + * @i: integer value to subtract
15486 + * @v: pointer to type atomic64_unchecked_t
15487 + *
15488 + * Atomically subtracts @i from @v.
15489 + */
15490 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15491 +{
15492 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
15493 : "=m" (v->counter)
15494 : "er" (i), "m" (v->counter));
15495 }
15496 @@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15497 */
15498 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15499 {
15500 - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15501 + GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15502 }
15503
15504 /**
15505 @@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15506 */
15507 static inline void atomic64_inc(atomic64_t *v)
15508 {
15509 + asm volatile(LOCK_PREFIX "incq %0\n"
15510 +
15511 +#ifdef CONFIG_PAX_REFCOUNT
15512 + "jno 0f\n"
15513 + LOCK_PREFIX "decq %0\n"
15514 + "int $4\n0:\n"
15515 + _ASM_EXTABLE(0b, 0b)
15516 +#endif
15517 +
15518 + : "=m" (v->counter)
15519 + : "m" (v->counter));
15520 +}
15521 +
15522 +/**
15523 + * atomic64_inc_unchecked - increment atomic64 variable
15524 + * @v: pointer to type atomic64_unchecked_t
15525 + *
15526 + * Atomically increments @v by 1.
15527 + */
15528 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15529 +{
15530 asm volatile(LOCK_PREFIX "incq %0"
15531 : "=m" (v->counter)
15532 : "m" (v->counter));
15533 @@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15534 */
15535 static inline void atomic64_dec(atomic64_t *v)
15536 {
15537 - asm volatile(LOCK_PREFIX "decq %0"
15538 + asm volatile(LOCK_PREFIX "decq %0\n"
15539 +
15540 +#ifdef CONFIG_PAX_REFCOUNT
15541 + "jno 0f\n"
15542 + LOCK_PREFIX "incq %0\n"
15543 + "int $4\n0:\n"
15544 + _ASM_EXTABLE(0b, 0b)
15545 +#endif
15546 +
15547 + : "=m" (v->counter)
15548 + : "m" (v->counter));
15549 +}
15550 +
15551 +/**
15552 + * atomic64_dec_unchecked - decrement atomic64 variable
15553 + * @v: pointer to type atomic64_t
15554 + *
15555 + * Atomically decrements @v by 1.
15556 + */
15557 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15558 +{
15559 + asm volatile(LOCK_PREFIX "decq %0\n"
15560 : "=m" (v->counter)
15561 : "m" (v->counter));
15562 }
15563 @@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15564 */
15565 static inline int atomic64_dec_and_test(atomic64_t *v)
15566 {
15567 - GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15568 + GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15569 }
15570
15571 /**
15572 @@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15573 */
15574 static inline int atomic64_inc_and_test(atomic64_t *v)
15575 {
15576 - GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15577 + GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15578 }
15579
15580 /**
15581 @@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15582 */
15583 static inline int atomic64_add_negative(long i, atomic64_t *v)
15584 {
15585 - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15586 + GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15587 }
15588
15589 /**
15590 @@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15591 */
15592 static inline long atomic64_add_return(long i, atomic64_t *v)
15593 {
15594 + return i + xadd_check_overflow(&v->counter, i);
15595 +}
15596 +
15597 +/**
15598 + * atomic64_add_return_unchecked - add and return
15599 + * @i: integer value to add
15600 + * @v: pointer to type atomic64_unchecked_t
15601 + *
15602 + * Atomically adds @i to @v and returns @i + @v
15603 + */
15604 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15605 +{
15606 return i + xadd(&v->counter, i);
15607 }
15608
15609 @@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15610 }
15611
15612 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15613 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15614 +{
15615 + return atomic64_add_return_unchecked(1, v);
15616 +}
15617 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15618
15619 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15620 @@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15621 return cmpxchg(&v->counter, old, new);
15622 }
15623
15624 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15625 +{
15626 + return cmpxchg(&v->counter, old, new);
15627 +}
15628 +
15629 static inline long atomic64_xchg(atomic64_t *v, long new)
15630 {
15631 return xchg(&v->counter, new);
15632 @@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15633 */
15634 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15635 {
15636 - long c, old;
15637 + long c, old, new;
15638 c = atomic64_read(v);
15639 for (;;) {
15640 - if (unlikely(c == (u)))
15641 + if (unlikely(c == u))
15642 break;
15643 - old = atomic64_cmpxchg((v), c, c + (a));
15644 +
15645 + asm volatile("add %2,%0\n"
15646 +
15647 +#ifdef CONFIG_PAX_REFCOUNT
15648 + "jno 0f\n"
15649 + "sub %2,%0\n"
15650 + "int $4\n0:\n"
15651 + _ASM_EXTABLE(0b, 0b)
15652 +#endif
15653 +
15654 + : "=r" (new)
15655 + : "0" (c), "ir" (a));
15656 +
15657 + old = atomic64_cmpxchg(v, c, new);
15658 if (likely(old == c))
15659 break;
15660 c = old;
15661 }
15662 - return c != (u);
15663 + return c != u;
15664 }
15665
15666 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
15667 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15668 index 9fc1af7..fc71228 100644
15669 --- a/arch/x86/include/asm/bitops.h
15670 +++ b/arch/x86/include/asm/bitops.h
15671 @@ -49,7 +49,7 @@
15672 * a mask operation on a byte.
15673 */
15674 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15675 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15676 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15677 #define CONST_MASK(nr) (1 << ((nr) & 7))
15678
15679 /**
15680 @@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
15681 */
15682 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
15683 {
15684 - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15685 + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15686 }
15687
15688 /**
15689 @@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
15690 */
15691 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
15692 {
15693 - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15694 + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15695 }
15696
15697 /**
15698 @@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
15699 */
15700 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
15701 {
15702 - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15703 + GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15704 }
15705
15706 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
15707 @@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15708 *
15709 * Undefined if no bit exists, so code should check against 0 first.
15710 */
15711 -static inline unsigned long __ffs(unsigned long word)
15712 +static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
15713 {
15714 asm("rep; bsf %1,%0"
15715 : "=r" (word)
15716 @@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word)
15717 *
15718 * Undefined if no zero exists, so code should check against ~0UL first.
15719 */
15720 -static inline unsigned long ffz(unsigned long word)
15721 +static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
15722 {
15723 asm("rep; bsf %1,%0"
15724 : "=r" (word)
15725 @@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word)
15726 *
15727 * Undefined if no set bit exists, so code should check against 0 first.
15728 */
15729 -static inline unsigned long __fls(unsigned long word)
15730 +static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
15731 {
15732 asm("bsr %1,%0"
15733 : "=r" (word)
15734 @@ -436,7 +436,7 @@ static inline int ffs(int x)
15735 * set bit if value is nonzero. The last (most significant) bit is
15736 * at position 32.
15737 */
15738 -static inline int fls(int x)
15739 +static inline int __intentional_overflow(-1) fls(int x)
15740 {
15741 int r;
15742
15743 @@ -478,7 +478,7 @@ static inline int fls(int x)
15744 * at position 64.
15745 */
15746 #ifdef CONFIG_X86_64
15747 -static __always_inline int fls64(__u64 x)
15748 +static __always_inline long fls64(__u64 x)
15749 {
15750 int bitpos = -1;
15751 /*
15752 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
15753 index 4fa687a..60f2d39 100644
15754 --- a/arch/x86/include/asm/boot.h
15755 +++ b/arch/x86/include/asm/boot.h
15756 @@ -6,10 +6,15 @@
15757 #include <uapi/asm/boot.h>
15758
15759 /* Physical address where kernel should be loaded. */
15760 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15761 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15762 + (CONFIG_PHYSICAL_ALIGN - 1)) \
15763 & ~(CONFIG_PHYSICAL_ALIGN - 1))
15764
15765 +#ifndef __ASSEMBLY__
15766 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
15767 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
15768 +#endif
15769 +
15770 /* Minimum kernel alignment, as a power of two */
15771 #ifdef CONFIG_X86_64
15772 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
15773 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
15774 index 48f99f1..d78ebf9 100644
15775 --- a/arch/x86/include/asm/cache.h
15776 +++ b/arch/x86/include/asm/cache.h
15777 @@ -5,12 +5,13 @@
15778
15779 /* L1 cache line size */
15780 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
15781 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15782 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
15783
15784 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
15785 +#define __read_only __attribute__((__section__(".data..read_only")))
15786
15787 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
15788 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15789 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
15790
15791 #ifdef CONFIG_X86_VSMP
15792 #ifdef CONFIG_SMP
15793 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
15794 index 9863ee3..4a1f8e1 100644
15795 --- a/arch/x86/include/asm/cacheflush.h
15796 +++ b/arch/x86/include/asm/cacheflush.h
15797 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
15798 unsigned long pg_flags = pg->flags & _PGMT_MASK;
15799
15800 if (pg_flags == _PGMT_DEFAULT)
15801 - return -1;
15802 + return ~0UL;
15803 else if (pg_flags == _PGMT_WC)
15804 return _PAGE_CACHE_WC;
15805 else if (pg_flags == _PGMT_UC_MINUS)
15806 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
15807 index cb4c73b..c473c29 100644
15808 --- a/arch/x86/include/asm/calling.h
15809 +++ b/arch/x86/include/asm/calling.h
15810 @@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
15811 #define RSP 152
15812 #define SS 160
15813
15814 -#define ARGOFFSET R11
15815 -#define SWFRAME ORIG_RAX
15816 +#define ARGOFFSET R15
15817
15818 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
15819 - subq $9*8+\addskip, %rsp
15820 - CFI_ADJUST_CFA_OFFSET 9*8+\addskip
15821 - movq_cfi rdi, 8*8
15822 - movq_cfi rsi, 7*8
15823 - movq_cfi rdx, 6*8
15824 + subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
15825 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
15826 + movq_cfi rdi, RDI
15827 + movq_cfi rsi, RSI
15828 + movq_cfi rdx, RDX
15829
15830 .if \save_rcx
15831 - movq_cfi rcx, 5*8
15832 + movq_cfi rcx, RCX
15833 .endif
15834
15835 - movq_cfi rax, 4*8
15836 + movq_cfi rax, RAX
15837
15838 .if \save_r891011
15839 - movq_cfi r8, 3*8
15840 - movq_cfi r9, 2*8
15841 - movq_cfi r10, 1*8
15842 - movq_cfi r11, 0*8
15843 + movq_cfi r8, R8
15844 + movq_cfi r9, R9
15845 + movq_cfi r10, R10
15846 + movq_cfi r11, R11
15847 .endif
15848
15849 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15850 + movq_cfi r12, R12
15851 +#endif
15852 +
15853 .endm
15854
15855 -#define ARG_SKIP (9*8)
15856 +#define ARG_SKIP ORIG_RAX
15857
15858 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
15859 rstor_r8910=1, rstor_rdx=1
15860 +
15861 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15862 + movq_cfi_restore R12, r12
15863 +#endif
15864 +
15865 .if \rstor_r11
15866 - movq_cfi_restore 0*8, r11
15867 + movq_cfi_restore R11, r11
15868 .endif
15869
15870 .if \rstor_r8910
15871 - movq_cfi_restore 1*8, r10
15872 - movq_cfi_restore 2*8, r9
15873 - movq_cfi_restore 3*8, r8
15874 + movq_cfi_restore R10, r10
15875 + movq_cfi_restore R9, r9
15876 + movq_cfi_restore R8, r8
15877 .endif
15878
15879 .if \rstor_rax
15880 - movq_cfi_restore 4*8, rax
15881 + movq_cfi_restore RAX, rax
15882 .endif
15883
15884 .if \rstor_rcx
15885 - movq_cfi_restore 5*8, rcx
15886 + movq_cfi_restore RCX, rcx
15887 .endif
15888
15889 .if \rstor_rdx
15890 - movq_cfi_restore 6*8, rdx
15891 + movq_cfi_restore RDX, rdx
15892 .endif
15893
15894 - movq_cfi_restore 7*8, rsi
15895 - movq_cfi_restore 8*8, rdi
15896 + movq_cfi_restore RSI, rsi
15897 + movq_cfi_restore RDI, rdi
15898
15899 - .if ARG_SKIP+\addskip > 0
15900 - addq $ARG_SKIP+\addskip, %rsp
15901 - CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
15902 + .if ORIG_RAX+\addskip > 0
15903 + addq $ORIG_RAX+\addskip, %rsp
15904 + CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
15905 .endif
15906 .endm
15907
15908 - .macro LOAD_ARGS offset, skiprax=0
15909 - movq \offset(%rsp), %r11
15910 - movq \offset+8(%rsp), %r10
15911 - movq \offset+16(%rsp), %r9
15912 - movq \offset+24(%rsp), %r8
15913 - movq \offset+40(%rsp), %rcx
15914 - movq \offset+48(%rsp), %rdx
15915 - movq \offset+56(%rsp), %rsi
15916 - movq \offset+64(%rsp), %rdi
15917 + .macro LOAD_ARGS skiprax=0
15918 + movq R11(%rsp), %r11
15919 + movq R10(%rsp), %r10
15920 + movq R9(%rsp), %r9
15921 + movq R8(%rsp), %r8
15922 + movq RCX(%rsp), %rcx
15923 + movq RDX(%rsp), %rdx
15924 + movq RSI(%rsp), %rsi
15925 + movq RDI(%rsp), %rdi
15926 .if \skiprax
15927 .else
15928 - movq \offset+72(%rsp), %rax
15929 + movq RAX(%rsp), %rax
15930 .endif
15931 .endm
15932
15933 -#define REST_SKIP (6*8)
15934 -
15935 .macro SAVE_REST
15936 - subq $REST_SKIP, %rsp
15937 - CFI_ADJUST_CFA_OFFSET REST_SKIP
15938 - movq_cfi rbx, 5*8
15939 - movq_cfi rbp, 4*8
15940 - movq_cfi r12, 3*8
15941 - movq_cfi r13, 2*8
15942 - movq_cfi r14, 1*8
15943 - movq_cfi r15, 0*8
15944 + movq_cfi rbx, RBX
15945 + movq_cfi rbp, RBP
15946 +
15947 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15948 + movq_cfi r12, R12
15949 +#endif
15950 +
15951 + movq_cfi r13, R13
15952 + movq_cfi r14, R14
15953 + movq_cfi r15, R15
15954 .endm
15955
15956 .macro RESTORE_REST
15957 - movq_cfi_restore 0*8, r15
15958 - movq_cfi_restore 1*8, r14
15959 - movq_cfi_restore 2*8, r13
15960 - movq_cfi_restore 3*8, r12
15961 - movq_cfi_restore 4*8, rbp
15962 - movq_cfi_restore 5*8, rbx
15963 - addq $REST_SKIP, %rsp
15964 - CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
15965 + movq_cfi_restore R15, r15
15966 + movq_cfi_restore R14, r14
15967 + movq_cfi_restore R13, r13
15968 +
15969 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15970 + movq_cfi_restore R12, r12
15971 +#endif
15972 +
15973 + movq_cfi_restore RBP, rbp
15974 + movq_cfi_restore RBX, rbx
15975 .endm
15976
15977 .macro SAVE_ALL
15978 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
15979 index f50de69..2b0a458 100644
15980 --- a/arch/x86/include/asm/checksum_32.h
15981 +++ b/arch/x86/include/asm/checksum_32.h
15982 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
15983 int len, __wsum sum,
15984 int *src_err_ptr, int *dst_err_ptr);
15985
15986 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
15987 + int len, __wsum sum,
15988 + int *src_err_ptr, int *dst_err_ptr);
15989 +
15990 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
15991 + int len, __wsum sum,
15992 + int *src_err_ptr, int *dst_err_ptr);
15993 +
15994 /*
15995 * Note: when you get a NULL pointer exception here this means someone
15996 * passed in an incorrect kernel address to one of these functions.
15997 @@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
15998
15999 might_sleep();
16000 stac();
16001 - ret = csum_partial_copy_generic((__force void *)src, dst,
16002 + ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16003 len, sum, err_ptr, NULL);
16004 clac();
16005
16006 @@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16007 might_sleep();
16008 if (access_ok(VERIFY_WRITE, dst, len)) {
16009 stac();
16010 - ret = csum_partial_copy_generic(src, (__force void *)dst,
16011 + ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16012 len, sum, NULL, err_ptr);
16013 clac();
16014 return ret;
16015 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16016 index d47786a..ce1b05d 100644
16017 --- a/arch/x86/include/asm/cmpxchg.h
16018 +++ b/arch/x86/include/asm/cmpxchg.h
16019 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16020 __compiletime_error("Bad argument size for cmpxchg");
16021 extern void __xadd_wrong_size(void)
16022 __compiletime_error("Bad argument size for xadd");
16023 +extern void __xadd_check_overflow_wrong_size(void)
16024 + __compiletime_error("Bad argument size for xadd_check_overflow");
16025 extern void __add_wrong_size(void)
16026 __compiletime_error("Bad argument size for add");
16027 +extern void __add_check_overflow_wrong_size(void)
16028 + __compiletime_error("Bad argument size for add_check_overflow");
16029
16030 /*
16031 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16032 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
16033 __ret; \
16034 })
16035
16036 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16037 + ({ \
16038 + __typeof__ (*(ptr)) __ret = (arg); \
16039 + switch (sizeof(*(ptr))) { \
16040 + case __X86_CASE_L: \
16041 + asm volatile (lock #op "l %0, %1\n" \
16042 + "jno 0f\n" \
16043 + "mov %0,%1\n" \
16044 + "int $4\n0:\n" \
16045 + _ASM_EXTABLE(0b, 0b) \
16046 + : "+r" (__ret), "+m" (*(ptr)) \
16047 + : : "memory", "cc"); \
16048 + break; \
16049 + case __X86_CASE_Q: \
16050 + asm volatile (lock #op "q %q0, %1\n" \
16051 + "jno 0f\n" \
16052 + "mov %0,%1\n" \
16053 + "int $4\n0:\n" \
16054 + _ASM_EXTABLE(0b, 0b) \
16055 + : "+r" (__ret), "+m" (*(ptr)) \
16056 + : : "memory", "cc"); \
16057 + break; \
16058 + default: \
16059 + __ ## op ## _check_overflow_wrong_size(); \
16060 + } \
16061 + __ret; \
16062 + })
16063 +
16064 /*
16065 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16066 * Since this is generally used to protect other memory information, we
16067 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
16068 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16069 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16070
16071 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16072 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16073 +
16074 #define __add(ptr, inc, lock) \
16075 ({ \
16076 __typeof__ (*(ptr)) __ret = (inc); \
16077 diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16078 index 59c6c40..5e0b22c 100644
16079 --- a/arch/x86/include/asm/compat.h
16080 +++ b/arch/x86/include/asm/compat.h
16081 @@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16082 typedef u32 compat_uint_t;
16083 typedef u32 compat_ulong_t;
16084 typedef u64 __attribute__((aligned(4))) compat_u64;
16085 -typedef u32 compat_uptr_t;
16086 +typedef u32 __user compat_uptr_t;
16087
16088 struct compat_timespec {
16089 compat_time_t tv_sec;
16090 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16091 index 89270b4..f0abf8e 100644
16092 --- a/arch/x86/include/asm/cpufeature.h
16093 +++ b/arch/x86/include/asm/cpufeature.h
16094 @@ -203,7 +203,7 @@
16095 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16096 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16097 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16098 -
16099 +#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16100
16101 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16102 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16103 @@ -211,7 +211,7 @@
16104 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16105 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16106 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16107 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16108 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16109 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16110 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16111 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16112 @@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
16113 #undef cpu_has_centaur_mcr
16114 #define cpu_has_centaur_mcr 0
16115
16116 +#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16117 #endif /* CONFIG_X86_64 */
16118
16119 #if __GNUC__ >= 4
16120 @@ -405,7 +406,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16121
16122 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16123 t_warn:
16124 - warn_pre_alternatives();
16125 + if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16126 + warn_pre_alternatives();
16127 return false;
16128 #endif
16129
16130 @@ -425,7 +427,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16131 ".section .discard,\"aw\",@progbits\n"
16132 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16133 ".previous\n"
16134 - ".section .altinstr_replacement,\"ax\"\n"
16135 + ".section .altinstr_replacement,\"a\"\n"
16136 "3: movb $1,%0\n"
16137 "4:\n"
16138 ".previous\n"
16139 @@ -462,7 +464,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16140 " .byte 2b - 1b\n" /* src len */
16141 " .byte 4f - 3f\n" /* repl len */
16142 ".previous\n"
16143 - ".section .altinstr_replacement,\"ax\"\n"
16144 + ".section .altinstr_replacement,\"a\"\n"
16145 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16146 "4:\n"
16147 ".previous\n"
16148 @@ -495,7 +497,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16149 ".section .discard,\"aw\",@progbits\n"
16150 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16151 ".previous\n"
16152 - ".section .altinstr_replacement,\"ax\"\n"
16153 + ".section .altinstr_replacement,\"a\"\n"
16154 "3: movb $0,%0\n"
16155 "4:\n"
16156 ".previous\n"
16157 @@ -509,7 +511,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16158 ".section .discard,\"aw\",@progbits\n"
16159 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16160 ".previous\n"
16161 - ".section .altinstr_replacement,\"ax\"\n"
16162 + ".section .altinstr_replacement,\"a\"\n"
16163 "5: movb $1,%0\n"
16164 "6:\n"
16165 ".previous\n"
16166 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16167 index 50d033a..37deb26 100644
16168 --- a/arch/x86/include/asm/desc.h
16169 +++ b/arch/x86/include/asm/desc.h
16170 @@ -4,6 +4,7 @@
16171 #include <asm/desc_defs.h>
16172 #include <asm/ldt.h>
16173 #include <asm/mmu.h>
16174 +#include <asm/pgtable.h>
16175
16176 #include <linux/smp.h>
16177 #include <linux/percpu.h>
16178 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16179
16180 desc->type = (info->read_exec_only ^ 1) << 1;
16181 desc->type |= info->contents << 2;
16182 + desc->type |= info->seg_not_present ^ 1;
16183
16184 desc->s = 1;
16185 desc->dpl = 0x3;
16186 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16187 }
16188
16189 extern struct desc_ptr idt_descr;
16190 -extern gate_desc idt_table[];
16191 -extern struct desc_ptr debug_idt_descr;
16192 -extern gate_desc debug_idt_table[];
16193 -
16194 -struct gdt_page {
16195 - struct desc_struct gdt[GDT_ENTRIES];
16196 -} __attribute__((aligned(PAGE_SIZE)));
16197 -
16198 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16199 +extern gate_desc idt_table[IDT_ENTRIES];
16200 +extern const struct desc_ptr debug_idt_descr;
16201 +extern gate_desc debug_idt_table[IDT_ENTRIES];
16202
16203 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16204 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16205 {
16206 - return per_cpu(gdt_page, cpu).gdt;
16207 + return cpu_gdt_table[cpu];
16208 }
16209
16210 #ifdef CONFIG_X86_64
16211 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16212 unsigned long base, unsigned dpl, unsigned flags,
16213 unsigned short seg)
16214 {
16215 - gate->a = (seg << 16) | (base & 0xffff);
16216 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16217 + gate->gate.offset_low = base;
16218 + gate->gate.seg = seg;
16219 + gate->gate.reserved = 0;
16220 + gate->gate.type = type;
16221 + gate->gate.s = 0;
16222 + gate->gate.dpl = dpl;
16223 + gate->gate.p = 1;
16224 + gate->gate.offset_high = base >> 16;
16225 }
16226
16227 #endif
16228 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16229
16230 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16231 {
16232 + pax_open_kernel();
16233 memcpy(&idt[entry], gate, sizeof(*gate));
16234 + pax_close_kernel();
16235 }
16236
16237 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16238 {
16239 + pax_open_kernel();
16240 memcpy(&ldt[entry], desc, 8);
16241 + pax_close_kernel();
16242 }
16243
16244 static inline void
16245 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16246 default: size = sizeof(*gdt); break;
16247 }
16248
16249 + pax_open_kernel();
16250 memcpy(&gdt[entry], desc, size);
16251 + pax_close_kernel();
16252 }
16253
16254 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16255 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16256
16257 static inline void native_load_tr_desc(void)
16258 {
16259 + pax_open_kernel();
16260 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16261 + pax_close_kernel();
16262 }
16263
16264 static inline void native_load_gdt(const struct desc_ptr *dtr)
16265 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16266 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16267 unsigned int i;
16268
16269 + pax_open_kernel();
16270 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16271 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16272 + pax_close_kernel();
16273 }
16274
16275 #define _LDT_empty(info) \
16276 @@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16277 preempt_enable();
16278 }
16279
16280 -static inline unsigned long get_desc_base(const struct desc_struct *desc)
16281 +static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16282 {
16283 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16284 }
16285 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16286 }
16287
16288 #ifdef CONFIG_X86_64
16289 -static inline void set_nmi_gate(int gate, void *addr)
16290 +static inline void set_nmi_gate(int gate, const void *addr)
16291 {
16292 gate_desc s;
16293
16294 @@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16295 #endif
16296
16297 #ifdef CONFIG_TRACING
16298 -extern struct desc_ptr trace_idt_descr;
16299 -extern gate_desc trace_idt_table[];
16300 +extern const struct desc_ptr trace_idt_descr;
16301 +extern gate_desc trace_idt_table[IDT_ENTRIES];
16302 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16303 {
16304 write_idt_entry(trace_idt_table, entry, gate);
16305 }
16306
16307 -static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16308 +static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16309 unsigned dpl, unsigned ist, unsigned seg)
16310 {
16311 gate_desc s;
16312 @@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16313 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16314 #endif
16315
16316 -static inline void _set_gate(int gate, unsigned type, void *addr,
16317 +static inline void _set_gate(int gate, unsigned type, const void *addr,
16318 unsigned dpl, unsigned ist, unsigned seg)
16319 {
16320 gate_desc s;
16321 @@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16322 #define set_intr_gate(n, addr) \
16323 do { \
16324 BUG_ON((unsigned)n > 0xFF); \
16325 - _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16326 + _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16327 __KERNEL_CS); \
16328 - _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16329 + _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16330 0, 0, __KERNEL_CS); \
16331 } while (0)
16332
16333 @@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16334 /*
16335 * This routine sets up an interrupt gate at directory privilege level 3.
16336 */
16337 -static inline void set_system_intr_gate(unsigned int n, void *addr)
16338 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
16339 {
16340 BUG_ON((unsigned)n > 0xFF);
16341 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16342 }
16343
16344 -static inline void set_system_trap_gate(unsigned int n, void *addr)
16345 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
16346 {
16347 BUG_ON((unsigned)n > 0xFF);
16348 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16349 }
16350
16351 -static inline void set_trap_gate(unsigned int n, void *addr)
16352 +static inline void set_trap_gate(unsigned int n, const void *addr)
16353 {
16354 BUG_ON((unsigned)n > 0xFF);
16355 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16356 @@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16357 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16358 {
16359 BUG_ON((unsigned)n > 0xFF);
16360 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16361 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16362 }
16363
16364 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16365 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16366 {
16367 BUG_ON((unsigned)n > 0xFF);
16368 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16369 }
16370
16371 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16372 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16373 {
16374 BUG_ON((unsigned)n > 0xFF);
16375 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16376 @@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16377 else
16378 load_idt((const struct desc_ptr *)&idt_descr);
16379 }
16380 +
16381 +#ifdef CONFIG_X86_32
16382 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16383 +{
16384 + struct desc_struct d;
16385 +
16386 + if (likely(limit))
16387 + limit = (limit - 1UL) >> PAGE_SHIFT;
16388 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
16389 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16390 +}
16391 +#endif
16392 +
16393 #endif /* _ASM_X86_DESC_H */
16394 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16395 index 278441f..b95a174 100644
16396 --- a/arch/x86/include/asm/desc_defs.h
16397 +++ b/arch/x86/include/asm/desc_defs.h
16398 @@ -31,6 +31,12 @@ struct desc_struct {
16399 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16400 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16401 };
16402 + struct {
16403 + u16 offset_low;
16404 + u16 seg;
16405 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16406 + unsigned offset_high: 16;
16407 + } gate;
16408 };
16409 } __attribute__((packed));
16410
16411 diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16412 index ced283a..ffe04cc 100644
16413 --- a/arch/x86/include/asm/div64.h
16414 +++ b/arch/x86/include/asm/div64.h
16415 @@ -39,7 +39,7 @@
16416 __mod; \
16417 })
16418
16419 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16420 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16421 {
16422 union {
16423 u64 v64;
16424 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16425 index 9c999c1..3860cb8 100644
16426 --- a/arch/x86/include/asm/elf.h
16427 +++ b/arch/x86/include/asm/elf.h
16428 @@ -243,7 +243,25 @@ extern int force_personality32;
16429 the loader. We need to make sure that it is out of the way of the program
16430 that it will "exec", and that there is sufficient room for the brk. */
16431
16432 +#ifdef CONFIG_PAX_SEGMEXEC
16433 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16434 +#else
16435 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16436 +#endif
16437 +
16438 +#ifdef CONFIG_PAX_ASLR
16439 +#ifdef CONFIG_X86_32
16440 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16441 +
16442 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16443 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16444 +#else
16445 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
16446 +
16447 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16448 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16449 +#endif
16450 +#endif
16451
16452 /* This yields a mask that user programs can use to figure out what
16453 instruction set this CPU supports. This could be done in user space,
16454 @@ -296,16 +314,12 @@ do { \
16455
16456 #define ARCH_DLINFO \
16457 do { \
16458 - if (vdso_enabled) \
16459 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16460 - (unsigned long)current->mm->context.vdso); \
16461 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16462 } while (0)
16463
16464 #define ARCH_DLINFO_X32 \
16465 do { \
16466 - if (vdso_enabled) \
16467 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16468 - (unsigned long)current->mm->context.vdso); \
16469 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16470 } while (0)
16471
16472 #define AT_SYSINFO 32
16473 @@ -320,7 +334,7 @@ else \
16474
16475 #endif /* !CONFIG_X86_32 */
16476
16477 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16478 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16479
16480 #define VDSO_ENTRY \
16481 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
16482 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
16483 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
16484 #define compat_arch_setup_additional_pages syscall32_setup_pages
16485
16486 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16487 -#define arch_randomize_brk arch_randomize_brk
16488 -
16489 /*
16490 * True on X86_32 or when emulating IA32 on X86_64
16491 */
16492 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16493 index 77a99ac..39ff7f5 100644
16494 --- a/arch/x86/include/asm/emergency-restart.h
16495 +++ b/arch/x86/include/asm/emergency-restart.h
16496 @@ -1,6 +1,6 @@
16497 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16498 #define _ASM_X86_EMERGENCY_RESTART_H
16499
16500 -extern void machine_emergency_restart(void);
16501 +extern void machine_emergency_restart(void) __noreturn;
16502
16503 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16504 diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16505 index d3d7469..677ef72 100644
16506 --- a/arch/x86/include/asm/floppy.h
16507 +++ b/arch/x86/include/asm/floppy.h
16508 @@ -229,18 +229,18 @@ static struct fd_routine_l {
16509 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16510 } fd_routine[] = {
16511 {
16512 - request_dma,
16513 - free_dma,
16514 - get_dma_residue,
16515 - dma_mem_alloc,
16516 - hard_dma_setup
16517 + ._request_dma = request_dma,
16518 + ._free_dma = free_dma,
16519 + ._get_dma_residue = get_dma_residue,
16520 + ._dma_mem_alloc = dma_mem_alloc,
16521 + ._dma_setup = hard_dma_setup
16522 },
16523 {
16524 - vdma_request_dma,
16525 - vdma_nop,
16526 - vdma_get_dma_residue,
16527 - vdma_mem_alloc,
16528 - vdma_dma_setup
16529 + ._request_dma = vdma_request_dma,
16530 + ._free_dma = vdma_nop,
16531 + ._get_dma_residue = vdma_get_dma_residue,
16532 + ._dma_mem_alloc = vdma_mem_alloc,
16533 + ._dma_setup = vdma_dma_setup
16534 }
16535 };
16536
16537 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16538 index cea1c76..6c0d79b 100644
16539 --- a/arch/x86/include/asm/fpu-internal.h
16540 +++ b/arch/x86/include/asm/fpu-internal.h
16541 @@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16542 #define user_insn(insn, output, input...) \
16543 ({ \
16544 int err; \
16545 + pax_open_userland(); \
16546 asm volatile(ASM_STAC "\n" \
16547 - "1:" #insn "\n\t" \
16548 + "1:" \
16549 + __copyuser_seg \
16550 + #insn "\n\t" \
16551 "2: " ASM_CLAC "\n" \
16552 ".section .fixup,\"ax\"\n" \
16553 "3: movl $-1,%[err]\n" \
16554 @@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16555 _ASM_EXTABLE(1b, 3b) \
16556 : [err] "=r" (err), output \
16557 : "0"(0), input); \
16558 + pax_close_userland(); \
16559 err; \
16560 })
16561
16562 @@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16563 "fnclex\n\t"
16564 "emms\n\t"
16565 "fildl %P[addr]" /* set F?P to defined value */
16566 - : : [addr] "m" (tsk->thread.fpu.has_fpu));
16567 + : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16568 }
16569
16570 return fpu_restore_checking(&tsk->thread.fpu);
16571 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16572 index be27ba1..04a8801 100644
16573 --- a/arch/x86/include/asm/futex.h
16574 +++ b/arch/x86/include/asm/futex.h
16575 @@ -12,6 +12,7 @@
16576 #include <asm/smap.h>
16577
16578 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16579 + typecheck(u32 __user *, uaddr); \
16580 asm volatile("\t" ASM_STAC "\n" \
16581 "1:\t" insn "\n" \
16582 "2:\t" ASM_CLAC "\n" \
16583 @@ -20,15 +21,16 @@
16584 "\tjmp\t2b\n" \
16585 "\t.previous\n" \
16586 _ASM_EXTABLE(1b, 3b) \
16587 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16588 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16589 : "i" (-EFAULT), "0" (oparg), "1" (0))
16590
16591 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16592 + typecheck(u32 __user *, uaddr); \
16593 asm volatile("\t" ASM_STAC "\n" \
16594 "1:\tmovl %2, %0\n" \
16595 "\tmovl\t%0, %3\n" \
16596 "\t" insn "\n" \
16597 - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16598 + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16599 "\tjnz\t1b\n" \
16600 "3:\t" ASM_CLAC "\n" \
16601 "\t.section .fixup,\"ax\"\n" \
16602 @@ -38,7 +40,7 @@
16603 _ASM_EXTABLE(1b, 4b) \
16604 _ASM_EXTABLE(2b, 4b) \
16605 : "=&a" (oldval), "=&r" (ret), \
16606 - "+m" (*uaddr), "=&r" (tem) \
16607 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16608 : "r" (oparg), "i" (-EFAULT), "1" (0))
16609
16610 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16611 @@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16612
16613 pagefault_disable();
16614
16615 + pax_open_userland();
16616 switch (op) {
16617 case FUTEX_OP_SET:
16618 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16619 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16620 break;
16621 case FUTEX_OP_ADD:
16622 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16623 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16624 uaddr, oparg);
16625 break;
16626 case FUTEX_OP_OR:
16627 @@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16628 default:
16629 ret = -ENOSYS;
16630 }
16631 + pax_close_userland();
16632
16633 pagefault_enable();
16634
16635 @@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
16636 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
16637 return -EFAULT;
16638
16639 + pax_open_userland();
16640 asm volatile("\t" ASM_STAC "\n"
16641 - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
16642 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
16643 "2:\t" ASM_CLAC "\n"
16644 "\t.section .fixup, \"ax\"\n"
16645 "3:\tmov %3, %0\n"
16646 "\tjmp 2b\n"
16647 "\t.previous\n"
16648 _ASM_EXTABLE(1b, 3b)
16649 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
16650 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
16651 : "i" (-EFAULT), "r" (newval), "1" (oldval)
16652 : "memory"
16653 );
16654 + pax_close_userland();
16655
16656 *uval = oldval;
16657 return ret;
16658 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16659 index cba45d9..86344ba 100644
16660 --- a/arch/x86/include/asm/hw_irq.h
16661 +++ b/arch/x86/include/asm/hw_irq.h
16662 @@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
16663 extern void enable_IO_APIC(void);
16664
16665 /* Statistics */
16666 -extern atomic_t irq_err_count;
16667 -extern atomic_t irq_mis_count;
16668 +extern atomic_unchecked_t irq_err_count;
16669 +extern atomic_unchecked_t irq_mis_count;
16670
16671 /* EISA */
16672 extern void eisa_set_level_irq(unsigned int irq);
16673 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16674 index a203659..9889f1c 100644
16675 --- a/arch/x86/include/asm/i8259.h
16676 +++ b/arch/x86/include/asm/i8259.h
16677 @@ -62,7 +62,7 @@ struct legacy_pic {
16678 void (*init)(int auto_eoi);
16679 int (*irq_pending)(unsigned int irq);
16680 void (*make_irq)(unsigned int irq);
16681 -};
16682 +} __do_const;
16683
16684 extern struct legacy_pic *legacy_pic;
16685 extern struct legacy_pic null_legacy_pic;
16686 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16687 index 34f69cb..6d95446 100644
16688 --- a/arch/x86/include/asm/io.h
16689 +++ b/arch/x86/include/asm/io.h
16690 @@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16691 "m" (*(volatile type __force *)addr) barrier); }
16692
16693 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16694 -build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16695 -build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16696 +build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16697 +build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16698
16699 build_mmio_read(__readb, "b", unsigned char, "=q", )
16700 -build_mmio_read(__readw, "w", unsigned short, "=r", )
16701 -build_mmio_read(__readl, "l", unsigned int, "=r", )
16702 +build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16703 +build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16704
16705 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16706 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16707 @@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16708 return ioremap_nocache(offset, size);
16709 }
16710
16711 -extern void iounmap(volatile void __iomem *addr);
16712 +extern void iounmap(const volatile void __iomem *addr);
16713
16714 extern void set_iounmap_nonlazy(void);
16715
16716 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
16717
16718 #include <linux/vmalloc.h>
16719
16720 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
16721 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
16722 +{
16723 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16724 +}
16725 +
16726 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
16727 +{
16728 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16729 +}
16730 +
16731 /*
16732 * Convert a virtual cached pointer to an uncached pointer
16733 */
16734 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
16735 index bba3cf8..06bc8da 100644
16736 --- a/arch/x86/include/asm/irqflags.h
16737 +++ b/arch/x86/include/asm/irqflags.h
16738 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
16739 sti; \
16740 sysexit
16741
16742 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
16743 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
16744 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
16745 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
16746 +
16747 #else
16748 #define INTERRUPT_RETURN iret
16749 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
16750 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
16751 index 9454c16..e4100e3 100644
16752 --- a/arch/x86/include/asm/kprobes.h
16753 +++ b/arch/x86/include/asm/kprobes.h
16754 @@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
16755 #define RELATIVEJUMP_SIZE 5
16756 #define RELATIVECALL_OPCODE 0xe8
16757 #define RELATIVE_ADDR_SIZE 4
16758 -#define MAX_STACK_SIZE 64
16759 -#define MIN_STACK_SIZE(ADDR) \
16760 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
16761 - THREAD_SIZE - (unsigned long)(ADDR))) \
16762 - ? (MAX_STACK_SIZE) \
16763 - : (((unsigned long)current_thread_info()) + \
16764 - THREAD_SIZE - (unsigned long)(ADDR)))
16765 +#define MAX_STACK_SIZE 64UL
16766 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
16767
16768 #define flush_insn_slot(p) do { } while (0)
16769
16770 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
16771 index 4ad6560..75c7bdd 100644
16772 --- a/arch/x86/include/asm/local.h
16773 +++ b/arch/x86/include/asm/local.h
16774 @@ -10,33 +10,97 @@ typedef struct {
16775 atomic_long_t a;
16776 } local_t;
16777
16778 +typedef struct {
16779 + atomic_long_unchecked_t a;
16780 +} local_unchecked_t;
16781 +
16782 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
16783
16784 #define local_read(l) atomic_long_read(&(l)->a)
16785 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
16786 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
16787 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
16788
16789 static inline void local_inc(local_t *l)
16790 {
16791 - asm volatile(_ASM_INC "%0"
16792 + asm volatile(_ASM_INC "%0\n"
16793 +
16794 +#ifdef CONFIG_PAX_REFCOUNT
16795 + "jno 0f\n"
16796 + _ASM_DEC "%0\n"
16797 + "int $4\n0:\n"
16798 + _ASM_EXTABLE(0b, 0b)
16799 +#endif
16800 +
16801 + : "+m" (l->a.counter));
16802 +}
16803 +
16804 +static inline void local_inc_unchecked(local_unchecked_t *l)
16805 +{
16806 + asm volatile(_ASM_INC "%0\n"
16807 : "+m" (l->a.counter));
16808 }
16809
16810 static inline void local_dec(local_t *l)
16811 {
16812 - asm volatile(_ASM_DEC "%0"
16813 + asm volatile(_ASM_DEC "%0\n"
16814 +
16815 +#ifdef CONFIG_PAX_REFCOUNT
16816 + "jno 0f\n"
16817 + _ASM_INC "%0\n"
16818 + "int $4\n0:\n"
16819 + _ASM_EXTABLE(0b, 0b)
16820 +#endif
16821 +
16822 + : "+m" (l->a.counter));
16823 +}
16824 +
16825 +static inline void local_dec_unchecked(local_unchecked_t *l)
16826 +{
16827 + asm volatile(_ASM_DEC "%0\n"
16828 : "+m" (l->a.counter));
16829 }
16830
16831 static inline void local_add(long i, local_t *l)
16832 {
16833 - asm volatile(_ASM_ADD "%1,%0"
16834 + asm volatile(_ASM_ADD "%1,%0\n"
16835 +
16836 +#ifdef CONFIG_PAX_REFCOUNT
16837 + "jno 0f\n"
16838 + _ASM_SUB "%1,%0\n"
16839 + "int $4\n0:\n"
16840 + _ASM_EXTABLE(0b, 0b)
16841 +#endif
16842 +
16843 + : "+m" (l->a.counter)
16844 + : "ir" (i));
16845 +}
16846 +
16847 +static inline void local_add_unchecked(long i, local_unchecked_t *l)
16848 +{
16849 + asm volatile(_ASM_ADD "%1,%0\n"
16850 : "+m" (l->a.counter)
16851 : "ir" (i));
16852 }
16853
16854 static inline void local_sub(long i, local_t *l)
16855 {
16856 - asm volatile(_ASM_SUB "%1,%0"
16857 + asm volatile(_ASM_SUB "%1,%0\n"
16858 +
16859 +#ifdef CONFIG_PAX_REFCOUNT
16860 + "jno 0f\n"
16861 + _ASM_ADD "%1,%0\n"
16862 + "int $4\n0:\n"
16863 + _ASM_EXTABLE(0b, 0b)
16864 +#endif
16865 +
16866 + : "+m" (l->a.counter)
16867 + : "ir" (i));
16868 +}
16869 +
16870 +static inline void local_sub_unchecked(long i, local_unchecked_t *l)
16871 +{
16872 + asm volatile(_ASM_SUB "%1,%0\n"
16873 : "+m" (l->a.counter)
16874 : "ir" (i));
16875 }
16876 @@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
16877 */
16878 static inline int local_sub_and_test(long i, local_t *l)
16879 {
16880 - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
16881 + GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
16882 }
16883
16884 /**
16885 @@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
16886 */
16887 static inline int local_dec_and_test(local_t *l)
16888 {
16889 - GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
16890 + GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
16891 }
16892
16893 /**
16894 @@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
16895 */
16896 static inline int local_inc_and_test(local_t *l)
16897 {
16898 - GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
16899 + GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
16900 }
16901
16902 /**
16903 @@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
16904 */
16905 static inline int local_add_negative(long i, local_t *l)
16906 {
16907 - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
16908 + GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
16909 }
16910
16911 /**
16912 @@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
16913 static inline long local_add_return(long i, local_t *l)
16914 {
16915 long __i = i;
16916 + asm volatile(_ASM_XADD "%0, %1\n"
16917 +
16918 +#ifdef CONFIG_PAX_REFCOUNT
16919 + "jno 0f\n"
16920 + _ASM_MOV "%0,%1\n"
16921 + "int $4\n0:\n"
16922 + _ASM_EXTABLE(0b, 0b)
16923 +#endif
16924 +
16925 + : "+r" (i), "+m" (l->a.counter)
16926 + : : "memory");
16927 + return i + __i;
16928 +}
16929 +
16930 +/**
16931 + * local_add_return_unchecked - add and return
16932 + * @i: integer value to add
16933 + * @l: pointer to type local_unchecked_t
16934 + *
16935 + * Atomically adds @i to @l and returns @i + @l
16936 + */
16937 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
16938 +{
16939 + long __i = i;
16940 asm volatile(_ASM_XADD "%0, %1;"
16941 : "+r" (i), "+m" (l->a.counter)
16942 : : "memory");
16943 @@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
16944
16945 #define local_cmpxchg(l, o, n) \
16946 (cmpxchg_local(&((l)->a.counter), (o), (n)))
16947 +#define local_cmpxchg_unchecked(l, o, n) \
16948 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
16949 /* Always has a lock prefix */
16950 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
16951
16952 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
16953 new file mode 100644
16954 index 0000000..2bfd3ba
16955 --- /dev/null
16956 +++ b/arch/x86/include/asm/mman.h
16957 @@ -0,0 +1,15 @@
16958 +#ifndef _X86_MMAN_H
16959 +#define _X86_MMAN_H
16960 +
16961 +#include <uapi/asm/mman.h>
16962 +
16963 +#ifdef __KERNEL__
16964 +#ifndef __ASSEMBLY__
16965 +#ifdef CONFIG_X86_32
16966 +#define arch_mmap_check i386_mmap_check
16967 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
16968 +#endif
16969 +#endif
16970 +#endif
16971 +
16972 +#endif /* X86_MMAN_H */
16973 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
16974 index 5f55e69..e20bfb1 100644
16975 --- a/arch/x86/include/asm/mmu.h
16976 +++ b/arch/x86/include/asm/mmu.h
16977 @@ -9,7 +9,7 @@
16978 * we put the segment information here.
16979 */
16980 typedef struct {
16981 - void *ldt;
16982 + struct desc_struct *ldt;
16983 int size;
16984
16985 #ifdef CONFIG_X86_64
16986 @@ -18,7 +18,19 @@ typedef struct {
16987 #endif
16988
16989 struct mutex lock;
16990 - void *vdso;
16991 + unsigned long vdso;
16992 +
16993 +#ifdef CONFIG_X86_32
16994 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16995 + unsigned long user_cs_base;
16996 + unsigned long user_cs_limit;
16997 +
16998 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16999 + cpumask_t cpu_user_cs_mask;
17000 +#endif
17001 +
17002 +#endif
17003 +#endif
17004 } mm_context_t;
17005
17006 #ifdef CONFIG_SMP
17007 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17008 index be12c53..4d24039 100644
17009 --- a/arch/x86/include/asm/mmu_context.h
17010 +++ b/arch/x86/include/asm/mmu_context.h
17011 @@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17012
17013 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17014 {
17015 +
17016 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17017 + if (!(static_cpu_has(X86_FEATURE_PCID))) {
17018 + unsigned int i;
17019 + pgd_t *pgd;
17020 +
17021 + pax_open_kernel();
17022 + pgd = get_cpu_pgd(smp_processor_id(), kernel);
17023 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17024 + set_pgd_batched(pgd+i, native_make_pgd(0));
17025 + pax_close_kernel();
17026 + }
17027 +#endif
17028 +
17029 #ifdef CONFIG_SMP
17030 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17031 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17032 @@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17033 struct task_struct *tsk)
17034 {
17035 unsigned cpu = smp_processor_id();
17036 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17037 + int tlbstate = TLBSTATE_OK;
17038 +#endif
17039
17040 if (likely(prev != next)) {
17041 #ifdef CONFIG_SMP
17042 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17043 + tlbstate = this_cpu_read(cpu_tlbstate.state);
17044 +#endif
17045 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17046 this_cpu_write(cpu_tlbstate.active_mm, next);
17047 #endif
17048 cpumask_set_cpu(cpu, mm_cpumask(next));
17049
17050 /* Re-load page tables */
17051 +#ifdef CONFIG_PAX_PER_CPU_PGD
17052 + pax_open_kernel();
17053 +
17054 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17055 + if (static_cpu_has(X86_FEATURE_PCID))
17056 + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17057 + else
17058 +#endif
17059 +
17060 + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17061 + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17062 + pax_close_kernel();
17063 + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17064 +
17065 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17066 + if (static_cpu_has(X86_FEATURE_PCID)) {
17067 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
17068 + u64 descriptor[2];
17069 + descriptor[0] = PCID_USER;
17070 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17071 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17072 + descriptor[0] = PCID_KERNEL;
17073 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17074 + }
17075 + } else {
17076 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17077 + if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17078 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17079 + else
17080 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17081 + }
17082 + } else
17083 +#endif
17084 +
17085 + load_cr3(get_cpu_pgd(cpu, kernel));
17086 +#else
17087 load_cr3(next->pgd);
17088 +#endif
17089
17090 /* Stop flush ipis for the previous mm */
17091 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17092 @@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17093 /* Load the LDT, if the LDT is different: */
17094 if (unlikely(prev->context.ldt != next->context.ldt))
17095 load_LDT_nolock(&next->context);
17096 +
17097 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17098 + if (!(__supported_pte_mask & _PAGE_NX)) {
17099 + smp_mb__before_clear_bit();
17100 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17101 + smp_mb__after_clear_bit();
17102 + cpu_set(cpu, next->context.cpu_user_cs_mask);
17103 + }
17104 +#endif
17105 +
17106 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17107 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17108 + prev->context.user_cs_limit != next->context.user_cs_limit))
17109 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17110 +#ifdef CONFIG_SMP
17111 + else if (unlikely(tlbstate != TLBSTATE_OK))
17112 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17113 +#endif
17114 +#endif
17115 +
17116 }
17117 + else {
17118 +
17119 +#ifdef CONFIG_PAX_PER_CPU_PGD
17120 + pax_open_kernel();
17121 +
17122 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17123 + if (static_cpu_has(X86_FEATURE_PCID))
17124 + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17125 + else
17126 +#endif
17127 +
17128 + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17129 + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17130 + pax_close_kernel();
17131 + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17132 +
17133 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17134 + if (static_cpu_has(X86_FEATURE_PCID)) {
17135 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
17136 + u64 descriptor[2];
17137 + descriptor[0] = PCID_USER;
17138 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17139 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17140 + descriptor[0] = PCID_KERNEL;
17141 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17142 + }
17143 + } else {
17144 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17145 + if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17146 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17147 + else
17148 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17149 + }
17150 + } else
17151 +#endif
17152 +
17153 + load_cr3(get_cpu_pgd(cpu, kernel));
17154 +#endif
17155 +
17156 #ifdef CONFIG_SMP
17157 - else {
17158 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17159 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17160
17161 @@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17162 * tlb flush IPI delivery. We must reload CR3
17163 * to make sure to use no freed page tables.
17164 */
17165 +
17166 +#ifndef CONFIG_PAX_PER_CPU_PGD
17167 load_cr3(next->pgd);
17168 +#endif
17169 +
17170 load_LDT_nolock(&next->context);
17171 +
17172 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17173 + if (!(__supported_pte_mask & _PAGE_NX))
17174 + cpu_set(cpu, next->context.cpu_user_cs_mask);
17175 +#endif
17176 +
17177 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17178 +#ifdef CONFIG_PAX_PAGEEXEC
17179 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17180 +#endif
17181 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17182 +#endif
17183 +
17184 }
17185 +#endif
17186 }
17187 -#endif
17188 }
17189
17190 #define activate_mm(prev, next) \
17191 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17192 index e3b7819..b257c64 100644
17193 --- a/arch/x86/include/asm/module.h
17194 +++ b/arch/x86/include/asm/module.h
17195 @@ -5,6 +5,7 @@
17196
17197 #ifdef CONFIG_X86_64
17198 /* X86_64 does not define MODULE_PROC_FAMILY */
17199 +#define MODULE_PROC_FAMILY ""
17200 #elif defined CONFIG_M486
17201 #define MODULE_PROC_FAMILY "486 "
17202 #elif defined CONFIG_M586
17203 @@ -57,8 +58,20 @@
17204 #error unknown processor family
17205 #endif
17206
17207 -#ifdef CONFIG_X86_32
17208 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17209 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17210 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17211 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17212 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17213 +#else
17214 +#define MODULE_PAX_KERNEXEC ""
17215 #endif
17216
17217 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17218 +#define MODULE_PAX_UDEREF "UDEREF "
17219 +#else
17220 +#define MODULE_PAX_UDEREF ""
17221 +#endif
17222 +
17223 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17224 +
17225 #endif /* _ASM_X86_MODULE_H */
17226 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17227 index 86f9301..b365cda 100644
17228 --- a/arch/x86/include/asm/nmi.h
17229 +++ b/arch/x86/include/asm/nmi.h
17230 @@ -40,11 +40,11 @@ struct nmiaction {
17231 nmi_handler_t handler;
17232 unsigned long flags;
17233 const char *name;
17234 -};
17235 +} __do_const;
17236
17237 #define register_nmi_handler(t, fn, fg, n, init...) \
17238 ({ \
17239 - static struct nmiaction init fn##_na = { \
17240 + static const struct nmiaction init fn##_na = { \
17241 .handler = (fn), \
17242 .name = (n), \
17243 .flags = (fg), \
17244 @@ -52,7 +52,7 @@ struct nmiaction {
17245 __register_nmi_handler((t), &fn##_na); \
17246 })
17247
17248 -int __register_nmi_handler(unsigned int, struct nmiaction *);
17249 +int __register_nmi_handler(unsigned int, const struct nmiaction *);
17250
17251 void unregister_nmi_handler(unsigned int, const char *);
17252
17253 diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17254 index c878924..21f4889 100644
17255 --- a/arch/x86/include/asm/page.h
17256 +++ b/arch/x86/include/asm/page.h
17257 @@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17258 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17259
17260 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17261 +#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17262
17263 #define __boot_va(x) __va(x)
17264 #define __boot_pa(x) __pa(x)
17265 diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17266 index 0f1ddee..e2fc3d1 100644
17267 --- a/arch/x86/include/asm/page_64.h
17268 +++ b/arch/x86/include/asm/page_64.h
17269 @@ -7,9 +7,9 @@
17270
17271 /* duplicated to the one in bootmem.h */
17272 extern unsigned long max_pfn;
17273 -extern unsigned long phys_base;
17274 +extern const unsigned long phys_base;
17275
17276 -static inline unsigned long __phys_addr_nodebug(unsigned long x)
17277 +static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17278 {
17279 unsigned long y = x - __START_KERNEL_map;
17280
17281 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17282 index 401f350..dee5d13 100644
17283 --- a/arch/x86/include/asm/paravirt.h
17284 +++ b/arch/x86/include/asm/paravirt.h
17285 @@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17286 return (pmd_t) { ret };
17287 }
17288
17289 -static inline pmdval_t pmd_val(pmd_t pmd)
17290 +static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17291 {
17292 pmdval_t ret;
17293
17294 @@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17295 val);
17296 }
17297
17298 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17299 +{
17300 + pgdval_t val = native_pgd_val(pgd);
17301 +
17302 + if (sizeof(pgdval_t) > sizeof(long))
17303 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17304 + val, (u64)val >> 32);
17305 + else
17306 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17307 + val);
17308 +}
17309 +
17310 static inline void pgd_clear(pgd_t *pgdp)
17311 {
17312 set_pgd(pgdp, __pgd(0));
17313 @@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17314 pv_mmu_ops.set_fixmap(idx, phys, flags);
17315 }
17316
17317 +#ifdef CONFIG_PAX_KERNEXEC
17318 +static inline unsigned long pax_open_kernel(void)
17319 +{
17320 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17321 +}
17322 +
17323 +static inline unsigned long pax_close_kernel(void)
17324 +{
17325 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17326 +}
17327 +#else
17328 +static inline unsigned long pax_open_kernel(void) { return 0; }
17329 +static inline unsigned long pax_close_kernel(void) { return 0; }
17330 +#endif
17331 +
17332 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17333
17334 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17335 @@ -906,7 +933,7 @@ extern void default_banner(void);
17336
17337 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17338 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17339 -#define PARA_INDIRECT(addr) *%cs:addr
17340 +#define PARA_INDIRECT(addr) *%ss:addr
17341 #endif
17342
17343 #define INTERRUPT_RETURN \
17344 @@ -981,6 +1008,21 @@ extern void default_banner(void);
17345 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17346 CLBR_NONE, \
17347 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17348 +
17349 +#define GET_CR0_INTO_RDI \
17350 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17351 + mov %rax,%rdi
17352 +
17353 +#define SET_RDI_INTO_CR0 \
17354 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17355 +
17356 +#define GET_CR3_INTO_RDI \
17357 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17358 + mov %rax,%rdi
17359 +
17360 +#define SET_RDI_INTO_CR3 \
17361 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17362 +
17363 #endif /* CONFIG_X86_32 */
17364
17365 #endif /* __ASSEMBLY__ */
17366 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17367 index aab8f67..0fb0ee4 100644
17368 --- a/arch/x86/include/asm/paravirt_types.h
17369 +++ b/arch/x86/include/asm/paravirt_types.h
17370 @@ -84,7 +84,7 @@ struct pv_init_ops {
17371 */
17372 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17373 unsigned long addr, unsigned len);
17374 -};
17375 +} __no_const __no_randomize_layout;
17376
17377
17378 struct pv_lazy_ops {
17379 @@ -92,13 +92,13 @@ struct pv_lazy_ops {
17380 void (*enter)(void);
17381 void (*leave)(void);
17382 void (*flush)(void);
17383 -};
17384 +} __no_randomize_layout;
17385
17386 struct pv_time_ops {
17387 unsigned long long (*sched_clock)(void);
17388 unsigned long long (*steal_clock)(int cpu);
17389 unsigned long (*get_tsc_khz)(void);
17390 -};
17391 +} __no_const __no_randomize_layout;
17392
17393 struct pv_cpu_ops {
17394 /* hooks for various privileged instructions */
17395 @@ -192,7 +192,7 @@ struct pv_cpu_ops {
17396
17397 void (*start_context_switch)(struct task_struct *prev);
17398 void (*end_context_switch)(struct task_struct *next);
17399 -};
17400 +} __no_const __no_randomize_layout;
17401
17402 struct pv_irq_ops {
17403 /*
17404 @@ -215,7 +215,7 @@ struct pv_irq_ops {
17405 #ifdef CONFIG_X86_64
17406 void (*adjust_exception_frame)(void);
17407 #endif
17408 -};
17409 +} __no_randomize_layout;
17410
17411 struct pv_apic_ops {
17412 #ifdef CONFIG_X86_LOCAL_APIC
17413 @@ -223,7 +223,7 @@ struct pv_apic_ops {
17414 unsigned long start_eip,
17415 unsigned long start_esp);
17416 #endif
17417 -};
17418 +} __no_const __no_randomize_layout;
17419
17420 struct pv_mmu_ops {
17421 unsigned long (*read_cr2)(void);
17422 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
17423 struct paravirt_callee_save make_pud;
17424
17425 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17426 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17427 #endif /* PAGETABLE_LEVELS == 4 */
17428 #endif /* PAGETABLE_LEVELS >= 3 */
17429
17430 @@ -324,7 +325,13 @@ struct pv_mmu_ops {
17431 an mfn. We can tell which is which from the index. */
17432 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17433 phys_addr_t phys, pgprot_t flags);
17434 -};
17435 +
17436 +#ifdef CONFIG_PAX_KERNEXEC
17437 + unsigned long (*pax_open_kernel)(void);
17438 + unsigned long (*pax_close_kernel)(void);
17439 +#endif
17440 +
17441 +} __no_randomize_layout;
17442
17443 struct arch_spinlock;
17444 #ifdef CONFIG_SMP
17445 @@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17446 struct pv_lock_ops {
17447 struct paravirt_callee_save lock_spinning;
17448 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17449 -};
17450 +} __no_randomize_layout;
17451
17452 /* This contains all the paravirt structures: we get a convenient
17453 * number for each function using the offset which we use to indicate
17454 - * what to patch. */
17455 + * what to patch.
17456 + * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17457 + */
17458 +
17459 struct paravirt_patch_template {
17460 struct pv_init_ops pv_init_ops;
17461 struct pv_time_ops pv_time_ops;
17462 @@ -349,7 +359,7 @@ struct paravirt_patch_template {
17463 struct pv_apic_ops pv_apic_ops;
17464 struct pv_mmu_ops pv_mmu_ops;
17465 struct pv_lock_ops pv_lock_ops;
17466 -};
17467 +} __no_randomize_layout;
17468
17469 extern struct pv_info pv_info;
17470 extern struct pv_init_ops pv_init_ops;
17471 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17472 index c4412e9..90e88c5 100644
17473 --- a/arch/x86/include/asm/pgalloc.h
17474 +++ b/arch/x86/include/asm/pgalloc.h
17475 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17476 pmd_t *pmd, pte_t *pte)
17477 {
17478 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17479 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17480 +}
17481 +
17482 +static inline void pmd_populate_user(struct mm_struct *mm,
17483 + pmd_t *pmd, pte_t *pte)
17484 +{
17485 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17486 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17487 }
17488
17489 @@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17490
17491 #ifdef CONFIG_X86_PAE
17492 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17493 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17494 +{
17495 + pud_populate(mm, pudp, pmd);
17496 +}
17497 #else /* !CONFIG_X86_PAE */
17498 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17499 {
17500 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17501 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17502 }
17503 +
17504 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17505 +{
17506 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17507 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17508 +}
17509 #endif /* CONFIG_X86_PAE */
17510
17511 #if PAGETABLE_LEVELS > 3
17512 @@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17513 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17514 }
17515
17516 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17517 +{
17518 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17519 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17520 +}
17521 +
17522 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17523 {
17524 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17525 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17526 index 3bf2dd0..23d2a9f 100644
17527 --- a/arch/x86/include/asm/pgtable-2level.h
17528 +++ b/arch/x86/include/asm/pgtable-2level.h
17529 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17530
17531 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17532 {
17533 + pax_open_kernel();
17534 *pmdp = pmd;
17535 + pax_close_kernel();
17536 }
17537
17538 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17539 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17540 index 81bb91b..9392125 100644
17541 --- a/arch/x86/include/asm/pgtable-3level.h
17542 +++ b/arch/x86/include/asm/pgtable-3level.h
17543 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17544
17545 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17546 {
17547 + pax_open_kernel();
17548 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17549 + pax_close_kernel();
17550 }
17551
17552 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17553 {
17554 + pax_open_kernel();
17555 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17556 + pax_close_kernel();
17557 }
17558
17559 /*
17560 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17561 index 5ad38ad..f228861 100644
17562 --- a/arch/x86/include/asm/pgtable.h
17563 +++ b/arch/x86/include/asm/pgtable.h
17564 @@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17565
17566 #ifndef __PAGETABLE_PUD_FOLDED
17567 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17568 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17569 #define pgd_clear(pgd) native_pgd_clear(pgd)
17570 #endif
17571
17572 @@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17573
17574 #define arch_end_context_switch(prev) do {} while(0)
17575
17576 +#define pax_open_kernel() native_pax_open_kernel()
17577 +#define pax_close_kernel() native_pax_close_kernel()
17578 #endif /* CONFIG_PARAVIRT */
17579
17580 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
17581 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17582 +
17583 +#ifdef CONFIG_PAX_KERNEXEC
17584 +static inline unsigned long native_pax_open_kernel(void)
17585 +{
17586 + unsigned long cr0;
17587 +
17588 + preempt_disable();
17589 + barrier();
17590 + cr0 = read_cr0() ^ X86_CR0_WP;
17591 + BUG_ON(cr0 & X86_CR0_WP);
17592 + write_cr0(cr0);
17593 + return cr0 ^ X86_CR0_WP;
17594 +}
17595 +
17596 +static inline unsigned long native_pax_close_kernel(void)
17597 +{
17598 + unsigned long cr0;
17599 +
17600 + cr0 = read_cr0() ^ X86_CR0_WP;
17601 + BUG_ON(!(cr0 & X86_CR0_WP));
17602 + write_cr0(cr0);
17603 + barrier();
17604 + preempt_enable_no_resched();
17605 + return cr0 ^ X86_CR0_WP;
17606 +}
17607 +#else
17608 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
17609 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
17610 +#endif
17611 +
17612 /*
17613 * The following only work if pte_present() is true.
17614 * Undefined behaviour if not..
17615 */
17616 +static inline int pte_user(pte_t pte)
17617 +{
17618 + return pte_val(pte) & _PAGE_USER;
17619 +}
17620 +
17621 static inline int pte_dirty(pte_t pte)
17622 {
17623 return pte_flags(pte) & _PAGE_DIRTY;
17624 @@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17625 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17626 }
17627
17628 +static inline unsigned long pgd_pfn(pgd_t pgd)
17629 +{
17630 + return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17631 +}
17632 +
17633 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17634
17635 static inline int pmd_large(pmd_t pte)
17636 @@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17637 return pte_clear_flags(pte, _PAGE_RW);
17638 }
17639
17640 +static inline pte_t pte_mkread(pte_t pte)
17641 +{
17642 + return __pte(pte_val(pte) | _PAGE_USER);
17643 +}
17644 +
17645 static inline pte_t pte_mkexec(pte_t pte)
17646 {
17647 - return pte_clear_flags(pte, _PAGE_NX);
17648 +#ifdef CONFIG_X86_PAE
17649 + if (__supported_pte_mask & _PAGE_NX)
17650 + return pte_clear_flags(pte, _PAGE_NX);
17651 + else
17652 +#endif
17653 + return pte_set_flags(pte, _PAGE_USER);
17654 +}
17655 +
17656 +static inline pte_t pte_exprotect(pte_t pte)
17657 +{
17658 +#ifdef CONFIG_X86_PAE
17659 + if (__supported_pte_mask & _PAGE_NX)
17660 + return pte_set_flags(pte, _PAGE_NX);
17661 + else
17662 +#endif
17663 + return pte_clear_flags(pte, _PAGE_USER);
17664 }
17665
17666 static inline pte_t pte_mkdirty(pte_t pte)
17667 @@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
17668 #endif
17669
17670 #ifndef __ASSEMBLY__
17671 +
17672 +#ifdef CONFIG_PAX_PER_CPU_PGD
17673 +extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
17674 +enum cpu_pgd_type {kernel = 0, user = 1};
17675 +static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
17676 +{
17677 + return cpu_pgd[cpu][type];
17678 +}
17679 +#endif
17680 +
17681 #include <linux/mm_types.h>
17682 #include <linux/mmdebug.h>
17683 #include <linux/log2.h>
17684 @@ -445,20 +520,10 @@ static inline int pte_same(pte_t a, pte_t b)
17685 return a.pte == b.pte;
17686 }
17687
17688 -static inline int pteval_present(pteval_t pteval)
17689 -{
17690 - /*
17691 - * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
17692 - * way clearly states that the intent is that protnone and numa
17693 - * hinting ptes are considered present for the purposes of
17694 - * pagetable operations like zapping, protection changes, gup etc.
17695 - */
17696 - return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
17697 -}
17698 -
17699 static inline int pte_present(pte_t a)
17700 {
17701 - return pteval_present(pte_flags(a));
17702 + return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
17703 + _PAGE_NUMA);
17704 }
17705
17706 #define pte_accessible pte_accessible
17707 @@ -580,7 +645,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
17708 * Currently stuck as a macro due to indirect forward reference to
17709 * linux/mmzone.h's __section_mem_map_addr() definition:
17710 */
17711 -#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
17712 +#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
17713
17714 /* Find an entry in the second-level page table.. */
17715 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
17716 @@ -620,7 +685,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
17717 * Currently stuck as a macro due to indirect forward reference to
17718 * linux/mmzone.h's __section_mem_map_addr() definition:
17719 */
17720 -#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
17721 +#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
17722
17723 /* to find an entry in a page-table-directory. */
17724 static inline unsigned long pud_index(unsigned long address)
17725 @@ -635,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
17726
17727 static inline int pgd_bad(pgd_t pgd)
17728 {
17729 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
17730 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
17731 }
17732
17733 static inline int pgd_none(pgd_t pgd)
17734 @@ -658,7 +723,12 @@ static inline int pgd_none(pgd_t pgd)
17735 * pgd_offset() returns a (pgd_t *)
17736 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
17737 */
17738 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
17739 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
17740 +
17741 +#ifdef CONFIG_PAX_PER_CPU_PGD
17742 +#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
17743 +#endif
17744 +
17745 /*
17746 * a shortcut which implies the use of the kernel's pgd, instead
17747 * of a process's
17748 @@ -669,6 +739,23 @@ static inline int pgd_none(pgd_t pgd)
17749 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
17750 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
17751
17752 +#ifdef CONFIG_X86_32
17753 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
17754 +#else
17755 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
17756 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
17757 +
17758 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17759 +#ifdef __ASSEMBLY__
17760 +#define pax_user_shadow_base pax_user_shadow_base(%rip)
17761 +#else
17762 +extern unsigned long pax_user_shadow_base;
17763 +extern pgdval_t clone_pgd_mask;
17764 +#endif
17765 +#endif
17766 +
17767 +#endif
17768 +
17769 #ifndef __ASSEMBLY__
17770
17771 extern int direct_gbpages;
17772 @@ -835,11 +922,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
17773 * dst and src can be on the same page, but the range must not overlap,
17774 * and must not cross a page boundary.
17775 */
17776 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
17777 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
17778 {
17779 - memcpy(dst, src, count * sizeof(pgd_t));
17780 + pax_open_kernel();
17781 + while (count--)
17782 + *dst++ = *src++;
17783 + pax_close_kernel();
17784 }
17785
17786 +#ifdef CONFIG_PAX_PER_CPU_PGD
17787 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
17788 +#endif
17789 +
17790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17791 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
17792 +#else
17793 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
17794 +#endif
17795 +
17796 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
17797 static inline int page_level_shift(enum pg_level level)
17798 {
17799 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
17800 index 9ee3221..b979c6b 100644
17801 --- a/arch/x86/include/asm/pgtable_32.h
17802 +++ b/arch/x86/include/asm/pgtable_32.h
17803 @@ -25,9 +25,6 @@
17804 struct mm_struct;
17805 struct vm_area_struct;
17806
17807 -extern pgd_t swapper_pg_dir[1024];
17808 -extern pgd_t initial_page_table[1024];
17809 -
17810 static inline void pgtable_cache_init(void) { }
17811 static inline void check_pgt_cache(void) { }
17812 void paging_init(void);
17813 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17814 # include <asm/pgtable-2level.h>
17815 #endif
17816
17817 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
17818 +extern pgd_t initial_page_table[PTRS_PER_PGD];
17819 +#ifdef CONFIG_X86_PAE
17820 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
17821 +#endif
17822 +
17823 #if defined(CONFIG_HIGHPTE)
17824 #define pte_offset_map(dir, address) \
17825 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
17826 @@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17827 /* Clear a kernel PTE and flush it from the TLB */
17828 #define kpte_clear_flush(ptep, vaddr) \
17829 do { \
17830 + pax_open_kernel(); \
17831 pte_clear(&init_mm, (vaddr), (ptep)); \
17832 + pax_close_kernel(); \
17833 __flush_tlb_one((vaddr)); \
17834 } while (0)
17835
17836 #endif /* !__ASSEMBLY__ */
17837
17838 +#define HAVE_ARCH_UNMAPPED_AREA
17839 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17840 +
17841 /*
17842 * kern_addr_valid() is (1) for FLATMEM and (0) for
17843 * SPARSEMEM and DISCONTIGMEM
17844 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
17845 index ed5903b..c7fe163 100644
17846 --- a/arch/x86/include/asm/pgtable_32_types.h
17847 +++ b/arch/x86/include/asm/pgtable_32_types.h
17848 @@ -8,7 +8,7 @@
17849 */
17850 #ifdef CONFIG_X86_PAE
17851 # include <asm/pgtable-3level_types.h>
17852 -# define PMD_SIZE (1UL << PMD_SHIFT)
17853 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
17854 # define PMD_MASK (~(PMD_SIZE - 1))
17855 #else
17856 # include <asm/pgtable-2level_types.h>
17857 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
17858 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
17859 #endif
17860
17861 +#ifdef CONFIG_PAX_KERNEXEC
17862 +#ifndef __ASSEMBLY__
17863 +extern unsigned char MODULES_EXEC_VADDR[];
17864 +extern unsigned char MODULES_EXEC_END[];
17865 +#endif
17866 +#include <asm/boot.h>
17867 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
17868 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
17869 +#else
17870 +#define ktla_ktva(addr) (addr)
17871 +#define ktva_ktla(addr) (addr)
17872 +#endif
17873 +
17874 #define MODULES_VADDR VMALLOC_START
17875 #define MODULES_END VMALLOC_END
17876 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
17877 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
17878 index e22c1db..23a625a 100644
17879 --- a/arch/x86/include/asm/pgtable_64.h
17880 +++ b/arch/x86/include/asm/pgtable_64.h
17881 @@ -16,10 +16,14 @@
17882
17883 extern pud_t level3_kernel_pgt[512];
17884 extern pud_t level3_ident_pgt[512];
17885 +extern pud_t level3_vmalloc_start_pgt[512];
17886 +extern pud_t level3_vmalloc_end_pgt[512];
17887 +extern pud_t level3_vmemmap_pgt[512];
17888 +extern pud_t level2_vmemmap_pgt[512];
17889 extern pmd_t level2_kernel_pgt[512];
17890 extern pmd_t level2_fixmap_pgt[512];
17891 -extern pmd_t level2_ident_pgt[512];
17892 -extern pgd_t init_level4_pgt[];
17893 +extern pmd_t level2_ident_pgt[512*2];
17894 +extern pgd_t init_level4_pgt[512];
17895
17896 #define swapper_pg_dir init_level4_pgt
17897
17898 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17899
17900 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17901 {
17902 + pax_open_kernel();
17903 *pmdp = pmd;
17904 + pax_close_kernel();
17905 }
17906
17907 static inline void native_pmd_clear(pmd_t *pmd)
17908 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
17909
17910 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17911 {
17912 + pax_open_kernel();
17913 *pudp = pud;
17914 + pax_close_kernel();
17915 }
17916
17917 static inline void native_pud_clear(pud_t *pud)
17918 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
17919
17920 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
17921 {
17922 + pax_open_kernel();
17923 + *pgdp = pgd;
17924 + pax_close_kernel();
17925 +}
17926 +
17927 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17928 +{
17929 *pgdp = pgd;
17930 }
17931
17932 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
17933 index 2d88344..4679fc3 100644
17934 --- a/arch/x86/include/asm/pgtable_64_types.h
17935 +++ b/arch/x86/include/asm/pgtable_64_types.h
17936 @@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
17937 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
17938 #define MODULES_END _AC(0xffffffffff000000, UL)
17939 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
17940 +#define MODULES_EXEC_VADDR MODULES_VADDR
17941 +#define MODULES_EXEC_END MODULES_END
17942 +
17943 +#define ktla_ktva(addr) (addr)
17944 +#define ktva_ktla(addr) (addr)
17945
17946 #define EARLY_DYNAMIC_PAGE_TABLES 64
17947
17948 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
17949 index 840c127..a8f297b 100644
17950 --- a/arch/x86/include/asm/pgtable_types.h
17951 +++ b/arch/x86/include/asm/pgtable_types.h
17952 @@ -16,13 +16,12 @@
17953 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17954 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
17955 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17956 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
17957 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
17958 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
17959 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
17960 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
17961 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
17962 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
17963 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
17964 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
17965 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
17966 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
17967
17968 /* If _PAGE_BIT_PRESENT is clear, we use these: */
17969 @@ -40,7 +39,6 @@
17970 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
17971 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
17972 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
17973 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
17974 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
17975 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
17976 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
17977 @@ -87,8 +85,10 @@
17978
17979 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
17980 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
17981 -#else
17982 +#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
17983 #define _PAGE_NX (_AT(pteval_t, 0))
17984 +#else
17985 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
17986 #endif
17987
17988 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
17989 @@ -147,6 +147,9 @@
17990 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
17991 _PAGE_ACCESSED)
17992
17993 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
17994 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
17995 +
17996 #define __PAGE_KERNEL_EXEC \
17997 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
17998 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
17999 @@ -157,7 +160,7 @@
18000 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
18001 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
18002 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
18003 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18004 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18005 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18006 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
18007 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18008 @@ -219,8 +222,8 @@
18009 * bits are combined, this will alow user to access the high address mapped
18010 * VDSO in the presence of CONFIG_COMPAT_VDSO
18011 */
18012 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18013 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
18014 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18015 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18016 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18017 #endif
18018
18019 @@ -258,7 +261,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18020 {
18021 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18022 }
18023 +#endif
18024
18025 +#if PAGETABLE_LEVELS == 3
18026 +#include <asm-generic/pgtable-nopud.h>
18027 +#endif
18028 +
18029 +#if PAGETABLE_LEVELS == 2
18030 +#include <asm-generic/pgtable-nopmd.h>
18031 +#endif
18032 +
18033 +#ifndef __ASSEMBLY__
18034 #if PAGETABLE_LEVELS > 3
18035 typedef struct { pudval_t pud; } pud_t;
18036
18037 @@ -272,8 +285,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18038 return pud.pud;
18039 }
18040 #else
18041 -#include <asm-generic/pgtable-nopud.h>
18042 -
18043 static inline pudval_t native_pud_val(pud_t pud)
18044 {
18045 return native_pgd_val(pud.pgd);
18046 @@ -293,8 +304,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18047 return pmd.pmd;
18048 }
18049 #else
18050 -#include <asm-generic/pgtable-nopmd.h>
18051 -
18052 static inline pmdval_t native_pmd_val(pmd_t pmd)
18053 {
18054 return native_pgd_val(pmd.pud.pgd);
18055 @@ -334,7 +343,6 @@ typedef struct page *pgtable_t;
18056
18057 extern pteval_t __supported_pte_mask;
18058 extern void set_nx(void);
18059 -extern int nx_enabled;
18060
18061 #define pgprot_writecombine pgprot_writecombine
18062 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18063 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18064 index c8b0519..fd29e73 100644
18065 --- a/arch/x86/include/asm/preempt.h
18066 +++ b/arch/x86/include/asm/preempt.h
18067 @@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
18068 */
18069 static __always_inline bool __preempt_count_dec_and_test(void)
18070 {
18071 - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18072 + GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18073 }
18074
18075 /*
18076 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18077 index 7b034a4..4fe3e3f 100644
18078 --- a/arch/x86/include/asm/processor.h
18079 +++ b/arch/x86/include/asm/processor.h
18080 @@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18081 /* Index into per_cpu list: */
18082 u16 cpu_index;
18083 u32 microcode;
18084 -} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18085 +} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18086
18087 #define X86_VENDOR_INTEL 0
18088 #define X86_VENDOR_CYRIX 1
18089 @@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18090 : "memory");
18091 }
18092
18093 +/* invpcid (%rdx),%rax */
18094 +#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18095 +
18096 +#define INVPCID_SINGLE_ADDRESS 0UL
18097 +#define INVPCID_SINGLE_CONTEXT 1UL
18098 +#define INVPCID_ALL_GLOBAL 2UL
18099 +#define INVPCID_ALL_MONGLOBAL 3UL
18100 +
18101 +#define PCID_KERNEL 0UL
18102 +#define PCID_USER 1UL
18103 +#define PCID_NOFLUSH (1UL << 63)
18104 +
18105 static inline void load_cr3(pgd_t *pgdir)
18106 {
18107 - write_cr3(__pa(pgdir));
18108 + write_cr3(__pa(pgdir) | PCID_KERNEL);
18109 }
18110
18111 #ifdef CONFIG_X86_32
18112 @@ -283,7 +295,7 @@ struct tss_struct {
18113
18114 } ____cacheline_aligned;
18115
18116 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18117 +extern struct tss_struct init_tss[NR_CPUS];
18118
18119 /*
18120 * Save the original ist values for checking stack pointers during debugging
18121 @@ -453,6 +465,7 @@ struct thread_struct {
18122 unsigned short ds;
18123 unsigned short fsindex;
18124 unsigned short gsindex;
18125 + unsigned short ss;
18126 #endif
18127 #ifdef CONFIG_X86_32
18128 unsigned long ip;
18129 @@ -562,29 +575,8 @@ static inline void load_sp0(struct tss_struct *tss,
18130 extern unsigned long mmu_cr4_features;
18131 extern u32 *trampoline_cr4_features;
18132
18133 -static inline void set_in_cr4(unsigned long mask)
18134 -{
18135 - unsigned long cr4;
18136 -
18137 - mmu_cr4_features |= mask;
18138 - if (trampoline_cr4_features)
18139 - *trampoline_cr4_features = mmu_cr4_features;
18140 - cr4 = read_cr4();
18141 - cr4 |= mask;
18142 - write_cr4(cr4);
18143 -}
18144 -
18145 -static inline void clear_in_cr4(unsigned long mask)
18146 -{
18147 - unsigned long cr4;
18148 -
18149 - mmu_cr4_features &= ~mask;
18150 - if (trampoline_cr4_features)
18151 - *trampoline_cr4_features = mmu_cr4_features;
18152 - cr4 = read_cr4();
18153 - cr4 &= ~mask;
18154 - write_cr4(cr4);
18155 -}
18156 +extern void set_in_cr4(unsigned long mask);
18157 +extern void clear_in_cr4(unsigned long mask);
18158
18159 typedef struct {
18160 unsigned long seg;
18161 @@ -833,11 +825,18 @@ static inline void spin_lock_prefetch(const void *x)
18162 */
18163 #define TASK_SIZE PAGE_OFFSET
18164 #define TASK_SIZE_MAX TASK_SIZE
18165 +
18166 +#ifdef CONFIG_PAX_SEGMEXEC
18167 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18168 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18169 +#else
18170 #define STACK_TOP TASK_SIZE
18171 -#define STACK_TOP_MAX STACK_TOP
18172 +#endif
18173 +
18174 +#define STACK_TOP_MAX TASK_SIZE
18175
18176 #define INIT_THREAD { \
18177 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
18178 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18179 .vm86_info = NULL, \
18180 .sysenter_cs = __KERNEL_CS, \
18181 .io_bitmap_ptr = NULL, \
18182 @@ -851,7 +850,7 @@ static inline void spin_lock_prefetch(const void *x)
18183 */
18184 #define INIT_TSS { \
18185 .x86_tss = { \
18186 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
18187 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18188 .ss0 = __KERNEL_DS, \
18189 .ss1 = __KERNEL_CS, \
18190 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18191 @@ -862,11 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
18192 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18193
18194 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18195 -#define KSTK_TOP(info) \
18196 -({ \
18197 - unsigned long *__ptr = (unsigned long *)(info); \
18198 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18199 -})
18200 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18201
18202 /*
18203 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18204 @@ -881,7 +876,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18205 #define task_pt_regs(task) \
18206 ({ \
18207 struct pt_regs *__regs__; \
18208 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18209 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18210 __regs__ - 1; \
18211 })
18212
18213 @@ -891,13 +886,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18214 /*
18215 * User space process size. 47bits minus one guard page.
18216 */
18217 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18218 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18219
18220 /* This decides where the kernel will search for a free chunk of vm
18221 * space during mmap's.
18222 */
18223 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18224 - 0xc0000000 : 0xFFFFe000)
18225 + 0xc0000000 : 0xFFFFf000)
18226
18227 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18228 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18229 @@ -908,11 +903,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18230 #define STACK_TOP_MAX TASK_SIZE_MAX
18231
18232 #define INIT_THREAD { \
18233 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18234 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18235 }
18236
18237 #define INIT_TSS { \
18238 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18239 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18240 }
18241
18242 /*
18243 @@ -940,6 +935,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18244 */
18245 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18246
18247 +#ifdef CONFIG_PAX_SEGMEXEC
18248 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18249 +#endif
18250 +
18251 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18252
18253 /* Get/set a process' ability to use the timestamp counter instruction */
18254 @@ -966,7 +965,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18255 return 0;
18256 }
18257
18258 -extern unsigned long arch_align_stack(unsigned long sp);
18259 +#define arch_align_stack(x) ((x) & ~0xfUL)
18260 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18261
18262 void default_idle(void);
18263 @@ -976,6 +975,6 @@ bool xen_set_default_idle(void);
18264 #define xen_set_default_idle 0
18265 #endif
18266
18267 -void stop_this_cpu(void *dummy);
18268 +void stop_this_cpu(void *dummy) __noreturn;
18269 void df_debug(struct pt_regs *regs, long error_code);
18270 #endif /* _ASM_X86_PROCESSOR_H */
18271 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18272 index 942a086..6c26446 100644
18273 --- a/arch/x86/include/asm/ptrace.h
18274 +++ b/arch/x86/include/asm/ptrace.h
18275 @@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18276 }
18277
18278 /*
18279 - * user_mode_vm(regs) determines whether a register set came from user mode.
18280 + * user_mode(regs) determines whether a register set came from user mode.
18281 * This is true if V8086 mode was enabled OR if the register set was from
18282 * protected mode with RPL-3 CS value. This tricky test checks that with
18283 * one comparison. Many places in the kernel can bypass this full check
18284 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18285 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18286 + * be used.
18287 */
18288 -static inline int user_mode(struct pt_regs *regs)
18289 +static inline int user_mode_novm(struct pt_regs *regs)
18290 {
18291 #ifdef CONFIG_X86_32
18292 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18293 #else
18294 - return !!(regs->cs & 3);
18295 + return !!(regs->cs & SEGMENT_RPL_MASK);
18296 #endif
18297 }
18298
18299 -static inline int user_mode_vm(struct pt_regs *regs)
18300 +static inline int user_mode(struct pt_regs *regs)
18301 {
18302 #ifdef CONFIG_X86_32
18303 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18304 USER_RPL;
18305 #else
18306 - return user_mode(regs);
18307 + return user_mode_novm(regs);
18308 #endif
18309 }
18310
18311 @@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18312 #ifdef CONFIG_X86_64
18313 static inline bool user_64bit_mode(struct pt_regs *regs)
18314 {
18315 + unsigned long cs = regs->cs & 0xffff;
18316 #ifndef CONFIG_PARAVIRT
18317 /*
18318 * On non-paravirt systems, this is the only long mode CPL 3
18319 * selector. We do not allow long mode selectors in the LDT.
18320 */
18321 - return regs->cs == __USER_CS;
18322 + return cs == __USER_CS;
18323 #else
18324 /* Headers are too twisted for this to go in paravirt.h. */
18325 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18326 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18327 #endif
18328 }
18329
18330 @@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18331 * Traps from the kernel do not save sp and ss.
18332 * Use the helper function to retrieve sp.
18333 */
18334 - if (offset == offsetof(struct pt_regs, sp) &&
18335 - regs->cs == __KERNEL_CS)
18336 - return kernel_stack_pointer(regs);
18337 + if (offset == offsetof(struct pt_regs, sp)) {
18338 + unsigned long cs = regs->cs & 0xffff;
18339 + if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18340 + return kernel_stack_pointer(regs);
18341 + }
18342 #endif
18343 return *(unsigned long *)((unsigned long)regs + offset);
18344 }
18345 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18346 index 9c6b890..5305f53 100644
18347 --- a/arch/x86/include/asm/realmode.h
18348 +++ b/arch/x86/include/asm/realmode.h
18349 @@ -22,16 +22,14 @@ struct real_mode_header {
18350 #endif
18351 /* APM/BIOS reboot */
18352 u32 machine_real_restart_asm;
18353 -#ifdef CONFIG_X86_64
18354 u32 machine_real_restart_seg;
18355 -#endif
18356 };
18357
18358 /* This must match data at trampoline_32/64.S */
18359 struct trampoline_header {
18360 #ifdef CONFIG_X86_32
18361 u32 start;
18362 - u16 gdt_pad;
18363 + u16 boot_cs;
18364 u16 gdt_limit;
18365 u32 gdt_base;
18366 #else
18367 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18368 index a82c4f1..ac45053 100644
18369 --- a/arch/x86/include/asm/reboot.h
18370 +++ b/arch/x86/include/asm/reboot.h
18371 @@ -6,13 +6,13 @@
18372 struct pt_regs;
18373
18374 struct machine_ops {
18375 - void (*restart)(char *cmd);
18376 - void (*halt)(void);
18377 - void (*power_off)(void);
18378 + void (* __noreturn restart)(char *cmd);
18379 + void (* __noreturn halt)(void);
18380 + void (* __noreturn power_off)(void);
18381 void (*shutdown)(void);
18382 void (*crash_shutdown)(struct pt_regs *);
18383 - void (*emergency_restart)(void);
18384 -};
18385 + void (* __noreturn emergency_restart)(void);
18386 +} __no_const;
18387
18388 extern struct machine_ops machine_ops;
18389
18390 diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18391 index 8f7866a..e442f20 100644
18392 --- a/arch/x86/include/asm/rmwcc.h
18393 +++ b/arch/x86/include/asm/rmwcc.h
18394 @@ -3,7 +3,34 @@
18395
18396 #ifdef CC_HAVE_ASM_GOTO
18397
18398 -#define __GEN_RMWcc(fullop, var, cc, ...) \
18399 +#ifdef CONFIG_PAX_REFCOUNT
18400 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18401 +do { \
18402 + asm_volatile_goto (fullop \
18403 + ";jno 0f\n" \
18404 + fullantiop \
18405 + ";int $4\n0:\n" \
18406 + _ASM_EXTABLE(0b, 0b) \
18407 + ";j" cc " %l[cc_label]" \
18408 + : : "m" (var), ## __VA_ARGS__ \
18409 + : "memory" : cc_label); \
18410 + return 0; \
18411 +cc_label: \
18412 + return 1; \
18413 +} while (0)
18414 +#else
18415 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18416 +do { \
18417 + asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18418 + : : "m" (var), ## __VA_ARGS__ \
18419 + : "memory" : cc_label); \
18420 + return 0; \
18421 +cc_label: \
18422 + return 1; \
18423 +} while (0)
18424 +#endif
18425 +
18426 +#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18427 do { \
18428 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18429 : : "m" (var), ## __VA_ARGS__ \
18430 @@ -13,15 +40,46 @@ cc_label: \
18431 return 1; \
18432 } while (0)
18433
18434 -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18435 - __GEN_RMWcc(op " " arg0, var, cc)
18436 +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18437 + __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18438
18439 -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18440 - __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18441 +#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18442 + __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18443 +
18444 +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18445 + __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18446 +
18447 +#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18448 + __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18449
18450 #else /* !CC_HAVE_ASM_GOTO */
18451
18452 -#define __GEN_RMWcc(fullop, var, cc, ...) \
18453 +#ifdef CONFIG_PAX_REFCOUNT
18454 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18455 +do { \
18456 + char c; \
18457 + asm volatile (fullop \
18458 + ";jno 0f\n" \
18459 + fullantiop \
18460 + ";int $4\n0:\n" \
18461 + _ASM_EXTABLE(0b, 0b) \
18462 + "; set" cc " %1" \
18463 + : "+m" (var), "=qm" (c) \
18464 + : __VA_ARGS__ : "memory"); \
18465 + return c != 0; \
18466 +} while (0)
18467 +#else
18468 +#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18469 +do { \
18470 + char c; \
18471 + asm volatile (fullop "; set" cc " %1" \
18472 + : "+m" (var), "=qm" (c) \
18473 + : __VA_ARGS__ : "memory"); \
18474 + return c != 0; \
18475 +} while (0)
18476 +#endif
18477 +
18478 +#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18479 do { \
18480 char c; \
18481 asm volatile (fullop "; set" cc " %1" \
18482 @@ -30,11 +88,17 @@ do { \
18483 return c != 0; \
18484 } while (0)
18485
18486 -#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18487 - __GEN_RMWcc(op " " arg0, var, cc)
18488 +#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18489 + __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18490 +
18491 +#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18492 + __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18493 +
18494 +#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18495 + __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18496
18497 -#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18498 - __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18499 +#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18500 + __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18501
18502 #endif /* CC_HAVE_ASM_GOTO */
18503
18504 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18505 index cad82c9..2e5c5c1 100644
18506 --- a/arch/x86/include/asm/rwsem.h
18507 +++ b/arch/x86/include/asm/rwsem.h
18508 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18509 {
18510 asm volatile("# beginning down_read\n\t"
18511 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18512 +
18513 +#ifdef CONFIG_PAX_REFCOUNT
18514 + "jno 0f\n"
18515 + LOCK_PREFIX _ASM_DEC "(%1)\n"
18516 + "int $4\n0:\n"
18517 + _ASM_EXTABLE(0b, 0b)
18518 +#endif
18519 +
18520 /* adds 0x00000001 */
18521 " jns 1f\n"
18522 " call call_rwsem_down_read_failed\n"
18523 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18524 "1:\n\t"
18525 " mov %1,%2\n\t"
18526 " add %3,%2\n\t"
18527 +
18528 +#ifdef CONFIG_PAX_REFCOUNT
18529 + "jno 0f\n"
18530 + "sub %3,%2\n"
18531 + "int $4\n0:\n"
18532 + _ASM_EXTABLE(0b, 0b)
18533 +#endif
18534 +
18535 " jle 2f\n\t"
18536 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18537 " jnz 1b\n\t"
18538 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18539 long tmp;
18540 asm volatile("# beginning down_write\n\t"
18541 LOCK_PREFIX " xadd %1,(%2)\n\t"
18542 +
18543 +#ifdef CONFIG_PAX_REFCOUNT
18544 + "jno 0f\n"
18545 + "mov %1,(%2)\n"
18546 + "int $4\n0:\n"
18547 + _ASM_EXTABLE(0b, 0b)
18548 +#endif
18549 +
18550 /* adds 0xffff0001, returns the old value */
18551 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18552 /* was the active mask 0 before? */
18553 @@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18554 long tmp;
18555 asm volatile("# beginning __up_read\n\t"
18556 LOCK_PREFIX " xadd %1,(%2)\n\t"
18557 +
18558 +#ifdef CONFIG_PAX_REFCOUNT
18559 + "jno 0f\n"
18560 + "mov %1,(%2)\n"
18561 + "int $4\n0:\n"
18562 + _ASM_EXTABLE(0b, 0b)
18563 +#endif
18564 +
18565 /* subtracts 1, returns the old value */
18566 " jns 1f\n\t"
18567 " call call_rwsem_wake\n" /* expects old value in %edx */
18568 @@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18569 long tmp;
18570 asm volatile("# beginning __up_write\n\t"
18571 LOCK_PREFIX " xadd %1,(%2)\n\t"
18572 +
18573 +#ifdef CONFIG_PAX_REFCOUNT
18574 + "jno 0f\n"
18575 + "mov %1,(%2)\n"
18576 + "int $4\n0:\n"
18577 + _ASM_EXTABLE(0b, 0b)
18578 +#endif
18579 +
18580 /* subtracts 0xffff0001, returns the old value */
18581 " jns 1f\n\t"
18582 " call call_rwsem_wake\n" /* expects old value in %edx */
18583 @@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18584 {
18585 asm volatile("# beginning __downgrade_write\n\t"
18586 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18587 +
18588 +#ifdef CONFIG_PAX_REFCOUNT
18589 + "jno 0f\n"
18590 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18591 + "int $4\n0:\n"
18592 + _ASM_EXTABLE(0b, 0b)
18593 +#endif
18594 +
18595 /*
18596 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18597 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18598 @@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18599 */
18600 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18601 {
18602 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18603 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18604 +
18605 +#ifdef CONFIG_PAX_REFCOUNT
18606 + "jno 0f\n"
18607 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
18608 + "int $4\n0:\n"
18609 + _ASM_EXTABLE(0b, 0b)
18610 +#endif
18611 +
18612 : "+m" (sem->count)
18613 : "er" (delta));
18614 }
18615 @@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18616 */
18617 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18618 {
18619 - return delta + xadd(&sem->count, delta);
18620 + return delta + xadd_check_overflow(&sem->count, delta);
18621 }
18622
18623 #endif /* __KERNEL__ */
18624 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18625 index 6f1c3a8..7744f19 100644
18626 --- a/arch/x86/include/asm/segment.h
18627 +++ b/arch/x86/include/asm/segment.h
18628 @@ -64,10 +64,15 @@
18629 * 26 - ESPFIX small SS
18630 * 27 - per-cpu [ offset to per-cpu data area ]
18631 * 28 - stack_canary-20 [ for stack protector ]
18632 - * 29 - unused
18633 - * 30 - unused
18634 + * 29 - PCI BIOS CS
18635 + * 30 - PCI BIOS DS
18636 * 31 - TSS for double fault handler
18637 */
18638 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18639 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18640 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18641 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18642 +
18643 #define GDT_ENTRY_TLS_MIN 6
18644 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18645
18646 @@ -79,6 +84,8 @@
18647
18648 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18649
18650 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18651 +
18652 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18653
18654 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18655 @@ -104,6 +111,12 @@
18656 #define __KERNEL_STACK_CANARY 0
18657 #endif
18658
18659 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18660 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18661 +
18662 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
18663 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
18664 +
18665 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
18666
18667 /*
18668 @@ -141,7 +154,7 @@
18669 */
18670
18671 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
18672 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
18673 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
18674
18675
18676 #else
18677 @@ -165,6 +178,8 @@
18678 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
18679 #define __USER32_DS __USER_DS
18680
18681 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
18682 +
18683 #define GDT_ENTRY_TSS 8 /* needs two entries */
18684 #define GDT_ENTRY_LDT 10 /* needs two entries */
18685 #define GDT_ENTRY_TLS_MIN 12
18686 @@ -173,6 +188,8 @@
18687 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
18688 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
18689
18690 +#define GDT_ENTRY_UDEREF_KERNEL_DS 16
18691 +
18692 /* TLS indexes for 64bit - hardcoded in arch_prctl */
18693 #define FS_TLS 0
18694 #define GS_TLS 1
18695 @@ -180,12 +197,14 @@
18696 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
18697 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
18698
18699 -#define GDT_ENTRIES 16
18700 +#define GDT_ENTRIES 17
18701
18702 #endif
18703
18704 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
18705 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
18706 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
18707 +#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
18708 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
18709 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
18710 #ifndef CONFIG_PARAVIRT
18711 @@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
18712 {
18713 unsigned long __limit;
18714 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
18715 - return __limit + 1;
18716 + return __limit;
18717 }
18718
18719 #endif /* !__ASSEMBLY__ */
18720 diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
18721 index 8d3120f..352b440 100644
18722 --- a/arch/x86/include/asm/smap.h
18723 +++ b/arch/x86/include/asm/smap.h
18724 @@ -25,11 +25,40 @@
18725
18726 #include <asm/alternative-asm.h>
18727
18728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18729 +#define ASM_PAX_OPEN_USERLAND \
18730 + 661: jmp 663f; \
18731 + .pushsection .altinstr_replacement, "a" ; \
18732 + 662: pushq %rax; nop; \
18733 + .popsection ; \
18734 + .pushsection .altinstructions, "a" ; \
18735 + altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18736 + .popsection ; \
18737 + call __pax_open_userland; \
18738 + popq %rax; \
18739 + 663:
18740 +
18741 +#define ASM_PAX_CLOSE_USERLAND \
18742 + 661: jmp 663f; \
18743 + .pushsection .altinstr_replacement, "a" ; \
18744 + 662: pushq %rax; nop; \
18745 + .popsection; \
18746 + .pushsection .altinstructions, "a" ; \
18747 + altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18748 + .popsection; \
18749 + call __pax_close_userland; \
18750 + popq %rax; \
18751 + 663:
18752 +#else
18753 +#define ASM_PAX_OPEN_USERLAND
18754 +#define ASM_PAX_CLOSE_USERLAND
18755 +#endif
18756 +
18757 #ifdef CONFIG_X86_SMAP
18758
18759 #define ASM_CLAC \
18760 661: ASM_NOP3 ; \
18761 - .pushsection .altinstr_replacement, "ax" ; \
18762 + .pushsection .altinstr_replacement, "a" ; \
18763 662: __ASM_CLAC ; \
18764 .popsection ; \
18765 .pushsection .altinstructions, "a" ; \
18766 @@ -38,7 +67,7 @@
18767
18768 #define ASM_STAC \
18769 661: ASM_NOP3 ; \
18770 - .pushsection .altinstr_replacement, "ax" ; \
18771 + .pushsection .altinstr_replacement, "a" ; \
18772 662: __ASM_STAC ; \
18773 .popsection ; \
18774 .pushsection .altinstructions, "a" ; \
18775 @@ -56,6 +85,37 @@
18776
18777 #include <asm/alternative.h>
18778
18779 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
18780 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
18781 +
18782 +extern void __pax_open_userland(void);
18783 +static __always_inline unsigned long pax_open_userland(void)
18784 +{
18785 +
18786 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18787 + asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
18788 + :
18789 + : [open] "i" (__pax_open_userland)
18790 + : "memory", "rax");
18791 +#endif
18792 +
18793 + return 0;
18794 +}
18795 +
18796 +extern void __pax_close_userland(void);
18797 +static __always_inline unsigned long pax_close_userland(void)
18798 +{
18799 +
18800 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18801 + asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
18802 + :
18803 + : [close] "i" (__pax_close_userland)
18804 + : "memory", "rax");
18805 +#endif
18806 +
18807 + return 0;
18808 +}
18809 +
18810 #ifdef CONFIG_X86_SMAP
18811
18812 static __always_inline void clac(void)
18813 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
18814 index 4137890..03fa172 100644
18815 --- a/arch/x86/include/asm/smp.h
18816 +++ b/arch/x86/include/asm/smp.h
18817 @@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
18818 /* cpus sharing the last level cache: */
18819 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
18820 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
18821 -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
18822 +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
18823
18824 static inline struct cpumask *cpu_sibling_mask(int cpu)
18825 {
18826 @@ -79,7 +79,7 @@ struct smp_ops {
18827
18828 void (*send_call_func_ipi)(const struct cpumask *mask);
18829 void (*send_call_func_single_ipi)(int cpu);
18830 -};
18831 +} __no_const;
18832
18833 /* Globals due to paravirt */
18834 extern void set_cpu_sibling_map(int cpu);
18835 @@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
18836 extern int safe_smp_processor_id(void);
18837
18838 #elif defined(CONFIG_X86_64_SMP)
18839 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18840 -
18841 -#define stack_smp_processor_id() \
18842 -({ \
18843 - struct thread_info *ti; \
18844 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
18845 - ti->cpu; \
18846 -})
18847 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18848 +#define stack_smp_processor_id() raw_smp_processor_id()
18849 #define safe_smp_processor_id() smp_processor_id()
18850
18851 #endif
18852 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
18853 index bf156de..1a782ab 100644
18854 --- a/arch/x86/include/asm/spinlock.h
18855 +++ b/arch/x86/include/asm/spinlock.h
18856 @@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
18857 static inline void arch_read_lock(arch_rwlock_t *rw)
18858 {
18859 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
18860 +
18861 +#ifdef CONFIG_PAX_REFCOUNT
18862 + "jno 0f\n"
18863 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
18864 + "int $4\n0:\n"
18865 + _ASM_EXTABLE(0b, 0b)
18866 +#endif
18867 +
18868 "jns 1f\n"
18869 "call __read_lock_failed\n\t"
18870 "1:\n"
18871 @@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
18872 static inline void arch_write_lock(arch_rwlock_t *rw)
18873 {
18874 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
18875 +
18876 +#ifdef CONFIG_PAX_REFCOUNT
18877 + "jno 0f\n"
18878 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
18879 + "int $4\n0:\n"
18880 + _ASM_EXTABLE(0b, 0b)
18881 +#endif
18882 +
18883 "jz 1f\n"
18884 "call __write_lock_failed\n\t"
18885 "1:\n"
18886 @@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
18887
18888 static inline void arch_read_unlock(arch_rwlock_t *rw)
18889 {
18890 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
18891 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
18892 +
18893 +#ifdef CONFIG_PAX_REFCOUNT
18894 + "jno 0f\n"
18895 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
18896 + "int $4\n0:\n"
18897 + _ASM_EXTABLE(0b, 0b)
18898 +#endif
18899 +
18900 :"+m" (rw->lock) : : "memory");
18901 }
18902
18903 static inline void arch_write_unlock(arch_rwlock_t *rw)
18904 {
18905 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
18906 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
18907 +
18908 +#ifdef CONFIG_PAX_REFCOUNT
18909 + "jno 0f\n"
18910 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
18911 + "int $4\n0:\n"
18912 + _ASM_EXTABLE(0b, 0b)
18913 +#endif
18914 +
18915 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
18916 }
18917
18918 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
18919 index 6a99859..03cb807 100644
18920 --- a/arch/x86/include/asm/stackprotector.h
18921 +++ b/arch/x86/include/asm/stackprotector.h
18922 @@ -47,7 +47,7 @@
18923 * head_32 for boot CPU and setup_per_cpu_areas() for others.
18924 */
18925 #define GDT_STACK_CANARY_INIT \
18926 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
18927 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
18928
18929 /*
18930 * Initialize the stackprotector canary value.
18931 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
18932
18933 static inline void load_stack_canary_segment(void)
18934 {
18935 -#ifdef CONFIG_X86_32
18936 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18937 asm volatile ("mov %0, %%gs" : : "r" (0));
18938 #endif
18939 }
18940 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
18941 index 70bbe39..4ae2bd4 100644
18942 --- a/arch/x86/include/asm/stacktrace.h
18943 +++ b/arch/x86/include/asm/stacktrace.h
18944 @@ -11,28 +11,20 @@
18945
18946 extern int kstack_depth_to_print;
18947
18948 -struct thread_info;
18949 +struct task_struct;
18950 struct stacktrace_ops;
18951
18952 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
18953 - unsigned long *stack,
18954 - unsigned long bp,
18955 - const struct stacktrace_ops *ops,
18956 - void *data,
18957 - unsigned long *end,
18958 - int *graph);
18959 +typedef unsigned long walk_stack_t(struct task_struct *task,
18960 + void *stack_start,
18961 + unsigned long *stack,
18962 + unsigned long bp,
18963 + const struct stacktrace_ops *ops,
18964 + void *data,
18965 + unsigned long *end,
18966 + int *graph);
18967
18968 -extern unsigned long
18969 -print_context_stack(struct thread_info *tinfo,
18970 - unsigned long *stack, unsigned long bp,
18971 - const struct stacktrace_ops *ops, void *data,
18972 - unsigned long *end, int *graph);
18973 -
18974 -extern unsigned long
18975 -print_context_stack_bp(struct thread_info *tinfo,
18976 - unsigned long *stack, unsigned long bp,
18977 - const struct stacktrace_ops *ops, void *data,
18978 - unsigned long *end, int *graph);
18979 +extern walk_stack_t print_context_stack;
18980 +extern walk_stack_t print_context_stack_bp;
18981
18982 /* Generic stack tracer with callbacks */
18983
18984 @@ -40,7 +32,7 @@ struct stacktrace_ops {
18985 void (*address)(void *data, unsigned long address, int reliable);
18986 /* On negative return stop dumping */
18987 int (*stack)(void *data, char *name);
18988 - walk_stack_t walk_stack;
18989 + walk_stack_t *walk_stack;
18990 };
18991
18992 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18993 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
18994 index d7f3b3b..3cc39f1 100644
18995 --- a/arch/x86/include/asm/switch_to.h
18996 +++ b/arch/x86/include/asm/switch_to.h
18997 @@ -108,7 +108,7 @@ do { \
18998 "call __switch_to\n\t" \
18999 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19000 __switch_canary \
19001 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
19002 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19003 "movq %%rax,%%rdi\n\t" \
19004 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19005 "jnz ret_from_fork\n\t" \
19006 @@ -119,7 +119,7 @@ do { \
19007 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19008 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19009 [_tif_fork] "i" (_TIF_FORK), \
19010 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
19011 + [thread_info] "m" (current_tinfo), \
19012 [current_task] "m" (current_task) \
19013 __switch_canary_iparam \
19014 : "memory", "cc" __EXTRA_CLOBBER)
19015 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19016 index 3ba3de4..6c113b2 100644
19017 --- a/arch/x86/include/asm/thread_info.h
19018 +++ b/arch/x86/include/asm/thread_info.h
19019 @@ -10,6 +10,7 @@
19020 #include <linux/compiler.h>
19021 #include <asm/page.h>
19022 #include <asm/types.h>
19023 +#include <asm/percpu.h>
19024
19025 /*
19026 * low level task data that entry.S needs immediate access to
19027 @@ -23,7 +24,6 @@ struct exec_domain;
19028 #include <linux/atomic.h>
19029
19030 struct thread_info {
19031 - struct task_struct *task; /* main task structure */
19032 struct exec_domain *exec_domain; /* execution domain */
19033 __u32 flags; /* low level flags */
19034 __u32 status; /* thread synchronous flags */
19035 @@ -32,19 +32,13 @@ struct thread_info {
19036 mm_segment_t addr_limit;
19037 struct restart_block restart_block;
19038 void __user *sysenter_return;
19039 -#ifdef CONFIG_X86_32
19040 - unsigned long previous_esp; /* ESP of the previous stack in
19041 - case of nested (IRQ) stacks
19042 - */
19043 - __u8 supervisor_stack[0];
19044 -#endif
19045 + unsigned long lowest_stack;
19046 unsigned int sig_on_uaccess_error:1;
19047 unsigned int uaccess_err:1; /* uaccess failed */
19048 };
19049
19050 -#define INIT_THREAD_INFO(tsk) \
19051 +#define INIT_THREAD_INFO \
19052 { \
19053 - .task = &tsk, \
19054 .exec_domain = &default_exec_domain, \
19055 .flags = 0, \
19056 .cpu = 0, \
19057 @@ -55,7 +49,7 @@ struct thread_info {
19058 }, \
19059 }
19060
19061 -#define init_thread_info (init_thread_union.thread_info)
19062 +#define init_thread_info (init_thread_union.stack)
19063 #define init_stack (init_thread_union.stack)
19064
19065 #else /* !__ASSEMBLY__ */
19066 @@ -95,6 +89,7 @@ struct thread_info {
19067 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19068 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19069 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19070 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19071
19072 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19073 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19074 @@ -118,17 +113,18 @@ struct thread_info {
19075 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19076 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19077 #define _TIF_X32 (1 << TIF_X32)
19078 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19079
19080 /* work to do in syscall_trace_enter() */
19081 #define _TIF_WORK_SYSCALL_ENTRY \
19082 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19083 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19084 - _TIF_NOHZ)
19085 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
19086
19087 /* work to do in syscall_trace_leave() */
19088 #define _TIF_WORK_SYSCALL_EXIT \
19089 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19090 - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19091 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19092
19093 /* work to do on interrupt/exception return */
19094 #define _TIF_WORK_MASK \
19095 @@ -139,7 +135,7 @@ struct thread_info {
19096 /* work to do on any return to user space */
19097 #define _TIF_ALLWORK_MASK \
19098 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19099 - _TIF_NOHZ)
19100 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
19101
19102 /* Only used for 64 bit */
19103 #define _TIF_DO_NOTIFY_MASK \
19104 @@ -153,45 +149,40 @@ struct thread_info {
19105 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
19106 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19107
19108 -#ifdef CONFIG_X86_32
19109 -
19110 -#define STACK_WARN (THREAD_SIZE/8)
19111 -/*
19112 - * macros/functions for gaining access to the thread information structure
19113 - *
19114 - * preempt_count needs to be 1 initially, until the scheduler is functional.
19115 - */
19116 -#ifndef __ASSEMBLY__
19117 -
19118 -
19119 -/* how to get the current stack pointer from C */
19120 -register unsigned long current_stack_pointer asm("esp") __used;
19121 -
19122 -/* how to get the thread information struct from C */
19123 -static inline struct thread_info *current_thread_info(void)
19124 -{
19125 - return (struct thread_info *)
19126 - (current_stack_pointer & ~(THREAD_SIZE - 1));
19127 -}
19128 -
19129 -#else /* !__ASSEMBLY__ */
19130 -
19131 +#ifdef __ASSEMBLY__
19132 /* how to get the thread information struct from ASM */
19133 #define GET_THREAD_INFO(reg) \
19134 - movl $-THREAD_SIZE, reg; \
19135 - andl %esp, reg
19136 + mov PER_CPU_VAR(current_tinfo), reg
19137
19138 /* use this one if reg already contains %esp */
19139 -#define GET_THREAD_INFO_WITH_ESP(reg) \
19140 - andl $-THREAD_SIZE, reg
19141 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
19142 +#else
19143 +/* how to get the thread information struct from C */
19144 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19145 +
19146 +static __always_inline struct thread_info *current_thread_info(void)
19147 +{
19148 + return this_cpu_read_stable(current_tinfo);
19149 +}
19150 +#endif
19151 +
19152 +#ifdef CONFIG_X86_32
19153 +
19154 +#define STACK_WARN (THREAD_SIZE/8)
19155 +/*
19156 + * macros/functions for gaining access to the thread information structure
19157 + *
19158 + * preempt_count needs to be 1 initially, until the scheduler is functional.
19159 + */
19160 +#ifndef __ASSEMBLY__
19161 +
19162 +/* how to get the current stack pointer from C */
19163 +register unsigned long current_stack_pointer asm("esp") __used;
19164
19165 #endif
19166
19167 #else /* X86_32 */
19168
19169 -#include <asm/percpu.h>
19170 -#define KERNEL_STACK_OFFSET (5*8)
19171 -
19172 /*
19173 * macros/functions for gaining access to the thread information structure
19174 * preempt_count needs to be 1 initially, until the scheduler is functional.
19175 @@ -199,27 +190,8 @@ static inline struct thread_info *current_thread_info(void)
19176 #ifndef __ASSEMBLY__
19177 DECLARE_PER_CPU(unsigned long, kernel_stack);
19178
19179 -static inline struct thread_info *current_thread_info(void)
19180 -{
19181 - struct thread_info *ti;
19182 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
19183 - KERNEL_STACK_OFFSET - THREAD_SIZE);
19184 - return ti;
19185 -}
19186 -
19187 -#else /* !__ASSEMBLY__ */
19188 -
19189 -/* how to get the thread information struct from ASM */
19190 -#define GET_THREAD_INFO(reg) \
19191 - movq PER_CPU_VAR(kernel_stack),reg ; \
19192 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
19193 -
19194 -/*
19195 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19196 - * a certain register (to be used in assembler memory operands).
19197 - */
19198 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19199 -
19200 +/* how to get the current stack pointer from C */
19201 +register unsigned long current_stack_pointer asm("rsp") __used;
19202 #endif
19203
19204 #endif /* !X86_32 */
19205 @@ -278,5 +250,12 @@ static inline bool is_ia32_task(void)
19206 extern void arch_task_cache_init(void);
19207 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19208 extern void arch_release_task_struct(struct task_struct *tsk);
19209 +
19210 +#define __HAVE_THREAD_FUNCTIONS
19211 +#define task_thread_info(task) (&(task)->tinfo)
19212 +#define task_stack_page(task) ((task)->stack)
19213 +#define setup_thread_stack(p, org) do {} while (0)
19214 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19215 +
19216 #endif
19217 #endif /* _ASM_X86_THREAD_INFO_H */
19218 diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19219 index e6d90ba..f81f114 100644
19220 --- a/arch/x86/include/asm/tlbflush.h
19221 +++ b/arch/x86/include/asm/tlbflush.h
19222 @@ -17,18 +17,44 @@
19223
19224 static inline void __native_flush_tlb(void)
19225 {
19226 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
19227 + u64 descriptor[2];
19228 +
19229 + descriptor[0] = PCID_KERNEL;
19230 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
19231 + return;
19232 + }
19233 +
19234 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19235 + if (static_cpu_has(X86_FEATURE_PCID)) {
19236 + unsigned int cpu = raw_get_cpu();
19237 +
19238 + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19239 + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19240 + raw_put_cpu_no_resched();
19241 + return;
19242 + }
19243 +#endif
19244 +
19245 native_write_cr3(native_read_cr3());
19246 }
19247
19248 static inline void __native_flush_tlb_global_irq_disabled(void)
19249 {
19250 - unsigned long cr4;
19251 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
19252 + u64 descriptor[2];
19253
19254 - cr4 = native_read_cr4();
19255 - /* clear PGE */
19256 - native_write_cr4(cr4 & ~X86_CR4_PGE);
19257 - /* write old PGE again and flush TLBs */
19258 - native_write_cr4(cr4);
19259 + descriptor[0] = PCID_KERNEL;
19260 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19261 + } else {
19262 + unsigned long cr4;
19263 +
19264 + cr4 = native_read_cr4();
19265 + /* clear PGE */
19266 + native_write_cr4(cr4 & ~X86_CR4_PGE);
19267 + /* write old PGE again and flush TLBs */
19268 + native_write_cr4(cr4);
19269 + }
19270 }
19271
19272 static inline void __native_flush_tlb_global(void)
19273 @@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19274
19275 static inline void __native_flush_tlb_single(unsigned long addr)
19276 {
19277 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
19278 + u64 descriptor[2];
19279 +
19280 + descriptor[0] = PCID_KERNEL;
19281 + descriptor[1] = addr;
19282 +
19283 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19284 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19285 + if (addr < TASK_SIZE_MAX)
19286 + descriptor[1] += pax_user_shadow_base;
19287 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19288 + }
19289 +
19290 + descriptor[0] = PCID_USER;
19291 + descriptor[1] = addr;
19292 +#endif
19293 +
19294 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19295 + return;
19296 + }
19297 +
19298 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19299 + if (static_cpu_has(X86_FEATURE_PCID)) {
19300 + unsigned int cpu = raw_get_cpu();
19301 +
19302 + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19303 + asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19304 + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19305 + raw_put_cpu_no_resched();
19306 +
19307 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19308 + addr += pax_user_shadow_base;
19309 + }
19310 +#endif
19311 +
19312 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19313 }
19314
19315 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19316 index 8ec57c0..451bcfc 100644
19317 --- a/arch/x86/include/asm/uaccess.h
19318 +++ b/arch/x86/include/asm/uaccess.h
19319 @@ -7,6 +7,7 @@
19320 #include <linux/compiler.h>
19321 #include <linux/thread_info.h>
19322 #include <linux/string.h>
19323 +#include <linux/spinlock.h>
19324 #include <asm/asm.h>
19325 #include <asm/page.h>
19326 #include <asm/smap.h>
19327 @@ -29,7 +30,12 @@
19328
19329 #define get_ds() (KERNEL_DS)
19330 #define get_fs() (current_thread_info()->addr_limit)
19331 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19332 +void __set_fs(mm_segment_t x);
19333 +void set_fs(mm_segment_t x);
19334 +#else
19335 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19336 +#endif
19337
19338 #define segment_eq(a, b) ((a).seg == (b).seg)
19339
19340 @@ -77,8 +83,34 @@
19341 * checks that the pointer is in the user space range - after calling
19342 * this function, memory access functions may still return -EFAULT.
19343 */
19344 -#define access_ok(type, addr, size) \
19345 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
19346 +extern int _cond_resched(void);
19347 +#define access_ok_noprefault(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
19348 +#define access_ok(type, addr, size) \
19349 +({ \
19350 + long __size = size; \
19351 + unsigned long __addr = (unsigned long)addr; \
19352 + unsigned long __addr_ao = __addr & PAGE_MASK; \
19353 + unsigned long __end_ao = __addr + __size - 1; \
19354 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19355 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19356 + while(__addr_ao <= __end_ao) { \
19357 + char __c_ao; \
19358 + __addr_ao += PAGE_SIZE; \
19359 + if (__size > PAGE_SIZE) \
19360 + _cond_resched(); \
19361 + if (__get_user(__c_ao, (char __user *)__addr)) \
19362 + break; \
19363 + if (type != VERIFY_WRITE) { \
19364 + __addr = __addr_ao; \
19365 + continue; \
19366 + } \
19367 + if (__put_user(__c_ao, (char __user *)__addr)) \
19368 + break; \
19369 + __addr = __addr_ao; \
19370 + } \
19371 + } \
19372 + __ret_ao; \
19373 +})
19374
19375 /*
19376 * The exception table consists of pairs of addresses relative to the
19377 @@ -168,10 +200,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19378 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19379 __chk_user_ptr(ptr); \
19380 might_fault(); \
19381 + pax_open_userland(); \
19382 asm volatile("call __get_user_%P3" \
19383 : "=a" (__ret_gu), "=r" (__val_gu) \
19384 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19385 (x) = (__typeof__(*(ptr))) __val_gu; \
19386 + pax_close_userland(); \
19387 __ret_gu; \
19388 })
19389
19390 @@ -179,13 +213,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19391 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19392 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19393
19394 -
19395 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19396 +#define __copyuser_seg "gs;"
19397 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19398 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19399 +#else
19400 +#define __copyuser_seg
19401 +#define __COPYUSER_SET_ES
19402 +#define __COPYUSER_RESTORE_ES
19403 +#endif
19404
19405 #ifdef CONFIG_X86_32
19406 #define __put_user_asm_u64(x, addr, err, errret) \
19407 asm volatile(ASM_STAC "\n" \
19408 - "1: movl %%eax,0(%2)\n" \
19409 - "2: movl %%edx,4(%2)\n" \
19410 + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19411 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19412 "3: " ASM_CLAC "\n" \
19413 ".section .fixup,\"ax\"\n" \
19414 "4: movl %3,%0\n" \
19415 @@ -198,8 +240,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19416
19417 #define __put_user_asm_ex_u64(x, addr) \
19418 asm volatile(ASM_STAC "\n" \
19419 - "1: movl %%eax,0(%1)\n" \
19420 - "2: movl %%edx,4(%1)\n" \
19421 + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19422 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19423 "3: " ASM_CLAC "\n" \
19424 _ASM_EXTABLE_EX(1b, 2b) \
19425 _ASM_EXTABLE_EX(2b, 3b) \
19426 @@ -249,7 +291,8 @@ extern void __put_user_8(void);
19427 __typeof__(*(ptr)) __pu_val; \
19428 __chk_user_ptr(ptr); \
19429 might_fault(); \
19430 - __pu_val = x; \
19431 + __pu_val = (x); \
19432 + pax_open_userland(); \
19433 switch (sizeof(*(ptr))) { \
19434 case 1: \
19435 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19436 @@ -267,6 +310,7 @@ extern void __put_user_8(void);
19437 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19438 break; \
19439 } \
19440 + pax_close_userland(); \
19441 __ret_pu; \
19442 })
19443
19444 @@ -347,8 +391,10 @@ do { \
19445 } while (0)
19446
19447 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19448 +do { \
19449 + pax_open_userland(); \
19450 asm volatile(ASM_STAC "\n" \
19451 - "1: mov"itype" %2,%"rtype"1\n" \
19452 + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19453 "2: " ASM_CLAC "\n" \
19454 ".section .fixup,\"ax\"\n" \
19455 "3: mov %3,%0\n" \
19456 @@ -356,8 +402,10 @@ do { \
19457 " jmp 2b\n" \
19458 ".previous\n" \
19459 _ASM_EXTABLE(1b, 3b) \
19460 - : "=r" (err), ltype(x) \
19461 - : "m" (__m(addr)), "i" (errret), "0" (err))
19462 + : "=r" (err), ltype (x) \
19463 + : "m" (__m(addr)), "i" (errret), "0" (err)); \
19464 + pax_close_userland(); \
19465 +} while (0)
19466
19467 #define __get_user_size_ex(x, ptr, size) \
19468 do { \
19469 @@ -381,7 +429,7 @@ do { \
19470 } while (0)
19471
19472 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19473 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19474 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19475 "2:\n" \
19476 _ASM_EXTABLE_EX(1b, 2b) \
19477 : ltype(x) : "m" (__m(addr)))
19478 @@ -398,13 +446,24 @@ do { \
19479 int __gu_err; \
19480 unsigned long __gu_val; \
19481 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19482 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
19483 + (x) = (__typeof__(*(ptr)))__gu_val; \
19484 __gu_err; \
19485 })
19486
19487 /* FIXME: this hack is definitely wrong -AK */
19488 struct __large_struct { unsigned long buf[100]; };
19489 -#define __m(x) (*(struct __large_struct __user *)(x))
19490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19491 +#define ____m(x) \
19492 +({ \
19493 + unsigned long ____x = (unsigned long)(x); \
19494 + if (____x < pax_user_shadow_base) \
19495 + ____x += pax_user_shadow_base; \
19496 + (typeof(x))____x; \
19497 +})
19498 +#else
19499 +#define ____m(x) (x)
19500 +#endif
19501 +#define __m(x) (*(struct __large_struct __user *)____m(x))
19502
19503 /*
19504 * Tell gcc we read from memory instead of writing: this is because
19505 @@ -412,8 +471,10 @@ struct __large_struct { unsigned long buf[100]; };
19506 * aliasing issues.
19507 */
19508 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19509 +do { \
19510 + pax_open_userland(); \
19511 asm volatile(ASM_STAC "\n" \
19512 - "1: mov"itype" %"rtype"1,%2\n" \
19513 + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19514 "2: " ASM_CLAC "\n" \
19515 ".section .fixup,\"ax\"\n" \
19516 "3: mov %3,%0\n" \
19517 @@ -421,10 +482,12 @@ struct __large_struct { unsigned long buf[100]; };
19518 ".previous\n" \
19519 _ASM_EXTABLE(1b, 3b) \
19520 : "=r"(err) \
19521 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19522 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19523 + pax_close_userland(); \
19524 +} while (0)
19525
19526 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19527 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19528 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19529 "2:\n" \
19530 _ASM_EXTABLE_EX(1b, 2b) \
19531 : : ltype(x), "m" (__m(addr)))
19532 @@ -434,11 +497,13 @@ struct __large_struct { unsigned long buf[100]; };
19533 */
19534 #define uaccess_try do { \
19535 current_thread_info()->uaccess_err = 0; \
19536 + pax_open_userland(); \
19537 stac(); \
19538 barrier();
19539
19540 #define uaccess_catch(err) \
19541 clac(); \
19542 + pax_close_userland(); \
19543 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19544 } while (0)
19545
19546 @@ -463,8 +528,12 @@ struct __large_struct { unsigned long buf[100]; };
19547 * On error, the variable @x is set to zero.
19548 */
19549
19550 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19551 +#define __get_user(x, ptr) get_user((x), (ptr))
19552 +#else
19553 #define __get_user(x, ptr) \
19554 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19555 +#endif
19556
19557 /**
19558 * __put_user: - Write a simple value into user space, with less checking.
19559 @@ -486,8 +555,12 @@ struct __large_struct { unsigned long buf[100]; };
19560 * Returns zero on success, or -EFAULT on error.
19561 */
19562
19563 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19564 +#define __put_user(x, ptr) put_user((x), (ptr))
19565 +#else
19566 #define __put_user(x, ptr) \
19567 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19568 +#endif
19569
19570 #define __get_user_unaligned __get_user
19571 #define __put_user_unaligned __put_user
19572 @@ -505,7 +578,7 @@ struct __large_struct { unsigned long buf[100]; };
19573 #define get_user_ex(x, ptr) do { \
19574 unsigned long __gue_val; \
19575 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19576 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
19577 + (x) = (__typeof__(*(ptr)))__gue_val; \
19578 } while (0)
19579
19580 #define put_user_try uaccess_try
19581 @@ -536,17 +609,6 @@ extern struct movsl_mask {
19582
19583 #define ARCH_HAS_NOCACHE_UACCESS 1
19584
19585 -#ifdef CONFIG_X86_32
19586 -# include <asm/uaccess_32.h>
19587 -#else
19588 -# include <asm/uaccess_64.h>
19589 -#endif
19590 -
19591 -unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19592 - unsigned n);
19593 -unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19594 - unsigned n);
19595 -
19596 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19597 # define copy_user_diag __compiletime_error
19598 #else
19599 @@ -556,7 +618,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19600 extern void copy_user_diag("copy_from_user() buffer size is too small")
19601 copy_from_user_overflow(void);
19602 extern void copy_user_diag("copy_to_user() buffer size is too small")
19603 -copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19604 +copy_to_user_overflow(void);
19605
19606 #undef copy_user_diag
19607
19608 @@ -569,7 +631,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19609
19610 extern void
19611 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19612 -__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19613 +__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19614 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19615
19616 #else
19617 @@ -584,10 +646,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19618
19619 #endif
19620
19621 +#ifdef CONFIG_X86_32
19622 +# include <asm/uaccess_32.h>
19623 +#else
19624 +# include <asm/uaccess_64.h>
19625 +#endif
19626 +
19627 static inline unsigned long __must_check
19628 copy_from_user(void *to, const void __user *from, unsigned long n)
19629 {
19630 - int sz = __compiletime_object_size(to);
19631 + size_t sz = __compiletime_object_size(to);
19632
19633 might_fault();
19634
19635 @@ -609,12 +677,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19636 * case, and do only runtime checking for non-constant sizes.
19637 */
19638
19639 - if (likely(sz < 0 || sz >= n))
19640 - n = _copy_from_user(to, from, n);
19641 - else if(__builtin_constant_p(n))
19642 - copy_from_user_overflow();
19643 - else
19644 - __copy_from_user_overflow(sz, n);
19645 + if (likely(sz != (size_t)-1 && sz < n)) {
19646 + if(__builtin_constant_p(n))
19647 + copy_from_user_overflow();
19648 + else
19649 + __copy_from_user_overflow(sz, n);
19650 + } if (access_ok(VERIFY_READ, from, n))
19651 + n = __copy_from_user(to, from, n);
19652 + else if ((long)n > 0)
19653 + memset(to, 0, n);
19654
19655 return n;
19656 }
19657 @@ -622,17 +693,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19658 static inline unsigned long __must_check
19659 copy_to_user(void __user *to, const void *from, unsigned long n)
19660 {
19661 - int sz = __compiletime_object_size(from);
19662 + size_t sz = __compiletime_object_size(from);
19663
19664 might_fault();
19665
19666 /* See the comment in copy_from_user() above. */
19667 - if (likely(sz < 0 || sz >= n))
19668 - n = _copy_to_user(to, from, n);
19669 - else if(__builtin_constant_p(n))
19670 - copy_to_user_overflow();
19671 - else
19672 - __copy_to_user_overflow(sz, n);
19673 + if (likely(sz != (size_t)-1 && sz < n)) {
19674 + if(__builtin_constant_p(n))
19675 + copy_to_user_overflow();
19676 + else
19677 + __copy_to_user_overflow(sz, n);
19678 + } else if (access_ok(VERIFY_WRITE, to, n))
19679 + n = __copy_to_user(to, from, n);
19680
19681 return n;
19682 }
19683 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19684 index 3c03a5d..1071638 100644
19685 --- a/arch/x86/include/asm/uaccess_32.h
19686 +++ b/arch/x86/include/asm/uaccess_32.h
19687 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19688 static __always_inline unsigned long __must_check
19689 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
19690 {
19691 + if ((long)n < 0)
19692 + return n;
19693 +
19694 + check_object_size(from, n, true);
19695 +
19696 if (__builtin_constant_p(n)) {
19697 unsigned long ret;
19698
19699 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
19700 __copy_to_user(void __user *to, const void *from, unsigned long n)
19701 {
19702 might_fault();
19703 +
19704 return __copy_to_user_inatomic(to, from, n);
19705 }
19706
19707 static __always_inline unsigned long
19708 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
19709 {
19710 + if ((long)n < 0)
19711 + return n;
19712 +
19713 /* Avoid zeroing the tail if the copy fails..
19714 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
19715 * but as the zeroing behaviour is only significant when n is not
19716 @@ -137,6 +146,12 @@ static __always_inline unsigned long
19717 __copy_from_user(void *to, const void __user *from, unsigned long n)
19718 {
19719 might_fault();
19720 +
19721 + if ((long)n < 0)
19722 + return n;
19723 +
19724 + check_object_size(to, n, false);
19725 +
19726 if (__builtin_constant_p(n)) {
19727 unsigned long ret;
19728
19729 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
19730 const void __user *from, unsigned long n)
19731 {
19732 might_fault();
19733 +
19734 + if ((long)n < 0)
19735 + return n;
19736 +
19737 if (__builtin_constant_p(n)) {
19738 unsigned long ret;
19739
19740 @@ -181,7 +200,10 @@ static __always_inline unsigned long
19741 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
19742 unsigned long n)
19743 {
19744 - return __copy_from_user_ll_nocache_nozero(to, from, n);
19745 + if ((long)n < 0)
19746 + return n;
19747 +
19748 + return __copy_from_user_ll_nocache_nozero(to, from, n);
19749 }
19750
19751 #endif /* _ASM_X86_UACCESS_32_H */
19752 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
19753 index 190413d..206c200 100644
19754 --- a/arch/x86/include/asm/uaccess_64.h
19755 +++ b/arch/x86/include/asm/uaccess_64.h
19756 @@ -10,6 +10,9 @@
19757 #include <asm/alternative.h>
19758 #include <asm/cpufeature.h>
19759 #include <asm/page.h>
19760 +#include <asm/pgtable.h>
19761 +
19762 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
19763
19764 /*
19765 * Copy To/From Userspace
19766 @@ -17,14 +20,14 @@
19767
19768 /* Handles exceptions in both to and from, but doesn't do access_ok */
19769 __must_check unsigned long
19770 -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
19771 +copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
19772 __must_check unsigned long
19773 -copy_user_generic_string(void *to, const void *from, unsigned len);
19774 +copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
19775 __must_check unsigned long
19776 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
19777 +copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
19778
19779 static __always_inline __must_check unsigned long
19780 -copy_user_generic(void *to, const void *from, unsigned len)
19781 +copy_user_generic(void *to, const void *from, unsigned long len)
19782 {
19783 unsigned ret;
19784
19785 @@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
19786 }
19787
19788 __must_check unsigned long
19789 -copy_in_user(void __user *to, const void __user *from, unsigned len);
19790 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
19791
19792 static __always_inline __must_check
19793 -int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
19794 +unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
19795 {
19796 - int ret = 0;
19797 + size_t sz = __compiletime_object_size(dst);
19798 + unsigned ret = 0;
19799 +
19800 + if (size > INT_MAX)
19801 + return size;
19802 +
19803 + check_object_size(dst, size, false);
19804 +
19805 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19806 + if (!access_ok_noprefault(VERIFY_READ, src, size))
19807 + return size;
19808 +#endif
19809 +
19810 + if (unlikely(sz != (size_t)-1 && sz < size)) {
19811 + if(__builtin_constant_p(size))
19812 + copy_from_user_overflow();
19813 + else
19814 + __copy_from_user_overflow(sz, size);
19815 + return size;
19816 + }
19817
19818 if (!__builtin_constant_p(size))
19819 - return copy_user_generic(dst, (__force void *)src, size);
19820 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19821 switch (size) {
19822 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
19823 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
19824 ret, "b", "b", "=q", 1);
19825 return ret;
19826 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
19827 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
19828 ret, "w", "w", "=r", 2);
19829 return ret;
19830 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
19831 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
19832 ret, "l", "k", "=r", 4);
19833 return ret;
19834 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
19835 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19836 ret, "q", "", "=r", 8);
19837 return ret;
19838 case 10:
19839 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19840 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19841 ret, "q", "", "=r", 10);
19842 if (unlikely(ret))
19843 return ret;
19844 __get_user_asm(*(u16 *)(8 + (char *)dst),
19845 - (u16 __user *)(8 + (char __user *)src),
19846 + (const u16 __user *)(8 + (const char __user *)src),
19847 ret, "w", "w", "=r", 2);
19848 return ret;
19849 case 16:
19850 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19851 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19852 ret, "q", "", "=r", 16);
19853 if (unlikely(ret))
19854 return ret;
19855 __get_user_asm(*(u64 *)(8 + (char *)dst),
19856 - (u64 __user *)(8 + (char __user *)src),
19857 + (const u64 __user *)(8 + (const char __user *)src),
19858 ret, "q", "", "=r", 8);
19859 return ret;
19860 default:
19861 - return copy_user_generic(dst, (__force void *)src, size);
19862 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19863 }
19864 }
19865
19866 static __always_inline __must_check
19867 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
19868 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
19869 {
19870 might_fault();
19871 return __copy_from_user_nocheck(dst, src, size);
19872 }
19873
19874 static __always_inline __must_check
19875 -int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
19876 +unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
19877 {
19878 - int ret = 0;
19879 + size_t sz = __compiletime_object_size(src);
19880 + unsigned ret = 0;
19881 +
19882 + if (size > INT_MAX)
19883 + return size;
19884 +
19885 + check_object_size(src, size, true);
19886 +
19887 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19888 + if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
19889 + return size;
19890 +#endif
19891 +
19892 + if (unlikely(sz != (size_t)-1 && sz < size)) {
19893 + if(__builtin_constant_p(size))
19894 + copy_to_user_overflow();
19895 + else
19896 + __copy_to_user_overflow(sz, size);
19897 + return size;
19898 + }
19899
19900 if (!__builtin_constant_p(size))
19901 - return copy_user_generic((__force void *)dst, src, size);
19902 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19903 switch (size) {
19904 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
19905 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
19906 ret, "b", "b", "iq", 1);
19907 return ret;
19908 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
19909 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
19910 ret, "w", "w", "ir", 2);
19911 return ret;
19912 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
19913 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
19914 ret, "l", "k", "ir", 4);
19915 return ret;
19916 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
19917 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19918 ret, "q", "", "er", 8);
19919 return ret;
19920 case 10:
19921 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19922 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19923 ret, "q", "", "er", 10);
19924 if (unlikely(ret))
19925 return ret;
19926 asm("":::"memory");
19927 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
19928 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
19929 ret, "w", "w", "ir", 2);
19930 return ret;
19931 case 16:
19932 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19933 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19934 ret, "q", "", "er", 16);
19935 if (unlikely(ret))
19936 return ret;
19937 asm("":::"memory");
19938 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
19939 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
19940 ret, "q", "", "er", 8);
19941 return ret;
19942 default:
19943 - return copy_user_generic((__force void *)dst, src, size);
19944 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19945 }
19946 }
19947
19948 static __always_inline __must_check
19949 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
19950 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
19951 {
19952 might_fault();
19953 return __copy_to_user_nocheck(dst, src, size);
19954 }
19955
19956 static __always_inline __must_check
19957 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19958 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19959 {
19960 - int ret = 0;
19961 + unsigned ret = 0;
19962
19963 might_fault();
19964 +
19965 + if (size > INT_MAX)
19966 + return size;
19967 +
19968 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19969 + if (!access_ok_noprefault(VERIFY_READ, src, size))
19970 + return size;
19971 + if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
19972 + return size;
19973 +#endif
19974 +
19975 if (!__builtin_constant_p(size))
19976 - return copy_user_generic((__force void *)dst,
19977 - (__force void *)src, size);
19978 + return copy_user_generic((__force_kernel void *)____m(dst),
19979 + (__force_kernel const void *)____m(src), size);
19980 switch (size) {
19981 case 1: {
19982 u8 tmp;
19983 - __get_user_asm(tmp, (u8 __user *)src,
19984 + __get_user_asm(tmp, (const u8 __user *)src,
19985 ret, "b", "b", "=q", 1);
19986 if (likely(!ret))
19987 __put_user_asm(tmp, (u8 __user *)dst,
19988 @@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19989 }
19990 case 2: {
19991 u16 tmp;
19992 - __get_user_asm(tmp, (u16 __user *)src,
19993 + __get_user_asm(tmp, (const u16 __user *)src,
19994 ret, "w", "w", "=r", 2);
19995 if (likely(!ret))
19996 __put_user_asm(tmp, (u16 __user *)dst,
19997 @@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19998
19999 case 4: {
20000 u32 tmp;
20001 - __get_user_asm(tmp, (u32 __user *)src,
20002 + __get_user_asm(tmp, (const u32 __user *)src,
20003 ret, "l", "k", "=r", 4);
20004 if (likely(!ret))
20005 __put_user_asm(tmp, (u32 __user *)dst,
20006 @@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20007 }
20008 case 8: {
20009 u64 tmp;
20010 - __get_user_asm(tmp, (u64 __user *)src,
20011 + __get_user_asm(tmp, (const u64 __user *)src,
20012 ret, "q", "", "=r", 8);
20013 if (likely(!ret))
20014 __put_user_asm(tmp, (u64 __user *)dst,
20015 @@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20016 return ret;
20017 }
20018 default:
20019 - return copy_user_generic((__force void *)dst,
20020 - (__force void *)src, size);
20021 + return copy_user_generic((__force_kernel void *)____m(dst),
20022 + (__force_kernel const void *)____m(src), size);
20023 }
20024 }
20025
20026 -static __must_check __always_inline int
20027 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20028 +static __must_check __always_inline unsigned long
20029 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20030 {
20031 - return __copy_from_user_nocheck(dst, (__force const void *)src, size);
20032 + return __copy_from_user_nocheck(dst, src, size);
20033 }
20034
20035 -static __must_check __always_inline int
20036 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20037 +static __must_check __always_inline unsigned long
20038 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20039 {
20040 - return __copy_to_user_nocheck((__force void *)dst, src, size);
20041 + return __copy_to_user_nocheck(dst, src, size);
20042 }
20043
20044 -extern long __copy_user_nocache(void *dst, const void __user *src,
20045 - unsigned size, int zerorest);
20046 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20047 + unsigned long size, int zerorest);
20048
20049 -static inline int
20050 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20051 +static inline unsigned long
20052 +__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20053 {
20054 might_fault();
20055 +
20056 + if (size > INT_MAX)
20057 + return size;
20058 +
20059 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20060 + if (!access_ok_noprefault(VERIFY_READ, src, size))
20061 + return size;
20062 +#endif
20063 +
20064 return __copy_user_nocache(dst, src, size, 1);
20065 }
20066
20067 -static inline int
20068 +static inline unsigned long
20069 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20070 - unsigned size)
20071 + unsigned long size)
20072 {
20073 + if (size > INT_MAX)
20074 + return size;
20075 +
20076 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20077 + if (!access_ok_noprefault(VERIFY_READ, src, size))
20078 + return size;
20079 +#endif
20080 +
20081 return __copy_user_nocache(dst, src, size, 0);
20082 }
20083
20084 unsigned long
20085 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20086 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20087
20088 #endif /* _ASM_X86_UACCESS_64_H */
20089 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20090 index 5b238981..77fdd78 100644
20091 --- a/arch/x86/include/asm/word-at-a-time.h
20092 +++ b/arch/x86/include/asm/word-at-a-time.h
20093 @@ -11,7 +11,7 @@
20094 * and shift, for example.
20095 */
20096 struct word_at_a_time {
20097 - const unsigned long one_bits, high_bits;
20098 + unsigned long one_bits, high_bits;
20099 };
20100
20101 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20102 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20103 index 0f1be11..f7542bf 100644
20104 --- a/arch/x86/include/asm/x86_init.h
20105 +++ b/arch/x86/include/asm/x86_init.h
20106 @@ -129,7 +129,7 @@ struct x86_init_ops {
20107 struct x86_init_timers timers;
20108 struct x86_init_iommu iommu;
20109 struct x86_init_pci pci;
20110 -};
20111 +} __no_const;
20112
20113 /**
20114 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20115 @@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20116 void (*setup_percpu_clockev)(void);
20117 void (*early_percpu_clock_init)(void);
20118 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20119 -};
20120 +} __no_const;
20121
20122 struct timespec;
20123
20124 @@ -168,7 +168,7 @@ struct x86_platform_ops {
20125 void (*save_sched_clock_state)(void);
20126 void (*restore_sched_clock_state)(void);
20127 void (*apic_post_init)(void);
20128 -};
20129 +} __no_const;
20130
20131 struct pci_dev;
20132 struct msi_msg;
20133 @@ -185,7 +185,7 @@ struct x86_msi_ops {
20134 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20135 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20136 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20137 -};
20138 +} __no_const;
20139
20140 struct IO_APIC_route_entry;
20141 struct io_apic_irq_attr;
20142 @@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20143 unsigned int destination, int vector,
20144 struct io_apic_irq_attr *attr);
20145 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20146 -};
20147 +} __no_const;
20148
20149 extern struct x86_init_ops x86_init;
20150 extern struct x86_cpuinit_ops x86_cpuinit;
20151 diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20152 index b913915..4f5a581 100644
20153 --- a/arch/x86/include/asm/xen/page.h
20154 +++ b/arch/x86/include/asm/xen/page.h
20155 @@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
20156 extern struct page *m2p_find_override(unsigned long mfn);
20157 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20158
20159 -static inline unsigned long pfn_to_mfn(unsigned long pfn)
20160 +static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20161 {
20162 unsigned long mfn;
20163
20164 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20165 index 0415cda..3b22adc 100644
20166 --- a/arch/x86/include/asm/xsave.h
20167 +++ b/arch/x86/include/asm/xsave.h
20168 @@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20169 if (unlikely(err))
20170 return -EFAULT;
20171
20172 + pax_open_userland();
20173 __asm__ __volatile__(ASM_STAC "\n"
20174 - "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20175 + "1:"
20176 + __copyuser_seg
20177 + ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20178 "2: " ASM_CLAC "\n"
20179 ".section .fixup,\"ax\"\n"
20180 "3: movl $-1,%[err]\n"
20181 @@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20182 : [err] "=r" (err)
20183 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20184 : "memory");
20185 + pax_close_userland();
20186 return err;
20187 }
20188
20189 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20190 {
20191 int err;
20192 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20193 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20194 u32 lmask = mask;
20195 u32 hmask = mask >> 32;
20196
20197 + pax_open_userland();
20198 __asm__ __volatile__(ASM_STAC "\n"
20199 - "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20200 + "1:"
20201 + __copyuser_seg
20202 + ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20203 "2: " ASM_CLAC "\n"
20204 ".section .fixup,\"ax\"\n"
20205 "3: movl $-1,%[err]\n"
20206 @@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20207 : [err] "=r" (err)
20208 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20209 : "memory"); /* memory required? */
20210 + pax_close_userland();
20211 return err;
20212 }
20213
20214 diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20215 index bbae024..e1528f9 100644
20216 --- a/arch/x86/include/uapi/asm/e820.h
20217 +++ b/arch/x86/include/uapi/asm/e820.h
20218 @@ -63,7 +63,7 @@ struct e820map {
20219 #define ISA_START_ADDRESS 0xa0000
20220 #define ISA_END_ADDRESS 0x100000
20221
20222 -#define BIOS_BEGIN 0x000a0000
20223 +#define BIOS_BEGIN 0x000c0000
20224 #define BIOS_END 0x00100000
20225
20226 #define BIOS_ROM_BASE 0xffe00000
20227 diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20228 index 7b0a55a..ad115bf 100644
20229 --- a/arch/x86/include/uapi/asm/ptrace-abi.h
20230 +++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20231 @@ -49,7 +49,6 @@
20232 #define EFLAGS 144
20233 #define RSP 152
20234 #define SS 160
20235 -#define ARGOFFSET R11
20236 #endif /* __ASSEMBLY__ */
20237
20238 /* top of stack page */
20239 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20240 index 9b0a34e..fc7e553 100644
20241 --- a/arch/x86/kernel/Makefile
20242 +++ b/arch/x86/kernel/Makefile
20243 @@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20244 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20245 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20246 obj-y += probe_roms.o
20247 -obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20248 +obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20249 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20250 obj-y += syscall_$(BITS).o
20251 obj-$(CONFIG_X86_64) += vsyscall_64.o
20252 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20253 index 6c0b43b..e67bb31 100644
20254 --- a/arch/x86/kernel/acpi/boot.c
20255 +++ b/arch/x86/kernel/acpi/boot.c
20256 @@ -1315,7 +1315,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20257 * If your system is blacklisted here, but you find that acpi=force
20258 * works for you, please contact linux-acpi@vger.kernel.org
20259 */
20260 -static struct dmi_system_id __initdata acpi_dmi_table[] = {
20261 +static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20262 /*
20263 * Boxes that need ACPI disabled
20264 */
20265 @@ -1390,7 +1390,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20266 };
20267
20268 /* second table for DMI checks that should run after early-quirks */
20269 -static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20270 +static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20271 /*
20272 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20273 * which includes some code which overrides all temperature
20274 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20275 index 3a2ae4c..9db31d6 100644
20276 --- a/arch/x86/kernel/acpi/sleep.c
20277 +++ b/arch/x86/kernel/acpi/sleep.c
20278 @@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20279 #else /* CONFIG_64BIT */
20280 #ifdef CONFIG_SMP
20281 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20282 +
20283 + pax_open_kernel();
20284 early_gdt_descr.address =
20285 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20286 + pax_close_kernel();
20287 +
20288 initial_gs = per_cpu_offset(smp_processor_id());
20289 #endif
20290 initial_code = (unsigned long)wakeup_long64;
20291 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20292 index 665c6b7..eae4d56 100644
20293 --- a/arch/x86/kernel/acpi/wakeup_32.S
20294 +++ b/arch/x86/kernel/acpi/wakeup_32.S
20295 @@ -29,13 +29,11 @@ wakeup_pmode_return:
20296 # and restore the stack ... but you need gdt for this to work
20297 movl saved_context_esp, %esp
20298
20299 - movl %cs:saved_magic, %eax
20300 - cmpl $0x12345678, %eax
20301 + cmpl $0x12345678, saved_magic
20302 jne bogus_magic
20303
20304 # jump to place where we left off
20305 - movl saved_eip, %eax
20306 - jmp *%eax
20307 + jmp *(saved_eip)
20308
20309 bogus_magic:
20310 jmp bogus_magic
20311 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20312 index df94598..f3b29bf 100644
20313 --- a/arch/x86/kernel/alternative.c
20314 +++ b/arch/x86/kernel/alternative.c
20315 @@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20316 */
20317 for (a = start; a < end; a++) {
20318 instr = (u8 *)&a->instr_offset + a->instr_offset;
20319 +
20320 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20321 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20322 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20323 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20324 +#endif
20325 +
20326 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20327 BUG_ON(a->replacementlen > a->instrlen);
20328 BUG_ON(a->instrlen > sizeof(insnbuf));
20329 @@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20330 for (poff = start; poff < end; poff++) {
20331 u8 *ptr = (u8 *)poff + *poff;
20332
20333 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20334 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20335 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20336 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20337 +#endif
20338 +
20339 if (!*poff || ptr < text || ptr >= text_end)
20340 continue;
20341 /* turn DS segment override prefix into lock prefix */
20342 - if (*ptr == 0x3e)
20343 + if (*ktla_ktva(ptr) == 0x3e)
20344 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20345 }
20346 mutex_unlock(&text_mutex);
20347 @@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20348 for (poff = start; poff < end; poff++) {
20349 u8 *ptr = (u8 *)poff + *poff;
20350
20351 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20352 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20353 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20354 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20355 +#endif
20356 +
20357 if (!*poff || ptr < text || ptr >= text_end)
20358 continue;
20359 /* turn lock prefix into DS segment override prefix */
20360 - if (*ptr == 0xf0)
20361 + if (*ktla_ktva(ptr) == 0xf0)
20362 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20363 }
20364 mutex_unlock(&text_mutex);
20365 @@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20366
20367 BUG_ON(p->len > MAX_PATCH_LEN);
20368 /* prep the buffer with the original instructions */
20369 - memcpy(insnbuf, p->instr, p->len);
20370 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20371 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20372 (unsigned long)p->instr, p->len);
20373
20374 @@ -505,7 +524,7 @@ void __init alternative_instructions(void)
20375 if (!uniproc_patched || num_possible_cpus() == 1)
20376 free_init_pages("SMP alternatives",
20377 (unsigned long)__smp_locks,
20378 - (unsigned long)__smp_locks_end);
20379 + PAGE_ALIGN((unsigned long)__smp_locks_end));
20380 #endif
20381
20382 apply_paravirt(__parainstructions, __parainstructions_end);
20383 @@ -525,13 +544,17 @@ void __init alternative_instructions(void)
20384 * instructions. And on the local CPU you need to be protected again NMI or MCE
20385 * handlers seeing an inconsistent instruction while you patch.
20386 */
20387 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
20388 +void *__kprobes text_poke_early(void *addr, const void *opcode,
20389 size_t len)
20390 {
20391 unsigned long flags;
20392 local_irq_save(flags);
20393 - memcpy(addr, opcode, len);
20394 +
20395 + pax_open_kernel();
20396 + memcpy(ktla_ktva(addr), opcode, len);
20397 sync_core();
20398 + pax_close_kernel();
20399 +
20400 local_irq_restore(flags);
20401 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20402 that causes hangs on some VIA CPUs. */
20403 @@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20404 */
20405 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
20406 {
20407 - unsigned long flags;
20408 - char *vaddr;
20409 + unsigned char *vaddr = ktla_ktva(addr);
20410 struct page *pages[2];
20411 - int i;
20412 + size_t i;
20413
20414 if (!core_kernel_text((unsigned long)addr)) {
20415 - pages[0] = vmalloc_to_page(addr);
20416 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20417 + pages[0] = vmalloc_to_page(vaddr);
20418 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20419 } else {
20420 - pages[0] = virt_to_page(addr);
20421 + pages[0] = virt_to_page(vaddr);
20422 WARN_ON(!PageReserved(pages[0]));
20423 - pages[1] = virt_to_page(addr + PAGE_SIZE);
20424 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20425 }
20426 BUG_ON(!pages[0]);
20427 - local_irq_save(flags);
20428 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20429 - if (pages[1])
20430 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20431 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20432 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20433 - clear_fixmap(FIX_TEXT_POKE0);
20434 - if (pages[1])
20435 - clear_fixmap(FIX_TEXT_POKE1);
20436 - local_flush_tlb();
20437 - sync_core();
20438 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
20439 - that causes hangs on some VIA CPUs. */
20440 + text_poke_early(addr, opcode, len);
20441 for (i = 0; i < len; i++)
20442 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20443 - local_irq_restore(flags);
20444 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20445 return addr;
20446 }
20447
20448 @@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *regs)
20449 if (likely(!bp_patching_in_progress))
20450 return 0;
20451
20452 - if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20453 + if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20454 return 0;
20455
20456 /* set up the specified breakpoint handler */
20457 @@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *regs)
20458 */
20459 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20460 {
20461 - unsigned char int3 = 0xcc;
20462 + const unsigned char int3 = 0xcc;
20463
20464 bp_int3_handler = handler;
20465 bp_int3_addr = (u8 *)addr + sizeof(int3);
20466 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20467 index d278736..0b4af9a8 100644
20468 --- a/arch/x86/kernel/apic/apic.c
20469 +++ b/arch/x86/kernel/apic/apic.c
20470 @@ -191,7 +191,7 @@ int first_system_vector = 0xfe;
20471 /*
20472 * Debug level, exported for io_apic.c
20473 */
20474 -unsigned int apic_verbosity;
20475 +int apic_verbosity;
20476
20477 int pic_mode;
20478
20479 @@ -1986,7 +1986,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20480 apic_write(APIC_ESR, 0);
20481 v1 = apic_read(APIC_ESR);
20482 ack_APIC_irq();
20483 - atomic_inc(&irq_err_count);
20484 + atomic_inc_unchecked(&irq_err_count);
20485
20486 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
20487 smp_processor_id(), v0 , v1);
20488 diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20489 index 00c77cf..2dc6a2d 100644
20490 --- a/arch/x86/kernel/apic/apic_flat_64.c
20491 +++ b/arch/x86/kernel/apic/apic_flat_64.c
20492 @@ -157,7 +157,7 @@ static int flat_probe(void)
20493 return 1;
20494 }
20495
20496 -static struct apic apic_flat = {
20497 +static struct apic apic_flat __read_only = {
20498 .name = "flat",
20499 .probe = flat_probe,
20500 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20501 @@ -271,7 +271,7 @@ static int physflat_probe(void)
20502 return 0;
20503 }
20504
20505 -static struct apic apic_physflat = {
20506 +static struct apic apic_physflat __read_only = {
20507
20508 .name = "physical flat",
20509 .probe = physflat_probe,
20510 diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20511 index e145f28..2752888 100644
20512 --- a/arch/x86/kernel/apic/apic_noop.c
20513 +++ b/arch/x86/kernel/apic/apic_noop.c
20514 @@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
20515 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20516 }
20517
20518 -struct apic apic_noop = {
20519 +struct apic apic_noop __read_only = {
20520 .name = "noop",
20521 .probe = noop_probe,
20522 .acpi_madt_oem_check = NULL,
20523 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20524 index d50e364..543bee3 100644
20525 --- a/arch/x86/kernel/apic/bigsmp_32.c
20526 +++ b/arch/x86/kernel/apic/bigsmp_32.c
20527 @@ -152,7 +152,7 @@ static int probe_bigsmp(void)
20528 return dmi_bigsmp;
20529 }
20530
20531 -static struct apic apic_bigsmp = {
20532 +static struct apic apic_bigsmp __read_only = {
20533
20534 .name = "bigsmp",
20535 .probe = probe_bigsmp,
20536 diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
20537 index c552247..587a316 100644
20538 --- a/arch/x86/kernel/apic/es7000_32.c
20539 +++ b/arch/x86/kernel/apic/es7000_32.c
20540 @@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
20541 return ret && es7000_apic_is_cluster();
20542 }
20543
20544 -/* We've been warned by a false positive warning.Use __refdata to keep calm. */
20545 -static struct apic __refdata apic_es7000_cluster = {
20546 +static struct apic apic_es7000_cluster __read_only = {
20547
20548 .name = "es7000",
20549 .probe = probe_es7000,
20550 @@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
20551 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
20552 };
20553
20554 -static struct apic __refdata apic_es7000 = {
20555 +static struct apic apic_es7000 __read_only = {
20556
20557 .name = "es7000",
20558 .probe = probe_es7000,
20559 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20560 index e63a5bd..c0babf8 100644
20561 --- a/arch/x86/kernel/apic/io_apic.c
20562 +++ b/arch/x86/kernel/apic/io_apic.c
20563 @@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
20564 }
20565 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
20566
20567 -void lock_vector_lock(void)
20568 +void lock_vector_lock(void) __acquires(vector_lock)
20569 {
20570 /* Used to the online set of cpus does not change
20571 * during assign_irq_vector.
20572 @@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
20573 raw_spin_lock(&vector_lock);
20574 }
20575
20576 -void unlock_vector_lock(void)
20577 +void unlock_vector_lock(void) __releases(vector_lock)
20578 {
20579 raw_spin_unlock(&vector_lock);
20580 }
20581 @@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_data *data)
20582 ack_APIC_irq();
20583 }
20584
20585 -atomic_t irq_mis_count;
20586 +atomic_unchecked_t irq_mis_count;
20587
20588 #ifdef CONFIG_GENERIC_PENDING_IRQ
20589 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20590 @@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_data *data)
20591 * at the cpu.
20592 */
20593 if (!(v & (1 << (i & 0x1f)))) {
20594 - atomic_inc(&irq_mis_count);
20595 + atomic_inc_unchecked(&irq_mis_count);
20596
20597 eoi_ioapic_irq(irq, cfg);
20598 }
20599 diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
20600 index 1e42e8f..daacf44 100644
20601 --- a/arch/x86/kernel/apic/numaq_32.c
20602 +++ b/arch/x86/kernel/apic/numaq_32.c
20603 @@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
20604 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
20605 }
20606
20607 -/* Use __refdata to keep false positive warning calm. */
20608 -static struct apic __refdata apic_numaq = {
20609 +static struct apic apic_numaq __read_only = {
20610
20611 .name = "NUMAQ",
20612 .probe = probe_numaq,
20613 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20614 index eb35ef9..f184a21 100644
20615 --- a/arch/x86/kernel/apic/probe_32.c
20616 +++ b/arch/x86/kernel/apic/probe_32.c
20617 @@ -72,7 +72,7 @@ static int probe_default(void)
20618 return 1;
20619 }
20620
20621 -static struct apic apic_default = {
20622 +static struct apic apic_default __read_only = {
20623
20624 .name = "default",
20625 .probe = probe_default,
20626 diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
20627 index 77c95c0..434f8a4 100644
20628 --- a/arch/x86/kernel/apic/summit_32.c
20629 +++ b/arch/x86/kernel/apic/summit_32.c
20630 @@ -486,7 +486,7 @@ void setup_summit(void)
20631 }
20632 #endif
20633
20634 -static struct apic apic_summit = {
20635 +static struct apic apic_summit __read_only = {
20636
20637 .name = "summit",
20638 .probe = probe_summit,
20639 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20640 index 140e29d..d88bc95 100644
20641 --- a/arch/x86/kernel/apic/x2apic_cluster.c
20642 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
20643 @@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20644 return notifier_from_errno(err);
20645 }
20646
20647 -static struct notifier_block __refdata x2apic_cpu_notifier = {
20648 +static struct notifier_block x2apic_cpu_notifier = {
20649 .notifier_call = update_clusterinfo,
20650 };
20651
20652 @@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20653 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20654 }
20655
20656 -static struct apic apic_x2apic_cluster = {
20657 +static struct apic apic_x2apic_cluster __read_only = {
20658
20659 .name = "cluster x2apic",
20660 .probe = x2apic_cluster_probe,
20661 diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20662 index 562a76d..a003c0f 100644
20663 --- a/arch/x86/kernel/apic/x2apic_phys.c
20664 +++ b/arch/x86/kernel/apic/x2apic_phys.c
20665 @@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
20666 return apic == &apic_x2apic_phys;
20667 }
20668
20669 -static struct apic apic_x2apic_phys = {
20670 +static struct apic apic_x2apic_phys __read_only = {
20671
20672 .name = "physical x2apic",
20673 .probe = x2apic_phys_probe,
20674 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20675 index ad0dc04..0d9cc56 100644
20676 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
20677 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20678 @@ -350,7 +350,7 @@ static int uv_probe(void)
20679 return apic == &apic_x2apic_uv_x;
20680 }
20681
20682 -static struct apic __refdata apic_x2apic_uv_x = {
20683 +static struct apic apic_x2apic_uv_x __read_only = {
20684
20685 .name = "UV large system",
20686 .probe = uv_probe,
20687 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20688 index 3ab0343..814c4787 100644
20689 --- a/arch/x86/kernel/apm_32.c
20690 +++ b/arch/x86/kernel/apm_32.c
20691 @@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
20692 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20693 * even though they are called in protected mode.
20694 */
20695 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20696 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20697 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20698
20699 static const char driver_version[] = "1.16ac"; /* no spaces */
20700 @@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
20701 BUG_ON(cpu != 0);
20702 gdt = get_cpu_gdt_table(cpu);
20703 save_desc_40 = gdt[0x40 / 8];
20704 +
20705 + pax_open_kernel();
20706 gdt[0x40 / 8] = bad_bios_desc;
20707 + pax_close_kernel();
20708
20709 apm_irq_save(flags);
20710 APM_DO_SAVE_SEGS;
20711 @@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
20712 &call->esi);
20713 APM_DO_RESTORE_SEGS;
20714 apm_irq_restore(flags);
20715 +
20716 + pax_open_kernel();
20717 gdt[0x40 / 8] = save_desc_40;
20718 + pax_close_kernel();
20719 +
20720 put_cpu();
20721
20722 return call->eax & 0xff;
20723 @@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
20724 BUG_ON(cpu != 0);
20725 gdt = get_cpu_gdt_table(cpu);
20726 save_desc_40 = gdt[0x40 / 8];
20727 +
20728 + pax_open_kernel();
20729 gdt[0x40 / 8] = bad_bios_desc;
20730 + pax_close_kernel();
20731
20732 apm_irq_save(flags);
20733 APM_DO_SAVE_SEGS;
20734 @@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
20735 &call->eax);
20736 APM_DO_RESTORE_SEGS;
20737 apm_irq_restore(flags);
20738 +
20739 + pax_open_kernel();
20740 gdt[0x40 / 8] = save_desc_40;
20741 + pax_close_kernel();
20742 +
20743 put_cpu();
20744 return error;
20745 }
20746 @@ -2362,12 +2376,15 @@ static int __init apm_init(void)
20747 * code to that CPU.
20748 */
20749 gdt = get_cpu_gdt_table(0);
20750 +
20751 + pax_open_kernel();
20752 set_desc_base(&gdt[APM_CS >> 3],
20753 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20754 set_desc_base(&gdt[APM_CS_16 >> 3],
20755 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20756 set_desc_base(&gdt[APM_DS >> 3],
20757 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20758 + pax_close_kernel();
20759
20760 proc_create("apm", 0, NULL, &apm_file_ops);
20761
20762 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20763 index 9f6b934..cf5ffb3 100644
20764 --- a/arch/x86/kernel/asm-offsets.c
20765 +++ b/arch/x86/kernel/asm-offsets.c
20766 @@ -32,6 +32,8 @@ void common(void) {
20767 OFFSET(TI_flags, thread_info, flags);
20768 OFFSET(TI_status, thread_info, status);
20769 OFFSET(TI_addr_limit, thread_info, addr_limit);
20770 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20771 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20772
20773 BLANK();
20774 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20775 @@ -52,8 +54,26 @@ void common(void) {
20776 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20777 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20778 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20779 +
20780 +#ifdef CONFIG_PAX_KERNEXEC
20781 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
20782 #endif
20783
20784 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20785 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
20786 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
20787 +#ifdef CONFIG_X86_64
20788 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
20789 +#endif
20790 +#endif
20791 +
20792 +#endif
20793 +
20794 + BLANK();
20795 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
20796 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
20797 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
20798 +
20799 #ifdef CONFIG_XEN
20800 BLANK();
20801 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
20802 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
20803 index e7c798b..2b2019b 100644
20804 --- a/arch/x86/kernel/asm-offsets_64.c
20805 +++ b/arch/x86/kernel/asm-offsets_64.c
20806 @@ -77,6 +77,7 @@ int main(void)
20807 BLANK();
20808 #undef ENTRY
20809
20810 + DEFINE(TSS_size, sizeof(struct tss_struct));
20811 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
20812 BLANK();
20813
20814 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
20815 index 47b56a7..efc2bc6 100644
20816 --- a/arch/x86/kernel/cpu/Makefile
20817 +++ b/arch/x86/kernel/cpu/Makefile
20818 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
20819 CFLAGS_REMOVE_perf_event.o = -pg
20820 endif
20821
20822 -# Make sure load_percpu_segment has no stackprotector
20823 -nostackp := $(call cc-option, -fno-stack-protector)
20824 -CFLAGS_common.o := $(nostackp)
20825 -
20826 obj-y := intel_cacheinfo.o scattered.o topology.o
20827 obj-y += proc.o capflags.o powerflags.o common.o
20828 obj-y += rdrand.o
20829 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
20830 index 59bfebc..d8f27bd 100644
20831 --- a/arch/x86/kernel/cpu/amd.c
20832 +++ b/arch/x86/kernel/cpu/amd.c
20833 @@ -753,7 +753,7 @@ static void init_amd(struct cpuinfo_x86 *c)
20834 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
20835 {
20836 /* AMD errata T13 (order #21922) */
20837 - if ((c->x86 == 6)) {
20838 + if (c->x86 == 6) {
20839 /* Duron Rev A0 */
20840 if (c->x86_model == 3 && c->x86_mask == 0)
20841 size = 64;
20842 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
20843 index fe2bdd0..77b0d1b 100644
20844 --- a/arch/x86/kernel/cpu/common.c
20845 +++ b/arch/x86/kernel/cpu/common.c
20846 @@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
20847
20848 static const struct cpu_dev *this_cpu = &default_cpu;
20849
20850 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
20851 -#ifdef CONFIG_X86_64
20852 - /*
20853 - * We need valid kernel segments for data and code in long mode too
20854 - * IRET will check the segment types kkeil 2000/10/28
20855 - * Also sysret mandates a special GDT layout
20856 - *
20857 - * TLS descriptors are currently at a different place compared to i386.
20858 - * Hopefully nobody expects them at a fixed place (Wine?)
20859 - */
20860 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
20861 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
20862 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
20863 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
20864 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
20865 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
20866 -#else
20867 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
20868 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20869 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
20870 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
20871 - /*
20872 - * Segments used for calling PnP BIOS have byte granularity.
20873 - * They code segments and data segments have fixed 64k limits,
20874 - * the transfer segment sizes are set at run time.
20875 - */
20876 - /* 32-bit code */
20877 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20878 - /* 16-bit code */
20879 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20880 - /* 16-bit data */
20881 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
20882 - /* 16-bit data */
20883 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
20884 - /* 16-bit data */
20885 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
20886 - /*
20887 - * The APM segments have byte granularity and their bases
20888 - * are set at run time. All have 64k limits.
20889 - */
20890 - /* 32-bit code */
20891 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20892 - /* 16-bit code */
20893 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20894 - /* data */
20895 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
20896 -
20897 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20898 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20899 - GDT_STACK_CANARY_INIT
20900 -#endif
20901 -} };
20902 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
20903 -
20904 static int __init x86_xsave_setup(char *s)
20905 {
20906 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
20907 @@ -293,6 +239,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
20908 }
20909 }
20910
20911 +#ifdef CONFIG_X86_64
20912 +static __init int setup_disable_pcid(char *arg)
20913 +{
20914 + setup_clear_cpu_cap(X86_FEATURE_PCID);
20915 + setup_clear_cpu_cap(X86_FEATURE_INVPCID);
20916 +
20917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20918 + if (clone_pgd_mask != ~(pgdval_t)0UL)
20919 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20920 +#endif
20921 +
20922 + return 1;
20923 +}
20924 +__setup("nopcid", setup_disable_pcid);
20925 +
20926 +static void setup_pcid(struct cpuinfo_x86 *c)
20927 +{
20928 + if (!cpu_has(c, X86_FEATURE_PCID)) {
20929 + clear_cpu_cap(c, X86_FEATURE_INVPCID);
20930 +
20931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20932 + if (clone_pgd_mask != ~(pgdval_t)0UL) {
20933 + pax_open_kernel();
20934 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20935 + pax_close_kernel();
20936 + printk("PAX: slow and weak UDEREF enabled\n");
20937 + } else
20938 + printk("PAX: UDEREF disabled\n");
20939 +#endif
20940 +
20941 + return;
20942 + }
20943 +
20944 + printk("PAX: PCID detected\n");
20945 + set_in_cr4(X86_CR4_PCIDE);
20946 +
20947 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20948 + pax_open_kernel();
20949 + clone_pgd_mask = ~(pgdval_t)0UL;
20950 + pax_close_kernel();
20951 + if (pax_user_shadow_base)
20952 + printk("PAX: weak UDEREF enabled\n");
20953 + else {
20954 + set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
20955 + printk("PAX: strong UDEREF enabled\n");
20956 + }
20957 +#endif
20958 +
20959 + if (cpu_has(c, X86_FEATURE_INVPCID))
20960 + printk("PAX: INVPCID detected\n");
20961 +}
20962 +#endif
20963 +
20964 /*
20965 * Some CPU features depend on higher CPUID levels, which may not always
20966 * be available due to CPUID level capping or broken virtualization
20967 @@ -393,7 +392,7 @@ void switch_to_new_gdt(int cpu)
20968 {
20969 struct desc_ptr gdt_descr;
20970
20971 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
20972 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20973 gdt_descr.size = GDT_SIZE - 1;
20974 load_gdt(&gdt_descr);
20975 /* Reload the per-cpu base */
20976 @@ -882,6 +881,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20977 setup_smep(c);
20978 setup_smap(c);
20979
20980 +#ifdef CONFIG_X86_64
20981 + setup_pcid(c);
20982 +#endif
20983 +
20984 /*
20985 * The vendor-specific functions might have changed features.
20986 * Now we do "generic changes."
20987 @@ -890,6 +893,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20988 /* Filter out anything that depends on CPUID levels we don't have */
20989 filter_cpuid_features(c, true);
20990
20991 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20992 + setup_clear_cpu_cap(X86_FEATURE_SEP);
20993 +#endif
20994 +
20995 /* If the model name is still unset, do table lookup. */
20996 if (!c->x86_model_id[0]) {
20997 const char *p;
20998 @@ -1077,10 +1084,12 @@ static __init int setup_disablecpuid(char *arg)
20999 }
21000 __setup("clearcpuid=", setup_disablecpuid);
21001
21002 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21003 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
21004 +
21005 #ifdef CONFIG_X86_64
21006 -struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21007 -struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21008 - (unsigned long) debug_idt_table };
21009 +struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21010 +const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21011
21012 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21013 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21014 @@ -1094,7 +1103,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
21015 EXPORT_PER_CPU_SYMBOL(current_task);
21016
21017 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21018 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21019 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21020 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21021
21022 DEFINE_PER_CPU(char *, irq_stack_ptr) =
21023 @@ -1244,7 +1253,7 @@ void cpu_init(void)
21024 load_ucode_ap();
21025
21026 cpu = stack_smp_processor_id();
21027 - t = &per_cpu(init_tss, cpu);
21028 + t = init_tss + cpu;
21029 oist = &per_cpu(orig_ist, cpu);
21030
21031 #ifdef CONFIG_NUMA
21032 @@ -1279,7 +1288,6 @@ void cpu_init(void)
21033 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21034 barrier();
21035
21036 - x86_configure_nx();
21037 enable_x2apic();
21038
21039 /*
21040 @@ -1331,7 +1339,7 @@ void cpu_init(void)
21041 {
21042 int cpu = smp_processor_id();
21043 struct task_struct *curr = current;
21044 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21045 + struct tss_struct *t = init_tss + cpu;
21046 struct thread_struct *thread = &curr->thread;
21047
21048 show_ucode_info_early();
21049 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21050 index 0641113..06f5ba4 100644
21051 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21052 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21053 @@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
21054 };
21055
21056 #ifdef CONFIG_AMD_NB
21057 +static struct attribute *default_attrs_amd_nb[] = {
21058 + &type.attr,
21059 + &level.attr,
21060 + &coherency_line_size.attr,
21061 + &physical_line_partition.attr,
21062 + &ways_of_associativity.attr,
21063 + &number_of_sets.attr,
21064 + &size.attr,
21065 + &shared_cpu_map.attr,
21066 + &shared_cpu_list.attr,
21067 + NULL,
21068 + NULL,
21069 + NULL,
21070 + NULL
21071 +};
21072 +
21073 static struct attribute **amd_l3_attrs(void)
21074 {
21075 static struct attribute **attrs;
21076 @@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
21077
21078 n = ARRAY_SIZE(default_attrs);
21079
21080 - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21081 - n += 2;
21082 -
21083 - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21084 - n += 1;
21085 -
21086 - attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21087 - if (attrs == NULL)
21088 - return attrs = default_attrs;
21089 -
21090 - for (n = 0; default_attrs[n]; n++)
21091 - attrs[n] = default_attrs[n];
21092 + attrs = default_attrs_amd_nb;
21093
21094 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21095 attrs[n++] = &cache_disable_0.attr;
21096 @@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
21097 .default_attrs = default_attrs,
21098 };
21099
21100 +#ifdef CONFIG_AMD_NB
21101 +static struct kobj_type ktype_cache_amd_nb = {
21102 + .sysfs_ops = &sysfs_ops,
21103 + .default_attrs = default_attrs_amd_nb,
21104 +};
21105 +#endif
21106 +
21107 static struct kobj_type ktype_percpu_entry = {
21108 .sysfs_ops = &sysfs_ops,
21109 };
21110 @@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
21111 return retval;
21112 }
21113
21114 +#ifdef CONFIG_AMD_NB
21115 + amd_l3_attrs();
21116 +#endif
21117 +
21118 for (i = 0; i < num_cache_leaves; i++) {
21119 + struct kobj_type *ktype;
21120 +
21121 this_object = INDEX_KOBJECT_PTR(cpu, i);
21122 this_object->cpu = cpu;
21123 this_object->index = i;
21124
21125 this_leaf = CPUID4_INFO_IDX(cpu, i);
21126
21127 - ktype_cache.default_attrs = default_attrs;
21128 + ktype = &ktype_cache;
21129 #ifdef CONFIG_AMD_NB
21130 if (this_leaf->base.nb)
21131 - ktype_cache.default_attrs = amd_l3_attrs();
21132 + ktype = &ktype_cache_amd_nb;
21133 #endif
21134 retval = kobject_init_and_add(&(this_object->kobj),
21135 - &ktype_cache,
21136 + ktype,
21137 per_cpu(ici_cache_kobject, cpu),
21138 "index%1lu", i);
21139 if (unlikely(retval)) {
21140 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21141 index b3218cd..99a75de 100644
21142 --- a/arch/x86/kernel/cpu/mcheck/mce.c
21143 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
21144 @@ -45,6 +45,7 @@
21145 #include <asm/processor.h>
21146 #include <asm/mce.h>
21147 #include <asm/msr.h>
21148 +#include <asm/local.h>
21149
21150 #include "mce-internal.h"
21151
21152 @@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21153 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21154 m->cs, m->ip);
21155
21156 - if (m->cs == __KERNEL_CS)
21157 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21158 print_symbol("{%s}", m->ip);
21159 pr_cont("\n");
21160 }
21161 @@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21162
21163 #define PANIC_TIMEOUT 5 /* 5 seconds */
21164
21165 -static atomic_t mce_paniced;
21166 +static atomic_unchecked_t mce_paniced;
21167
21168 static int fake_panic;
21169 -static atomic_t mce_fake_paniced;
21170 +static atomic_unchecked_t mce_fake_paniced;
21171
21172 /* Panic in progress. Enable interrupts and wait for final IPI */
21173 static void wait_for_panic(void)
21174 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21175 /*
21176 * Make sure only one CPU runs in machine check panic
21177 */
21178 - if (atomic_inc_return(&mce_paniced) > 1)
21179 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21180 wait_for_panic();
21181 barrier();
21182
21183 @@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21184 console_verbose();
21185 } else {
21186 /* Don't log too much for fake panic */
21187 - if (atomic_inc_return(&mce_fake_paniced) > 1)
21188 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21189 return;
21190 }
21191 /* First print corrected ones that are still unlogged */
21192 @@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21193 if (!fake_panic) {
21194 if (panic_timeout == 0)
21195 panic_timeout = mca_cfg.panic_timeout;
21196 - panic(msg);
21197 + panic("%s", msg);
21198 } else
21199 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21200 }
21201 @@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
21202 * might have been modified by someone else.
21203 */
21204 rmb();
21205 - if (atomic_read(&mce_paniced))
21206 + if (atomic_read_unchecked(&mce_paniced))
21207 wait_for_panic();
21208 if (!mca_cfg.monarch_timeout)
21209 goto out;
21210 @@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21211 }
21212
21213 /* Call the installed machine check handler for this CPU setup. */
21214 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
21215 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21216 unexpected_machine_check;
21217
21218 /*
21219 @@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21220 return;
21221 }
21222
21223 + pax_open_kernel();
21224 machine_check_vector = do_machine_check;
21225 + pax_close_kernel();
21226
21227 __mcheck_cpu_init_generic();
21228 __mcheck_cpu_init_vendor(c);
21229 @@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21230 */
21231
21232 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21233 -static int mce_chrdev_open_count; /* #times opened */
21234 +static local_t mce_chrdev_open_count; /* #times opened */
21235 static int mce_chrdev_open_exclu; /* already open exclusive? */
21236
21237 static int mce_chrdev_open(struct inode *inode, struct file *file)
21238 @@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21239 spin_lock(&mce_chrdev_state_lock);
21240
21241 if (mce_chrdev_open_exclu ||
21242 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21243 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21244 spin_unlock(&mce_chrdev_state_lock);
21245
21246 return -EBUSY;
21247 @@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21248
21249 if (file->f_flags & O_EXCL)
21250 mce_chrdev_open_exclu = 1;
21251 - mce_chrdev_open_count++;
21252 + local_inc(&mce_chrdev_open_count);
21253
21254 spin_unlock(&mce_chrdev_state_lock);
21255
21256 @@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21257 {
21258 spin_lock(&mce_chrdev_state_lock);
21259
21260 - mce_chrdev_open_count--;
21261 + local_dec(&mce_chrdev_open_count);
21262 mce_chrdev_open_exclu = 0;
21263
21264 spin_unlock(&mce_chrdev_state_lock);
21265 @@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
21266
21267 for (i = 0; i < mca_cfg.banks; i++) {
21268 struct mce_bank *b = &mce_banks[i];
21269 - struct device_attribute *a = &b->attr;
21270 + device_attribute_no_const *a = &b->attr;
21271
21272 sysfs_attr_init(&a->attr);
21273 a->attr.name = b->attrname;
21274 @@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
21275 static void mce_reset(void)
21276 {
21277 cpu_missing = 0;
21278 - atomic_set(&mce_fake_paniced, 0);
21279 + atomic_set_unchecked(&mce_fake_paniced, 0);
21280 atomic_set(&mce_executing, 0);
21281 atomic_set(&mce_callin, 0);
21282 atomic_set(&global_nwo, 0);
21283 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21284 index 1c044b1..37a2a43 100644
21285 --- a/arch/x86/kernel/cpu/mcheck/p5.c
21286 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
21287 @@ -11,6 +11,7 @@
21288 #include <asm/processor.h>
21289 #include <asm/mce.h>
21290 #include <asm/msr.h>
21291 +#include <asm/pgtable.h>
21292
21293 /* By default disabled */
21294 int mce_p5_enabled __read_mostly;
21295 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21296 if (!cpu_has(c, X86_FEATURE_MCE))
21297 return;
21298
21299 + pax_open_kernel();
21300 machine_check_vector = pentium_machine_check;
21301 + pax_close_kernel();
21302 /* Make sure the vector pointer is visible before we enable MCEs: */
21303 wmb();
21304
21305 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21306 index e9a701a..35317d6 100644
21307 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
21308 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21309 @@ -10,6 +10,7 @@
21310 #include <asm/processor.h>
21311 #include <asm/mce.h>
21312 #include <asm/msr.h>
21313 +#include <asm/pgtable.h>
21314
21315 /* Machine check handler for WinChip C6: */
21316 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21317 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21318 {
21319 u32 lo, hi;
21320
21321 + pax_open_kernel();
21322 machine_check_vector = winchip_machine_check;
21323 + pax_close_kernel();
21324 /* Make sure the vector pointer is visible before we enable MCEs: */
21325 wmb();
21326
21327 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21328 index f961de9..8a9d332 100644
21329 --- a/arch/x86/kernel/cpu/mtrr/main.c
21330 +++ b/arch/x86/kernel/cpu/mtrr/main.c
21331 @@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21332 u64 size_or_mask, size_and_mask;
21333 static bool mtrr_aps_delayed_init;
21334
21335 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21336 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21337
21338 const struct mtrr_ops *mtrr_if;
21339
21340 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21341 index df5e41f..816c719 100644
21342 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21343 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21344 @@ -25,7 +25,7 @@ struct mtrr_ops {
21345 int (*validate_add_page)(unsigned long base, unsigned long size,
21346 unsigned int type);
21347 int (*have_wrcomb)(void);
21348 -};
21349 +} __do_const;
21350
21351 extern int generic_get_free_region(unsigned long base, unsigned long size,
21352 int replace_reg);
21353 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21354 index db6cdbe..faaf834 100644
21355 --- a/arch/x86/kernel/cpu/perf_event.c
21356 +++ b/arch/x86/kernel/cpu/perf_event.c
21357 @@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void)
21358 pr_info("no hardware sampling interrupt available.\n");
21359 }
21360
21361 -static struct attribute_group x86_pmu_format_group = {
21362 +static attribute_group_no_const x86_pmu_format_group = {
21363 .name = "format",
21364 .attrs = NULL,
21365 };
21366 @@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] = {
21367 NULL,
21368 };
21369
21370 -static struct attribute_group x86_pmu_events_group = {
21371 +static attribute_group_no_const x86_pmu_events_group = {
21372 .name = "events",
21373 .attrs = events_attr,
21374 };
21375 @@ -1961,7 +1961,7 @@ static unsigned long get_segment_base(unsigned int segment)
21376 if (idx > GDT_ENTRIES)
21377 return 0;
21378
21379 - desc = __this_cpu_ptr(&gdt_page.gdt[0]);
21380 + desc = get_cpu_gdt_table(smp_processor_id());
21381 }
21382
21383 return get_desc_base(desc + idx);
21384 @@ -2051,7 +2051,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21385 break;
21386
21387 perf_callchain_store(entry, frame.return_address);
21388 - fp = frame.next_frame;
21389 + fp = (const void __force_user *)frame.next_frame;
21390 }
21391 }
21392
21393 diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21394 index 639d128..e92d7e5 100644
21395 --- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21396 +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21397 @@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21398 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21399 {
21400 struct attribute **attrs;
21401 - struct attribute_group *attr_group;
21402 + attribute_group_no_const *attr_group;
21403 int i = 0, j;
21404
21405 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21406 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21407 index 0fa4f24..17990ed 100644
21408 --- a/arch/x86/kernel/cpu/perf_event_intel.c
21409 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
21410 @@ -2314,10 +2314,10 @@ __init int intel_pmu_init(void)
21411 * v2 and above have a perf capabilities MSR
21412 */
21413 if (version > 1) {
21414 - u64 capabilities;
21415 + u64 capabilities = x86_pmu.intel_cap.capabilities;
21416
21417 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21418 - x86_pmu.intel_cap.capabilities = capabilities;
21419 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21420 + x86_pmu.intel_cap.capabilities = capabilities;
21421 }
21422
21423 intel_ds_init();
21424 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21425 index 29c2487..a5606fa 100644
21426 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21427 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21428 @@ -3318,7 +3318,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21429 static int __init uncore_type_init(struct intel_uncore_type *type)
21430 {
21431 struct intel_uncore_pmu *pmus;
21432 - struct attribute_group *attr_group;
21433 + attribute_group_no_const *attr_group;
21434 struct attribute **attrs;
21435 int i, j;
21436
21437 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21438 index a80ab71..4089da5 100644
21439 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21440 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21441 @@ -498,7 +498,7 @@ struct intel_uncore_box {
21442 struct uncore_event_desc {
21443 struct kobj_attribute attr;
21444 const char *config;
21445 -};
21446 +} __do_const;
21447
21448 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
21449 { \
21450 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21451 index 7d9481c..99c7e4b 100644
21452 --- a/arch/x86/kernel/cpuid.c
21453 +++ b/arch/x86/kernel/cpuid.c
21454 @@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21455 return notifier_from_errno(err);
21456 }
21457
21458 -static struct notifier_block __refdata cpuid_class_cpu_notifier =
21459 +static struct notifier_block cpuid_class_cpu_notifier =
21460 {
21461 .notifier_call = cpuid_class_cpu_callback,
21462 };
21463 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21464 index 18677a9..f67c45b 100644
21465 --- a/arch/x86/kernel/crash.c
21466 +++ b/arch/x86/kernel/crash.c
21467 @@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21468 {
21469 #ifdef CONFIG_X86_32
21470 struct pt_regs fixed_regs;
21471 -#endif
21472
21473 -#ifdef CONFIG_X86_32
21474 - if (!user_mode_vm(regs)) {
21475 + if (!user_mode(regs)) {
21476 crash_fixup_ss_esp(&fixed_regs, regs);
21477 regs = &fixed_regs;
21478 }
21479 diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21480 index afa64ad..dce67dd 100644
21481 --- a/arch/x86/kernel/crash_dump_64.c
21482 +++ b/arch/x86/kernel/crash_dump_64.c
21483 @@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21484 return -ENOMEM;
21485
21486 if (userbuf) {
21487 - if (copy_to_user(buf, vaddr + offset, csize)) {
21488 + if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21489 iounmap(vaddr);
21490 return -EFAULT;
21491 }
21492 diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21493 index 5d3fe8d..02e1429 100644
21494 --- a/arch/x86/kernel/doublefault.c
21495 +++ b/arch/x86/kernel/doublefault.c
21496 @@ -13,7 +13,7 @@
21497
21498 #define DOUBLEFAULT_STACKSIZE (1024)
21499 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21500 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21501 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21502
21503 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21504
21505 @@ -23,7 +23,7 @@ static void doublefault_fn(void)
21506 unsigned long gdt, tss;
21507
21508 native_store_gdt(&gdt_desc);
21509 - gdt = gdt_desc.address;
21510 + gdt = (unsigned long)gdt_desc.address;
21511
21512 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21513
21514 @@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21515 /* 0x2 bit is always set */
21516 .flags = X86_EFLAGS_SF | 0x2,
21517 .sp = STACK_START,
21518 - .es = __USER_DS,
21519 + .es = __KERNEL_DS,
21520 .cs = __KERNEL_CS,
21521 .ss = __KERNEL_DS,
21522 - .ds = __USER_DS,
21523 + .ds = __KERNEL_DS,
21524 .fs = __KERNEL_PERCPU,
21525
21526 .__cr3 = __pa_nodebug(swapper_pg_dir),
21527 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21528 index d9c12d3..7858b62 100644
21529 --- a/arch/x86/kernel/dumpstack.c
21530 +++ b/arch/x86/kernel/dumpstack.c
21531 @@ -2,6 +2,9 @@
21532 * Copyright (C) 1991, 1992 Linus Torvalds
21533 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21534 */
21535 +#ifdef CONFIG_GRKERNSEC_HIDESYM
21536 +#define __INCLUDED_BY_HIDESYM 1
21537 +#endif
21538 #include <linux/kallsyms.h>
21539 #include <linux/kprobes.h>
21540 #include <linux/uaccess.h>
21541 @@ -40,16 +43,14 @@ void printk_address(unsigned long address)
21542 static void
21543 print_ftrace_graph_addr(unsigned long addr, void *data,
21544 const struct stacktrace_ops *ops,
21545 - struct thread_info *tinfo, int *graph)
21546 + struct task_struct *task, int *graph)
21547 {
21548 - struct task_struct *task;
21549 unsigned long ret_addr;
21550 int index;
21551
21552 if (addr != (unsigned long)return_to_handler)
21553 return;
21554
21555 - task = tinfo->task;
21556 index = task->curr_ret_stack;
21557
21558 if (!task->ret_stack || index < *graph)
21559 @@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21560 static inline void
21561 print_ftrace_graph_addr(unsigned long addr, void *data,
21562 const struct stacktrace_ops *ops,
21563 - struct thread_info *tinfo, int *graph)
21564 + struct task_struct *task, int *graph)
21565 { }
21566 #endif
21567
21568 @@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21569 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21570 */
21571
21572 -static inline int valid_stack_ptr(struct thread_info *tinfo,
21573 - void *p, unsigned int size, void *end)
21574 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21575 {
21576 - void *t = tinfo;
21577 if (end) {
21578 if (p < end && p >= (end-THREAD_SIZE))
21579 return 1;
21580 @@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21581 }
21582
21583 unsigned long
21584 -print_context_stack(struct thread_info *tinfo,
21585 +print_context_stack(struct task_struct *task, void *stack_start,
21586 unsigned long *stack, unsigned long bp,
21587 const struct stacktrace_ops *ops, void *data,
21588 unsigned long *end, int *graph)
21589 {
21590 struct stack_frame *frame = (struct stack_frame *)bp;
21591
21592 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21593 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21594 unsigned long addr;
21595
21596 addr = *stack;
21597 @@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21598 } else {
21599 ops->address(data, addr, 0);
21600 }
21601 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21602 + print_ftrace_graph_addr(addr, data, ops, task, graph);
21603 }
21604 stack++;
21605 }
21606 @@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21607 EXPORT_SYMBOL_GPL(print_context_stack);
21608
21609 unsigned long
21610 -print_context_stack_bp(struct thread_info *tinfo,
21611 +print_context_stack_bp(struct task_struct *task, void *stack_start,
21612 unsigned long *stack, unsigned long bp,
21613 const struct stacktrace_ops *ops, void *data,
21614 unsigned long *end, int *graph)
21615 @@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21616 struct stack_frame *frame = (struct stack_frame *)bp;
21617 unsigned long *ret_addr = &frame->return_address;
21618
21619 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21620 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21621 unsigned long addr = *ret_addr;
21622
21623 if (!__kernel_text_address(addr))
21624 @@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21625 ops->address(data, addr, 1);
21626 frame = frame->next_frame;
21627 ret_addr = &frame->return_address;
21628 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21629 + print_ftrace_graph_addr(addr, data, ops, task, graph);
21630 }
21631
21632 return (unsigned long)frame;
21633 @@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21634 static void print_trace_address(void *data, unsigned long addr, int reliable)
21635 {
21636 touch_nmi_watchdog();
21637 - printk(data);
21638 + printk("%s", (char *)data);
21639 printk_stack_address(addr, reliable);
21640 }
21641
21642 @@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void)
21643 }
21644 EXPORT_SYMBOL_GPL(oops_begin);
21645
21646 +extern void gr_handle_kernel_exploit(void);
21647 +
21648 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21649 {
21650 if (regs && kexec_should_crash(current))
21651 @@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21652 panic("Fatal exception in interrupt");
21653 if (panic_on_oops)
21654 panic("Fatal exception");
21655 - do_exit(signr);
21656 +
21657 + gr_handle_kernel_exploit();
21658 +
21659 + do_group_exit(signr);
21660 }
21661
21662 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21663 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21664 print_modules();
21665 show_regs(regs);
21666 #ifdef CONFIG_X86_32
21667 - if (user_mode_vm(regs)) {
21668 + if (user_mode(regs)) {
21669 sp = regs->sp;
21670 ss = regs->ss & 0xffff;
21671 } else {
21672 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21673 unsigned long flags = oops_begin();
21674 int sig = SIGSEGV;
21675
21676 - if (!user_mode_vm(regs))
21677 + if (!user_mode(regs))
21678 report_bug(regs->ip, regs);
21679
21680 if (__die(str, regs, err))
21681 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21682 index f2a1770..540657f 100644
21683 --- a/arch/x86/kernel/dumpstack_32.c
21684 +++ b/arch/x86/kernel/dumpstack_32.c
21685 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21686 bp = stack_frame(task, regs);
21687
21688 for (;;) {
21689 - struct thread_info *context;
21690 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21691
21692 - context = (struct thread_info *)
21693 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
21694 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
21695 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21696
21697 - stack = (unsigned long *)context->previous_esp;
21698 - if (!stack)
21699 + if (stack_start == task_stack_page(task))
21700 break;
21701 + stack = *(unsigned long **)stack_start;
21702 if (ops->stack(data, "IRQ") < 0)
21703 break;
21704 touch_nmi_watchdog();
21705 @@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
21706 int i;
21707
21708 show_regs_print_info(KERN_EMERG);
21709 - __show_regs(regs, !user_mode_vm(regs));
21710 + __show_regs(regs, !user_mode(regs));
21711
21712 /*
21713 * When in-kernel, we also print out the stack and code at the
21714 * time of the fault..
21715 */
21716 - if (!user_mode_vm(regs)) {
21717 + if (!user_mode(regs)) {
21718 unsigned int code_prologue = code_bytes * 43 / 64;
21719 unsigned int code_len = code_bytes;
21720 unsigned char c;
21721 u8 *ip;
21722 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
21723
21724 pr_emerg("Stack:\n");
21725 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
21726
21727 pr_emerg("Code:");
21728
21729 - ip = (u8 *)regs->ip - code_prologue;
21730 + ip = (u8 *)regs->ip - code_prologue + cs_base;
21731 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
21732 /* try starting at IP */
21733 - ip = (u8 *)regs->ip;
21734 + ip = (u8 *)regs->ip + cs_base;
21735 code_len = code_len - code_prologue + 1;
21736 }
21737 for (i = 0; i < code_len; i++, ip++) {
21738 @@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
21739 pr_cont(" Bad EIP value.");
21740 break;
21741 }
21742 - if (ip == (u8 *)regs->ip)
21743 + if (ip == (u8 *)regs->ip + cs_base)
21744 pr_cont(" <%02x>", c);
21745 else
21746 pr_cont(" %02x", c);
21747 @@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
21748 {
21749 unsigned short ud2;
21750
21751 + ip = ktla_ktva(ip);
21752 if (ip < PAGE_OFFSET)
21753 return 0;
21754 if (probe_kernel_address((unsigned short *)ip, ud2))
21755 @@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
21756
21757 return ud2 == 0x0b0f;
21758 }
21759 +
21760 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21761 +void pax_check_alloca(unsigned long size)
21762 +{
21763 + unsigned long sp = (unsigned long)&sp, stack_left;
21764 +
21765 + /* all kernel stacks are of the same size */
21766 + stack_left = sp & (THREAD_SIZE - 1);
21767 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21768 +}
21769 +EXPORT_SYMBOL(pax_check_alloca);
21770 +#endif
21771 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
21772 index addb207..99635fa 100644
21773 --- a/arch/x86/kernel/dumpstack_64.c
21774 +++ b/arch/x86/kernel/dumpstack_64.c
21775 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21776 unsigned long *irq_stack_end =
21777 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
21778 unsigned used = 0;
21779 - struct thread_info *tinfo;
21780 int graph = 0;
21781 unsigned long dummy;
21782 + void *stack_start;
21783
21784 if (!task)
21785 task = current;
21786 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21787 * current stack address. If the stacks consist of nested
21788 * exceptions
21789 */
21790 - tinfo = task_thread_info(task);
21791 for (;;) {
21792 char *id;
21793 unsigned long *estack_end;
21794 +
21795 estack_end = in_exception_stack(cpu, (unsigned long)stack,
21796 &used, &id);
21797
21798 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21799 if (ops->stack(data, id) < 0)
21800 break;
21801
21802 - bp = ops->walk_stack(tinfo, stack, bp, ops,
21803 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
21804 data, estack_end, &graph);
21805 ops->stack(data, "<EOE>");
21806 /*
21807 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21808 * second-to-last pointer (index -2 to end) in the
21809 * exception stack:
21810 */
21811 + if ((u16)estack_end[-1] != __KERNEL_DS)
21812 + goto out;
21813 stack = (unsigned long *) estack_end[-2];
21814 continue;
21815 }
21816 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21817 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
21818 if (ops->stack(data, "IRQ") < 0)
21819 break;
21820 - bp = ops->walk_stack(tinfo, stack, bp,
21821 + bp = ops->walk_stack(task, irq_stack, stack, bp,
21822 ops, data, irq_stack_end, &graph);
21823 /*
21824 * We link to the next stack (which would be
21825 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21826 /*
21827 * This handles the process stack:
21828 */
21829 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
21830 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21831 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21832 +out:
21833 put_cpu();
21834 }
21835 EXPORT_SYMBOL(dump_trace);
21836 @@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
21837
21838 return ud2 == 0x0b0f;
21839 }
21840 +
21841 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21842 +void pax_check_alloca(unsigned long size)
21843 +{
21844 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
21845 + unsigned cpu, used;
21846 + char *id;
21847 +
21848 + /* check the process stack first */
21849 + stack_start = (unsigned long)task_stack_page(current);
21850 + stack_end = stack_start + THREAD_SIZE;
21851 + if (likely(stack_start <= sp && sp < stack_end)) {
21852 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
21853 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21854 + return;
21855 + }
21856 +
21857 + cpu = get_cpu();
21858 +
21859 + /* check the irq stacks */
21860 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
21861 + stack_start = stack_end - IRQ_STACK_SIZE;
21862 + if (stack_start <= sp && sp < stack_end) {
21863 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
21864 + put_cpu();
21865 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21866 + return;
21867 + }
21868 +
21869 + /* check the exception stacks */
21870 + used = 0;
21871 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
21872 + stack_start = stack_end - EXCEPTION_STKSZ;
21873 + if (stack_end && stack_start <= sp && sp < stack_end) {
21874 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
21875 + put_cpu();
21876 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21877 + return;
21878 + }
21879 +
21880 + put_cpu();
21881 +
21882 + /* unknown stack */
21883 + BUG();
21884 +}
21885 +EXPORT_SYMBOL(pax_check_alloca);
21886 +#endif
21887 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
21888 index 174da5f..5e55606 100644
21889 --- a/arch/x86/kernel/e820.c
21890 +++ b/arch/x86/kernel/e820.c
21891 @@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
21892
21893 static void early_panic(char *msg)
21894 {
21895 - early_printk(msg);
21896 - panic(msg);
21897 + early_printk("%s", msg);
21898 + panic("%s", msg);
21899 }
21900
21901 static int userdef __initdata;
21902 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
21903 index 01d1c18..8073693 100644
21904 --- a/arch/x86/kernel/early_printk.c
21905 +++ b/arch/x86/kernel/early_printk.c
21906 @@ -7,6 +7,7 @@
21907 #include <linux/pci_regs.h>
21908 #include <linux/pci_ids.h>
21909 #include <linux/errno.h>
21910 +#include <linux/sched.h>
21911 #include <asm/io.h>
21912 #include <asm/processor.h>
21913 #include <asm/fcntl.h>
21914 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
21915 index a2a4f46..6cab058 100644
21916 --- a/arch/x86/kernel/entry_32.S
21917 +++ b/arch/x86/kernel/entry_32.S
21918 @@ -177,13 +177,153 @@
21919 /*CFI_REL_OFFSET gs, PT_GS*/
21920 .endm
21921 .macro SET_KERNEL_GS reg
21922 +
21923 +#ifdef CONFIG_CC_STACKPROTECTOR
21924 movl $(__KERNEL_STACK_CANARY), \reg
21925 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21926 + movl $(__USER_DS), \reg
21927 +#else
21928 + xorl \reg, \reg
21929 +#endif
21930 +
21931 movl \reg, %gs
21932 .endm
21933
21934 #endif /* CONFIG_X86_32_LAZY_GS */
21935
21936 -.macro SAVE_ALL
21937 +.macro pax_enter_kernel
21938 +#ifdef CONFIG_PAX_KERNEXEC
21939 + call pax_enter_kernel
21940 +#endif
21941 +.endm
21942 +
21943 +.macro pax_exit_kernel
21944 +#ifdef CONFIG_PAX_KERNEXEC
21945 + call pax_exit_kernel
21946 +#endif
21947 +.endm
21948 +
21949 +#ifdef CONFIG_PAX_KERNEXEC
21950 +ENTRY(pax_enter_kernel)
21951 +#ifdef CONFIG_PARAVIRT
21952 + pushl %eax
21953 + pushl %ecx
21954 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
21955 + mov %eax, %esi
21956 +#else
21957 + mov %cr0, %esi
21958 +#endif
21959 + bts $16, %esi
21960 + jnc 1f
21961 + mov %cs, %esi
21962 + cmp $__KERNEL_CS, %esi
21963 + jz 3f
21964 + ljmp $__KERNEL_CS, $3f
21965 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
21966 +2:
21967 +#ifdef CONFIG_PARAVIRT
21968 + mov %esi, %eax
21969 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
21970 +#else
21971 + mov %esi, %cr0
21972 +#endif
21973 +3:
21974 +#ifdef CONFIG_PARAVIRT
21975 + popl %ecx
21976 + popl %eax
21977 +#endif
21978 + ret
21979 +ENDPROC(pax_enter_kernel)
21980 +
21981 +ENTRY(pax_exit_kernel)
21982 +#ifdef CONFIG_PARAVIRT
21983 + pushl %eax
21984 + pushl %ecx
21985 +#endif
21986 + mov %cs, %esi
21987 + cmp $__KERNEXEC_KERNEL_CS, %esi
21988 + jnz 2f
21989 +#ifdef CONFIG_PARAVIRT
21990 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
21991 + mov %eax, %esi
21992 +#else
21993 + mov %cr0, %esi
21994 +#endif
21995 + btr $16, %esi
21996 + ljmp $__KERNEL_CS, $1f
21997 +1:
21998 +#ifdef CONFIG_PARAVIRT
21999 + mov %esi, %eax
22000 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22001 +#else
22002 + mov %esi, %cr0
22003 +#endif
22004 +2:
22005 +#ifdef CONFIG_PARAVIRT
22006 + popl %ecx
22007 + popl %eax
22008 +#endif
22009 + ret
22010 +ENDPROC(pax_exit_kernel)
22011 +#endif
22012 +
22013 + .macro pax_erase_kstack
22014 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22015 + call pax_erase_kstack
22016 +#endif
22017 + .endm
22018 +
22019 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22020 +/*
22021 + * ebp: thread_info
22022 + */
22023 +ENTRY(pax_erase_kstack)
22024 + pushl %edi
22025 + pushl %ecx
22026 + pushl %eax
22027 +
22028 + mov TI_lowest_stack(%ebp), %edi
22029 + mov $-0xBEEF, %eax
22030 + std
22031 +
22032 +1: mov %edi, %ecx
22033 + and $THREAD_SIZE_asm - 1, %ecx
22034 + shr $2, %ecx
22035 + repne scasl
22036 + jecxz 2f
22037 +
22038 + cmp $2*16, %ecx
22039 + jc 2f
22040 +
22041 + mov $2*16, %ecx
22042 + repe scasl
22043 + jecxz 2f
22044 + jne 1b
22045 +
22046 +2: cld
22047 + mov %esp, %ecx
22048 + sub %edi, %ecx
22049 +
22050 + cmp $THREAD_SIZE_asm, %ecx
22051 + jb 3f
22052 + ud2
22053 +3:
22054 +
22055 + shr $2, %ecx
22056 + rep stosl
22057 +
22058 + mov TI_task_thread_sp0(%ebp), %edi
22059 + sub $128, %edi
22060 + mov %edi, TI_lowest_stack(%ebp)
22061 +
22062 + popl %eax
22063 + popl %ecx
22064 + popl %edi
22065 + ret
22066 +ENDPROC(pax_erase_kstack)
22067 +#endif
22068 +
22069 +.macro __SAVE_ALL _DS
22070 cld
22071 PUSH_GS
22072 pushl_cfi %fs
22073 @@ -206,7 +346,7 @@
22074 CFI_REL_OFFSET ecx, 0
22075 pushl_cfi %ebx
22076 CFI_REL_OFFSET ebx, 0
22077 - movl $(__USER_DS), %edx
22078 + movl $\_DS, %edx
22079 movl %edx, %ds
22080 movl %edx, %es
22081 movl $(__KERNEL_PERCPU), %edx
22082 @@ -214,6 +354,15 @@
22083 SET_KERNEL_GS %edx
22084 .endm
22085
22086 +.macro SAVE_ALL
22087 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22088 + __SAVE_ALL __KERNEL_DS
22089 + pax_enter_kernel
22090 +#else
22091 + __SAVE_ALL __USER_DS
22092 +#endif
22093 +.endm
22094 +
22095 .macro RESTORE_INT_REGS
22096 popl_cfi %ebx
22097 CFI_RESTORE ebx
22098 @@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22099 popfl_cfi
22100 jmp syscall_exit
22101 CFI_ENDPROC
22102 -END(ret_from_fork)
22103 +ENDPROC(ret_from_fork)
22104
22105 ENTRY(ret_from_kernel_thread)
22106 CFI_STARTPROC
22107 @@ -344,7 +493,15 @@ ret_from_intr:
22108 andl $SEGMENT_RPL_MASK, %eax
22109 #endif
22110 cmpl $USER_RPL, %eax
22111 +
22112 +#ifdef CONFIG_PAX_KERNEXEC
22113 + jae resume_userspace
22114 +
22115 + pax_exit_kernel
22116 + jmp resume_kernel
22117 +#else
22118 jb resume_kernel # not returning to v8086 or userspace
22119 +#endif
22120
22121 ENTRY(resume_userspace)
22122 LOCKDEP_SYS_EXIT
22123 @@ -356,8 +513,8 @@ ENTRY(resume_userspace)
22124 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22125 # int/exception return?
22126 jne work_pending
22127 - jmp restore_all
22128 -END(ret_from_exception)
22129 + jmp restore_all_pax
22130 +ENDPROC(ret_from_exception)
22131
22132 #ifdef CONFIG_PREEMPT
22133 ENTRY(resume_kernel)
22134 @@ -369,7 +526,7 @@ need_resched:
22135 jz restore_all
22136 call preempt_schedule_irq
22137 jmp need_resched
22138 -END(resume_kernel)
22139 +ENDPROC(resume_kernel)
22140 #endif
22141 CFI_ENDPROC
22142 /*
22143 @@ -403,30 +560,45 @@ sysenter_past_esp:
22144 /*CFI_REL_OFFSET cs, 0*/
22145 /*
22146 * Push current_thread_info()->sysenter_return to the stack.
22147 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22148 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
22149 */
22150 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22151 + pushl_cfi $0
22152 CFI_REL_OFFSET eip, 0
22153
22154 pushl_cfi %eax
22155 SAVE_ALL
22156 + GET_THREAD_INFO(%ebp)
22157 + movl TI_sysenter_return(%ebp),%ebp
22158 + movl %ebp,PT_EIP(%esp)
22159 ENABLE_INTERRUPTS(CLBR_NONE)
22160
22161 /*
22162 * Load the potential sixth argument from user stack.
22163 * Careful about security.
22164 */
22165 + movl PT_OLDESP(%esp),%ebp
22166 +
22167 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22168 + mov PT_OLDSS(%esp),%ds
22169 +1: movl %ds:(%ebp),%ebp
22170 + push %ss
22171 + pop %ds
22172 +#else
22173 cmpl $__PAGE_OFFSET-3,%ebp
22174 jae syscall_fault
22175 ASM_STAC
22176 1: movl (%ebp),%ebp
22177 ASM_CLAC
22178 +#endif
22179 +
22180 movl %ebp,PT_EBP(%esp)
22181 _ASM_EXTABLE(1b,syscall_fault)
22182
22183 GET_THREAD_INFO(%ebp)
22184
22185 +#ifdef CONFIG_PAX_RANDKSTACK
22186 + pax_erase_kstack
22187 +#endif
22188 +
22189 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22190 jnz sysenter_audit
22191 sysenter_do_call:
22192 @@ -441,12 +613,24 @@ sysenter_do_call:
22193 testl $_TIF_ALLWORK_MASK, %ecx
22194 jne sysexit_audit
22195 sysenter_exit:
22196 +
22197 +#ifdef CONFIG_PAX_RANDKSTACK
22198 + pushl_cfi %eax
22199 + movl %esp, %eax
22200 + call pax_randomize_kstack
22201 + popl_cfi %eax
22202 +#endif
22203 +
22204 + pax_erase_kstack
22205 +
22206 /* if something modifies registers it must also disable sysexit */
22207 movl PT_EIP(%esp), %edx
22208 movl PT_OLDESP(%esp), %ecx
22209 xorl %ebp,%ebp
22210 TRACE_IRQS_ON
22211 1: mov PT_FS(%esp), %fs
22212 +2: mov PT_DS(%esp), %ds
22213 +3: mov PT_ES(%esp), %es
22214 PTGS_TO_GS
22215 ENABLE_INTERRUPTS_SYSEXIT
22216
22217 @@ -463,6 +647,9 @@ sysenter_audit:
22218 movl %eax,%edx /* 2nd arg: syscall number */
22219 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22220 call __audit_syscall_entry
22221 +
22222 + pax_erase_kstack
22223 +
22224 pushl_cfi %ebx
22225 movl PT_EAX(%esp),%eax /* reload syscall number */
22226 jmp sysenter_do_call
22227 @@ -488,10 +675,16 @@ sysexit_audit:
22228
22229 CFI_ENDPROC
22230 .pushsection .fixup,"ax"
22231 -2: movl $0,PT_FS(%esp)
22232 +4: movl $0,PT_FS(%esp)
22233 + jmp 1b
22234 +5: movl $0,PT_DS(%esp)
22235 + jmp 1b
22236 +6: movl $0,PT_ES(%esp)
22237 jmp 1b
22238 .popsection
22239 - _ASM_EXTABLE(1b,2b)
22240 + _ASM_EXTABLE(1b,4b)
22241 + _ASM_EXTABLE(2b,5b)
22242 + _ASM_EXTABLE(3b,6b)
22243 PTGS_TO_GS_EX
22244 ENDPROC(ia32_sysenter_target)
22245
22246 @@ -506,6 +699,11 @@ ENTRY(system_call)
22247 pushl_cfi %eax # save orig_eax
22248 SAVE_ALL
22249 GET_THREAD_INFO(%ebp)
22250 +
22251 +#ifdef CONFIG_PAX_RANDKSTACK
22252 + pax_erase_kstack
22253 +#endif
22254 +
22255 # system call tracing in operation / emulation
22256 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22257 jnz syscall_trace_entry
22258 @@ -524,6 +722,15 @@ syscall_exit:
22259 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22260 jne syscall_exit_work
22261
22262 +restore_all_pax:
22263 +
22264 +#ifdef CONFIG_PAX_RANDKSTACK
22265 + movl %esp, %eax
22266 + call pax_randomize_kstack
22267 +#endif
22268 +
22269 + pax_erase_kstack
22270 +
22271 restore_all:
22272 TRACE_IRQS_IRET
22273 restore_all_notrace:
22274 @@ -580,14 +787,34 @@ ldt_ss:
22275 * compensating for the offset by changing to the ESPFIX segment with
22276 * a base address that matches for the difference.
22277 */
22278 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22279 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22280 mov %esp, %edx /* load kernel esp */
22281 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22282 mov %dx, %ax /* eax: new kernel esp */
22283 sub %eax, %edx /* offset (low word is 0) */
22284 +#ifdef CONFIG_SMP
22285 + movl PER_CPU_VAR(cpu_number), %ebx
22286 + shll $PAGE_SHIFT_asm, %ebx
22287 + addl $cpu_gdt_table, %ebx
22288 +#else
22289 + movl $cpu_gdt_table, %ebx
22290 +#endif
22291 shr $16, %edx
22292 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22293 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22294 +
22295 +#ifdef CONFIG_PAX_KERNEXEC
22296 + mov %cr0, %esi
22297 + btr $16, %esi
22298 + mov %esi, %cr0
22299 +#endif
22300 +
22301 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22302 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22303 +
22304 +#ifdef CONFIG_PAX_KERNEXEC
22305 + bts $16, %esi
22306 + mov %esi, %cr0
22307 +#endif
22308 +
22309 pushl_cfi $__ESPFIX_SS
22310 pushl_cfi %eax /* new kernel esp */
22311 /* Disable interrupts, but do not irqtrace this section: we
22312 @@ -616,20 +843,18 @@ work_resched:
22313 movl TI_flags(%ebp), %ecx
22314 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22315 # than syscall tracing?
22316 - jz restore_all
22317 + jz restore_all_pax
22318 testb $_TIF_NEED_RESCHED, %cl
22319 jnz work_resched
22320
22321 work_notifysig: # deal with pending signals and
22322 # notify-resume requests
22323 + movl %esp, %eax
22324 #ifdef CONFIG_VM86
22325 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22326 - movl %esp, %eax
22327 jne work_notifysig_v86 # returning to kernel-space or
22328 # vm86-space
22329 1:
22330 -#else
22331 - movl %esp, %eax
22332 #endif
22333 TRACE_IRQS_ON
22334 ENABLE_INTERRUPTS(CLBR_NONE)
22335 @@ -650,7 +875,7 @@ work_notifysig_v86:
22336 movl %eax, %esp
22337 jmp 1b
22338 #endif
22339 -END(work_pending)
22340 +ENDPROC(work_pending)
22341
22342 # perform syscall exit tracing
22343 ALIGN
22344 @@ -658,11 +883,14 @@ syscall_trace_entry:
22345 movl $-ENOSYS,PT_EAX(%esp)
22346 movl %esp, %eax
22347 call syscall_trace_enter
22348 +
22349 + pax_erase_kstack
22350 +
22351 /* What it returned is what we'll actually use. */
22352 cmpl $(NR_syscalls), %eax
22353 jnae syscall_call
22354 jmp syscall_exit
22355 -END(syscall_trace_entry)
22356 +ENDPROC(syscall_trace_entry)
22357
22358 # perform syscall exit tracing
22359 ALIGN
22360 @@ -675,21 +903,25 @@ syscall_exit_work:
22361 movl %esp, %eax
22362 call syscall_trace_leave
22363 jmp resume_userspace
22364 -END(syscall_exit_work)
22365 +ENDPROC(syscall_exit_work)
22366 CFI_ENDPROC
22367
22368 RING0_INT_FRAME # can't unwind into user space anyway
22369 syscall_fault:
22370 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22371 + push %ss
22372 + pop %ds
22373 +#endif
22374 ASM_CLAC
22375 GET_THREAD_INFO(%ebp)
22376 movl $-EFAULT,PT_EAX(%esp)
22377 jmp resume_userspace
22378 -END(syscall_fault)
22379 +ENDPROC(syscall_fault)
22380
22381 syscall_badsys:
22382 movl $-ENOSYS,PT_EAX(%esp)
22383 jmp resume_userspace
22384 -END(syscall_badsys)
22385 +ENDPROC(syscall_badsys)
22386 CFI_ENDPROC
22387 /*
22388 * End of kprobes section
22389 @@ -705,8 +937,15 @@ END(syscall_badsys)
22390 * normal stack and adjusts ESP with the matching offset.
22391 */
22392 /* fixup the stack */
22393 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22394 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22395 +#ifdef CONFIG_SMP
22396 + movl PER_CPU_VAR(cpu_number), %ebx
22397 + shll $PAGE_SHIFT_asm, %ebx
22398 + addl $cpu_gdt_table, %ebx
22399 +#else
22400 + movl $cpu_gdt_table, %ebx
22401 +#endif
22402 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22403 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22404 shl $16, %eax
22405 addl %esp, %eax /* the adjusted stack pointer */
22406 pushl_cfi $__KERNEL_DS
22407 @@ -759,7 +998,7 @@ vector=vector+1
22408 .endr
22409 2: jmp common_interrupt
22410 .endr
22411 -END(irq_entries_start)
22412 +ENDPROC(irq_entries_start)
22413
22414 .previous
22415 END(interrupt)
22416 @@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
22417 pushl_cfi $do_coprocessor_error
22418 jmp error_code
22419 CFI_ENDPROC
22420 -END(coprocessor_error)
22421 +ENDPROC(coprocessor_error)
22422
22423 ENTRY(simd_coprocessor_error)
22424 RING0_INT_FRAME
22425 @@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
22426 .section .altinstructions,"a"
22427 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22428 .previous
22429 -.section .altinstr_replacement,"ax"
22430 +.section .altinstr_replacement,"a"
22431 663: pushl $do_simd_coprocessor_error
22432 664:
22433 .previous
22434 @@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
22435 #endif
22436 jmp error_code
22437 CFI_ENDPROC
22438 -END(simd_coprocessor_error)
22439 +ENDPROC(simd_coprocessor_error)
22440
22441 ENTRY(device_not_available)
22442 RING0_INT_FRAME
22443 @@ -851,18 +1090,18 @@ ENTRY(device_not_available)
22444 pushl_cfi $do_device_not_available
22445 jmp error_code
22446 CFI_ENDPROC
22447 -END(device_not_available)
22448 +ENDPROC(device_not_available)
22449
22450 #ifdef CONFIG_PARAVIRT
22451 ENTRY(native_iret)
22452 iret
22453 _ASM_EXTABLE(native_iret, iret_exc)
22454 -END(native_iret)
22455 +ENDPROC(native_iret)
22456
22457 ENTRY(native_irq_enable_sysexit)
22458 sti
22459 sysexit
22460 -END(native_irq_enable_sysexit)
22461 +ENDPROC(native_irq_enable_sysexit)
22462 #endif
22463
22464 ENTRY(overflow)
22465 @@ -872,7 +1111,7 @@ ENTRY(overflow)
22466 pushl_cfi $do_overflow
22467 jmp error_code
22468 CFI_ENDPROC
22469 -END(overflow)
22470 +ENDPROC(overflow)
22471
22472 ENTRY(bounds)
22473 RING0_INT_FRAME
22474 @@ -881,7 +1120,7 @@ ENTRY(bounds)
22475 pushl_cfi $do_bounds
22476 jmp error_code
22477 CFI_ENDPROC
22478 -END(bounds)
22479 +ENDPROC(bounds)
22480
22481 ENTRY(invalid_op)
22482 RING0_INT_FRAME
22483 @@ -890,7 +1129,7 @@ ENTRY(invalid_op)
22484 pushl_cfi $do_invalid_op
22485 jmp error_code
22486 CFI_ENDPROC
22487 -END(invalid_op)
22488 +ENDPROC(invalid_op)
22489
22490 ENTRY(coprocessor_segment_overrun)
22491 RING0_INT_FRAME
22492 @@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
22493 pushl_cfi $do_coprocessor_segment_overrun
22494 jmp error_code
22495 CFI_ENDPROC
22496 -END(coprocessor_segment_overrun)
22497 +ENDPROC(coprocessor_segment_overrun)
22498
22499 ENTRY(invalid_TSS)
22500 RING0_EC_FRAME
22501 @@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
22502 pushl_cfi $do_invalid_TSS
22503 jmp error_code
22504 CFI_ENDPROC
22505 -END(invalid_TSS)
22506 +ENDPROC(invalid_TSS)
22507
22508 ENTRY(segment_not_present)
22509 RING0_EC_FRAME
22510 @@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
22511 pushl_cfi $do_segment_not_present
22512 jmp error_code
22513 CFI_ENDPROC
22514 -END(segment_not_present)
22515 +ENDPROC(segment_not_present)
22516
22517 ENTRY(stack_segment)
22518 RING0_EC_FRAME
22519 @@ -923,7 +1162,7 @@ ENTRY(stack_segment)
22520 pushl_cfi $do_stack_segment
22521 jmp error_code
22522 CFI_ENDPROC
22523 -END(stack_segment)
22524 +ENDPROC(stack_segment)
22525
22526 ENTRY(alignment_check)
22527 RING0_EC_FRAME
22528 @@ -931,7 +1170,7 @@ ENTRY(alignment_check)
22529 pushl_cfi $do_alignment_check
22530 jmp error_code
22531 CFI_ENDPROC
22532 -END(alignment_check)
22533 +ENDPROC(alignment_check)
22534
22535 ENTRY(divide_error)
22536 RING0_INT_FRAME
22537 @@ -940,7 +1179,7 @@ ENTRY(divide_error)
22538 pushl_cfi $do_divide_error
22539 jmp error_code
22540 CFI_ENDPROC
22541 -END(divide_error)
22542 +ENDPROC(divide_error)
22543
22544 #ifdef CONFIG_X86_MCE
22545 ENTRY(machine_check)
22546 @@ -950,7 +1189,7 @@ ENTRY(machine_check)
22547 pushl_cfi machine_check_vector
22548 jmp error_code
22549 CFI_ENDPROC
22550 -END(machine_check)
22551 +ENDPROC(machine_check)
22552 #endif
22553
22554 ENTRY(spurious_interrupt_bug)
22555 @@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
22556 pushl_cfi $do_spurious_interrupt_bug
22557 jmp error_code
22558 CFI_ENDPROC
22559 -END(spurious_interrupt_bug)
22560 +ENDPROC(spurious_interrupt_bug)
22561 /*
22562 * End of kprobes section
22563 */
22564 @@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22565
22566 ENTRY(mcount)
22567 ret
22568 -END(mcount)
22569 +ENDPROC(mcount)
22570
22571 ENTRY(ftrace_caller)
22572 cmpl $0, function_trace_stop
22573 @@ -1103,7 +1342,7 @@ ftrace_graph_call:
22574 .globl ftrace_stub
22575 ftrace_stub:
22576 ret
22577 -END(ftrace_caller)
22578 +ENDPROC(ftrace_caller)
22579
22580 ENTRY(ftrace_regs_caller)
22581 pushf /* push flags before compare (in cs location) */
22582 @@ -1207,7 +1446,7 @@ trace:
22583 popl %ecx
22584 popl %eax
22585 jmp ftrace_stub
22586 -END(mcount)
22587 +ENDPROC(mcount)
22588 #endif /* CONFIG_DYNAMIC_FTRACE */
22589 #endif /* CONFIG_FUNCTION_TRACER */
22590
22591 @@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
22592 popl %ecx
22593 popl %eax
22594 ret
22595 -END(ftrace_graph_caller)
22596 +ENDPROC(ftrace_graph_caller)
22597
22598 .globl return_to_handler
22599 return_to_handler:
22600 @@ -1291,15 +1530,18 @@ error_code:
22601 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22602 REG_TO_PTGS %ecx
22603 SET_KERNEL_GS %ecx
22604 - movl $(__USER_DS), %ecx
22605 + movl $(__KERNEL_DS), %ecx
22606 movl %ecx, %ds
22607 movl %ecx, %es
22608 +
22609 + pax_enter_kernel
22610 +
22611 TRACE_IRQS_OFF
22612 movl %esp,%eax # pt_regs pointer
22613 call *%edi
22614 jmp ret_from_exception
22615 CFI_ENDPROC
22616 -END(page_fault)
22617 +ENDPROC(page_fault)
22618
22619 /*
22620 * Debug traps and NMI can happen at the one SYSENTER instruction
22621 @@ -1342,7 +1584,7 @@ debug_stack_correct:
22622 call do_debug
22623 jmp ret_from_exception
22624 CFI_ENDPROC
22625 -END(debug)
22626 +ENDPROC(debug)
22627
22628 /*
22629 * NMI is doubly nasty. It can happen _while_ we're handling
22630 @@ -1380,6 +1622,9 @@ nmi_stack_correct:
22631 xorl %edx,%edx # zero error code
22632 movl %esp,%eax # pt_regs pointer
22633 call do_nmi
22634 +
22635 + pax_exit_kernel
22636 +
22637 jmp restore_all_notrace
22638 CFI_ENDPROC
22639
22640 @@ -1416,12 +1661,15 @@ nmi_espfix_stack:
22641 FIXUP_ESPFIX_STACK # %eax == %esp
22642 xorl %edx,%edx # zero error code
22643 call do_nmi
22644 +
22645 + pax_exit_kernel
22646 +
22647 RESTORE_REGS
22648 lss 12+4(%esp), %esp # back to espfix stack
22649 CFI_ADJUST_CFA_OFFSET -24
22650 jmp irq_return
22651 CFI_ENDPROC
22652 -END(nmi)
22653 +ENDPROC(nmi)
22654
22655 ENTRY(int3)
22656 RING0_INT_FRAME
22657 @@ -1434,14 +1682,14 @@ ENTRY(int3)
22658 call do_int3
22659 jmp ret_from_exception
22660 CFI_ENDPROC
22661 -END(int3)
22662 +ENDPROC(int3)
22663
22664 ENTRY(general_protection)
22665 RING0_EC_FRAME
22666 pushl_cfi $do_general_protection
22667 jmp error_code
22668 CFI_ENDPROC
22669 -END(general_protection)
22670 +ENDPROC(general_protection)
22671
22672 #ifdef CONFIG_KVM_GUEST
22673 ENTRY(async_page_fault)
22674 @@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
22675 pushl_cfi $do_async_page_fault
22676 jmp error_code
22677 CFI_ENDPROC
22678 -END(async_page_fault)
22679 +ENDPROC(async_page_fault)
22680 #endif
22681
22682 /*
22683 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22684 index 1e96c36..3ff710a 100644
22685 --- a/arch/x86/kernel/entry_64.S
22686 +++ b/arch/x86/kernel/entry_64.S
22687 @@ -59,6 +59,8 @@
22688 #include <asm/context_tracking.h>
22689 #include <asm/smap.h>
22690 #include <linux/err.h>
22691 +#include <asm/pgtable.h>
22692 +#include <asm/alternative-asm.h>
22693
22694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22695 #include <linux/elf-em.h>
22696 @@ -80,8 +82,9 @@
22697 #ifdef CONFIG_DYNAMIC_FTRACE
22698
22699 ENTRY(function_hook)
22700 + pax_force_retaddr
22701 retq
22702 -END(function_hook)
22703 +ENDPROC(function_hook)
22704
22705 /* skip is set if stack has been adjusted */
22706 .macro ftrace_caller_setup skip=0
22707 @@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
22708 #endif
22709
22710 GLOBAL(ftrace_stub)
22711 + pax_force_retaddr
22712 retq
22713 -END(ftrace_caller)
22714 +ENDPROC(ftrace_caller)
22715
22716 ENTRY(ftrace_regs_caller)
22717 /* Save the current flags before compare (in SS location)*/
22718 @@ -191,7 +195,7 @@ ftrace_restore_flags:
22719 popfq
22720 jmp ftrace_stub
22721
22722 -END(ftrace_regs_caller)
22723 +ENDPROC(ftrace_regs_caller)
22724
22725
22726 #else /* ! CONFIG_DYNAMIC_FTRACE */
22727 @@ -212,6 +216,7 @@ ENTRY(function_hook)
22728 #endif
22729
22730 GLOBAL(ftrace_stub)
22731 + pax_force_retaddr
22732 retq
22733
22734 trace:
22735 @@ -225,12 +230,13 @@ trace:
22736 #endif
22737 subq $MCOUNT_INSN_SIZE, %rdi
22738
22739 + pax_force_fptr ftrace_trace_function
22740 call *ftrace_trace_function
22741
22742 MCOUNT_RESTORE_FRAME
22743
22744 jmp ftrace_stub
22745 -END(function_hook)
22746 +ENDPROC(function_hook)
22747 #endif /* CONFIG_DYNAMIC_FTRACE */
22748 #endif /* CONFIG_FUNCTION_TRACER */
22749
22750 @@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
22751
22752 MCOUNT_RESTORE_FRAME
22753
22754 + pax_force_retaddr
22755 retq
22756 -END(ftrace_graph_caller)
22757 +ENDPROC(ftrace_graph_caller)
22758
22759 GLOBAL(return_to_handler)
22760 subq $24, %rsp
22761 @@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
22762 movq 8(%rsp), %rdx
22763 movq (%rsp), %rax
22764 addq $24, %rsp
22765 + pax_force_fptr %rdi
22766 jmp *%rdi
22767 +ENDPROC(return_to_handler)
22768 #endif
22769
22770
22771 @@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
22772 ENDPROC(native_usergs_sysret64)
22773 #endif /* CONFIG_PARAVIRT */
22774
22775 + .macro ljmpq sel, off
22776 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
22777 + .byte 0x48; ljmp *1234f(%rip)
22778 + .pushsection .rodata
22779 + .align 16
22780 + 1234: .quad \off; .word \sel
22781 + .popsection
22782 +#else
22783 + pushq $\sel
22784 + pushq $\off
22785 + lretq
22786 +#endif
22787 + .endm
22788 +
22789 + .macro pax_enter_kernel
22790 + pax_set_fptr_mask
22791 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22792 + call pax_enter_kernel
22793 +#endif
22794 + .endm
22795 +
22796 + .macro pax_exit_kernel
22797 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22798 + call pax_exit_kernel
22799 +#endif
22800 +
22801 + .endm
22802 +
22803 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22804 +ENTRY(pax_enter_kernel)
22805 + pushq %rdi
22806 +
22807 +#ifdef CONFIG_PARAVIRT
22808 + PV_SAVE_REGS(CLBR_RDI)
22809 +#endif
22810 +
22811 +#ifdef CONFIG_PAX_KERNEXEC
22812 + GET_CR0_INTO_RDI
22813 + bts $16,%rdi
22814 + jnc 3f
22815 + mov %cs,%edi
22816 + cmp $__KERNEL_CS,%edi
22817 + jnz 2f
22818 +1:
22819 +#endif
22820 +
22821 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22822 + 661: jmp 111f
22823 + .pushsection .altinstr_replacement, "a"
22824 + 662: ASM_NOP2
22825 + .popsection
22826 + .pushsection .altinstructions, "a"
22827 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22828 + .popsection
22829 + GET_CR3_INTO_RDI
22830 + cmp $0,%dil
22831 + jnz 112f
22832 + mov $__KERNEL_DS,%edi
22833 + mov %edi,%ss
22834 + jmp 111f
22835 +112: cmp $1,%dil
22836 + jz 113f
22837 + ud2
22838 +113: sub $4097,%rdi
22839 + bts $63,%rdi
22840 + SET_RDI_INTO_CR3
22841 + mov $__UDEREF_KERNEL_DS,%edi
22842 + mov %edi,%ss
22843 +111:
22844 +#endif
22845 +
22846 +#ifdef CONFIG_PARAVIRT
22847 + PV_RESTORE_REGS(CLBR_RDI)
22848 +#endif
22849 +
22850 + popq %rdi
22851 + pax_force_retaddr
22852 + retq
22853 +
22854 +#ifdef CONFIG_PAX_KERNEXEC
22855 +2: ljmpq __KERNEL_CS,1b
22856 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
22857 +4: SET_RDI_INTO_CR0
22858 + jmp 1b
22859 +#endif
22860 +ENDPROC(pax_enter_kernel)
22861 +
22862 +ENTRY(pax_exit_kernel)
22863 + pushq %rdi
22864 +
22865 +#ifdef CONFIG_PARAVIRT
22866 + PV_SAVE_REGS(CLBR_RDI)
22867 +#endif
22868 +
22869 +#ifdef CONFIG_PAX_KERNEXEC
22870 + mov %cs,%rdi
22871 + cmp $__KERNEXEC_KERNEL_CS,%edi
22872 + jz 2f
22873 + GET_CR0_INTO_RDI
22874 + bts $16,%rdi
22875 + jnc 4f
22876 +1:
22877 +#endif
22878 +
22879 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22880 + 661: jmp 111f
22881 + .pushsection .altinstr_replacement, "a"
22882 + 662: ASM_NOP2
22883 + .popsection
22884 + .pushsection .altinstructions, "a"
22885 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22886 + .popsection
22887 + mov %ss,%edi
22888 + cmp $__UDEREF_KERNEL_DS,%edi
22889 + jnz 111f
22890 + GET_CR3_INTO_RDI
22891 + cmp $0,%dil
22892 + jz 112f
22893 + ud2
22894 +112: add $4097,%rdi
22895 + bts $63,%rdi
22896 + SET_RDI_INTO_CR3
22897 + mov $__KERNEL_DS,%edi
22898 + mov %edi,%ss
22899 +111:
22900 +#endif
22901 +
22902 +#ifdef CONFIG_PARAVIRT
22903 + PV_RESTORE_REGS(CLBR_RDI);
22904 +#endif
22905 +
22906 + popq %rdi
22907 + pax_force_retaddr
22908 + retq
22909 +
22910 +#ifdef CONFIG_PAX_KERNEXEC
22911 +2: GET_CR0_INTO_RDI
22912 + btr $16,%rdi
22913 + jnc 4f
22914 + ljmpq __KERNEL_CS,3f
22915 +3: SET_RDI_INTO_CR0
22916 + jmp 1b
22917 +4: ud2
22918 + jmp 4b
22919 +#endif
22920 +ENDPROC(pax_exit_kernel)
22921 +#endif
22922 +
22923 + .macro pax_enter_kernel_user
22924 + pax_set_fptr_mask
22925 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22926 + call pax_enter_kernel_user
22927 +#endif
22928 + .endm
22929 +
22930 + .macro pax_exit_kernel_user
22931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22932 + call pax_exit_kernel_user
22933 +#endif
22934 +#ifdef CONFIG_PAX_RANDKSTACK
22935 + pushq %rax
22936 + pushq %r11
22937 + call pax_randomize_kstack
22938 + popq %r11
22939 + popq %rax
22940 +#endif
22941 + .endm
22942 +
22943 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22944 +ENTRY(pax_enter_kernel_user)
22945 + pushq %rdi
22946 + pushq %rbx
22947 +
22948 +#ifdef CONFIG_PARAVIRT
22949 + PV_SAVE_REGS(CLBR_RDI)
22950 +#endif
22951 +
22952 + 661: jmp 111f
22953 + .pushsection .altinstr_replacement, "a"
22954 + 662: ASM_NOP2
22955 + .popsection
22956 + .pushsection .altinstructions, "a"
22957 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22958 + .popsection
22959 + GET_CR3_INTO_RDI
22960 + cmp $1,%dil
22961 + jnz 4f
22962 + sub $4097,%rdi
22963 + bts $63,%rdi
22964 + SET_RDI_INTO_CR3
22965 + jmp 3f
22966 +111:
22967 +
22968 + GET_CR3_INTO_RDI
22969 + mov %rdi,%rbx
22970 + add $__START_KERNEL_map,%rbx
22971 + sub phys_base(%rip),%rbx
22972 +
22973 +#ifdef CONFIG_PARAVIRT
22974 + cmpl $0, pv_info+PARAVIRT_enabled
22975 + jz 1f
22976 + pushq %rdi
22977 + i = 0
22978 + .rept USER_PGD_PTRS
22979 + mov i*8(%rbx),%rsi
22980 + mov $0,%sil
22981 + lea i*8(%rbx),%rdi
22982 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22983 + i = i + 1
22984 + .endr
22985 + popq %rdi
22986 + jmp 2f
22987 +1:
22988 +#endif
22989 +
22990 + i = 0
22991 + .rept USER_PGD_PTRS
22992 + movb $0,i*8(%rbx)
22993 + i = i + 1
22994 + .endr
22995 +
22996 +2: SET_RDI_INTO_CR3
22997 +
22998 +#ifdef CONFIG_PAX_KERNEXEC
22999 + GET_CR0_INTO_RDI
23000 + bts $16,%rdi
23001 + SET_RDI_INTO_CR0
23002 +#endif
23003 +
23004 +3:
23005 +
23006 +#ifdef CONFIG_PARAVIRT
23007 + PV_RESTORE_REGS(CLBR_RDI)
23008 +#endif
23009 +
23010 + popq %rbx
23011 + popq %rdi
23012 + pax_force_retaddr
23013 + retq
23014 +4: ud2
23015 +ENDPROC(pax_enter_kernel_user)
23016 +
23017 +ENTRY(pax_exit_kernel_user)
23018 + pushq %rdi
23019 + pushq %rbx
23020 +
23021 +#ifdef CONFIG_PARAVIRT
23022 + PV_SAVE_REGS(CLBR_RDI)
23023 +#endif
23024 +
23025 + GET_CR3_INTO_RDI
23026 + 661: jmp 1f
23027 + .pushsection .altinstr_replacement, "a"
23028 + 662: ASM_NOP2
23029 + .popsection
23030 + .pushsection .altinstructions, "a"
23031 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23032 + .popsection
23033 + cmp $0,%dil
23034 + jnz 3f
23035 + add $4097,%rdi
23036 + bts $63,%rdi
23037 + SET_RDI_INTO_CR3
23038 + jmp 2f
23039 +1:
23040 +
23041 + mov %rdi,%rbx
23042 +
23043 +#ifdef CONFIG_PAX_KERNEXEC
23044 + GET_CR0_INTO_RDI
23045 + btr $16,%rdi
23046 + jnc 3f
23047 + SET_RDI_INTO_CR0
23048 +#endif
23049 +
23050 + add $__START_KERNEL_map,%rbx
23051 + sub phys_base(%rip),%rbx
23052 +
23053 +#ifdef CONFIG_PARAVIRT
23054 + cmpl $0, pv_info+PARAVIRT_enabled
23055 + jz 1f
23056 + i = 0
23057 + .rept USER_PGD_PTRS
23058 + mov i*8(%rbx),%rsi
23059 + mov $0x67,%sil
23060 + lea i*8(%rbx),%rdi
23061 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23062 + i = i + 1
23063 + .endr
23064 + jmp 2f
23065 +1:
23066 +#endif
23067 +
23068 + i = 0
23069 + .rept USER_PGD_PTRS
23070 + movb $0x67,i*8(%rbx)
23071 + i = i + 1
23072 + .endr
23073 +2:
23074 +
23075 +#ifdef CONFIG_PARAVIRT
23076 + PV_RESTORE_REGS(CLBR_RDI)
23077 +#endif
23078 +
23079 + popq %rbx
23080 + popq %rdi
23081 + pax_force_retaddr
23082 + retq
23083 +3: ud2
23084 +ENDPROC(pax_exit_kernel_user)
23085 +#endif
23086 +
23087 + .macro pax_enter_kernel_nmi
23088 + pax_set_fptr_mask
23089 +
23090 +#ifdef CONFIG_PAX_KERNEXEC
23091 + GET_CR0_INTO_RDI
23092 + bts $16,%rdi
23093 + jc 110f
23094 + SET_RDI_INTO_CR0
23095 + or $2,%ebx
23096 +110:
23097 +#endif
23098 +
23099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23100 + 661: jmp 111f
23101 + .pushsection .altinstr_replacement, "a"
23102 + 662: ASM_NOP2
23103 + .popsection
23104 + .pushsection .altinstructions, "a"
23105 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23106 + .popsection
23107 + GET_CR3_INTO_RDI
23108 + cmp $0,%dil
23109 + jz 111f
23110 + sub $4097,%rdi
23111 + or $4,%ebx
23112 + bts $63,%rdi
23113 + SET_RDI_INTO_CR3
23114 + mov $__UDEREF_KERNEL_DS,%edi
23115 + mov %edi,%ss
23116 +111:
23117 +#endif
23118 + .endm
23119 +
23120 + .macro pax_exit_kernel_nmi
23121 +#ifdef CONFIG_PAX_KERNEXEC
23122 + btr $1,%ebx
23123 + jnc 110f
23124 + GET_CR0_INTO_RDI
23125 + btr $16,%rdi
23126 + SET_RDI_INTO_CR0
23127 +110:
23128 +#endif
23129 +
23130 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23131 + btr $2,%ebx
23132 + jnc 111f
23133 + GET_CR3_INTO_RDI
23134 + add $4097,%rdi
23135 + bts $63,%rdi
23136 + SET_RDI_INTO_CR3
23137 + mov $__KERNEL_DS,%edi
23138 + mov %edi,%ss
23139 +111:
23140 +#endif
23141 + .endm
23142 +
23143 + .macro pax_erase_kstack
23144 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23145 + call pax_erase_kstack
23146 +#endif
23147 + .endm
23148 +
23149 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23150 +ENTRY(pax_erase_kstack)
23151 + pushq %rdi
23152 + pushq %rcx
23153 + pushq %rax
23154 + pushq %r11
23155 +
23156 + GET_THREAD_INFO(%r11)
23157 + mov TI_lowest_stack(%r11), %rdi
23158 + mov $-0xBEEF, %rax
23159 + std
23160 +
23161 +1: mov %edi, %ecx
23162 + and $THREAD_SIZE_asm - 1, %ecx
23163 + shr $3, %ecx
23164 + repne scasq
23165 + jecxz 2f
23166 +
23167 + cmp $2*8, %ecx
23168 + jc 2f
23169 +
23170 + mov $2*8, %ecx
23171 + repe scasq
23172 + jecxz 2f
23173 + jne 1b
23174 +
23175 +2: cld
23176 + mov %esp, %ecx
23177 + sub %edi, %ecx
23178 +
23179 + cmp $THREAD_SIZE_asm, %rcx
23180 + jb 3f
23181 + ud2
23182 +3:
23183 +
23184 + shr $3, %ecx
23185 + rep stosq
23186 +
23187 + mov TI_task_thread_sp0(%r11), %rdi
23188 + sub $256, %rdi
23189 + mov %rdi, TI_lowest_stack(%r11)
23190 +
23191 + popq %r11
23192 + popq %rax
23193 + popq %rcx
23194 + popq %rdi
23195 + pax_force_retaddr
23196 + ret
23197 +ENDPROC(pax_erase_kstack)
23198 +#endif
23199
23200 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23201 #ifdef CONFIG_TRACE_IRQFLAGS
23202 @@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
23203 .endm
23204
23205 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23206 - bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23207 + bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23208 jnc 1f
23209 TRACE_IRQS_ON_DEBUG
23210 1:
23211 @@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
23212 movq \tmp,R11+\offset(%rsp)
23213 .endm
23214
23215 - .macro FAKE_STACK_FRAME child_rip
23216 - /* push in order ss, rsp, eflags, cs, rip */
23217 - xorl %eax, %eax
23218 - pushq_cfi $__KERNEL_DS /* ss */
23219 - /*CFI_REL_OFFSET ss,0*/
23220 - pushq_cfi %rax /* rsp */
23221 - CFI_REL_OFFSET rsp,0
23222 - pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23223 - /*CFI_REL_OFFSET rflags,0*/
23224 - pushq_cfi $__KERNEL_CS /* cs */
23225 - /*CFI_REL_OFFSET cs,0*/
23226 - pushq_cfi \child_rip /* rip */
23227 - CFI_REL_OFFSET rip,0
23228 - pushq_cfi %rax /* orig rax */
23229 - .endm
23230 -
23231 - .macro UNFAKE_STACK_FRAME
23232 - addq $8*6, %rsp
23233 - CFI_ADJUST_CFA_OFFSET -(6*8)
23234 - .endm
23235 -
23236 /*
23237 * initial frame state for interrupts (and exceptions without error code)
23238 */
23239 @@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
23240 /* save partial stack frame */
23241 .macro SAVE_ARGS_IRQ
23242 cld
23243 - /* start from rbp in pt_regs and jump over */
23244 - movq_cfi rdi, (RDI-RBP)
23245 - movq_cfi rsi, (RSI-RBP)
23246 - movq_cfi rdx, (RDX-RBP)
23247 - movq_cfi rcx, (RCX-RBP)
23248 - movq_cfi rax, (RAX-RBP)
23249 - movq_cfi r8, (R8-RBP)
23250 - movq_cfi r9, (R9-RBP)
23251 - movq_cfi r10, (R10-RBP)
23252 - movq_cfi r11, (R11-RBP)
23253 + /* start from r15 in pt_regs and jump over */
23254 + movq_cfi rdi, RDI
23255 + movq_cfi rsi, RSI
23256 + movq_cfi rdx, RDX
23257 + movq_cfi rcx, RCX
23258 + movq_cfi rax, RAX
23259 + movq_cfi r8, R8
23260 + movq_cfi r9, R9
23261 + movq_cfi r10, R10
23262 + movq_cfi r11, R11
23263 + movq_cfi r12, R12
23264
23265 /* Save rbp so that we can unwind from get_irq_regs() */
23266 - movq_cfi rbp, 0
23267 + movq_cfi rbp, RBP
23268
23269 /* Save previous stack value */
23270 movq %rsp, %rsi
23271
23272 - leaq -RBP(%rsp),%rdi /* arg1 for handler */
23273 - testl $3, CS-RBP(%rsi)
23274 + movq %rsp,%rdi /* arg1 for handler */
23275 + testb $3, CS(%rsi)
23276 je 1f
23277 SWAPGS
23278 /*
23279 @@ -483,6 +896,18 @@ ENDPROC(native_usergs_sysret64)
23280 0x06 /* DW_OP_deref */, \
23281 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23282 0x22 /* DW_OP_plus */
23283 +
23284 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23285 + testb $3, CS(%rdi)
23286 + jnz 1f
23287 + pax_enter_kernel
23288 + jmp 2f
23289 +1: pax_enter_kernel_user
23290 +2:
23291 +#else
23292 + pax_enter_kernel
23293 +#endif
23294 +
23295 /* We entered an interrupt context - irqs are off: */
23296 TRACE_IRQS_OFF
23297 .endm
23298 @@ -514,9 +939,52 @@ ENTRY(save_paranoid)
23299 js 1f /* negative -> in kernel */
23300 SWAPGS
23301 xorl %ebx,%ebx
23302 -1: ret
23303 +1:
23304 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23305 + testb $3, CS+8(%rsp)
23306 + jnz 1f
23307 + pax_enter_kernel
23308 + jmp 2f
23309 +1: pax_enter_kernel_user
23310 +2:
23311 +#else
23312 + pax_enter_kernel
23313 +#endif
23314 + pax_force_retaddr
23315 + ret
23316 CFI_ENDPROC
23317 -END(save_paranoid)
23318 +ENDPROC(save_paranoid)
23319 +
23320 +ENTRY(save_paranoid_nmi)
23321 + XCPT_FRAME 1 RDI+8
23322 + cld
23323 + movq_cfi rdi, RDI+8
23324 + movq_cfi rsi, RSI+8
23325 + movq_cfi rdx, RDX+8
23326 + movq_cfi rcx, RCX+8
23327 + movq_cfi rax, RAX+8
23328 + movq_cfi r8, R8+8
23329 + movq_cfi r9, R9+8
23330 + movq_cfi r10, R10+8
23331 + movq_cfi r11, R11+8
23332 + movq_cfi rbx, RBX+8
23333 + movq_cfi rbp, RBP+8
23334 + movq_cfi r12, R12+8
23335 + movq_cfi r13, R13+8
23336 + movq_cfi r14, R14+8
23337 + movq_cfi r15, R15+8
23338 + movl $1,%ebx
23339 + movl $MSR_GS_BASE,%ecx
23340 + rdmsr
23341 + testl %edx,%edx
23342 + js 1f /* negative -> in kernel */
23343 + SWAPGS
23344 + xorl %ebx,%ebx
23345 +1: pax_enter_kernel_nmi
23346 + pax_force_retaddr
23347 + ret
23348 + CFI_ENDPROC
23349 +ENDPROC(save_paranoid_nmi)
23350 .popsection
23351
23352 /*
23353 @@ -538,7 +1006,7 @@ ENTRY(ret_from_fork)
23354
23355 RESTORE_REST
23356
23357 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23358 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23359 jz 1f
23360
23361 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23362 @@ -548,15 +1016,13 @@ ENTRY(ret_from_fork)
23363 jmp ret_from_sys_call # go to the SYSRET fastpath
23364
23365 1:
23366 - subq $REST_SKIP, %rsp # leave space for volatiles
23367 - CFI_ADJUST_CFA_OFFSET REST_SKIP
23368 movq %rbp, %rdi
23369 call *%rbx
23370 movl $0, RAX(%rsp)
23371 RESTORE_REST
23372 jmp int_ret_from_sys_call
23373 CFI_ENDPROC
23374 -END(ret_from_fork)
23375 +ENDPROC(ret_from_fork)
23376
23377 /*
23378 * System call entry. Up to 6 arguments in registers are supported.
23379 @@ -593,7 +1059,7 @@ END(ret_from_fork)
23380 ENTRY(system_call)
23381 CFI_STARTPROC simple
23382 CFI_SIGNAL_FRAME
23383 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23384 + CFI_DEF_CFA rsp,0
23385 CFI_REGISTER rip,rcx
23386 /*CFI_REGISTER rflags,r11*/
23387 SWAPGS_UNSAFE_STACK
23388 @@ -606,16 +1072,23 @@ GLOBAL(system_call_after_swapgs)
23389
23390 movq %rsp,PER_CPU_VAR(old_rsp)
23391 movq PER_CPU_VAR(kernel_stack),%rsp
23392 + SAVE_ARGS 8*6,0
23393 + pax_enter_kernel_user
23394 +
23395 +#ifdef CONFIG_PAX_RANDKSTACK
23396 + pax_erase_kstack
23397 +#endif
23398 +
23399 /*
23400 * No need to follow this irqs off/on section - it's straight
23401 * and short:
23402 */
23403 ENABLE_INTERRUPTS(CLBR_NONE)
23404 - SAVE_ARGS 8,0
23405 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23406 movq %rcx,RIP-ARGOFFSET(%rsp)
23407 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23408 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23409 + GET_THREAD_INFO(%rcx)
23410 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23411 jnz tracesys
23412 system_call_fastpath:
23413 #if __SYSCALL_MASK == ~0
23414 @@ -639,10 +1112,13 @@ sysret_check:
23415 LOCKDEP_SYS_EXIT
23416 DISABLE_INTERRUPTS(CLBR_NONE)
23417 TRACE_IRQS_OFF
23418 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23419 + GET_THREAD_INFO(%rcx)
23420 + movl TI_flags(%rcx),%edx
23421 andl %edi,%edx
23422 jnz sysret_careful
23423 CFI_REMEMBER_STATE
23424 + pax_exit_kernel_user
23425 + pax_erase_kstack
23426 /*
23427 * sysretq will re-enable interrupts:
23428 */
23429 @@ -701,6 +1177,9 @@ auditsys:
23430 movq %rax,%rsi /* 2nd arg: syscall number */
23431 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23432 call __audit_syscall_entry
23433 +
23434 + pax_erase_kstack
23435 +
23436 LOAD_ARGS 0 /* reload call-clobbered registers */
23437 jmp system_call_fastpath
23438
23439 @@ -722,7 +1201,7 @@ sysret_audit:
23440 /* Do syscall tracing */
23441 tracesys:
23442 #ifdef CONFIG_AUDITSYSCALL
23443 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23444 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
23445 jz auditsys
23446 #endif
23447 SAVE_REST
23448 @@ -730,12 +1209,15 @@ tracesys:
23449 FIXUP_TOP_OF_STACK %rdi
23450 movq %rsp,%rdi
23451 call syscall_trace_enter
23452 +
23453 + pax_erase_kstack
23454 +
23455 /*
23456 * Reload arg registers from stack in case ptrace changed them.
23457 * We don't reload %rax because syscall_trace_enter() returned
23458 * the value it wants us to use in the table lookup.
23459 */
23460 - LOAD_ARGS ARGOFFSET, 1
23461 + LOAD_ARGS 1
23462 RESTORE_REST
23463 #if __SYSCALL_MASK == ~0
23464 cmpq $__NR_syscall_max,%rax
23465 @@ -765,7 +1247,9 @@ GLOBAL(int_with_check)
23466 andl %edi,%edx
23467 jnz int_careful
23468 andl $~TS_COMPAT,TI_status(%rcx)
23469 - jmp retint_swapgs
23470 + pax_exit_kernel_user
23471 + pax_erase_kstack
23472 + jmp retint_swapgs_pax
23473
23474 /* Either reschedule or signal or syscall exit tracking needed. */
23475 /* First do a reschedule test. */
23476 @@ -811,7 +1295,7 @@ int_restore_rest:
23477 TRACE_IRQS_OFF
23478 jmp int_with_check
23479 CFI_ENDPROC
23480 -END(system_call)
23481 +ENDPROC(system_call)
23482
23483 .macro FORK_LIKE func
23484 ENTRY(stub_\func)
23485 @@ -824,9 +1308,10 @@ ENTRY(stub_\func)
23486 DEFAULT_FRAME 0 8 /* offset 8: return address */
23487 call sys_\func
23488 RESTORE_TOP_OF_STACK %r11, 8
23489 - ret $REST_SKIP /* pop extended registers */
23490 + pax_force_retaddr
23491 + ret
23492 CFI_ENDPROC
23493 -END(stub_\func)
23494 +ENDPROC(stub_\func)
23495 .endm
23496
23497 .macro FIXED_FRAME label,func
23498 @@ -836,9 +1321,10 @@ ENTRY(\label)
23499 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23500 call \func
23501 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23502 + pax_force_retaddr
23503 ret
23504 CFI_ENDPROC
23505 -END(\label)
23506 +ENDPROC(\label)
23507 .endm
23508
23509 FORK_LIKE clone
23510 @@ -846,19 +1332,6 @@ END(\label)
23511 FORK_LIKE vfork
23512 FIXED_FRAME stub_iopl, sys_iopl
23513
23514 -ENTRY(ptregscall_common)
23515 - DEFAULT_FRAME 1 8 /* offset 8: return address */
23516 - RESTORE_TOP_OF_STACK %r11, 8
23517 - movq_cfi_restore R15+8, r15
23518 - movq_cfi_restore R14+8, r14
23519 - movq_cfi_restore R13+8, r13
23520 - movq_cfi_restore R12+8, r12
23521 - movq_cfi_restore RBP+8, rbp
23522 - movq_cfi_restore RBX+8, rbx
23523 - ret $REST_SKIP /* pop extended registers */
23524 - CFI_ENDPROC
23525 -END(ptregscall_common)
23526 -
23527 ENTRY(stub_execve)
23528 CFI_STARTPROC
23529 addq $8, %rsp
23530 @@ -870,7 +1343,7 @@ ENTRY(stub_execve)
23531 RESTORE_REST
23532 jmp int_ret_from_sys_call
23533 CFI_ENDPROC
23534 -END(stub_execve)
23535 +ENDPROC(stub_execve)
23536
23537 /*
23538 * sigreturn is special because it needs to restore all registers on return.
23539 @@ -887,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
23540 RESTORE_REST
23541 jmp int_ret_from_sys_call
23542 CFI_ENDPROC
23543 -END(stub_rt_sigreturn)
23544 +ENDPROC(stub_rt_sigreturn)
23545
23546 #ifdef CONFIG_X86_X32_ABI
23547 ENTRY(stub_x32_rt_sigreturn)
23548 @@ -901,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
23549 RESTORE_REST
23550 jmp int_ret_from_sys_call
23551 CFI_ENDPROC
23552 -END(stub_x32_rt_sigreturn)
23553 +ENDPROC(stub_x32_rt_sigreturn)
23554
23555 ENTRY(stub_x32_execve)
23556 CFI_STARTPROC
23557 @@ -915,7 +1388,7 @@ ENTRY(stub_x32_execve)
23558 RESTORE_REST
23559 jmp int_ret_from_sys_call
23560 CFI_ENDPROC
23561 -END(stub_x32_execve)
23562 +ENDPROC(stub_x32_execve)
23563
23564 #endif
23565
23566 @@ -952,7 +1425,7 @@ vector=vector+1
23567 2: jmp common_interrupt
23568 .endr
23569 CFI_ENDPROC
23570 -END(irq_entries_start)
23571 +ENDPROC(irq_entries_start)
23572
23573 .previous
23574 END(interrupt)
23575 @@ -969,8 +1442,8 @@ END(interrupt)
23576 /* 0(%rsp): ~(interrupt number) */
23577 .macro interrupt func
23578 /* reserve pt_regs for scratch regs and rbp */
23579 - subq $ORIG_RAX-RBP, %rsp
23580 - CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23581 + subq $ORIG_RAX, %rsp
23582 + CFI_ADJUST_CFA_OFFSET ORIG_RAX
23583 SAVE_ARGS_IRQ
23584 call \func
23585 .endm
23586 @@ -997,14 +1470,14 @@ ret_from_intr:
23587
23588 /* Restore saved previous stack */
23589 popq %rsi
23590 - CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23591 - leaq ARGOFFSET-RBP(%rsi), %rsp
23592 + CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23593 + movq %rsi, %rsp
23594 CFI_DEF_CFA_REGISTER rsp
23595 - CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23596 + CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23597
23598 exit_intr:
23599 GET_THREAD_INFO(%rcx)
23600 - testl $3,CS-ARGOFFSET(%rsp)
23601 + testb $3,CS-ARGOFFSET(%rsp)
23602 je retint_kernel
23603
23604 /* Interrupt came from user space */
23605 @@ -1026,12 +1499,16 @@ retint_swapgs: /* return to user-space */
23606 * The iretq could re-enable interrupts:
23607 */
23608 DISABLE_INTERRUPTS(CLBR_ANY)
23609 + pax_exit_kernel_user
23610 +retint_swapgs_pax:
23611 TRACE_IRQS_IRETQ
23612 SWAPGS
23613 jmp restore_args
23614
23615 retint_restore_args: /* return to kernel space */
23616 DISABLE_INTERRUPTS(CLBR_ANY)
23617 + pax_exit_kernel
23618 + pax_force_retaddr (RIP-ARGOFFSET)
23619 /*
23620 * The iretq could re-enable interrupts:
23621 */
23622 @@ -1112,7 +1589,7 @@ ENTRY(retint_kernel)
23623 #endif
23624
23625 CFI_ENDPROC
23626 -END(common_interrupt)
23627 +ENDPROC(common_interrupt)
23628 /*
23629 * End of kprobes section
23630 */
23631 @@ -1130,7 +1607,7 @@ ENTRY(\sym)
23632 interrupt \do_sym
23633 jmp ret_from_intr
23634 CFI_ENDPROC
23635 -END(\sym)
23636 +ENDPROC(\sym)
23637 .endm
23638
23639 #ifdef CONFIG_TRACING
23640 @@ -1218,7 +1695,7 @@ ENTRY(\sym)
23641 call \do_sym
23642 jmp error_exit /* %ebx: no swapgs flag */
23643 CFI_ENDPROC
23644 -END(\sym)
23645 +ENDPROC(\sym)
23646 .endm
23647
23648 .macro paranoidzeroentry sym do_sym
23649 @@ -1236,10 +1713,10 @@ ENTRY(\sym)
23650 call \do_sym
23651 jmp paranoid_exit /* %ebx: no swapgs flag */
23652 CFI_ENDPROC
23653 -END(\sym)
23654 +ENDPROC(\sym)
23655 .endm
23656
23657 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23658 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23659 .macro paranoidzeroentry_ist sym do_sym ist
23660 ENTRY(\sym)
23661 INTR_FRAME
23662 @@ -1252,12 +1729,18 @@ ENTRY(\sym)
23663 TRACE_IRQS_OFF_DEBUG
23664 movq %rsp,%rdi /* pt_regs pointer */
23665 xorl %esi,%esi /* no error code */
23666 +#ifdef CONFIG_SMP
23667 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23668 + lea init_tss(%r13), %r13
23669 +#else
23670 + lea init_tss(%rip), %r13
23671 +#endif
23672 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23673 call \do_sym
23674 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23675 jmp paranoid_exit /* %ebx: no swapgs flag */
23676 CFI_ENDPROC
23677 -END(\sym)
23678 +ENDPROC(\sym)
23679 .endm
23680
23681 .macro errorentry sym do_sym
23682 @@ -1275,7 +1758,7 @@ ENTRY(\sym)
23683 call \do_sym
23684 jmp error_exit /* %ebx: no swapgs flag */
23685 CFI_ENDPROC
23686 -END(\sym)
23687 +ENDPROC(\sym)
23688 .endm
23689
23690 #ifdef CONFIG_TRACING
23691 @@ -1306,7 +1789,7 @@ ENTRY(\sym)
23692 call \do_sym
23693 jmp paranoid_exit /* %ebx: no swapgs flag */
23694 CFI_ENDPROC
23695 -END(\sym)
23696 +ENDPROC(\sym)
23697 .endm
23698
23699 zeroentry divide_error do_divide_error
23700 @@ -1336,9 +1819,10 @@ gs_change:
23701 2: mfence /* workaround */
23702 SWAPGS
23703 popfq_cfi
23704 + pax_force_retaddr
23705 ret
23706 CFI_ENDPROC
23707 -END(native_load_gs_index)
23708 +ENDPROC(native_load_gs_index)
23709
23710 _ASM_EXTABLE(gs_change,bad_gs)
23711 .section .fixup,"ax"
23712 @@ -1366,9 +1850,10 @@ ENTRY(do_softirq_own_stack)
23713 CFI_DEF_CFA_REGISTER rsp
23714 CFI_ADJUST_CFA_OFFSET -8
23715 decl PER_CPU_VAR(irq_count)
23716 + pax_force_retaddr
23717 ret
23718 CFI_ENDPROC
23719 -END(do_softirq_own_stack)
23720 +ENDPROC(do_softirq_own_stack)
23721
23722 #ifdef CONFIG_XEN
23723 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
23724 @@ -1406,7 +1891,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23725 decl PER_CPU_VAR(irq_count)
23726 jmp error_exit
23727 CFI_ENDPROC
23728 -END(xen_do_hypervisor_callback)
23729 +ENDPROC(xen_do_hypervisor_callback)
23730
23731 /*
23732 * Hypervisor uses this for application faults while it executes.
23733 @@ -1465,7 +1950,7 @@ ENTRY(xen_failsafe_callback)
23734 SAVE_ALL
23735 jmp error_exit
23736 CFI_ENDPROC
23737 -END(xen_failsafe_callback)
23738 +ENDPROC(xen_failsafe_callback)
23739
23740 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23741 xen_hvm_callback_vector xen_evtchn_do_upcall
23742 @@ -1517,18 +2002,33 @@ ENTRY(paranoid_exit)
23743 DEFAULT_FRAME
23744 DISABLE_INTERRUPTS(CLBR_NONE)
23745 TRACE_IRQS_OFF_DEBUG
23746 - testl %ebx,%ebx /* swapgs needed? */
23747 + testl $1,%ebx /* swapgs needed? */
23748 jnz paranoid_restore
23749 - testl $3,CS(%rsp)
23750 + testb $3,CS(%rsp)
23751 jnz paranoid_userspace
23752 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23753 + pax_exit_kernel
23754 + TRACE_IRQS_IRETQ 0
23755 + SWAPGS_UNSAFE_STACK
23756 + RESTORE_ALL 8
23757 + pax_force_retaddr_bts
23758 + jmp irq_return
23759 +#endif
23760 paranoid_swapgs:
23761 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23762 + pax_exit_kernel_user
23763 +#else
23764 + pax_exit_kernel
23765 +#endif
23766 TRACE_IRQS_IRETQ 0
23767 SWAPGS_UNSAFE_STACK
23768 RESTORE_ALL 8
23769 jmp irq_return
23770 paranoid_restore:
23771 + pax_exit_kernel
23772 TRACE_IRQS_IRETQ_DEBUG 0
23773 RESTORE_ALL 8
23774 + pax_force_retaddr_bts
23775 jmp irq_return
23776 paranoid_userspace:
23777 GET_THREAD_INFO(%rcx)
23778 @@ -1557,7 +2057,7 @@ paranoid_schedule:
23779 TRACE_IRQS_OFF
23780 jmp paranoid_userspace
23781 CFI_ENDPROC
23782 -END(paranoid_exit)
23783 +ENDPROC(paranoid_exit)
23784
23785 /*
23786 * Exception entry point. This expects an error code/orig_rax on the stack.
23787 @@ -1584,12 +2084,23 @@ ENTRY(error_entry)
23788 movq_cfi r14, R14+8
23789 movq_cfi r15, R15+8
23790 xorl %ebx,%ebx
23791 - testl $3,CS+8(%rsp)
23792 + testb $3,CS+8(%rsp)
23793 je error_kernelspace
23794 error_swapgs:
23795 SWAPGS
23796 error_sti:
23797 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23798 + testb $3, CS+8(%rsp)
23799 + jnz 1f
23800 + pax_enter_kernel
23801 + jmp 2f
23802 +1: pax_enter_kernel_user
23803 +2:
23804 +#else
23805 + pax_enter_kernel
23806 +#endif
23807 TRACE_IRQS_OFF
23808 + pax_force_retaddr
23809 ret
23810
23811 /*
23812 @@ -1616,7 +2127,7 @@ bstep_iret:
23813 movq %rcx,RIP+8(%rsp)
23814 jmp error_swapgs
23815 CFI_ENDPROC
23816 -END(error_entry)
23817 +ENDPROC(error_entry)
23818
23819
23820 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
23821 @@ -1627,7 +2138,7 @@ ENTRY(error_exit)
23822 DISABLE_INTERRUPTS(CLBR_NONE)
23823 TRACE_IRQS_OFF
23824 GET_THREAD_INFO(%rcx)
23825 - testl %eax,%eax
23826 + testl $1,%eax
23827 jne retint_kernel
23828 LOCKDEP_SYS_EXIT_IRQ
23829 movl TI_flags(%rcx),%edx
23830 @@ -1636,7 +2147,7 @@ ENTRY(error_exit)
23831 jnz retint_careful
23832 jmp retint_swapgs
23833 CFI_ENDPROC
23834 -END(error_exit)
23835 +ENDPROC(error_exit)
23836
23837 /*
23838 * Test if a given stack is an NMI stack or not.
23839 @@ -1694,9 +2205,11 @@ ENTRY(nmi)
23840 * If %cs was not the kernel segment, then the NMI triggered in user
23841 * space, which means it is definitely not nested.
23842 */
23843 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
23844 + je 1f
23845 cmpl $__KERNEL_CS, 16(%rsp)
23846 jne first_nmi
23847 -
23848 +1:
23849 /*
23850 * Check the special variable on the stack to see if NMIs are
23851 * executing.
23852 @@ -1730,8 +2243,7 @@ nested_nmi:
23853
23854 1:
23855 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
23856 - leaq -1*8(%rsp), %rdx
23857 - movq %rdx, %rsp
23858 + subq $8, %rsp
23859 CFI_ADJUST_CFA_OFFSET 1*8
23860 leaq -10*8(%rsp), %rdx
23861 pushq_cfi $__KERNEL_DS
23862 @@ -1749,6 +2261,7 @@ nested_nmi_out:
23863 CFI_RESTORE rdx
23864
23865 /* No need to check faults here */
23866 +# pax_force_retaddr_bts
23867 INTERRUPT_RETURN
23868
23869 CFI_RESTORE_STATE
23870 @@ -1845,13 +2358,13 @@ end_repeat_nmi:
23871 subq $ORIG_RAX-R15, %rsp
23872 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
23873 /*
23874 - * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
23875 + * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
23876 * as we should not be calling schedule in NMI context.
23877 * Even with normal interrupts enabled. An NMI should not be
23878 * setting NEED_RESCHED or anything that normal interrupts and
23879 * exceptions might do.
23880 */
23881 - call save_paranoid
23882 + call save_paranoid_nmi
23883 DEFAULT_FRAME 0
23884
23885 /*
23886 @@ -1861,9 +2374,9 @@ end_repeat_nmi:
23887 * NMI itself takes a page fault, the page fault that was preempted
23888 * will read the information from the NMI page fault and not the
23889 * origin fault. Save it off and restore it if it changes.
23890 - * Use the r12 callee-saved register.
23891 + * Use the r13 callee-saved register.
23892 */
23893 - movq %cr2, %r12
23894 + movq %cr2, %r13
23895
23896 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
23897 movq %rsp,%rdi
23898 @@ -1872,31 +2385,36 @@ end_repeat_nmi:
23899
23900 /* Did the NMI take a page fault? Restore cr2 if it did */
23901 movq %cr2, %rcx
23902 - cmpq %rcx, %r12
23903 + cmpq %rcx, %r13
23904 je 1f
23905 - movq %r12, %cr2
23906 + movq %r13, %cr2
23907 1:
23908
23909 - testl %ebx,%ebx /* swapgs needed? */
23910 + testl $1,%ebx /* swapgs needed? */
23911 jnz nmi_restore
23912 nmi_swapgs:
23913 SWAPGS_UNSAFE_STACK
23914 nmi_restore:
23915 + pax_exit_kernel_nmi
23916 /* Pop the extra iret frame at once */
23917 RESTORE_ALL 6*8
23918 + testb $3, 8(%rsp)
23919 + jnz 1f
23920 + pax_force_retaddr_bts
23921 +1:
23922
23923 /* Clear the NMI executing stack variable */
23924 movq $0, 5*8(%rsp)
23925 jmp irq_return
23926 CFI_ENDPROC
23927 -END(nmi)
23928 +ENDPROC(nmi)
23929
23930 ENTRY(ignore_sysret)
23931 CFI_STARTPROC
23932 mov $-ENOSYS,%eax
23933 sysret
23934 CFI_ENDPROC
23935 -END(ignore_sysret)
23936 +ENDPROC(ignore_sysret)
23937
23938 /*
23939 * End of kprobes section
23940 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
23941 index e625319..b9abb9d 100644
23942 --- a/arch/x86/kernel/ftrace.c
23943 +++ b/arch/x86/kernel/ftrace.c
23944 @@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
23945 {
23946 unsigned char replaced[MCOUNT_INSN_SIZE];
23947
23948 + ip = ktla_ktva(ip);
23949 +
23950 /*
23951 * Note: Due to modules and __init, code can
23952 * disappear and change, we need to protect against faulting
23953 @@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
23954 unsigned char old[MCOUNT_INSN_SIZE];
23955 int ret;
23956
23957 - memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
23958 + memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
23959
23960 ftrace_update_func = ip;
23961 /* Make sure the breakpoints see the ftrace_update_func update */
23962 @@ -306,7 +308,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
23963 * kernel identity mapping to modify code.
23964 */
23965 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
23966 - ip = (unsigned long)__va(__pa_symbol(ip));
23967 + ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
23968
23969 return probe_kernel_write((void *)ip, val, size);
23970 }
23971 @@ -316,7 +318,7 @@ static int add_break(unsigned long ip, const char *old)
23972 unsigned char replaced[MCOUNT_INSN_SIZE];
23973 unsigned char brk = BREAKPOINT_INSTRUCTION;
23974
23975 - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
23976 + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
23977 return -EFAULT;
23978
23979 /* Make sure it is what we expect it to be */
23980 @@ -664,7 +666,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
23981 return ret;
23982
23983 fail_update:
23984 - probe_kernel_write((void *)ip, &old_code[0], 1);
23985 + probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
23986 goto out;
23987 }
23988
23989 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
23990 index 85126cc..1bbce17 100644
23991 --- a/arch/x86/kernel/head64.c
23992 +++ b/arch/x86/kernel/head64.c
23993 @@ -67,12 +67,12 @@ again:
23994 pgd = *pgd_p;
23995
23996 /*
23997 - * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
23998 - * critical -- __PAGE_OFFSET would point us back into the dynamic
23999 + * The use of __early_va rather than __va here is critical:
24000 + * __va would point us back into the dynamic
24001 * range and we might end up looping forever...
24002 */
24003 if (pgd)
24004 - pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24005 + pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24006 else {
24007 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24008 reset_early_page_tables();
24009 @@ -82,13 +82,13 @@ again:
24010 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24011 for (i = 0; i < PTRS_PER_PUD; i++)
24012 pud_p[i] = 0;
24013 - *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24014 + *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24015 }
24016 pud_p += pud_index(address);
24017 pud = *pud_p;
24018
24019 if (pud)
24020 - pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24021 + pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24022 else {
24023 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24024 reset_early_page_tables();
24025 @@ -98,7 +98,7 @@ again:
24026 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24027 for (i = 0; i < PTRS_PER_PMD; i++)
24028 pmd_p[i] = 0;
24029 - *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24030 + *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24031 }
24032 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24033 pmd_p[pmd_index(address)] = pmd;
24034 @@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
24035 if (console_loglevel == 10)
24036 early_printk("Kernel alive\n");
24037
24038 - clear_page(init_level4_pgt);
24039 /* set init_level4_pgt kernel high mapping*/
24040 init_level4_pgt[511] = early_level4_pgt[511];
24041
24042 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24043 index f36bd42..56ee1534 100644
24044 --- a/arch/x86/kernel/head_32.S
24045 +++ b/arch/x86/kernel/head_32.S
24046 @@ -26,6 +26,12 @@
24047 /* Physical address */
24048 #define pa(X) ((X) - __PAGE_OFFSET)
24049
24050 +#ifdef CONFIG_PAX_KERNEXEC
24051 +#define ta(X) (X)
24052 +#else
24053 +#define ta(X) ((X) - __PAGE_OFFSET)
24054 +#endif
24055 +
24056 /*
24057 * References to members of the new_cpu_data structure.
24058 */
24059 @@ -55,11 +61,7 @@
24060 * and small than max_low_pfn, otherwise will waste some page table entries
24061 */
24062
24063 -#if PTRS_PER_PMD > 1
24064 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24065 -#else
24066 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24067 -#endif
24068 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24069
24070 /* Number of possible pages in the lowmem region */
24071 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24072 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24073 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24074
24075 /*
24076 + * Real beginning of normal "text" segment
24077 + */
24078 +ENTRY(stext)
24079 +ENTRY(_stext)
24080 +
24081 +/*
24082 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24083 * %esi points to the real-mode code as a 32-bit pointer.
24084 * CS and DS must be 4 GB flat segments, but we don't depend on
24085 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24086 * can.
24087 */
24088 __HEAD
24089 +
24090 +#ifdef CONFIG_PAX_KERNEXEC
24091 + jmp startup_32
24092 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24093 +.fill PAGE_SIZE-5,1,0xcc
24094 +#endif
24095 +
24096 ENTRY(startup_32)
24097 movl pa(stack_start),%ecx
24098
24099 @@ -106,6 +121,59 @@ ENTRY(startup_32)
24100 2:
24101 leal -__PAGE_OFFSET(%ecx),%esp
24102
24103 +#ifdef CONFIG_SMP
24104 + movl $pa(cpu_gdt_table),%edi
24105 + movl $__per_cpu_load,%eax
24106 + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24107 + rorl $16,%eax
24108 + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24109 + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24110 + movl $__per_cpu_end - 1,%eax
24111 + subl $__per_cpu_start,%eax
24112 + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24113 +#endif
24114 +
24115 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24116 + movl $NR_CPUS,%ecx
24117 + movl $pa(cpu_gdt_table),%edi
24118 +1:
24119 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24120 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24121 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24122 + addl $PAGE_SIZE_asm,%edi
24123 + loop 1b
24124 +#endif
24125 +
24126 +#ifdef CONFIG_PAX_KERNEXEC
24127 + movl $pa(boot_gdt),%edi
24128 + movl $__LOAD_PHYSICAL_ADDR,%eax
24129 + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24130 + rorl $16,%eax
24131 + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24132 + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24133 + rorl $16,%eax
24134 +
24135 + ljmp $(__BOOT_CS),$1f
24136 +1:
24137 +
24138 + movl $NR_CPUS,%ecx
24139 + movl $pa(cpu_gdt_table),%edi
24140 + addl $__PAGE_OFFSET,%eax
24141 +1:
24142 + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24143 + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24144 + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24145 + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24146 + rorl $16,%eax
24147 + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24148 + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24149 + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24150 + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24151 + rorl $16,%eax
24152 + addl $PAGE_SIZE_asm,%edi
24153 + loop 1b
24154 +#endif
24155 +
24156 /*
24157 * Clear BSS first so that there are no surprises...
24158 */
24159 @@ -201,8 +269,11 @@ ENTRY(startup_32)
24160 movl %eax, pa(max_pfn_mapped)
24161
24162 /* Do early initialization of the fixmap area */
24163 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24164 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24165 +#ifdef CONFIG_COMPAT_VDSO
24166 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24167 +#else
24168 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24169 +#endif
24170 #else /* Not PAE */
24171
24172 page_pde_offset = (__PAGE_OFFSET >> 20);
24173 @@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24174 movl %eax, pa(max_pfn_mapped)
24175
24176 /* Do early initialization of the fixmap area */
24177 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24178 - movl %eax,pa(initial_page_table+0xffc)
24179 +#ifdef CONFIG_COMPAT_VDSO
24180 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24181 +#else
24182 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24183 +#endif
24184 #endif
24185
24186 #ifdef CONFIG_PARAVIRT
24187 @@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24188 cmpl $num_subarch_entries, %eax
24189 jae bad_subarch
24190
24191 - movl pa(subarch_entries)(,%eax,4), %eax
24192 - subl $__PAGE_OFFSET, %eax
24193 - jmp *%eax
24194 + jmp *pa(subarch_entries)(,%eax,4)
24195
24196 bad_subarch:
24197 WEAK(lguest_entry)
24198 @@ -261,10 +333,10 @@ WEAK(xen_entry)
24199 __INITDATA
24200
24201 subarch_entries:
24202 - .long default_entry /* normal x86/PC */
24203 - .long lguest_entry /* lguest hypervisor */
24204 - .long xen_entry /* Xen hypervisor */
24205 - .long default_entry /* Moorestown MID */
24206 + .long ta(default_entry) /* normal x86/PC */
24207 + .long ta(lguest_entry) /* lguest hypervisor */
24208 + .long ta(xen_entry) /* Xen hypervisor */
24209 + .long ta(default_entry) /* Moorestown MID */
24210 num_subarch_entries = (. - subarch_entries) / 4
24211 .previous
24212 #else
24213 @@ -354,6 +426,7 @@ default_entry:
24214 movl pa(mmu_cr4_features),%eax
24215 movl %eax,%cr4
24216
24217 +#ifdef CONFIG_X86_PAE
24218 testb $X86_CR4_PAE, %al # check if PAE is enabled
24219 jz enable_paging
24220
24221 @@ -382,6 +455,9 @@ default_entry:
24222 /* Make changes effective */
24223 wrmsr
24224
24225 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24226 +#endif
24227 +
24228 enable_paging:
24229
24230 /*
24231 @@ -449,14 +525,20 @@ is486:
24232 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24233 movl %eax,%ss # after changing gdt.
24234
24235 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
24236 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24237 movl %eax,%ds
24238 movl %eax,%es
24239
24240 movl $(__KERNEL_PERCPU), %eax
24241 movl %eax,%fs # set this cpu's percpu
24242
24243 +#ifdef CONFIG_CC_STACKPROTECTOR
24244 movl $(__KERNEL_STACK_CANARY),%eax
24245 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24246 + movl $(__USER_DS),%eax
24247 +#else
24248 + xorl %eax,%eax
24249 +#endif
24250 movl %eax,%gs
24251
24252 xorl %eax,%eax # Clear LDT
24253 @@ -512,8 +594,11 @@ setup_once:
24254 * relocation. Manually set base address in stack canary
24255 * segment descriptor.
24256 */
24257 - movl $gdt_page,%eax
24258 + movl $cpu_gdt_table,%eax
24259 movl $stack_canary,%ecx
24260 +#ifdef CONFIG_SMP
24261 + addl $__per_cpu_load,%ecx
24262 +#endif
24263 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24264 shrl $16, %ecx
24265 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24266 @@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24267 cmpl $2,(%esp) # X86_TRAP_NMI
24268 je is_nmi # Ignore NMI
24269
24270 - cmpl $2,%ss:early_recursion_flag
24271 + cmpl $1,%ss:early_recursion_flag
24272 je hlt_loop
24273 incl %ss:early_recursion_flag
24274
24275 @@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24276 pushl (20+6*4)(%esp) /* trapno */
24277 pushl $fault_msg
24278 call printk
24279 -#endif
24280 call dump_stack
24281 +#endif
24282 hlt_loop:
24283 hlt
24284 jmp hlt_loop
24285 @@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24286 /* This is the default interrupt "handler" :-) */
24287 ALIGN
24288 ignore_int:
24289 - cld
24290 #ifdef CONFIG_PRINTK
24291 + cmpl $2,%ss:early_recursion_flag
24292 + je hlt_loop
24293 + incl %ss:early_recursion_flag
24294 + cld
24295 pushl %eax
24296 pushl %ecx
24297 pushl %edx
24298 @@ -617,9 +705,6 @@ ignore_int:
24299 movl $(__KERNEL_DS),%eax
24300 movl %eax,%ds
24301 movl %eax,%es
24302 - cmpl $2,early_recursion_flag
24303 - je hlt_loop
24304 - incl early_recursion_flag
24305 pushl 16(%esp)
24306 pushl 24(%esp)
24307 pushl 32(%esp)
24308 @@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24309 /*
24310 * BSS section
24311 */
24312 -__PAGE_ALIGNED_BSS
24313 - .align PAGE_SIZE
24314 #ifdef CONFIG_X86_PAE
24315 +.section .initial_pg_pmd,"a",@progbits
24316 initial_pg_pmd:
24317 .fill 1024*KPMDS,4,0
24318 #else
24319 +.section .initial_page_table,"a",@progbits
24320 ENTRY(initial_page_table)
24321 .fill 1024,4,0
24322 #endif
24323 +.section .initial_pg_fixmap,"a",@progbits
24324 initial_pg_fixmap:
24325 .fill 1024,4,0
24326 +.section .empty_zero_page,"a",@progbits
24327 ENTRY(empty_zero_page)
24328 .fill 4096,1,0
24329 +.section .swapper_pg_dir,"a",@progbits
24330 ENTRY(swapper_pg_dir)
24331 +#ifdef CONFIG_X86_PAE
24332 + .fill 4,8,0
24333 +#else
24334 .fill 1024,4,0
24335 +#endif
24336
24337 /*
24338 * This starts the data section.
24339 */
24340 #ifdef CONFIG_X86_PAE
24341 -__PAGE_ALIGNED_DATA
24342 - /* Page-aligned for the benefit of paravirt? */
24343 - .align PAGE_SIZE
24344 +.section .initial_page_table,"a",@progbits
24345 ENTRY(initial_page_table)
24346 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24347 # if KPMDS == 3
24348 @@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24349 # error "Kernel PMDs should be 1, 2 or 3"
24350 # endif
24351 .align PAGE_SIZE /* needs to be page-sized too */
24352 +
24353 +#ifdef CONFIG_PAX_PER_CPU_PGD
24354 +ENTRY(cpu_pgd)
24355 + .rept 2*NR_CPUS
24356 + .fill 4,8,0
24357 + .endr
24358 +#endif
24359 +
24360 #endif
24361
24362 .data
24363 .balign 4
24364 ENTRY(stack_start)
24365 - .long init_thread_union+THREAD_SIZE
24366 + .long init_thread_union+THREAD_SIZE-8
24367
24368 __INITRODATA
24369 int_msg:
24370 @@ -727,7 +825,7 @@ fault_msg:
24371 * segment size, and 32-bit linear address value:
24372 */
24373
24374 - .data
24375 +.section .rodata,"a",@progbits
24376 .globl boot_gdt_descr
24377 .globl idt_descr
24378
24379 @@ -736,7 +834,7 @@ fault_msg:
24380 .word 0 # 32 bit align gdt_desc.address
24381 boot_gdt_descr:
24382 .word __BOOT_DS+7
24383 - .long boot_gdt - __PAGE_OFFSET
24384 + .long pa(boot_gdt)
24385
24386 .word 0 # 32-bit align idt_desc.address
24387 idt_descr:
24388 @@ -747,7 +845,7 @@ idt_descr:
24389 .word 0 # 32 bit align gdt_desc.address
24390 ENTRY(early_gdt_descr)
24391 .word GDT_ENTRIES*8-1
24392 - .long gdt_page /* Overwritten for secondary CPUs */
24393 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
24394
24395 /*
24396 * The boot_gdt must mirror the equivalent in setup.S and is
24397 @@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24398 .align L1_CACHE_BYTES
24399 ENTRY(boot_gdt)
24400 .fill GDT_ENTRY_BOOT_CS,8,0
24401 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24402 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24403 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24404 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24405 +
24406 + .align PAGE_SIZE_asm
24407 +ENTRY(cpu_gdt_table)
24408 + .rept NR_CPUS
24409 + .quad 0x0000000000000000 /* NULL descriptor */
24410 + .quad 0x0000000000000000 /* 0x0b reserved */
24411 + .quad 0x0000000000000000 /* 0x13 reserved */
24412 + .quad 0x0000000000000000 /* 0x1b reserved */
24413 +
24414 +#ifdef CONFIG_PAX_KERNEXEC
24415 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24416 +#else
24417 + .quad 0x0000000000000000 /* 0x20 unused */
24418 +#endif
24419 +
24420 + .quad 0x0000000000000000 /* 0x28 unused */
24421 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24422 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24423 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24424 + .quad 0x0000000000000000 /* 0x4b reserved */
24425 + .quad 0x0000000000000000 /* 0x53 reserved */
24426 + .quad 0x0000000000000000 /* 0x5b reserved */
24427 +
24428 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24429 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24430 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24431 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24432 +
24433 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24434 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24435 +
24436 + /*
24437 + * Segments used for calling PnP BIOS have byte granularity.
24438 + * The code segments and data segments have fixed 64k limits,
24439 + * the transfer segment sizes are set at run time.
24440 + */
24441 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
24442 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
24443 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
24444 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
24445 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
24446 +
24447 + /*
24448 + * The APM segments have byte granularity and their bases
24449 + * are set at run time. All have 64k limits.
24450 + */
24451 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24452 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24453 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
24454 +
24455 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
24456 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24457 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24458 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24459 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24460 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24461 +
24462 + /* Be sure this is zeroed to avoid false validations in Xen */
24463 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24464 + .endr
24465 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24466 index a468c0a..c7dec74 100644
24467 --- a/arch/x86/kernel/head_64.S
24468 +++ b/arch/x86/kernel/head_64.S
24469 @@ -20,6 +20,8 @@
24470 #include <asm/processor-flags.h>
24471 #include <asm/percpu.h>
24472 #include <asm/nops.h>
24473 +#include <asm/cpufeature.h>
24474 +#include <asm/alternative-asm.h>
24475
24476 #ifdef CONFIG_PARAVIRT
24477 #include <asm/asm-offsets.h>
24478 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24479 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24480 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24481 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24482 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
24483 +L3_VMALLOC_START = pud_index(VMALLOC_START)
24484 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
24485 +L3_VMALLOC_END = pud_index(VMALLOC_END)
24486 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24487 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24488
24489 .text
24490 __HEAD
24491 @@ -89,11 +97,24 @@ startup_64:
24492 * Fixup the physical addresses in the page table
24493 */
24494 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24495 + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24496 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24497 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24498 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24499 + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24500
24501 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24502 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24503 + addq %rbp, level3_ident_pgt + (0*8)(%rip)
24504 +#ifndef CONFIG_XEN
24505 + addq %rbp, level3_ident_pgt + (1*8)(%rip)
24506 +#endif
24507 +
24508 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24509 +
24510 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24511 + addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24512
24513 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24514 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24515
24516 /*
24517 * Set up the identity mapping for the switchover. These
24518 @@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
24519 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24520 1:
24521
24522 - /* Enable PAE mode and PGE */
24523 - movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24524 + /* Enable PAE mode and PSE/PGE */
24525 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24526 movq %rcx, %cr4
24527
24528 /* Setup early boot stage 4 level pagetables. */
24529 @@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
24530 movl $MSR_EFER, %ecx
24531 rdmsr
24532 btsl $_EFER_SCE, %eax /* Enable System Call */
24533 - btl $20,%edi /* No Execute supported? */
24534 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24535 jnc 1f
24536 btsl $_EFER_NX, %eax
24537 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24538 +#ifndef CONFIG_EFI
24539 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24540 +#endif
24541 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24542 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24543 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24544 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24545 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24546 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24547 1: wrmsr /* Make changes effective */
24548
24549 /* Setup cr0 */
24550 @@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
24551 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24552 * address given in m16:64.
24553 */
24554 + pax_set_fptr_mask
24555 movq initial_code(%rip),%rax
24556 pushq $0 # fake return address to stop unwinder
24557 pushq $__KERNEL_CS # set correct cs
24558 @@ -313,7 +344,7 @@ ENDPROC(start_cpu0)
24559 .quad INIT_PER_CPU_VAR(irq_stack_union)
24560
24561 GLOBAL(stack_start)
24562 - .quad init_thread_union+THREAD_SIZE-8
24563 + .quad init_thread_union+THREAD_SIZE-16
24564 .word 0
24565 __FINITDATA
24566
24567 @@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
24568 call dump_stack
24569 #ifdef CONFIG_KALLSYMS
24570 leaq early_idt_ripmsg(%rip),%rdi
24571 - movq 40(%rsp),%rsi # %rip again
24572 + movq 88(%rsp),%rsi # %rip again
24573 call __print_symbol
24574 #endif
24575 #endif /* EARLY_PRINTK */
24576 @@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
24577 early_recursion_flag:
24578 .long 0
24579
24580 + .section .rodata,"a",@progbits
24581 #ifdef CONFIG_EARLY_PRINTK
24582 early_idt_msg:
24583 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24584 @@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
24585 NEXT_PAGE(early_dynamic_pgts)
24586 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24587
24588 - .data
24589 + .section .rodata,"a",@progbits
24590
24591 -#ifndef CONFIG_XEN
24592 NEXT_PAGE(init_level4_pgt)
24593 - .fill 512,8,0
24594 -#else
24595 -NEXT_PAGE(init_level4_pgt)
24596 - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24597 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24598 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24599 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
24600 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24601 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
24602 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24603 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24604 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24605 .org init_level4_pgt + L4_START_KERNEL*8, 0
24606 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24607 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24608
24609 +#ifdef CONFIG_PAX_PER_CPU_PGD
24610 +NEXT_PAGE(cpu_pgd)
24611 + .rept 2*NR_CPUS
24612 + .fill 512,8,0
24613 + .endr
24614 +#endif
24615 +
24616 NEXT_PAGE(level3_ident_pgt)
24617 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24618 +#ifdef CONFIG_XEN
24619 .fill 511, 8, 0
24620 +#else
24621 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24622 + .fill 510,8,0
24623 +#endif
24624 +
24625 +NEXT_PAGE(level3_vmalloc_start_pgt)
24626 + .fill 512,8,0
24627 +
24628 +NEXT_PAGE(level3_vmalloc_end_pgt)
24629 + .fill 512,8,0
24630 +
24631 +NEXT_PAGE(level3_vmemmap_pgt)
24632 + .fill L3_VMEMMAP_START,8,0
24633 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24634 +
24635 NEXT_PAGE(level2_ident_pgt)
24636 - /* Since I easily can, map the first 1G.
24637 + /* Since I easily can, map the first 2G.
24638 * Don't set NX because code runs from these pages.
24639 */
24640 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24641 -#endif
24642 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24643
24644 NEXT_PAGE(level3_kernel_pgt)
24645 .fill L3_START_KERNEL,8,0
24646 @@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
24647 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24648 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24649
24650 +NEXT_PAGE(level2_vmemmap_pgt)
24651 + .fill 512,8,0
24652 +
24653 NEXT_PAGE(level2_kernel_pgt)
24654 /*
24655 * 512 MB kernel mapping. We spend a full page on this pagetable
24656 @@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
24657 NEXT_PAGE(level2_fixmap_pgt)
24658 .fill 506,8,0
24659 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24660 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24661 - .fill 5,8,0
24662 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24663 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24664 + .fill 4,8,0
24665
24666 NEXT_PAGE(level1_fixmap_pgt)
24667 .fill 512,8,0
24668
24669 +NEXT_PAGE(level1_vsyscall_pgt)
24670 + .fill 512,8,0
24671 +
24672 #undef PMDS
24673
24674 - .data
24675 + .align PAGE_SIZE
24676 +ENTRY(cpu_gdt_table)
24677 + .rept NR_CPUS
24678 + .quad 0x0000000000000000 /* NULL descriptor */
24679 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24680 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
24681 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
24682 + .quad 0x00cffb000000ffff /* __USER32_CS */
24683 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24684 + .quad 0x00affb000000ffff /* __USER_CS */
24685 +
24686 +#ifdef CONFIG_PAX_KERNEXEC
24687 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24688 +#else
24689 + .quad 0x0 /* unused */
24690 +#endif
24691 +
24692 + .quad 0,0 /* TSS */
24693 + .quad 0,0 /* LDT */
24694 + .quad 0,0,0 /* three TLS descriptors */
24695 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
24696 + /* asm/segment.h:GDT_ENTRIES must match this */
24697 +
24698 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24699 + .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24700 +#else
24701 + .quad 0x0 /* unused */
24702 +#endif
24703 +
24704 + /* zero the remaining page */
24705 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24706 + .endr
24707 +
24708 .align 16
24709 .globl early_gdt_descr
24710 early_gdt_descr:
24711 .word GDT_ENTRIES*8-1
24712 early_gdt_descr_base:
24713 - .quad INIT_PER_CPU_VAR(gdt_page)
24714 + .quad cpu_gdt_table
24715
24716 ENTRY(phys_base)
24717 /* This must match the first entry in level2_kernel_pgt */
24718 .quad 0x0000000000000000
24719
24720 #include "../../x86/xen/xen-head.S"
24721 -
24722 - __PAGE_ALIGNED_BSS
24723 +
24724 + .section .rodata,"a",@progbits
24725 NEXT_PAGE(empty_zero_page)
24726 .skip PAGE_SIZE
24727 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
24728 index 05fd74f..c3548b1 100644
24729 --- a/arch/x86/kernel/i386_ksyms_32.c
24730 +++ b/arch/x86/kernel/i386_ksyms_32.c
24731 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
24732 EXPORT_SYMBOL(cmpxchg8b_emu);
24733 #endif
24734
24735 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
24736 +
24737 /* Networking helper routines. */
24738 EXPORT_SYMBOL(csum_partial_copy_generic);
24739 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
24740 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
24741
24742 EXPORT_SYMBOL(__get_user_1);
24743 EXPORT_SYMBOL(__get_user_2);
24744 @@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
24745 EXPORT_SYMBOL(___preempt_schedule_context);
24746 #endif
24747 #endif
24748 +
24749 +#ifdef CONFIG_PAX_KERNEXEC
24750 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
24751 +#endif
24752 +
24753 +#ifdef CONFIG_PAX_PER_CPU_PGD
24754 +EXPORT_SYMBOL(cpu_pgd);
24755 +#endif
24756 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
24757 index d5dd808..b6432cf 100644
24758 --- a/arch/x86/kernel/i387.c
24759 +++ b/arch/x86/kernel/i387.c
24760 @@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
24761 static inline bool interrupted_user_mode(void)
24762 {
24763 struct pt_regs *regs = get_irq_regs();
24764 - return regs && user_mode_vm(regs);
24765 + return regs && user_mode(regs);
24766 }
24767
24768 /*
24769 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
24770 index 2e977b5..5f2c273 100644
24771 --- a/arch/x86/kernel/i8259.c
24772 +++ b/arch/x86/kernel/i8259.c
24773 @@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
24774 static void make_8259A_irq(unsigned int irq)
24775 {
24776 disable_irq_nosync(irq);
24777 - io_apic_irqs &= ~(1<<irq);
24778 + io_apic_irqs &= ~(1UL<<irq);
24779 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
24780 i8259A_chip.name);
24781 enable_irq(irq);
24782 @@ -209,7 +209,7 @@ spurious_8259A_irq:
24783 "spurious 8259A interrupt: IRQ%d.\n", irq);
24784 spurious_irq_mask |= irqmask;
24785 }
24786 - atomic_inc(&irq_err_count);
24787 + atomic_inc_unchecked(&irq_err_count);
24788 /*
24789 * Theoretically we do not have to handle this IRQ,
24790 * but in Linux this does not cause problems and is
24791 @@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi)
24792 /* (slave's support for AEOI in flat mode is to be investigated) */
24793 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
24794
24795 + pax_open_kernel();
24796 if (auto_eoi)
24797 /*
24798 * In AEOI mode we just have to mask the interrupt
24799 * when acking.
24800 */
24801 - i8259A_chip.irq_mask_ack = disable_8259A_irq;
24802 + *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
24803 else
24804 - i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24805 + *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24806 + pax_close_kernel();
24807
24808 udelay(100); /* wait for 8259A to initialize */
24809
24810 diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
24811 index a979b5b..1d6db75 100644
24812 --- a/arch/x86/kernel/io_delay.c
24813 +++ b/arch/x86/kernel/io_delay.c
24814 @@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
24815 * Quirk table for systems that misbehave (lock up, etc.) if port
24816 * 0x80 is used:
24817 */
24818 -static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
24819 +static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
24820 {
24821 .callback = dmi_io_delay_0xed_port,
24822 .ident = "Compaq Presario V6000",
24823 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
24824 index 4ddaf66..49d5c18 100644
24825 --- a/arch/x86/kernel/ioport.c
24826 +++ b/arch/x86/kernel/ioport.c
24827 @@ -6,6 +6,7 @@
24828 #include <linux/sched.h>
24829 #include <linux/kernel.h>
24830 #include <linux/capability.h>
24831 +#include <linux/security.h>
24832 #include <linux/errno.h>
24833 #include <linux/types.h>
24834 #include <linux/ioport.h>
24835 @@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24836 return -EINVAL;
24837 if (turn_on && !capable(CAP_SYS_RAWIO))
24838 return -EPERM;
24839 +#ifdef CONFIG_GRKERNSEC_IO
24840 + if (turn_on && grsec_disable_privio) {
24841 + gr_handle_ioperm();
24842 + return -ENODEV;
24843 + }
24844 +#endif
24845
24846 /*
24847 * If it's the first ioperm() call in this thread's lifetime, set the
24848 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24849 * because the ->io_bitmap_max value must match the bitmap
24850 * contents:
24851 */
24852 - tss = &per_cpu(init_tss, get_cpu());
24853 + tss = init_tss + get_cpu();
24854
24855 if (turn_on)
24856 bitmap_clear(t->io_bitmap_ptr, from, num);
24857 @@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
24858 if (level > old) {
24859 if (!capable(CAP_SYS_RAWIO))
24860 return -EPERM;
24861 +#ifdef CONFIG_GRKERNSEC_IO
24862 + if (grsec_disable_privio) {
24863 + gr_handle_iopl();
24864 + return -ENODEV;
24865 + }
24866 +#endif
24867 }
24868 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
24869 t->iopl = level << 12;
24870 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
24871 index 22d0687..e07b2a5 100644
24872 --- a/arch/x86/kernel/irq.c
24873 +++ b/arch/x86/kernel/irq.c
24874 @@ -21,7 +21,7 @@
24875 #define CREATE_TRACE_POINTS
24876 #include <asm/trace/irq_vectors.h>
24877
24878 -atomic_t irq_err_count;
24879 +atomic_unchecked_t irq_err_count;
24880
24881 /* Function pointer for generic interrupt vector handling */
24882 void (*x86_platform_ipi_callback)(void) = NULL;
24883 @@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
24884 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
24885 seq_printf(p, " Machine check polls\n");
24886 #endif
24887 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
24888 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
24889 #if defined(CONFIG_X86_IO_APIC)
24890 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
24891 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
24892 #endif
24893 return 0;
24894 }
24895 @@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
24896
24897 u64 arch_irq_stat(void)
24898 {
24899 - u64 sum = atomic_read(&irq_err_count);
24900 + u64 sum = atomic_read_unchecked(&irq_err_count);
24901 return sum;
24902 }
24903
24904 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
24905 index d7fcbed..1f747f7 100644
24906 --- a/arch/x86/kernel/irq_32.c
24907 +++ b/arch/x86/kernel/irq_32.c
24908 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
24909 __asm__ __volatile__("andl %%esp,%0" :
24910 "=r" (sp) : "0" (THREAD_SIZE - 1));
24911
24912 - return sp < (sizeof(struct thread_info) + STACK_WARN);
24913 + return sp < STACK_WARN;
24914 }
24915
24916 static void print_stack_overflow(void)
24917 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
24918 * per-CPU IRQ handling contexts (thread information and stack)
24919 */
24920 union irq_ctx {
24921 - struct thread_info tinfo;
24922 - u32 stack[THREAD_SIZE/sizeof(u32)];
24923 + unsigned long previous_esp;
24924 + u32 stack[THREAD_SIZE/sizeof(u32)];
24925 } __attribute__((aligned(THREAD_SIZE)));
24926
24927 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
24928 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
24929 static inline int
24930 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24931 {
24932 - union irq_ctx *curctx, *irqctx;
24933 + union irq_ctx *irqctx;
24934 u32 *isp, arg1, arg2;
24935
24936 - curctx = (union irq_ctx *) current_thread_info();
24937 irqctx = __this_cpu_read(hardirq_ctx);
24938
24939 /*
24940 @@ -92,13 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24941 * handler) we can't do that and just have to keep using the
24942 * current stack (which is the irq stack already after all)
24943 */
24944 - if (unlikely(curctx == irqctx))
24945 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
24946 return 0;
24947
24948 /* build the stack frame on the IRQ stack */
24949 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24950 - irqctx->tinfo.task = curctx->tinfo.task;
24951 - irqctx->tinfo.previous_esp = current_stack_pointer;
24952 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24953 + irqctx->previous_esp = current_stack_pointer;
24954 +
24955 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24956 + __set_fs(MAKE_MM_SEG(0));
24957 +#endif
24958
24959 if (unlikely(overflow))
24960 call_on_stack(print_stack_overflow, isp);
24961 @@ -110,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24962 : "0" (irq), "1" (desc), "2" (isp),
24963 "D" (desc->handle_irq)
24964 : "memory", "cc", "ecx");
24965 +
24966 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24967 + __set_fs(current_thread_info()->addr_limit);
24968 +#endif
24969 +
24970 return 1;
24971 }
24972
24973 @@ -118,48 +125,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24974 */
24975 void irq_ctx_init(int cpu)
24976 {
24977 - union irq_ctx *irqctx;
24978 -
24979 if (per_cpu(hardirq_ctx, cpu))
24980 return;
24981
24982 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24983 - THREADINFO_GFP,
24984 - THREAD_SIZE_ORDER));
24985 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24986 - irqctx->tinfo.cpu = cpu;
24987 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24988 -
24989 - per_cpu(hardirq_ctx, cpu) = irqctx;
24990 -
24991 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24992 - THREADINFO_GFP,
24993 - THREAD_SIZE_ORDER));
24994 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24995 - irqctx->tinfo.cpu = cpu;
24996 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24997 -
24998 - per_cpu(softirq_ctx, cpu) = irqctx;
24999 -
25000 - printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25001 - cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
25002 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25003 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25004 }
25005
25006 void do_softirq_own_stack(void)
25007 {
25008 - struct thread_info *curctx;
25009 union irq_ctx *irqctx;
25010 u32 *isp;
25011
25012 - curctx = current_thread_info();
25013 irqctx = __this_cpu_read(softirq_ctx);
25014 - irqctx->tinfo.task = curctx->task;
25015 - irqctx->tinfo.previous_esp = current_stack_pointer;
25016 + irqctx->previous_esp = current_stack_pointer;
25017
25018 /* build the stack frame on the softirq stack */
25019 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
25020 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
25021 +
25022 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25023 + __set_fs(MAKE_MM_SEG(0));
25024 +#endif
25025
25026 call_on_stack(__do_softirq, isp);
25027 +
25028 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25029 + __set_fs(current_thread_info()->addr_limit);
25030 +#endif
25031 +
25032 }
25033
25034 bool handle_irq(unsigned irq, struct pt_regs *regs)
25035 @@ -173,7 +166,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25036 if (unlikely(!desc))
25037 return false;
25038
25039 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25040 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25041 if (unlikely(overflow))
25042 print_stack_overflow();
25043 desc->handle_irq(irq, desc);
25044 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25045 index 4d1c746..232961d 100644
25046 --- a/arch/x86/kernel/irq_64.c
25047 +++ b/arch/x86/kernel/irq_64.c
25048 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25049 u64 estack_top, estack_bottom;
25050 u64 curbase = (u64)task_stack_page(current);
25051
25052 - if (user_mode_vm(regs))
25053 + if (user_mode(regs))
25054 return;
25055
25056 if (regs->sp >= curbase + sizeof(struct thread_info) +
25057 diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25058 index 26d5a55..a01160a 100644
25059 --- a/arch/x86/kernel/jump_label.c
25060 +++ b/arch/x86/kernel/jump_label.c
25061 @@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25062 * Jump label is enabled for the first time.
25063 * So we expect a default_nop...
25064 */
25065 - if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25066 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25067 != 0))
25068 bug_at((void *)entry->code, __LINE__);
25069 } else {
25070 @@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25071 * ...otherwise expect an ideal_nop. Otherwise
25072 * something went horribly wrong.
25073 */
25074 - if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25075 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25076 != 0))
25077 bug_at((void *)entry->code, __LINE__);
25078 }
25079 @@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25080 * are converting the default nop to the ideal nop.
25081 */
25082 if (init) {
25083 - if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25084 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25085 bug_at((void *)entry->code, __LINE__);
25086 } else {
25087 code.jump = 0xe9;
25088 code.offset = entry->target -
25089 (entry->code + JUMP_LABEL_NOP_SIZE);
25090 - if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25091 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25092 bug_at((void *)entry->code, __LINE__);
25093 }
25094 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25095 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25096 index 836f832..a8bda67 100644
25097 --- a/arch/x86/kernel/kgdb.c
25098 +++ b/arch/x86/kernel/kgdb.c
25099 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25100 #ifdef CONFIG_X86_32
25101 switch (regno) {
25102 case GDB_SS:
25103 - if (!user_mode_vm(regs))
25104 + if (!user_mode(regs))
25105 *(unsigned long *)mem = __KERNEL_DS;
25106 break;
25107 case GDB_SP:
25108 - if (!user_mode_vm(regs))
25109 + if (!user_mode(regs))
25110 *(unsigned long *)mem = kernel_stack_pointer(regs);
25111 break;
25112 case GDB_GS:
25113 @@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
25114 bp->attr.bp_addr = breakinfo[breakno].addr;
25115 bp->attr.bp_len = breakinfo[breakno].len;
25116 bp->attr.bp_type = breakinfo[breakno].type;
25117 - info->address = breakinfo[breakno].addr;
25118 + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25119 + info->address = ktla_ktva(breakinfo[breakno].addr);
25120 + else
25121 + info->address = breakinfo[breakno].addr;
25122 info->len = breakinfo[breakno].len;
25123 info->type = breakinfo[breakno].type;
25124 val = arch_install_hw_breakpoint(bp);
25125 @@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25126 case 'k':
25127 /* clear the trace bit */
25128 linux_regs->flags &= ~X86_EFLAGS_TF;
25129 - atomic_set(&kgdb_cpu_doing_single_step, -1);
25130 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25131
25132 /* set the trace bit if we're stepping */
25133 if (remcomInBuffer[0] == 's') {
25134 linux_regs->flags |= X86_EFLAGS_TF;
25135 - atomic_set(&kgdb_cpu_doing_single_step,
25136 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25137 raw_smp_processor_id());
25138 }
25139
25140 @@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25141
25142 switch (cmd) {
25143 case DIE_DEBUG:
25144 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25145 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25146 if (user_mode(regs))
25147 return single_step_cont(regs, args);
25148 break;
25149 @@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25150 #endif /* CONFIG_DEBUG_RODATA */
25151
25152 bpt->type = BP_BREAKPOINT;
25153 - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25154 + err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25155 BREAK_INSTR_SIZE);
25156 if (err)
25157 return err;
25158 - err = probe_kernel_write((char *)bpt->bpt_addr,
25159 + err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25160 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25161 #ifdef CONFIG_DEBUG_RODATA
25162 if (!err)
25163 @@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25164 return -EBUSY;
25165 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25166 BREAK_INSTR_SIZE);
25167 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25168 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25169 if (err)
25170 return err;
25171 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25172 @@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25173 if (mutex_is_locked(&text_mutex))
25174 goto knl_write;
25175 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25176 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25177 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25178 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25179 goto knl_write;
25180 return err;
25181 knl_write:
25182 #endif /* CONFIG_DEBUG_RODATA */
25183 - return probe_kernel_write((char *)bpt->bpt_addr,
25184 + return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25185 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25186 }
25187
25188 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25189 index 79a3f96..6ba030a 100644
25190 --- a/arch/x86/kernel/kprobes/core.c
25191 +++ b/arch/x86/kernel/kprobes/core.c
25192 @@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
25193 s32 raddr;
25194 } __packed *insn;
25195
25196 - insn = (struct __arch_relative_insn *)from;
25197 + insn = (struct __arch_relative_insn *)ktla_ktva(from);
25198 +
25199 + pax_open_kernel();
25200 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25201 insn->op = op;
25202 + pax_close_kernel();
25203 }
25204
25205 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25206 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
25207 kprobe_opcode_t opcode;
25208 kprobe_opcode_t *orig_opcodes = opcodes;
25209
25210 - if (search_exception_tables((unsigned long)opcodes))
25211 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25212 return 0; /* Page fault may occur on this address. */
25213
25214 retry:
25215 @@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25216 * for the first byte, we can recover the original instruction
25217 * from it and kp->opcode.
25218 */
25219 - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25220 + memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25221 buf[0] = kp->opcode;
25222 - return (unsigned long)buf;
25223 + return ktva_ktla((unsigned long)buf);
25224 }
25225
25226 /*
25227 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
25228 /* Another subsystem puts a breakpoint, failed to recover */
25229 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25230 return 0;
25231 + pax_open_kernel();
25232 memcpy(dest, insn.kaddr, insn.length);
25233 + pax_close_kernel();
25234
25235 #ifdef CONFIG_X86_64
25236 if (insn_rip_relative(&insn)) {
25237 @@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
25238 return 0;
25239 }
25240 disp = (u8 *) dest + insn_offset_displacement(&insn);
25241 + pax_open_kernel();
25242 *(s32 *) disp = (s32) newdisp;
25243 + pax_close_kernel();
25244 }
25245 #endif
25246 return insn.length;
25247 @@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
25248 * nor set current_kprobe, because it doesn't use single
25249 * stepping.
25250 */
25251 - regs->ip = (unsigned long)p->ainsn.insn;
25252 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25253 preempt_enable_no_resched();
25254 return;
25255 }
25256 @@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
25257 regs->flags &= ~X86_EFLAGS_IF;
25258 /* single step inline if the instruction is an int3 */
25259 if (p->opcode == BREAKPOINT_INSTRUCTION)
25260 - regs->ip = (unsigned long)p->addr;
25261 + regs->ip = ktla_ktva((unsigned long)p->addr);
25262 else
25263 - regs->ip = (unsigned long)p->ainsn.insn;
25264 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25265 }
25266
25267 /*
25268 @@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
25269 setup_singlestep(p, regs, kcb, 0);
25270 return 1;
25271 }
25272 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
25273 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25274 /*
25275 * The breakpoint instruction was removed right
25276 * after we hit it. Another cpu has removed
25277 @@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
25278 " movq %rax, 152(%rsp)\n"
25279 RESTORE_REGS_STRING
25280 " popfq\n"
25281 +#ifdef KERNEXEC_PLUGIN
25282 + " btsq $63,(%rsp)\n"
25283 +#endif
25284 #else
25285 " pushf\n"
25286 SAVE_REGS_STRING
25287 @@ -779,7 +789,7 @@ static void __kprobes
25288 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
25289 {
25290 unsigned long *tos = stack_addr(regs);
25291 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25292 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25293 unsigned long orig_ip = (unsigned long)p->addr;
25294 kprobe_opcode_t *insn = p->ainsn.insn;
25295
25296 @@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
25297 struct die_args *args = data;
25298 int ret = NOTIFY_DONE;
25299
25300 - if (args->regs && user_mode_vm(args->regs))
25301 + if (args->regs && user_mode(args->regs))
25302 return ret;
25303
25304 switch (val) {
25305 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25306 index 898160b..758cde8 100644
25307 --- a/arch/x86/kernel/kprobes/opt.c
25308 +++ b/arch/x86/kernel/kprobes/opt.c
25309 @@ -79,6 +79,7 @@ found:
25310 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25311 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25312 {
25313 + pax_open_kernel();
25314 #ifdef CONFIG_X86_64
25315 *addr++ = 0x48;
25316 *addr++ = 0xbf;
25317 @@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
25318 *addr++ = 0xb8;
25319 #endif
25320 *(unsigned long *)addr = val;
25321 + pax_close_kernel();
25322 }
25323
25324 asm (
25325 @@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25326 * Verify if the address gap is in 2GB range, because this uses
25327 * a relative jump.
25328 */
25329 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25330 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25331 if (abs(rel) > 0x7fffffff)
25332 return -ERANGE;
25333
25334 @@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25335 op->optinsn.size = ret;
25336
25337 /* Copy arch-dep-instance from template */
25338 - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25339 + pax_open_kernel();
25340 + memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25341 + pax_close_kernel();
25342
25343 /* Set probe information */
25344 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25345
25346 /* Set probe function call */
25347 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25348 + synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25349
25350 /* Set returning jmp instruction at the tail of out-of-line buffer */
25351 - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25352 + synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25353 (u8 *)op->kp.addr + op->optinsn.size);
25354
25355 flush_icache_range((unsigned long) buf,
25356 @@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
25357 WARN_ON(kprobe_disabled(&op->kp));
25358
25359 /* Backup instructions which will be replaced by jump address */
25360 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25361 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25362 RELATIVE_ADDR_SIZE);
25363
25364 insn_buf[0] = RELATIVEJUMP_OPCODE;
25365 @@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25366 /* This kprobe is really able to run optimized path. */
25367 op = container_of(p, struct optimized_kprobe, kp);
25368 /* Detour through copied instructions */
25369 - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25370 + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25371 if (!reenter)
25372 reset_current_kprobe();
25373 preempt_enable_no_resched();
25374 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25375 index ebc9873..1b9724b 100644
25376 --- a/arch/x86/kernel/ldt.c
25377 +++ b/arch/x86/kernel/ldt.c
25378 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25379 if (reload) {
25380 #ifdef CONFIG_SMP
25381 preempt_disable();
25382 - load_LDT(pc);
25383 + load_LDT_nolock(pc);
25384 if (!cpumask_equal(mm_cpumask(current->mm),
25385 cpumask_of(smp_processor_id())))
25386 smp_call_function(flush_ldt, current->mm, 1);
25387 preempt_enable();
25388 #else
25389 - load_LDT(pc);
25390 + load_LDT_nolock(pc);
25391 #endif
25392 }
25393 if (oldsize) {
25394 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25395 return err;
25396
25397 for (i = 0; i < old->size; i++)
25398 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25399 + write_ldt_entry(new->ldt, i, old->ldt + i);
25400 return 0;
25401 }
25402
25403 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25404 retval = copy_ldt(&mm->context, &old_mm->context);
25405 mutex_unlock(&old_mm->context.lock);
25406 }
25407 +
25408 + if (tsk == current) {
25409 + mm->context.vdso = 0;
25410 +
25411 +#ifdef CONFIG_X86_32
25412 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25413 + mm->context.user_cs_base = 0UL;
25414 + mm->context.user_cs_limit = ~0UL;
25415 +
25416 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25417 + cpus_clear(mm->context.cpu_user_cs_mask);
25418 +#endif
25419 +
25420 +#endif
25421 +#endif
25422 +
25423 + }
25424 +
25425 return retval;
25426 }
25427
25428 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25429 }
25430 }
25431
25432 +#ifdef CONFIG_PAX_SEGMEXEC
25433 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25434 + error = -EINVAL;
25435 + goto out_unlock;
25436 + }
25437 +#endif
25438 +
25439 fill_ldt(&ldt, &ldt_info);
25440 if (oldmode)
25441 ldt.avl = 0;
25442 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25443 index 5b19e4d..6476a76 100644
25444 --- a/arch/x86/kernel/machine_kexec_32.c
25445 +++ b/arch/x86/kernel/machine_kexec_32.c
25446 @@ -26,7 +26,7 @@
25447 #include <asm/cacheflush.h>
25448 #include <asm/debugreg.h>
25449
25450 -static void set_idt(void *newidt, __u16 limit)
25451 +static void set_idt(struct desc_struct *newidt, __u16 limit)
25452 {
25453 struct desc_ptr curidt;
25454
25455 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25456 }
25457
25458
25459 -static void set_gdt(void *newgdt, __u16 limit)
25460 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25461 {
25462 struct desc_ptr curgdt;
25463
25464 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25465 }
25466
25467 control_page = page_address(image->control_code_page);
25468 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25469 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25470
25471 relocate_kernel_ptr = control_page;
25472 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25473 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
25474 index 15c9876..0a43909 100644
25475 --- a/arch/x86/kernel/microcode_core.c
25476 +++ b/arch/x86/kernel/microcode_core.c
25477 @@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
25478 return NOTIFY_OK;
25479 }
25480
25481 -static struct notifier_block __refdata mc_cpu_notifier = {
25482 +static struct notifier_block mc_cpu_notifier = {
25483 .notifier_call = mc_cpu_callback,
25484 };
25485
25486 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
25487 index 5fb2ceb..3ae90bb 100644
25488 --- a/arch/x86/kernel/microcode_intel.c
25489 +++ b/arch/x86/kernel/microcode_intel.c
25490 @@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
25491
25492 static int get_ucode_user(void *to, const void *from, size_t n)
25493 {
25494 - return copy_from_user(to, from, n);
25495 + return copy_from_user(to, (const void __force_user *)from, n);
25496 }
25497
25498 static enum ucode_state
25499 request_microcode_user(int cpu, const void __user *buf, size_t size)
25500 {
25501 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
25502 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
25503 }
25504
25505 static void microcode_fini_cpu(int cpu)
25506 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25507 index 18be189..4a9fe40 100644
25508 --- a/arch/x86/kernel/module.c
25509 +++ b/arch/x86/kernel/module.c
25510 @@ -43,15 +43,60 @@ do { \
25511 } while (0)
25512 #endif
25513
25514 -void *module_alloc(unsigned long size)
25515 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25516 {
25517 - if (PAGE_ALIGN(size) > MODULES_LEN)
25518 + if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25519 return NULL;
25520 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
25521 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
25522 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
25523 NUMA_NO_NODE, __builtin_return_address(0));
25524 }
25525
25526 +void *module_alloc(unsigned long size)
25527 +{
25528 +
25529 +#ifdef CONFIG_PAX_KERNEXEC
25530 + return __module_alloc(size, PAGE_KERNEL);
25531 +#else
25532 + return __module_alloc(size, PAGE_KERNEL_EXEC);
25533 +#endif
25534 +
25535 +}
25536 +
25537 +#ifdef CONFIG_PAX_KERNEXEC
25538 +#ifdef CONFIG_X86_32
25539 +void *module_alloc_exec(unsigned long size)
25540 +{
25541 + struct vm_struct *area;
25542 +
25543 + if (size == 0)
25544 + return NULL;
25545 +
25546 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25547 + return area ? area->addr : NULL;
25548 +}
25549 +EXPORT_SYMBOL(module_alloc_exec);
25550 +
25551 +void module_free_exec(struct module *mod, void *module_region)
25552 +{
25553 + vunmap(module_region);
25554 +}
25555 +EXPORT_SYMBOL(module_free_exec);
25556 +#else
25557 +void module_free_exec(struct module *mod, void *module_region)
25558 +{
25559 + module_free(mod, module_region);
25560 +}
25561 +EXPORT_SYMBOL(module_free_exec);
25562 +
25563 +void *module_alloc_exec(unsigned long size)
25564 +{
25565 + return __module_alloc(size, PAGE_KERNEL_RX);
25566 +}
25567 +EXPORT_SYMBOL(module_alloc_exec);
25568 +#endif
25569 +#endif
25570 +
25571 #ifdef CONFIG_X86_32
25572 int apply_relocate(Elf32_Shdr *sechdrs,
25573 const char *strtab,
25574 @@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25575 unsigned int i;
25576 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
25577 Elf32_Sym *sym;
25578 - uint32_t *location;
25579 + uint32_t *plocation, location;
25580
25581 DEBUGP("Applying relocate section %u to %u\n",
25582 relsec, sechdrs[relsec].sh_info);
25583 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
25584 /* This is where to make the change */
25585 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
25586 - + rel[i].r_offset;
25587 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
25588 + location = (uint32_t)plocation;
25589 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
25590 + plocation = ktla_ktva((void *)plocation);
25591 /* This is the symbol it is referring to. Note that all
25592 undefined symbols have been resolved. */
25593 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
25594 @@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25595 switch (ELF32_R_TYPE(rel[i].r_info)) {
25596 case R_386_32:
25597 /* We add the value into the location given */
25598 - *location += sym->st_value;
25599 + pax_open_kernel();
25600 + *plocation += sym->st_value;
25601 + pax_close_kernel();
25602 break;
25603 case R_386_PC32:
25604 /* Add the value, subtract its position */
25605 - *location += sym->st_value - (uint32_t)location;
25606 + pax_open_kernel();
25607 + *plocation += sym->st_value - location;
25608 + pax_close_kernel();
25609 break;
25610 default:
25611 pr_err("%s: Unknown relocation: %u\n",
25612 @@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
25613 case R_X86_64_NONE:
25614 break;
25615 case R_X86_64_64:
25616 + pax_open_kernel();
25617 *(u64 *)loc = val;
25618 + pax_close_kernel();
25619 break;
25620 case R_X86_64_32:
25621 + pax_open_kernel();
25622 *(u32 *)loc = val;
25623 + pax_close_kernel();
25624 if (val != *(u32 *)loc)
25625 goto overflow;
25626 break;
25627 case R_X86_64_32S:
25628 + pax_open_kernel();
25629 *(s32 *)loc = val;
25630 + pax_close_kernel();
25631 if ((s64)val != *(s32 *)loc)
25632 goto overflow;
25633 break;
25634 case R_X86_64_PC32:
25635 val -= (u64)loc;
25636 + pax_open_kernel();
25637 *(u32 *)loc = val;
25638 + pax_close_kernel();
25639 +
25640 #if 0
25641 if ((s64)val != *(s32 *)loc)
25642 goto overflow;
25643 diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
25644 index 05266b5..3432443 100644
25645 --- a/arch/x86/kernel/msr.c
25646 +++ b/arch/x86/kernel/msr.c
25647 @@ -37,6 +37,7 @@
25648 #include <linux/notifier.h>
25649 #include <linux/uaccess.h>
25650 #include <linux/gfp.h>
25651 +#include <linux/grsecurity.h>
25652
25653 #include <asm/processor.h>
25654 #include <asm/msr.h>
25655 @@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
25656 int err = 0;
25657 ssize_t bytes = 0;
25658
25659 +#ifdef CONFIG_GRKERNSEC_KMEM
25660 + gr_handle_msr_write();
25661 + return -EPERM;
25662 +#endif
25663 +
25664 if (count % 8)
25665 return -EINVAL; /* Invalid chunk size */
25666
25667 @@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
25668 err = -EBADF;
25669 break;
25670 }
25671 +#ifdef CONFIG_GRKERNSEC_KMEM
25672 + gr_handle_msr_write();
25673 + return -EPERM;
25674 +#endif
25675 if (copy_from_user(&regs, uregs, sizeof regs)) {
25676 err = -EFAULT;
25677 break;
25678 @@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
25679 return notifier_from_errno(err);
25680 }
25681
25682 -static struct notifier_block __refdata msr_class_cpu_notifier = {
25683 +static struct notifier_block msr_class_cpu_notifier = {
25684 .notifier_call = msr_class_cpu_callback,
25685 };
25686
25687 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
25688 index 6fcb49c..5b3f4ff 100644
25689 --- a/arch/x86/kernel/nmi.c
25690 +++ b/arch/x86/kernel/nmi.c
25691 @@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
25692 return handled;
25693 }
25694
25695 -int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25696 +int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
25697 {
25698 struct nmi_desc *desc = nmi_to_desc(type);
25699 unsigned long flags;
25700 @@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25701 * event confuses some handlers (kdump uses this flag)
25702 */
25703 if (action->flags & NMI_FLAG_FIRST)
25704 - list_add_rcu(&action->list, &desc->head);
25705 + pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
25706 else
25707 - list_add_tail_rcu(&action->list, &desc->head);
25708 + pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
25709
25710 spin_unlock_irqrestore(&desc->lock, flags);
25711 return 0;
25712 @@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
25713 if (!strcmp(n->name, name)) {
25714 WARN(in_nmi(),
25715 "Trying to free NMI (%s) from NMI context!\n", n->name);
25716 - list_del_rcu(&n->list);
25717 + pax_list_del_rcu((struct list_head *)&n->list);
25718 break;
25719 }
25720 }
25721 @@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
25722 dotraplinkage notrace __kprobes void
25723 do_nmi(struct pt_regs *regs, long error_code)
25724 {
25725 +
25726 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25727 + if (!user_mode(regs)) {
25728 + unsigned long cs = regs->cs & 0xFFFF;
25729 + unsigned long ip = ktva_ktla(regs->ip);
25730 +
25731 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
25732 + regs->ip = ip;
25733 + }
25734 +#endif
25735 +
25736 nmi_nesting_preprocess(regs);
25737
25738 nmi_enter();
25739 diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
25740 index 6d9582e..f746287 100644
25741 --- a/arch/x86/kernel/nmi_selftest.c
25742 +++ b/arch/x86/kernel/nmi_selftest.c
25743 @@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
25744 {
25745 /* trap all the unknown NMIs we may generate */
25746 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
25747 - __initdata);
25748 + __initconst);
25749 }
25750
25751 static void __init cleanup_nmi_testsuite(void)
25752 @@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
25753 unsigned long timeout;
25754
25755 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
25756 - NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
25757 + NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
25758 nmi_fail = FAILURE;
25759 return;
25760 }
25761 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
25762 index bbb6c73..24a58ef 100644
25763 --- a/arch/x86/kernel/paravirt-spinlocks.c
25764 +++ b/arch/x86/kernel/paravirt-spinlocks.c
25765 @@ -8,7 +8,7 @@
25766
25767 #include <asm/paravirt.h>
25768
25769 -struct pv_lock_ops pv_lock_ops = {
25770 +struct pv_lock_ops pv_lock_ops __read_only = {
25771 #ifdef CONFIG_SMP
25772 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
25773 .unlock_kick = paravirt_nop,
25774 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
25775 index 1b10af8..0b58cbc 100644
25776 --- a/arch/x86/kernel/paravirt.c
25777 +++ b/arch/x86/kernel/paravirt.c
25778 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
25779 {
25780 return x;
25781 }
25782 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25783 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
25784 +#endif
25785
25786 void __init default_banner(void)
25787 {
25788 @@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
25789 if (opfunc == NULL)
25790 /* If there's no function, patch it with a ud2a (BUG) */
25791 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
25792 - else if (opfunc == _paravirt_nop)
25793 + else if (opfunc == (void *)_paravirt_nop)
25794 /* If the operation is a nop, then nop the callsite */
25795 ret = paravirt_patch_nop();
25796
25797 /* identity functions just return their single argument */
25798 - else if (opfunc == _paravirt_ident_32)
25799 + else if (opfunc == (void *)_paravirt_ident_32)
25800 ret = paravirt_patch_ident_32(insnbuf, len);
25801 - else if (opfunc == _paravirt_ident_64)
25802 + else if (opfunc == (void *)_paravirt_ident_64)
25803 ret = paravirt_patch_ident_64(insnbuf, len);
25804 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25805 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
25806 + ret = paravirt_patch_ident_64(insnbuf, len);
25807 +#endif
25808
25809 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
25810 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
25811 @@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
25812 if (insn_len > len || start == NULL)
25813 insn_len = len;
25814 else
25815 - memcpy(insnbuf, start, insn_len);
25816 + memcpy(insnbuf, ktla_ktva(start), insn_len);
25817
25818 return insn_len;
25819 }
25820 @@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
25821 return this_cpu_read(paravirt_lazy_mode);
25822 }
25823
25824 -struct pv_info pv_info = {
25825 +struct pv_info pv_info __read_only = {
25826 .name = "bare hardware",
25827 .paravirt_enabled = 0,
25828 .kernel_rpl = 0,
25829 @@ -310,16 +317,16 @@ struct pv_info pv_info = {
25830 #endif
25831 };
25832
25833 -struct pv_init_ops pv_init_ops = {
25834 +struct pv_init_ops pv_init_ops __read_only = {
25835 .patch = native_patch,
25836 };
25837
25838 -struct pv_time_ops pv_time_ops = {
25839 +struct pv_time_ops pv_time_ops __read_only = {
25840 .sched_clock = native_sched_clock,
25841 .steal_clock = native_steal_clock,
25842 };
25843
25844 -__visible struct pv_irq_ops pv_irq_ops = {
25845 +__visible struct pv_irq_ops pv_irq_ops __read_only = {
25846 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
25847 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
25848 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
25849 @@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
25850 #endif
25851 };
25852
25853 -__visible struct pv_cpu_ops pv_cpu_ops = {
25854 +__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
25855 .cpuid = native_cpuid,
25856 .get_debugreg = native_get_debugreg,
25857 .set_debugreg = native_set_debugreg,
25858 @@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
25859 .end_context_switch = paravirt_nop,
25860 };
25861
25862 -struct pv_apic_ops pv_apic_ops = {
25863 +struct pv_apic_ops pv_apic_ops __read_only= {
25864 #ifdef CONFIG_X86_LOCAL_APIC
25865 .startup_ipi_hook = paravirt_nop,
25866 #endif
25867 };
25868
25869 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
25870 +#ifdef CONFIG_X86_32
25871 +#ifdef CONFIG_X86_PAE
25872 +/* 64-bit pagetable entries */
25873 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
25874 +#else
25875 /* 32-bit pagetable entries */
25876 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
25877 +#endif
25878 #else
25879 /* 64-bit pagetable entries */
25880 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
25881 #endif
25882
25883 -struct pv_mmu_ops pv_mmu_ops = {
25884 +struct pv_mmu_ops pv_mmu_ops __read_only = {
25885
25886 .read_cr2 = native_read_cr2,
25887 .write_cr2 = native_write_cr2,
25888 @@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
25889 .make_pud = PTE_IDENT,
25890
25891 .set_pgd = native_set_pgd,
25892 + .set_pgd_batched = native_set_pgd_batched,
25893 #endif
25894 #endif /* PAGETABLE_LEVELS >= 3 */
25895
25896 @@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
25897 },
25898
25899 .set_fixmap = native_set_fixmap,
25900 +
25901 +#ifdef CONFIG_PAX_KERNEXEC
25902 + .pax_open_kernel = native_pax_open_kernel,
25903 + .pax_close_kernel = native_pax_close_kernel,
25904 +#endif
25905 +
25906 };
25907
25908 EXPORT_SYMBOL_GPL(pv_time_ops);
25909 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
25910 index 299d493..2ccb0ee 100644
25911 --- a/arch/x86/kernel/pci-calgary_64.c
25912 +++ b/arch/x86/kernel/pci-calgary_64.c
25913 @@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
25914 tce_space = be64_to_cpu(readq(target));
25915 tce_space = tce_space & TAR_SW_BITS;
25916
25917 - tce_space = tce_space & (~specified_table_size);
25918 + tce_space = tce_space & (~(unsigned long)specified_table_size);
25919 info->tce_space = (u64 *)__va(tce_space);
25920 }
25921 }
25922 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
25923 index 35ccf75..7a15747 100644
25924 --- a/arch/x86/kernel/pci-iommu_table.c
25925 +++ b/arch/x86/kernel/pci-iommu_table.c
25926 @@ -2,7 +2,7 @@
25927 #include <asm/iommu_table.h>
25928 #include <linux/string.h>
25929 #include <linux/kallsyms.h>
25930 -
25931 +#include <linux/sched.h>
25932
25933 #define DEBUG 1
25934
25935 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
25936 index 6c483ba..d10ce2f 100644
25937 --- a/arch/x86/kernel/pci-swiotlb.c
25938 +++ b/arch/x86/kernel/pci-swiotlb.c
25939 @@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
25940 void *vaddr, dma_addr_t dma_addr,
25941 struct dma_attrs *attrs)
25942 {
25943 - swiotlb_free_coherent(dev, size, vaddr, dma_addr);
25944 + swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
25945 }
25946
25947 static struct dma_map_ops swiotlb_dma_ops = {
25948 diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
25949 index ca7f0d5..8996469 100644
25950 --- a/arch/x86/kernel/preempt.S
25951 +++ b/arch/x86/kernel/preempt.S
25952 @@ -3,12 +3,14 @@
25953 #include <asm/dwarf2.h>
25954 #include <asm/asm.h>
25955 #include <asm/calling.h>
25956 +#include <asm/alternative-asm.h>
25957
25958 ENTRY(___preempt_schedule)
25959 CFI_STARTPROC
25960 SAVE_ALL
25961 call preempt_schedule
25962 RESTORE_ALL
25963 + pax_force_retaddr
25964 ret
25965 CFI_ENDPROC
25966
25967 @@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
25968 SAVE_ALL
25969 call preempt_schedule_context
25970 RESTORE_ALL
25971 + pax_force_retaddr
25972 ret
25973 CFI_ENDPROC
25974
25975 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
25976 index 3fb8d95..254dc51 100644
25977 --- a/arch/x86/kernel/process.c
25978 +++ b/arch/x86/kernel/process.c
25979 @@ -36,7 +36,8 @@
25980 * section. Since TSS's are completely CPU-local, we want them
25981 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
25982 */
25983 -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
25984 +struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
25985 +EXPORT_SYMBOL(init_tss);
25986
25987 #ifdef CONFIG_X86_64
25988 static DEFINE_PER_CPU(unsigned char, is_idle);
25989 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
25990 task_xstate_cachep =
25991 kmem_cache_create("task_xstate", xstate_size,
25992 __alignof__(union thread_xstate),
25993 - SLAB_PANIC | SLAB_NOTRACK, NULL);
25994 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
25995 }
25996
25997 /*
25998 @@ -105,7 +106,7 @@ void exit_thread(void)
25999 unsigned long *bp = t->io_bitmap_ptr;
26000
26001 if (bp) {
26002 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26003 + struct tss_struct *tss = init_tss + get_cpu();
26004
26005 t->io_bitmap_ptr = NULL;
26006 clear_thread_flag(TIF_IO_BITMAP);
26007 @@ -125,6 +126,9 @@ void flush_thread(void)
26008 {
26009 struct task_struct *tsk = current;
26010
26011 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26012 + loadsegment(gs, 0);
26013 +#endif
26014 flush_ptrace_hw_breakpoint(tsk);
26015 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26016 drop_init_fpu(tsk);
26017 @@ -271,7 +275,7 @@ static void __exit_idle(void)
26018 void exit_idle(void)
26019 {
26020 /* idle loop has pid 0 */
26021 - if (current->pid)
26022 + if (task_pid_nr(current))
26023 return;
26024 __exit_idle();
26025 }
26026 @@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26027 return ret;
26028 }
26029 #endif
26030 -void stop_this_cpu(void *dummy)
26031 +__noreturn void stop_this_cpu(void *dummy)
26032 {
26033 local_irq_disable();
26034 /*
26035 @@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26036 }
26037 early_param("idle", idle_setup);
26038
26039 -unsigned long arch_align_stack(unsigned long sp)
26040 +#ifdef CONFIG_PAX_RANDKSTACK
26041 +void pax_randomize_kstack(struct pt_regs *regs)
26042 {
26043 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26044 - sp -= get_random_int() % 8192;
26045 - return sp & ~0xf;
26046 -}
26047 + struct thread_struct *thread = &current->thread;
26048 + unsigned long time;
26049
26050 -unsigned long arch_randomize_brk(struct mm_struct *mm)
26051 -{
26052 - unsigned long range_end = mm->brk + 0x02000000;
26053 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26054 -}
26055 + if (!randomize_va_space)
26056 + return;
26057 +
26058 + if (v8086_mode(regs))
26059 + return;
26060
26061 + rdtscl(time);
26062 +
26063 + /* P4 seems to return a 0 LSB, ignore it */
26064 +#ifdef CONFIG_MPENTIUM4
26065 + time &= 0x3EUL;
26066 + time <<= 2;
26067 +#elif defined(CONFIG_X86_64)
26068 + time &= 0xFUL;
26069 + time <<= 4;
26070 +#else
26071 + time &= 0x1FUL;
26072 + time <<= 3;
26073 +#endif
26074 +
26075 + thread->sp0 ^= time;
26076 + load_sp0(init_tss + smp_processor_id(), thread);
26077 +
26078 +#ifdef CONFIG_X86_64
26079 + this_cpu_write(kernel_stack, thread->sp0);
26080 +#endif
26081 +}
26082 +#endif
26083 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26084 index 6f1236c..fd448d4 100644
26085 --- a/arch/x86/kernel/process_32.c
26086 +++ b/arch/x86/kernel/process_32.c
26087 @@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26088 unsigned long thread_saved_pc(struct task_struct *tsk)
26089 {
26090 return ((unsigned long *)tsk->thread.sp)[3];
26091 +//XXX return tsk->thread.eip;
26092 }
26093
26094 void __show_regs(struct pt_regs *regs, int all)
26095 @@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
26096 unsigned long sp;
26097 unsigned short ss, gs;
26098
26099 - if (user_mode_vm(regs)) {
26100 + if (user_mode(regs)) {
26101 sp = regs->sp;
26102 ss = regs->ss & 0xffff;
26103 - gs = get_user_gs(regs);
26104 } else {
26105 sp = kernel_stack_pointer(regs);
26106 savesegment(ss, ss);
26107 - savesegment(gs, gs);
26108 }
26109 + gs = get_user_gs(regs);
26110
26111 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26112 (u16)regs->cs, regs->ip, regs->flags,
26113 - smp_processor_id());
26114 + raw_smp_processor_id());
26115 print_symbol("EIP is at %s\n", regs->ip);
26116
26117 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26118 @@ -133,20 +133,21 @@ void release_thread(struct task_struct *dead_task)
26119 int copy_thread(unsigned long clone_flags, unsigned long sp,
26120 unsigned long arg, struct task_struct *p)
26121 {
26122 - struct pt_regs *childregs = task_pt_regs(p);
26123 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26124 struct task_struct *tsk;
26125 int err;
26126
26127 p->thread.sp = (unsigned long) childregs;
26128 p->thread.sp0 = (unsigned long) (childregs+1);
26129 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26130
26131 if (unlikely(p->flags & PF_KTHREAD)) {
26132 /* kernel thread */
26133 memset(childregs, 0, sizeof(struct pt_regs));
26134 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26135 - task_user_gs(p) = __KERNEL_STACK_CANARY;
26136 - childregs->ds = __USER_DS;
26137 - childregs->es = __USER_DS;
26138 + savesegment(gs, childregs->gs);
26139 + childregs->ds = __KERNEL_DS;
26140 + childregs->es = __KERNEL_DS;
26141 childregs->fs = __KERNEL_PERCPU;
26142 childregs->bx = sp; /* function */
26143 childregs->bp = arg;
26144 @@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26145 struct thread_struct *prev = &prev_p->thread,
26146 *next = &next_p->thread;
26147 int cpu = smp_processor_id();
26148 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26149 + struct tss_struct *tss = init_tss + cpu;
26150 fpu_switch_t fpu;
26151
26152 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26153 @@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26154 */
26155 lazy_save_gs(prev->gs);
26156
26157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
26158 + __set_fs(task_thread_info(next_p)->addr_limit);
26159 +#endif
26160 +
26161 /*
26162 * Load the per-thread Thread-Local Storage descriptor.
26163 */
26164 @@ -315,6 +320,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26165 */
26166 arch_end_context_switch(next_p);
26167
26168 + this_cpu_write(current_task, next_p);
26169 + this_cpu_write(current_tinfo, &next_p->tinfo);
26170 +
26171 /*
26172 * Restore %gs if needed (which is common)
26173 */
26174 @@ -323,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26175
26176 switch_fpu_finish(next_p, fpu);
26177
26178 - this_cpu_write(current_task, next_p);
26179 -
26180 return prev_p;
26181 }
26182
26183 @@ -354,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
26184 } while (count++ < 16);
26185 return 0;
26186 }
26187 -
26188 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26189 index 9c0280f..5bbb1c0 100644
26190 --- a/arch/x86/kernel/process_64.c
26191 +++ b/arch/x86/kernel/process_64.c
26192 @@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26193 struct pt_regs *childregs;
26194 struct task_struct *me = current;
26195
26196 - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26197 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26198 childregs = task_pt_regs(p);
26199 p->thread.sp = (unsigned long) childregs;
26200 p->thread.usersp = me->thread.usersp;
26201 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26202 set_tsk_thread_flag(p, TIF_FORK);
26203 p->thread.fpu_counter = 0;
26204 p->thread.io_bitmap_ptr = NULL;
26205 @@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26206 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26207 savesegment(es, p->thread.es);
26208 savesegment(ds, p->thread.ds);
26209 + savesegment(ss, p->thread.ss);
26210 + BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26211 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26212
26213 if (unlikely(p->flags & PF_KTHREAD)) {
26214 @@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26215 struct thread_struct *prev = &prev_p->thread;
26216 struct thread_struct *next = &next_p->thread;
26217 int cpu = smp_processor_id();
26218 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26219 + struct tss_struct *tss = init_tss + cpu;
26220 unsigned fsindex, gsindex;
26221 fpu_switch_t fpu;
26222
26223 @@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26224 if (unlikely(next->ds | prev->ds))
26225 loadsegment(ds, next->ds);
26226
26227 + savesegment(ss, prev->ss);
26228 + if (unlikely(next->ss != prev->ss))
26229 + loadsegment(ss, next->ss);
26230
26231 /* We must save %fs and %gs before load_TLS() because
26232 * %fs and %gs may be cleared by load_TLS().
26233 @@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26234 prev->usersp = this_cpu_read(old_rsp);
26235 this_cpu_write(old_rsp, next->usersp);
26236 this_cpu_write(current_task, next_p);
26237 + this_cpu_write(current_tinfo, &next_p->tinfo);
26238
26239 /*
26240 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26241 @@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26242 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26243 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26244
26245 - this_cpu_write(kernel_stack,
26246 - (unsigned long)task_stack_page(next_p) +
26247 - THREAD_SIZE - KERNEL_STACK_OFFSET);
26248 + this_cpu_write(kernel_stack, next->sp0);
26249
26250 /*
26251 * Now maybe reload the debug registers and handle I/O bitmaps
26252 @@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_struct *p)
26253 if (!p || p == current || p->state == TASK_RUNNING)
26254 return 0;
26255 stack = (unsigned long)task_stack_page(p);
26256 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26257 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26258 return 0;
26259 fp = *(u64 *)(p->thread.sp);
26260 do {
26261 - if (fp < (unsigned long)stack ||
26262 - fp >= (unsigned long)stack+THREAD_SIZE)
26263 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26264 return 0;
26265 ip = *(u64 *)(fp+8);
26266 if (!in_sched_functions(ip))
26267 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26268 index 7461f50..1334029 100644
26269 --- a/arch/x86/kernel/ptrace.c
26270 +++ b/arch/x86/kernel/ptrace.c
26271 @@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26272 {
26273 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
26274 unsigned long sp = (unsigned long)&regs->sp;
26275 - struct thread_info *tinfo;
26276
26277 - if (context == (sp & ~(THREAD_SIZE - 1)))
26278 + if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26279 return sp;
26280
26281 - tinfo = (struct thread_info *)context;
26282 - if (tinfo->previous_esp)
26283 - return tinfo->previous_esp;
26284 + sp = *(unsigned long *)context;
26285 + if (sp)
26286 + return sp;
26287
26288 return (unsigned long)regs;
26289 }
26290 @@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
26291 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26292 {
26293 int i;
26294 - int dr7 = 0;
26295 + unsigned long dr7 = 0;
26296 struct arch_hw_breakpoint *info;
26297
26298 for (i = 0; i < HBP_NUM; i++) {
26299 @@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
26300 unsigned long addr, unsigned long data)
26301 {
26302 int ret;
26303 - unsigned long __user *datap = (unsigned long __user *)data;
26304 + unsigned long __user *datap = (__force unsigned long __user *)data;
26305
26306 switch (request) {
26307 /* read the word at location addr in the USER area. */
26308 @@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
26309 if ((int) addr < 0)
26310 return -EIO;
26311 ret = do_get_thread_area(child, addr,
26312 - (struct user_desc __user *)data);
26313 + (__force struct user_desc __user *) data);
26314 break;
26315
26316 case PTRACE_SET_THREAD_AREA:
26317 if ((int) addr < 0)
26318 return -EIO;
26319 ret = do_set_thread_area(child, addr,
26320 - (struct user_desc __user *)data, 0);
26321 + (__force struct user_desc __user *) data, 0);
26322 break;
26323 #endif
26324
26325 @@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26326
26327 #ifdef CONFIG_X86_64
26328
26329 -static struct user_regset x86_64_regsets[] __read_mostly = {
26330 +static user_regset_no_const x86_64_regsets[] __read_only = {
26331 [REGSET_GENERAL] = {
26332 .core_note_type = NT_PRSTATUS,
26333 .n = sizeof(struct user_regs_struct) / sizeof(long),
26334 @@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
26335 #endif /* CONFIG_X86_64 */
26336
26337 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26338 -static struct user_regset x86_32_regsets[] __read_mostly = {
26339 +static user_regset_no_const x86_32_regsets[] __read_only = {
26340 [REGSET_GENERAL] = {
26341 .core_note_type = NT_PRSTATUS,
26342 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26343 @@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
26344 */
26345 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26346
26347 -void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26348 +void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26349 {
26350 #ifdef CONFIG_X86_64
26351 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26352 @@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26353 memset(info, 0, sizeof(*info));
26354 info->si_signo = SIGTRAP;
26355 info->si_code = si_code;
26356 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26357 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26358 }
26359
26360 void user_single_step_siginfo(struct task_struct *tsk,
26361 @@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
26362 # define IS_IA32 0
26363 #endif
26364
26365 +#ifdef CONFIG_GRKERNSEC_SETXID
26366 +extern void gr_delayed_cred_worker(void);
26367 +#endif
26368 +
26369 /*
26370 * We must return the syscall number to actually look up in the table.
26371 * This can be -1L to skip running any syscall at all.
26372 @@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
26373
26374 user_exit();
26375
26376 +#ifdef CONFIG_GRKERNSEC_SETXID
26377 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26378 + gr_delayed_cred_worker();
26379 +#endif
26380 +
26381 /*
26382 * If we stepped into a sysenter/syscall insn, it trapped in
26383 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26384 @@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26385 */
26386 user_exit();
26387
26388 +#ifdef CONFIG_GRKERNSEC_SETXID
26389 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26390 + gr_delayed_cred_worker();
26391 +#endif
26392 +
26393 audit_syscall_exit(regs);
26394
26395 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26396 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26397 index 2f355d2..e75ed0a 100644
26398 --- a/arch/x86/kernel/pvclock.c
26399 +++ b/arch/x86/kernel/pvclock.c
26400 @@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26401 reset_hung_task_detector();
26402 }
26403
26404 -static atomic64_t last_value = ATOMIC64_INIT(0);
26405 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26406
26407 void pvclock_resume(void)
26408 {
26409 - atomic64_set(&last_value, 0);
26410 + atomic64_set_unchecked(&last_value, 0);
26411 }
26412
26413 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26414 @@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26415 * updating at the same time, and one of them could be slightly behind,
26416 * making the assumption that last_value always go forward fail to hold.
26417 */
26418 - last = atomic64_read(&last_value);
26419 + last = atomic64_read_unchecked(&last_value);
26420 do {
26421 if (ret < last)
26422 return last;
26423 - last = atomic64_cmpxchg(&last_value, last, ret);
26424 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26425 } while (unlikely(last != ret));
26426
26427 return ret;
26428 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26429 index c752cb4..866c432 100644
26430 --- a/arch/x86/kernel/reboot.c
26431 +++ b/arch/x86/kernel/reboot.c
26432 @@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26433
26434 void __noreturn machine_real_restart(unsigned int type)
26435 {
26436 +
26437 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26438 + struct desc_struct *gdt;
26439 +#endif
26440 +
26441 local_irq_disable();
26442
26443 /*
26444 @@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
26445
26446 /* Jump to the identity-mapped low memory code */
26447 #ifdef CONFIG_X86_32
26448 - asm volatile("jmpl *%0" : :
26449 +
26450 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26451 + gdt = get_cpu_gdt_table(smp_processor_id());
26452 + pax_open_kernel();
26453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
26454 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26455 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26456 + loadsegment(ds, __KERNEL_DS);
26457 + loadsegment(es, __KERNEL_DS);
26458 + loadsegment(ss, __KERNEL_DS);
26459 +#endif
26460 +#ifdef CONFIG_PAX_KERNEXEC
26461 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26462 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26463 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26464 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26465 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26466 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26467 +#endif
26468 + pax_close_kernel();
26469 +#endif
26470 +
26471 + asm volatile("ljmpl *%0" : :
26472 "rm" (real_mode_header->machine_real_restart_asm),
26473 "a" (type));
26474 #else
26475 @@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26476 * try to force a triple fault and then cycle between hitting the keyboard
26477 * controller and doing that
26478 */
26479 -static void native_machine_emergency_restart(void)
26480 +static void __noreturn native_machine_emergency_restart(void)
26481 {
26482 int i;
26483 int attempt = 0;
26484 @@ -593,13 +620,13 @@ void native_machine_shutdown(void)
26485 #endif
26486 }
26487
26488 -static void __machine_emergency_restart(int emergency)
26489 +static void __noreturn __machine_emergency_restart(int emergency)
26490 {
26491 reboot_emergency = emergency;
26492 machine_ops.emergency_restart();
26493 }
26494
26495 -static void native_machine_restart(char *__unused)
26496 +static void __noreturn native_machine_restart(char *__unused)
26497 {
26498 pr_notice("machine restart\n");
26499
26500 @@ -608,7 +635,7 @@ static void native_machine_restart(char *__unused)
26501 __machine_emergency_restart(0);
26502 }
26503
26504 -static void native_machine_halt(void)
26505 +static void __noreturn native_machine_halt(void)
26506 {
26507 /* Stop other cpus and apics */
26508 machine_shutdown();
26509 @@ -618,7 +645,7 @@ static void native_machine_halt(void)
26510 stop_this_cpu(NULL);
26511 }
26512
26513 -static void native_machine_power_off(void)
26514 +static void __noreturn native_machine_power_off(void)
26515 {
26516 if (pm_power_off) {
26517 if (!reboot_force)
26518 @@ -627,9 +654,10 @@ static void native_machine_power_off(void)
26519 }
26520 /* A fallback in case there is no PM info available */
26521 tboot_shutdown(TB_SHUTDOWN_HALT);
26522 + unreachable();
26523 }
26524
26525 -struct machine_ops machine_ops = {
26526 +struct machine_ops machine_ops __read_only = {
26527 .power_off = native_machine_power_off,
26528 .shutdown = native_machine_shutdown,
26529 .emergency_restart = native_machine_emergency_restart,
26530 diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
26531 index c8e41e9..64049ef 100644
26532 --- a/arch/x86/kernel/reboot_fixups_32.c
26533 +++ b/arch/x86/kernel/reboot_fixups_32.c
26534 @@ -57,7 +57,7 @@ struct device_fixup {
26535 unsigned int vendor;
26536 unsigned int device;
26537 void (*reboot_fixup)(struct pci_dev *);
26538 -};
26539 +} __do_const;
26540
26541 /*
26542 * PCI ids solely used for fixups_table go here
26543 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
26544 index 3fd2c69..a444264 100644
26545 --- a/arch/x86/kernel/relocate_kernel_64.S
26546 +++ b/arch/x86/kernel/relocate_kernel_64.S
26547 @@ -96,8 +96,7 @@ relocate_kernel:
26548
26549 /* jump to identity mapped page */
26550 addq $(identity_mapped - relocate_kernel), %r8
26551 - pushq %r8
26552 - ret
26553 + jmp *%r8
26554
26555 identity_mapped:
26556 /* set return address to 0 if not preserving context */
26557 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
26558 index cb233bc..23b4879 100644
26559 --- a/arch/x86/kernel/setup.c
26560 +++ b/arch/x86/kernel/setup.c
26561 @@ -110,6 +110,7 @@
26562 #include <asm/mce.h>
26563 #include <asm/alternative.h>
26564 #include <asm/prom.h>
26565 +#include <asm/boot.h>
26566
26567 /*
26568 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
26569 @@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
26570 #endif
26571
26572
26573 -#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
26574 -__visible unsigned long mmu_cr4_features;
26575 +#ifdef CONFIG_X86_64
26576 +__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
26577 +#elif defined(CONFIG_X86_PAE)
26578 +__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
26579 #else
26580 -__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
26581 +__visible unsigned long mmu_cr4_features __read_only;
26582 #endif
26583
26584 +void set_in_cr4(unsigned long mask)
26585 +{
26586 + unsigned long cr4 = read_cr4();
26587 +
26588 + if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
26589 + return;
26590 +
26591 + pax_open_kernel();
26592 + mmu_cr4_features |= mask;
26593 + pax_close_kernel();
26594 +
26595 + if (trampoline_cr4_features)
26596 + *trampoline_cr4_features = mmu_cr4_features;
26597 + cr4 |= mask;
26598 + write_cr4(cr4);
26599 +}
26600 +EXPORT_SYMBOL(set_in_cr4);
26601 +
26602 +void clear_in_cr4(unsigned long mask)
26603 +{
26604 + unsigned long cr4 = read_cr4();
26605 +
26606 + if (!(cr4 & mask) && cr4 == mmu_cr4_features)
26607 + return;
26608 +
26609 + pax_open_kernel();
26610 + mmu_cr4_features &= ~mask;
26611 + pax_close_kernel();
26612 +
26613 + if (trampoline_cr4_features)
26614 + *trampoline_cr4_features = mmu_cr4_features;
26615 + cr4 &= ~mask;
26616 + write_cr4(cr4);
26617 +}
26618 +EXPORT_SYMBOL(clear_in_cr4);
26619 +
26620 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
26621 int bootloader_type, bootloader_version;
26622
26623 @@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
26624 * area (640->1Mb) as ram even though it is not.
26625 * take them out.
26626 */
26627 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
26628 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
26629
26630 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
26631 }
26632 @@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
26633 /* called before trim_bios_range() to spare extra sanitize */
26634 static void __init e820_add_kernel_range(void)
26635 {
26636 - u64 start = __pa_symbol(_text);
26637 + u64 start = __pa_symbol(ktla_ktva(_text));
26638 u64 size = __pa_symbol(_end) - start;
26639
26640 /*
26641 @@ -838,8 +877,12 @@ static void __init trim_low_memory_range(void)
26642
26643 void __init setup_arch(char **cmdline_p)
26644 {
26645 +#ifdef CONFIG_X86_32
26646 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
26647 +#else
26648 memblock_reserve(__pa_symbol(_text),
26649 (unsigned long)__bss_stop - (unsigned long)_text);
26650 +#endif
26651
26652 early_reserve_initrd();
26653
26654 @@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
26655
26656 if (!boot_params.hdr.root_flags)
26657 root_mountflags &= ~MS_RDONLY;
26658 - init_mm.start_code = (unsigned long) _text;
26659 - init_mm.end_code = (unsigned long) _etext;
26660 + init_mm.start_code = ktla_ktva((unsigned long) _text);
26661 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
26662 init_mm.end_data = (unsigned long) _edata;
26663 init_mm.brk = _brk_end;
26664
26665 - code_resource.start = __pa_symbol(_text);
26666 - code_resource.end = __pa_symbol(_etext)-1;
26667 - data_resource.start = __pa_symbol(_etext);
26668 + code_resource.start = __pa_symbol(ktla_ktva(_text));
26669 + code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
26670 + data_resource.start = __pa_symbol(_sdata);
26671 data_resource.end = __pa_symbol(_edata)-1;
26672 bss_resource.start = __pa_symbol(__bss_start);
26673 bss_resource.end = __pa_symbol(__bss_stop)-1;
26674 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
26675 index 5cdff03..80fa283 100644
26676 --- a/arch/x86/kernel/setup_percpu.c
26677 +++ b/arch/x86/kernel/setup_percpu.c
26678 @@ -21,19 +21,17 @@
26679 #include <asm/cpu.h>
26680 #include <asm/stackprotector.h>
26681
26682 -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
26683 +#ifdef CONFIG_SMP
26684 +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
26685 EXPORT_PER_CPU_SYMBOL(cpu_number);
26686 +#endif
26687
26688 -#ifdef CONFIG_X86_64
26689 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
26690 -#else
26691 -#define BOOT_PERCPU_OFFSET 0
26692 -#endif
26693
26694 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
26695 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
26696
26697 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
26698 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
26699 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
26700 };
26701 EXPORT_SYMBOL(__per_cpu_offset);
26702 @@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
26703 {
26704 #ifdef CONFIG_NEED_MULTIPLE_NODES
26705 pg_data_t *last = NULL;
26706 - unsigned int cpu;
26707 + int cpu;
26708
26709 for_each_possible_cpu(cpu) {
26710 int node = early_cpu_to_node(cpu);
26711 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
26712 {
26713 #ifdef CONFIG_X86_32
26714 struct desc_struct gdt;
26715 + unsigned long base = per_cpu_offset(cpu);
26716
26717 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
26718 - 0x2 | DESCTYPE_S, 0x8);
26719 - gdt.s = 1;
26720 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
26721 + 0x83 | DESCTYPE_S, 0xC);
26722 write_gdt_entry(get_cpu_gdt_table(cpu),
26723 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26724 #endif
26725 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
26726 /* alrighty, percpu areas up and running */
26727 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
26728 for_each_possible_cpu(cpu) {
26729 +#ifdef CONFIG_CC_STACKPROTECTOR
26730 +#ifdef CONFIG_X86_32
26731 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
26732 +#endif
26733 +#endif
26734 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
26735 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
26736 per_cpu(cpu_number, cpu) = cpu;
26737 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
26738 */
26739 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
26740 #endif
26741 +#ifdef CONFIG_CC_STACKPROTECTOR
26742 +#ifdef CONFIG_X86_32
26743 + if (!cpu)
26744 + per_cpu(stack_canary.canary, cpu) = canary;
26745 +#endif
26746 +#endif
26747 /*
26748 * Up to this point, the boot CPU has been using .init.data
26749 * area. Reload any changed state for the boot CPU.
26750 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
26751 index 9e5de68..16c53cb 100644
26752 --- a/arch/x86/kernel/signal.c
26753 +++ b/arch/x86/kernel/signal.c
26754 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
26755 * Align the stack pointer according to the i386 ABI,
26756 * i.e. so that on function entry ((sp + 4) & 15) == 0.
26757 */
26758 - sp = ((sp + 4) & -16ul) - 4;
26759 + sp = ((sp - 12) & -16ul) - 4;
26760 #else /* !CONFIG_X86_32 */
26761 sp = round_down(sp, 16) - 8;
26762 #endif
26763 @@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26764 }
26765
26766 if (current->mm->context.vdso)
26767 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26768 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26769 else
26770 - restorer = &frame->retcode;
26771 + restorer = (void __user *)&frame->retcode;
26772 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26773 restorer = ksig->ka.sa.sa_restorer;
26774
26775 @@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26776 * reasons and because gdb uses it as a signature to notice
26777 * signal handler stack frames.
26778 */
26779 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
26780 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
26781
26782 if (err)
26783 return -EFAULT;
26784 @@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26785 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
26786
26787 /* Set up to return from userspace. */
26788 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26789 + if (current->mm->context.vdso)
26790 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26791 + else
26792 + restorer = (void __user *)&frame->retcode;
26793 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26794 restorer = ksig->ka.sa.sa_restorer;
26795 put_user_ex(restorer, &frame->pretcode);
26796 @@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26797 * reasons and because gdb uses it as a signature to notice
26798 * signal handler stack frames.
26799 */
26800 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
26801 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
26802 } put_user_catch(err);
26803
26804 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
26805 @@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26806 {
26807 int usig = signr_convert(ksig->sig);
26808 sigset_t *set = sigmask_to_save();
26809 - compat_sigset_t *cset = (compat_sigset_t *) set;
26810 + sigset_t sigcopy;
26811 + compat_sigset_t *cset;
26812 +
26813 + sigcopy = *set;
26814 +
26815 + cset = (compat_sigset_t *) &sigcopy;
26816
26817 /* Set up the stack frame */
26818 if (is_ia32_frame()) {
26819 @@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26820 } else if (is_x32_frame()) {
26821 return x32_setup_rt_frame(ksig, cset, regs);
26822 } else {
26823 - return __setup_rt_frame(ksig->sig, ksig, set, regs);
26824 + return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
26825 }
26826 }
26827
26828 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
26829 index 7c3a5a6..f0a8961 100644
26830 --- a/arch/x86/kernel/smp.c
26831 +++ b/arch/x86/kernel/smp.c
26832 @@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
26833
26834 __setup("nonmi_ipi", nonmi_ipi_setup);
26835
26836 -struct smp_ops smp_ops = {
26837 +struct smp_ops smp_ops __read_only = {
26838 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
26839 .smp_prepare_cpus = native_smp_prepare_cpus,
26840 .smp_cpus_done = native_smp_cpus_done,
26841 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
26842 index 85dc05a..f8c96f6 100644
26843 --- a/arch/x86/kernel/smpboot.c
26844 +++ b/arch/x86/kernel/smpboot.c
26845 @@ -229,14 +229,18 @@ static void notrace start_secondary(void *unused)
26846
26847 enable_start_cpu0 = 0;
26848
26849 -#ifdef CONFIG_X86_32
26850 - /* switch away from the initial page table */
26851 - load_cr3(swapper_pg_dir);
26852 - __flush_tlb_all();
26853 -#endif
26854 -
26855 /* otherwise gcc will move up smp_processor_id before the cpu_init */
26856 barrier();
26857 +
26858 + /* switch away from the initial page table */
26859 +#ifdef CONFIG_PAX_PER_CPU_PGD
26860 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
26861 + __flush_tlb_all();
26862 +#elif defined(CONFIG_X86_32)
26863 + load_cr3(swapper_pg_dir);
26864 + __flush_tlb_all();
26865 +#endif
26866 +
26867 /*
26868 * Check TSC synchronization with the BP:
26869 */
26870 @@ -749,8 +753,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26871 alternatives_enable_smp();
26872
26873 idle->thread.sp = (unsigned long) (((struct pt_regs *)
26874 - (THREAD_SIZE + task_stack_page(idle))) - 1);
26875 + (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
26876 per_cpu(current_task, cpu) = idle;
26877 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26878
26879 #ifdef CONFIG_X86_32
26880 /* Stack for startup_32 can be just as for start_secondary onwards */
26881 @@ -758,11 +763,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26882 #else
26883 clear_tsk_thread_flag(idle, TIF_FORK);
26884 initial_gs = per_cpu_offset(cpu);
26885 - per_cpu(kernel_stack, cpu) =
26886 - (unsigned long)task_stack_page(idle) -
26887 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26888 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26889 #endif
26890 +
26891 + pax_open_kernel();
26892 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
26893 + pax_close_kernel();
26894 +
26895 initial_code = (unsigned long)start_secondary;
26896 stack_start = idle->thread.sp;
26897
26898 @@ -911,6 +918,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
26899 /* the FPU context is blank, nobody can own it */
26900 __cpu_disable_lazy_restore(cpu);
26901
26902 +#ifdef CONFIG_PAX_PER_CPU_PGD
26903 + clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
26904 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26905 + KERNEL_PGD_PTRS);
26906 + clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
26907 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26908 + KERNEL_PGD_PTRS);
26909 +#endif
26910 +
26911 err = do_boot_cpu(apicid, cpu, tidle);
26912 if (err) {
26913 pr_debug("do_boot_cpu failed %d\n", err);
26914 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
26915 index 9b4d51d..5d28b58 100644
26916 --- a/arch/x86/kernel/step.c
26917 +++ b/arch/x86/kernel/step.c
26918 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26919 struct desc_struct *desc;
26920 unsigned long base;
26921
26922 - seg &= ~7UL;
26923 + seg >>= 3;
26924
26925 mutex_lock(&child->mm->context.lock);
26926 - if (unlikely((seg >> 3) >= child->mm->context.size))
26927 + if (unlikely(seg >= child->mm->context.size))
26928 addr = -1L; /* bogus selector, access would fault */
26929 else {
26930 desc = child->mm->context.ldt + seg;
26931 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26932 addr += base;
26933 }
26934 mutex_unlock(&child->mm->context.lock);
26935 - }
26936 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
26937 + addr = ktla_ktva(addr);
26938
26939 return addr;
26940 }
26941 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
26942 unsigned char opcode[15];
26943 unsigned long addr = convert_ip_to_linear(child, regs);
26944
26945 + if (addr == -EINVAL)
26946 + return 0;
26947 +
26948 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
26949 for (i = 0; i < copied; i++) {
26950 switch (opcode[i]) {
26951 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
26952 new file mode 100644
26953 index 0000000..5877189
26954 --- /dev/null
26955 +++ b/arch/x86/kernel/sys_i386_32.c
26956 @@ -0,0 +1,189 @@
26957 +/*
26958 + * This file contains various random system calls that
26959 + * have a non-standard calling sequence on the Linux/i386
26960 + * platform.
26961 + */
26962 +
26963 +#include <linux/errno.h>
26964 +#include <linux/sched.h>
26965 +#include <linux/mm.h>
26966 +#include <linux/fs.h>
26967 +#include <linux/smp.h>
26968 +#include <linux/sem.h>
26969 +#include <linux/msg.h>
26970 +#include <linux/shm.h>
26971 +#include <linux/stat.h>
26972 +#include <linux/syscalls.h>
26973 +#include <linux/mman.h>
26974 +#include <linux/file.h>
26975 +#include <linux/utsname.h>
26976 +#include <linux/ipc.h>
26977 +#include <linux/elf.h>
26978 +
26979 +#include <linux/uaccess.h>
26980 +#include <linux/unistd.h>
26981 +
26982 +#include <asm/syscalls.h>
26983 +
26984 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
26985 +{
26986 + unsigned long pax_task_size = TASK_SIZE;
26987 +
26988 +#ifdef CONFIG_PAX_SEGMEXEC
26989 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
26990 + pax_task_size = SEGMEXEC_TASK_SIZE;
26991 +#endif
26992 +
26993 + if (flags & MAP_FIXED)
26994 + if (len > pax_task_size || addr > pax_task_size - len)
26995 + return -EINVAL;
26996 +
26997 + return 0;
26998 +}
26999 +
27000 +/*
27001 + * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27002 + */
27003 +static unsigned long get_align_mask(void)
27004 +{
27005 + if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27006 + return 0;
27007 +
27008 + if (!(current->flags & PF_RANDOMIZE))
27009 + return 0;
27010 +
27011 + return va_align.mask;
27012 +}
27013 +
27014 +unsigned long
27015 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
27016 + unsigned long len, unsigned long pgoff, unsigned long flags)
27017 +{
27018 + struct mm_struct *mm = current->mm;
27019 + struct vm_area_struct *vma;
27020 + unsigned long pax_task_size = TASK_SIZE;
27021 + struct vm_unmapped_area_info info;
27022 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27023 +
27024 +#ifdef CONFIG_PAX_SEGMEXEC
27025 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27026 + pax_task_size = SEGMEXEC_TASK_SIZE;
27027 +#endif
27028 +
27029 + pax_task_size -= PAGE_SIZE;
27030 +
27031 + if (len > pax_task_size)
27032 + return -ENOMEM;
27033 +
27034 + if (flags & MAP_FIXED)
27035 + return addr;
27036 +
27037 +#ifdef CONFIG_PAX_RANDMMAP
27038 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27039 +#endif
27040 +
27041 + if (addr) {
27042 + addr = PAGE_ALIGN(addr);
27043 + if (pax_task_size - len >= addr) {
27044 + vma = find_vma(mm, addr);
27045 + if (check_heap_stack_gap(vma, addr, len, offset))
27046 + return addr;
27047 + }
27048 + }
27049 +
27050 + info.flags = 0;
27051 + info.length = len;
27052 + info.align_mask = filp ? get_align_mask() : 0;
27053 + info.align_offset = pgoff << PAGE_SHIFT;
27054 + info.threadstack_offset = offset;
27055 +
27056 +#ifdef CONFIG_PAX_PAGEEXEC
27057 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27058 + info.low_limit = 0x00110000UL;
27059 + info.high_limit = mm->start_code;
27060 +
27061 +#ifdef CONFIG_PAX_RANDMMAP
27062 + if (mm->pax_flags & MF_PAX_RANDMMAP)
27063 + info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27064 +#endif
27065 +
27066 + if (info.low_limit < info.high_limit) {
27067 + addr = vm_unmapped_area(&info);
27068 + if (!IS_ERR_VALUE(addr))
27069 + return addr;
27070 + }
27071 + } else
27072 +#endif
27073 +
27074 + info.low_limit = mm->mmap_base;
27075 + info.high_limit = pax_task_size;
27076 +
27077 + return vm_unmapped_area(&info);
27078 +}
27079 +
27080 +unsigned long
27081 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27082 + const unsigned long len, const unsigned long pgoff,
27083 + const unsigned long flags)
27084 +{
27085 + struct vm_area_struct *vma;
27086 + struct mm_struct *mm = current->mm;
27087 + unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27088 + struct vm_unmapped_area_info info;
27089 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27090 +
27091 +#ifdef CONFIG_PAX_SEGMEXEC
27092 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
27093 + pax_task_size = SEGMEXEC_TASK_SIZE;
27094 +#endif
27095 +
27096 + pax_task_size -= PAGE_SIZE;
27097 +
27098 + /* requested length too big for entire address space */
27099 + if (len > pax_task_size)
27100 + return -ENOMEM;
27101 +
27102 + if (flags & MAP_FIXED)
27103 + return addr;
27104 +
27105 +#ifdef CONFIG_PAX_PAGEEXEC
27106 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27107 + goto bottomup;
27108 +#endif
27109 +
27110 +#ifdef CONFIG_PAX_RANDMMAP
27111 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27112 +#endif
27113 +
27114 + /* requesting a specific address */
27115 + if (addr) {
27116 + addr = PAGE_ALIGN(addr);
27117 + if (pax_task_size - len >= addr) {
27118 + vma = find_vma(mm, addr);
27119 + if (check_heap_stack_gap(vma, addr, len, offset))
27120 + return addr;
27121 + }
27122 + }
27123 +
27124 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27125 + info.length = len;
27126 + info.low_limit = PAGE_SIZE;
27127 + info.high_limit = mm->mmap_base;
27128 + info.align_mask = filp ? get_align_mask() : 0;
27129 + info.align_offset = pgoff << PAGE_SHIFT;
27130 + info.threadstack_offset = offset;
27131 +
27132 + addr = vm_unmapped_area(&info);
27133 + if (!(addr & ~PAGE_MASK))
27134 + return addr;
27135 + VM_BUG_ON(addr != -ENOMEM);
27136 +
27137 +bottomup:
27138 + /*
27139 + * A failed mmap() very likely causes application failure,
27140 + * so fall back to the bottom-up function here. This scenario
27141 + * can happen with large stack limits and large mmap()
27142 + * allocations.
27143 + */
27144 + return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27145 +}
27146 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27147 index 30277e2..5664a29 100644
27148 --- a/arch/x86/kernel/sys_x86_64.c
27149 +++ b/arch/x86/kernel/sys_x86_64.c
27150 @@ -81,8 +81,8 @@ out:
27151 return error;
27152 }
27153
27154 -static void find_start_end(unsigned long flags, unsigned long *begin,
27155 - unsigned long *end)
27156 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
27157 + unsigned long *begin, unsigned long *end)
27158 {
27159 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27160 unsigned long new_begin;
27161 @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27162 *begin = new_begin;
27163 }
27164 } else {
27165 - *begin = current->mm->mmap_legacy_base;
27166 + *begin = mm->mmap_legacy_base;
27167 *end = TASK_SIZE;
27168 }
27169 }
27170 @@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27171 struct vm_area_struct *vma;
27172 struct vm_unmapped_area_info info;
27173 unsigned long begin, end;
27174 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27175
27176 if (flags & MAP_FIXED)
27177 return addr;
27178
27179 - find_start_end(flags, &begin, &end);
27180 + find_start_end(mm, flags, &begin, &end);
27181
27182 if (len > end)
27183 return -ENOMEM;
27184
27185 +#ifdef CONFIG_PAX_RANDMMAP
27186 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27187 +#endif
27188 +
27189 if (addr) {
27190 addr = PAGE_ALIGN(addr);
27191 vma = find_vma(mm, addr);
27192 - if (end - len >= addr &&
27193 - (!vma || addr + len <= vma->vm_start))
27194 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27195 return addr;
27196 }
27197
27198 @@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27199 info.high_limit = end;
27200 info.align_mask = filp ? get_align_mask() : 0;
27201 info.align_offset = pgoff << PAGE_SHIFT;
27202 + info.threadstack_offset = offset;
27203 return vm_unmapped_area(&info);
27204 }
27205
27206 @@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27207 struct mm_struct *mm = current->mm;
27208 unsigned long addr = addr0;
27209 struct vm_unmapped_area_info info;
27210 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27211
27212 /* requested length too big for entire address space */
27213 if (len > TASK_SIZE)
27214 @@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27215 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27216 goto bottomup;
27217
27218 +#ifdef CONFIG_PAX_RANDMMAP
27219 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27220 +#endif
27221 +
27222 /* requesting a specific address */
27223 if (addr) {
27224 addr = PAGE_ALIGN(addr);
27225 vma = find_vma(mm, addr);
27226 - if (TASK_SIZE - len >= addr &&
27227 - (!vma || addr + len <= vma->vm_start))
27228 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27229 return addr;
27230 }
27231
27232 @@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27233 info.high_limit = mm->mmap_base;
27234 info.align_mask = filp ? get_align_mask() : 0;
27235 info.align_offset = pgoff << PAGE_SHIFT;
27236 + info.threadstack_offset = offset;
27237 addr = vm_unmapped_area(&info);
27238 if (!(addr & ~PAGE_MASK))
27239 return addr;
27240 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27241 index 91a4496..bb87552 100644
27242 --- a/arch/x86/kernel/tboot.c
27243 +++ b/arch/x86/kernel/tboot.c
27244 @@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27245
27246 void tboot_shutdown(u32 shutdown_type)
27247 {
27248 - void (*shutdown)(void);
27249 + void (* __noreturn shutdown)(void);
27250
27251 if (!tboot_enabled())
27252 return;
27253 @@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27254
27255 switch_to_tboot_pt();
27256
27257 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27258 + shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27259 shutdown();
27260
27261 /* should not reach here */
27262 @@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27263 return -ENODEV;
27264 }
27265
27266 -static atomic_t ap_wfs_count;
27267 +static atomic_unchecked_t ap_wfs_count;
27268
27269 static int tboot_wait_for_aps(int num_aps)
27270 {
27271 @@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27272 {
27273 switch (action) {
27274 case CPU_DYING:
27275 - atomic_inc(&ap_wfs_count);
27276 + atomic_inc_unchecked(&ap_wfs_count);
27277 if (num_online_cpus() == 1)
27278 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27279 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27280 return NOTIFY_BAD;
27281 break;
27282 }
27283 @@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27284
27285 tboot_create_trampoline();
27286
27287 - atomic_set(&ap_wfs_count, 0);
27288 + atomic_set_unchecked(&ap_wfs_count, 0);
27289 register_hotcpu_notifier(&tboot_cpu_notifier);
27290
27291 #ifdef CONFIG_DEBUG_FS
27292 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27293 index 24d3c91..d06b473 100644
27294 --- a/arch/x86/kernel/time.c
27295 +++ b/arch/x86/kernel/time.c
27296 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27297 {
27298 unsigned long pc = instruction_pointer(regs);
27299
27300 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27301 + if (!user_mode(regs) && in_lock_functions(pc)) {
27302 #ifdef CONFIG_FRAME_POINTER
27303 - return *(unsigned long *)(regs->bp + sizeof(long));
27304 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27305 #else
27306 unsigned long *sp =
27307 (unsigned long *)kernel_stack_pointer(regs);
27308 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27309 * or above a saved flags. Eflags has bits 22-31 zero,
27310 * kernel addresses don't.
27311 */
27312 +
27313 +#ifdef CONFIG_PAX_KERNEXEC
27314 + return ktla_ktva(sp[0]);
27315 +#else
27316 if (sp[0] >> 22)
27317 return sp[0];
27318 if (sp[1] >> 22)
27319 return sp[1];
27320 #endif
27321 +
27322 +#endif
27323 }
27324 return pc;
27325 }
27326 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27327 index f7fec09..9991981 100644
27328 --- a/arch/x86/kernel/tls.c
27329 +++ b/arch/x86/kernel/tls.c
27330 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27331 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27332 return -EINVAL;
27333
27334 +#ifdef CONFIG_PAX_SEGMEXEC
27335 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27336 + return -EINVAL;
27337 +#endif
27338 +
27339 set_tls_desc(p, idx, &info, 1);
27340
27341 return 0;
27342 @@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27343
27344 if (kbuf)
27345 info = kbuf;
27346 - else if (__copy_from_user(infobuf, ubuf, count))
27347 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27348 return -EFAULT;
27349 else
27350 info = infobuf;
27351 diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27352 index 1c113db..287b42e 100644
27353 --- a/arch/x86/kernel/tracepoint.c
27354 +++ b/arch/x86/kernel/tracepoint.c
27355 @@ -9,11 +9,11 @@
27356 #include <linux/atomic.h>
27357
27358 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27359 -struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27360 +const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27361 (unsigned long) trace_idt_table };
27362
27363 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27364 -gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27365 +gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27366
27367 static int trace_irq_vector_refcount;
27368 static DEFINE_MUTEX(irq_vector_mutex);
27369 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27370 index b857ed8..51ae4cb 100644
27371 --- a/arch/x86/kernel/traps.c
27372 +++ b/arch/x86/kernel/traps.c
27373 @@ -66,7 +66,7 @@
27374 #include <asm/proto.h>
27375
27376 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27377 -gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27378 +gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27379 #else
27380 #include <asm/processor-flags.h>
27381 #include <asm/setup.h>
27382 @@ -75,7 +75,7 @@ asmlinkage int system_call(void);
27383 #endif
27384
27385 /* Must be page-aligned because the real IDT is used in a fixmap. */
27386 -gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27387 +gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27388
27389 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27390 EXPORT_SYMBOL_GPL(used_vectors);
27391 @@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27392 }
27393
27394 static int __kprobes
27395 -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27396 +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27397 struct pt_regs *regs, long error_code)
27398 {
27399 #ifdef CONFIG_X86_32
27400 - if (regs->flags & X86_VM_MASK) {
27401 + if (v8086_mode(regs)) {
27402 /*
27403 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27404 * On nmi (interrupt 2), do_trap should not be called.
27405 @@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27406 return -1;
27407 }
27408 #endif
27409 - if (!user_mode(regs)) {
27410 + if (!user_mode_novm(regs)) {
27411 if (!fixup_exception(regs)) {
27412 tsk->thread.error_code = error_code;
27413 tsk->thread.trap_nr = trapnr;
27414 +
27415 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27416 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27417 + str = "PAX: suspicious stack segment fault";
27418 +#endif
27419 +
27420 die(str, regs, error_code);
27421 }
27422 +
27423 +#ifdef CONFIG_PAX_REFCOUNT
27424 + if (trapnr == 4)
27425 + pax_report_refcount_overflow(regs);
27426 +#endif
27427 +
27428 return 0;
27429 }
27430
27431 @@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27432 }
27433
27434 static void __kprobes
27435 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27436 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27437 long error_code, siginfo_t *info)
27438 {
27439 struct task_struct *tsk = current;
27440 @@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27441 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27442 printk_ratelimit()) {
27443 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27444 - tsk->comm, tsk->pid, str,
27445 + tsk->comm, task_pid_nr(tsk), str,
27446 regs->ip, regs->sp, error_code);
27447 print_vma_addr(" in ", regs->ip);
27448 pr_cont("\n");
27449 @@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27450 conditional_sti(regs);
27451
27452 #ifdef CONFIG_X86_32
27453 - if (regs->flags & X86_VM_MASK) {
27454 + if (v8086_mode(regs)) {
27455 local_irq_enable();
27456 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27457 goto exit;
27458 @@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27459 #endif
27460
27461 tsk = current;
27462 - if (!user_mode(regs)) {
27463 + if (!user_mode_novm(regs)) {
27464 if (fixup_exception(regs))
27465 goto exit;
27466
27467 tsk->thread.error_code = error_code;
27468 tsk->thread.trap_nr = X86_TRAP_GP;
27469 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
27470 - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
27471 + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
27472 +
27473 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27474 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
27475 + die("PAX: suspicious general protection fault", regs, error_code);
27476 + else
27477 +#endif
27478 +
27479 die("general protection fault", regs, error_code);
27480 + }
27481 goto exit;
27482 }
27483
27484 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27485 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
27486 + struct mm_struct *mm = tsk->mm;
27487 + unsigned long limit;
27488 +
27489 + down_write(&mm->mmap_sem);
27490 + limit = mm->context.user_cs_limit;
27491 + if (limit < TASK_SIZE) {
27492 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
27493 + up_write(&mm->mmap_sem);
27494 + return;
27495 + }
27496 + up_write(&mm->mmap_sem);
27497 + }
27498 +#endif
27499 +
27500 tsk->thread.error_code = error_code;
27501 tsk->thread.trap_nr = X86_TRAP_GP;
27502
27503 @@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
27504 /* It's safe to allow irq's after DR6 has been saved */
27505 preempt_conditional_sti(regs);
27506
27507 - if (regs->flags & X86_VM_MASK) {
27508 + if (v8086_mode(regs)) {
27509 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
27510 X86_TRAP_DB);
27511 preempt_conditional_cli(regs);
27512 @@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
27513 * We already checked v86 mode above, so we can check for kernel mode
27514 * by just checking the CPL of CS.
27515 */
27516 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
27517 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
27518 tsk->thread.debugreg6 &= ~DR_STEP;
27519 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
27520 regs->flags &= ~X86_EFLAGS_TF;
27521 @@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
27522 return;
27523 conditional_sti(regs);
27524
27525 - if (!user_mode_vm(regs))
27526 + if (!user_mode(regs))
27527 {
27528 if (!fixup_exception(regs)) {
27529 task->thread.error_code = error_code;
27530 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
27531 index 2ed8459..7cf329f 100644
27532 --- a/arch/x86/kernel/uprobes.c
27533 +++ b/arch/x86/kernel/uprobes.c
27534 @@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
27535 int ret = NOTIFY_DONE;
27536
27537 /* We are only interested in userspace traps */
27538 - if (regs && !user_mode_vm(regs))
27539 + if (regs && !user_mode(regs))
27540 return NOTIFY_DONE;
27541
27542 switch (val) {
27543 @@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
27544
27545 if (ncopied != rasize) {
27546 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
27547 - "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
27548 + "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
27549
27550 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
27551 }
27552 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
27553 index b9242ba..50c5edd 100644
27554 --- a/arch/x86/kernel/verify_cpu.S
27555 +++ b/arch/x86/kernel/verify_cpu.S
27556 @@ -20,6 +20,7 @@
27557 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
27558 * arch/x86/kernel/trampoline_64.S: secondary processor verification
27559 * arch/x86/kernel/head_32.S: processor startup
27560 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
27561 *
27562 * verify_cpu, returns the status of longmode and SSE in register %eax.
27563 * 0: Success 1: Failure
27564 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
27565 index e8edcf5..27f9344 100644
27566 --- a/arch/x86/kernel/vm86_32.c
27567 +++ b/arch/x86/kernel/vm86_32.c
27568 @@ -44,6 +44,7 @@
27569 #include <linux/ptrace.h>
27570 #include <linux/audit.h>
27571 #include <linux/stddef.h>
27572 +#include <linux/grsecurity.h>
27573
27574 #include <asm/uaccess.h>
27575 #include <asm/io.h>
27576 @@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
27577 do_exit(SIGSEGV);
27578 }
27579
27580 - tss = &per_cpu(init_tss, get_cpu());
27581 + tss = init_tss + get_cpu();
27582 current->thread.sp0 = current->thread.saved_sp0;
27583 current->thread.sysenter_cs = __KERNEL_CS;
27584 load_sp0(tss, &current->thread);
27585 @@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
27586
27587 if (tsk->thread.saved_sp0)
27588 return -EPERM;
27589 +
27590 +#ifdef CONFIG_GRKERNSEC_VM86
27591 + if (!capable(CAP_SYS_RAWIO)) {
27592 + gr_handle_vm86();
27593 + return -EPERM;
27594 + }
27595 +#endif
27596 +
27597 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
27598 offsetof(struct kernel_vm86_struct, vm86plus) -
27599 sizeof(info.regs));
27600 @@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
27601 int tmp;
27602 struct vm86plus_struct __user *v86;
27603
27604 +#ifdef CONFIG_GRKERNSEC_VM86
27605 + if (!capable(CAP_SYS_RAWIO)) {
27606 + gr_handle_vm86();
27607 + return -EPERM;
27608 + }
27609 +#endif
27610 +
27611 tsk = current;
27612 switch (cmd) {
27613 case VM86_REQUEST_IRQ:
27614 @@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
27615 tsk->thread.saved_fs = info->regs32->fs;
27616 tsk->thread.saved_gs = get_user_gs(info->regs32);
27617
27618 - tss = &per_cpu(init_tss, get_cpu());
27619 + tss = init_tss + get_cpu();
27620 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
27621 if (cpu_has_sep)
27622 tsk->thread.sysenter_cs = 0;
27623 @@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
27624 goto cannot_handle;
27625 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
27626 goto cannot_handle;
27627 - intr_ptr = (unsigned long __user *) (i << 2);
27628 + intr_ptr = (__force unsigned long __user *) (i << 2);
27629 if (get_user(segoffs, intr_ptr))
27630 goto cannot_handle;
27631 if ((segoffs >> 16) == BIOSSEG)
27632 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
27633 index da6b35a..977e9cf 100644
27634 --- a/arch/x86/kernel/vmlinux.lds.S
27635 +++ b/arch/x86/kernel/vmlinux.lds.S
27636 @@ -26,6 +26,13 @@
27637 #include <asm/page_types.h>
27638 #include <asm/cache.h>
27639 #include <asm/boot.h>
27640 +#include <asm/segment.h>
27641 +
27642 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27643 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
27644 +#else
27645 +#define __KERNEL_TEXT_OFFSET 0
27646 +#endif
27647
27648 #undef i386 /* in case the preprocessor is a 32bit one */
27649
27650 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
27651
27652 PHDRS {
27653 text PT_LOAD FLAGS(5); /* R_E */
27654 +#ifdef CONFIG_X86_32
27655 + module PT_LOAD FLAGS(5); /* R_E */
27656 +#endif
27657 +#ifdef CONFIG_XEN
27658 + rodata PT_LOAD FLAGS(5); /* R_E */
27659 +#else
27660 + rodata PT_LOAD FLAGS(4); /* R__ */
27661 +#endif
27662 data PT_LOAD FLAGS(6); /* RW_ */
27663 -#ifdef CONFIG_X86_64
27664 + init.begin PT_LOAD FLAGS(6); /* RW_ */
27665 #ifdef CONFIG_SMP
27666 percpu PT_LOAD FLAGS(6); /* RW_ */
27667 #endif
27668 + text.init PT_LOAD FLAGS(5); /* R_E */
27669 + text.exit PT_LOAD FLAGS(5); /* R_E */
27670 init PT_LOAD FLAGS(7); /* RWE */
27671 -#endif
27672 note PT_NOTE FLAGS(0); /* ___ */
27673 }
27674
27675 SECTIONS
27676 {
27677 #ifdef CONFIG_X86_32
27678 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
27679 - phys_startup_32 = startup_32 - LOAD_OFFSET;
27680 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
27681 #else
27682 - . = __START_KERNEL;
27683 - phys_startup_64 = startup_64 - LOAD_OFFSET;
27684 + . = __START_KERNEL;
27685 #endif
27686
27687 /* Text and read-only data */
27688 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
27689 - _text = .;
27690 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27691 /* bootstrapping code */
27692 +#ifdef CONFIG_X86_32
27693 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27694 +#else
27695 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27696 +#endif
27697 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27698 + _text = .;
27699 HEAD_TEXT
27700 . = ALIGN(8);
27701 _stext = .;
27702 @@ -104,13 +124,47 @@ SECTIONS
27703 IRQENTRY_TEXT
27704 *(.fixup)
27705 *(.gnu.warning)
27706 - /* End of text section */
27707 - _etext = .;
27708 } :text = 0x9090
27709
27710 - NOTES :text :note
27711 + . += __KERNEL_TEXT_OFFSET;
27712
27713 - EXCEPTION_TABLE(16) :text = 0x9090
27714 +#ifdef CONFIG_X86_32
27715 + . = ALIGN(PAGE_SIZE);
27716 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
27717 +
27718 +#ifdef CONFIG_PAX_KERNEXEC
27719 + MODULES_EXEC_VADDR = .;
27720 + BYTE(0)
27721 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
27722 + . = ALIGN(HPAGE_SIZE) - 1;
27723 + MODULES_EXEC_END = .;
27724 +#endif
27725 +
27726 + } :module
27727 +#endif
27728 +
27729 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
27730 + /* End of text section */
27731 + BYTE(0)
27732 + _etext = . - __KERNEL_TEXT_OFFSET;
27733 + }
27734 +
27735 +#ifdef CONFIG_X86_32
27736 + . = ALIGN(PAGE_SIZE);
27737 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
27738 + . = ALIGN(PAGE_SIZE);
27739 + *(.empty_zero_page)
27740 + *(.initial_pg_fixmap)
27741 + *(.initial_pg_pmd)
27742 + *(.initial_page_table)
27743 + *(.swapper_pg_dir)
27744 + } :rodata
27745 +#endif
27746 +
27747 + . = ALIGN(PAGE_SIZE);
27748 + NOTES :rodata :note
27749 +
27750 + EXCEPTION_TABLE(16) :rodata
27751
27752 #if defined(CONFIG_DEBUG_RODATA)
27753 /* .text should occupy whole number of pages */
27754 @@ -122,16 +176,20 @@ SECTIONS
27755
27756 /* Data */
27757 .data : AT(ADDR(.data) - LOAD_OFFSET) {
27758 +
27759 +#ifdef CONFIG_PAX_KERNEXEC
27760 + . = ALIGN(HPAGE_SIZE);
27761 +#else
27762 + . = ALIGN(PAGE_SIZE);
27763 +#endif
27764 +
27765 /* Start of data section */
27766 _sdata = .;
27767
27768 /* init_task */
27769 INIT_TASK_DATA(THREAD_SIZE)
27770
27771 -#ifdef CONFIG_X86_32
27772 - /* 32 bit has nosave before _edata */
27773 NOSAVE_DATA
27774 -#endif
27775
27776 PAGE_ALIGNED_DATA(PAGE_SIZE)
27777
27778 @@ -172,12 +230,19 @@ SECTIONS
27779 #endif /* CONFIG_X86_64 */
27780
27781 /* Init code and data - will be freed after init */
27782 - . = ALIGN(PAGE_SIZE);
27783 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
27784 + BYTE(0)
27785 +
27786 +#ifdef CONFIG_PAX_KERNEXEC
27787 + . = ALIGN(HPAGE_SIZE);
27788 +#else
27789 + . = ALIGN(PAGE_SIZE);
27790 +#endif
27791 +
27792 __init_begin = .; /* paired with __init_end */
27793 - }
27794 + } :init.begin
27795
27796 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
27797 +#ifdef CONFIG_SMP
27798 /*
27799 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
27800 * output PHDR, so the next output section - .init.text - should
27801 @@ -186,12 +251,27 @@ SECTIONS
27802 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
27803 #endif
27804
27805 - INIT_TEXT_SECTION(PAGE_SIZE)
27806 -#ifdef CONFIG_X86_64
27807 - :init
27808 -#endif
27809 + . = ALIGN(PAGE_SIZE);
27810 + init_begin = .;
27811 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
27812 + VMLINUX_SYMBOL(_sinittext) = .;
27813 + INIT_TEXT
27814 + VMLINUX_SYMBOL(_einittext) = .;
27815 + . = ALIGN(PAGE_SIZE);
27816 + } :text.init
27817
27818 - INIT_DATA_SECTION(16)
27819 + /*
27820 + * .exit.text is discard at runtime, not link time, to deal with
27821 + * references from .altinstructions and .eh_frame
27822 + */
27823 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27824 + EXIT_TEXT
27825 + . = ALIGN(16);
27826 + } :text.exit
27827 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
27828 +
27829 + . = ALIGN(PAGE_SIZE);
27830 + INIT_DATA_SECTION(16) :init
27831
27832 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
27833 __x86_cpu_dev_start = .;
27834 @@ -262,19 +342,12 @@ SECTIONS
27835 }
27836
27837 . = ALIGN(8);
27838 - /*
27839 - * .exit.text is discard at runtime, not link time, to deal with
27840 - * references from .altinstructions and .eh_frame
27841 - */
27842 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
27843 - EXIT_TEXT
27844 - }
27845
27846 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
27847 EXIT_DATA
27848 }
27849
27850 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
27851 +#ifndef CONFIG_SMP
27852 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
27853 #endif
27854
27855 @@ -293,16 +366,10 @@ SECTIONS
27856 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
27857 __smp_locks = .;
27858 *(.smp_locks)
27859 - . = ALIGN(PAGE_SIZE);
27860 __smp_locks_end = .;
27861 + . = ALIGN(PAGE_SIZE);
27862 }
27863
27864 -#ifdef CONFIG_X86_64
27865 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
27866 - NOSAVE_DATA
27867 - }
27868 -#endif
27869 -
27870 /* BSS */
27871 . = ALIGN(PAGE_SIZE);
27872 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
27873 @@ -318,6 +385,7 @@ SECTIONS
27874 __brk_base = .;
27875 . += 64 * 1024; /* 64k alignment slop space */
27876 *(.brk_reservation) /* areas brk users have reserved */
27877 + . = ALIGN(HPAGE_SIZE);
27878 __brk_limit = .;
27879 }
27880
27881 @@ -344,13 +412,12 @@ SECTIONS
27882 * for the boot processor.
27883 */
27884 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
27885 -INIT_PER_CPU(gdt_page);
27886 INIT_PER_CPU(irq_stack_union);
27887
27888 /*
27889 * Build-time check on the image size:
27890 */
27891 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
27892 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
27893 "kernel image bigger than KERNEL_IMAGE_SIZE");
27894
27895 #ifdef CONFIG_SMP
27896 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
27897 index 1f96f93..d5c8f7a 100644
27898 --- a/arch/x86/kernel/vsyscall_64.c
27899 +++ b/arch/x86/kernel/vsyscall_64.c
27900 @@ -56,15 +56,13 @@
27901 DEFINE_VVAR(int, vgetcpu_mode);
27902 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
27903
27904 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
27905 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
27906
27907 static int __init vsyscall_setup(char *str)
27908 {
27909 if (str) {
27910 if (!strcmp("emulate", str))
27911 vsyscall_mode = EMULATE;
27912 - else if (!strcmp("native", str))
27913 - vsyscall_mode = NATIVE;
27914 else if (!strcmp("none", str))
27915 vsyscall_mode = NONE;
27916 else
27917 @@ -323,8 +321,7 @@ do_ret:
27918 return true;
27919
27920 sigsegv:
27921 - force_sig(SIGSEGV, current);
27922 - return true;
27923 + do_group_exit(SIGKILL);
27924 }
27925
27926 /*
27927 @@ -377,10 +374,7 @@ void __init map_vsyscall(void)
27928 extern char __vvar_page;
27929 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
27930
27931 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
27932 - vsyscall_mode == NATIVE
27933 - ? PAGE_KERNEL_VSYSCALL
27934 - : PAGE_KERNEL_VVAR);
27935 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
27936 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
27937 (unsigned long)VSYSCALL_START);
27938
27939 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
27940 index 04068192..4d75aa6 100644
27941 --- a/arch/x86/kernel/x8664_ksyms_64.c
27942 +++ b/arch/x86/kernel/x8664_ksyms_64.c
27943 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
27944 EXPORT_SYMBOL(copy_user_generic_unrolled);
27945 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
27946 EXPORT_SYMBOL(__copy_user_nocache);
27947 -EXPORT_SYMBOL(_copy_from_user);
27948 -EXPORT_SYMBOL(_copy_to_user);
27949
27950 EXPORT_SYMBOL(copy_page);
27951 EXPORT_SYMBOL(clear_page);
27952 @@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
27953 EXPORT_SYMBOL(___preempt_schedule_context);
27954 #endif
27955 #endif
27956 +
27957 +#ifdef CONFIG_PAX_PER_CPU_PGD
27958 +EXPORT_SYMBOL(cpu_pgd);
27959 +#endif
27960 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
27961 index 021783b..6511282 100644
27962 --- a/arch/x86/kernel/x86_init.c
27963 +++ b/arch/x86/kernel/x86_init.c
27964 @@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
27965 static void default_nmi_init(void) { };
27966 static int default_i8042_detect(void) { return 1; };
27967
27968 -struct x86_platform_ops x86_platform = {
27969 +struct x86_platform_ops x86_platform __read_only = {
27970 .calibrate_tsc = native_calibrate_tsc,
27971 .get_wallclock = mach_get_cmos_time,
27972 .set_wallclock = mach_set_rtc_mmss,
27973 @@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
27974 EXPORT_SYMBOL_GPL(x86_platform);
27975
27976 #if defined(CONFIG_PCI_MSI)
27977 -struct x86_msi_ops x86_msi = {
27978 +struct x86_msi_ops x86_msi __read_only = {
27979 .setup_msi_irqs = native_setup_msi_irqs,
27980 .compose_msi_msg = native_compose_msi_msg,
27981 .teardown_msi_irq = native_teardown_msi_irq,
27982 @@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
27983 }
27984 #endif
27985
27986 -struct x86_io_apic_ops x86_io_apic_ops = {
27987 +struct x86_io_apic_ops x86_io_apic_ops __read_only = {
27988 .init = native_io_apic_init_mappings,
27989 .read = native_io_apic_read,
27990 .write = native_io_apic_write,
27991 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
27992 index 422fd82..b2d262e 100644
27993 --- a/arch/x86/kernel/xsave.c
27994 +++ b/arch/x86/kernel/xsave.c
27995 @@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27996
27997 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
27998 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
27999 - err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28000 + err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28001
28002 if (!use_xsave())
28003 return err;
28004
28005 - err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28006 + err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28007
28008 /*
28009 * Read the xstate_bv which we copied (directly from the cpu or
28010 * from the state in task struct) to the user buffers.
28011 */
28012 - err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28013 + err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28014
28015 /*
28016 * For legacy compatible, we always set FP/SSE bits in the bit
28017 @@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28018 */
28019 xstate_bv |= XSTATE_FPSSE;
28020
28021 - err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28022 + err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28023
28024 return err;
28025 }
28026 @@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28027 {
28028 int err;
28029
28030 + buf = (struct xsave_struct __user *)____m(buf);
28031 if (use_xsave())
28032 err = xsave_user(buf);
28033 else if (use_fxsr())
28034 @@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28035 */
28036 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28037 {
28038 + buf = (void __user *)____m(buf);
28039 if (use_xsave()) {
28040 if ((unsigned long)buf % 64 || fx_only) {
28041 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28042 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28043 index c697625..a032162 100644
28044 --- a/arch/x86/kvm/cpuid.c
28045 +++ b/arch/x86/kvm/cpuid.c
28046 @@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28047 struct kvm_cpuid2 *cpuid,
28048 struct kvm_cpuid_entry2 __user *entries)
28049 {
28050 - int r;
28051 + int r, i;
28052
28053 r = -E2BIG;
28054 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28055 goto out;
28056 r = -EFAULT;
28057 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28058 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28059 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28060 goto out;
28061 + for (i = 0; i < cpuid->nent; ++i) {
28062 + struct kvm_cpuid_entry2 cpuid_entry;
28063 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28064 + goto out;
28065 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
28066 + }
28067 vcpu->arch.cpuid_nent = cpuid->nent;
28068 kvm_apic_set_version(vcpu);
28069 kvm_x86_ops->cpuid_update(vcpu);
28070 @@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28071 struct kvm_cpuid2 *cpuid,
28072 struct kvm_cpuid_entry2 __user *entries)
28073 {
28074 - int r;
28075 + int r, i;
28076
28077 r = -E2BIG;
28078 if (cpuid->nent < vcpu->arch.cpuid_nent)
28079 goto out;
28080 r = -EFAULT;
28081 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28082 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28083 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28084 goto out;
28085 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28086 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28087 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28088 + goto out;
28089 + }
28090 return 0;
28091
28092 out:
28093 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28094 index d86ff15..e77b023 100644
28095 --- a/arch/x86/kvm/lapic.c
28096 +++ b/arch/x86/kvm/lapic.c
28097 @@ -55,7 +55,7 @@
28098 #define APIC_BUS_CYCLE_NS 1
28099
28100 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28101 -#define apic_debug(fmt, arg...)
28102 +#define apic_debug(fmt, arg...) do {} while (0)
28103
28104 #define APIC_LVT_NUM 6
28105 /* 14 is the version for Xeon and Pentium 8.4.8*/
28106 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28107 index ad75d77..a679d32 100644
28108 --- a/arch/x86/kvm/paging_tmpl.h
28109 +++ b/arch/x86/kvm/paging_tmpl.h
28110 @@ -331,7 +331,7 @@ retry_walk:
28111 if (unlikely(kvm_is_error_hva(host_addr)))
28112 goto error;
28113
28114 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28115 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28116 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28117 goto error;
28118 walker->ptep_user[walker->level - 1] = ptep_user;
28119 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28120 index 532add1..59eb241 100644
28121 --- a/arch/x86/kvm/svm.c
28122 +++ b/arch/x86/kvm/svm.c
28123 @@ -3495,7 +3495,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28124 int cpu = raw_smp_processor_id();
28125
28126 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28127 +
28128 + pax_open_kernel();
28129 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28130 + pax_close_kernel();
28131 +
28132 load_TR_desc();
28133 }
28134
28135 @@ -3898,6 +3902,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28136 #endif
28137 #endif
28138
28139 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28140 + __set_fs(current_thread_info()->addr_limit);
28141 +#endif
28142 +
28143 reload_tss(vcpu);
28144
28145 local_irq_disable();
28146 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28147 index dcc4de3..6bf73f4 100644
28148 --- a/arch/x86/kvm/vmx.c
28149 +++ b/arch/x86/kvm/vmx.c
28150 @@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28151 #endif
28152 }
28153
28154 -static void vmcs_clear_bits(unsigned long field, u32 mask)
28155 +static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28156 {
28157 vmcs_writel(field, vmcs_readl(field) & ~mask);
28158 }
28159
28160 -static void vmcs_set_bits(unsigned long field, u32 mask)
28161 +static void vmcs_set_bits(unsigned long field, unsigned long mask)
28162 {
28163 vmcs_writel(field, vmcs_readl(field) | mask);
28164 }
28165 @@ -1522,7 +1522,11 @@ static void reload_tss(void)
28166 struct desc_struct *descs;
28167
28168 descs = (void *)gdt->address;
28169 +
28170 + pax_open_kernel();
28171 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28172 + pax_close_kernel();
28173 +
28174 load_TR_desc();
28175 }
28176
28177 @@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28178 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28179 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28180
28181 +#ifdef CONFIG_PAX_PER_CPU_PGD
28182 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28183 +#endif
28184 +
28185 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28186 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28187 vmx->loaded_vmcs->cpu = cpu;
28188 @@ -2033,7 +2041,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28189 * reads and returns guest's timestamp counter "register"
28190 * guest_tsc = host_tsc + tsc_offset -- 21.3
28191 */
28192 -static u64 guest_read_tsc(void)
28193 +static u64 __intentional_overflow(-1) guest_read_tsc(void)
28194 {
28195 u64 host_tsc, tsc_offset;
28196
28197 @@ -2987,8 +2995,11 @@ static __init int hardware_setup(void)
28198 if (!cpu_has_vmx_flexpriority())
28199 flexpriority_enabled = 0;
28200
28201 - if (!cpu_has_vmx_tpr_shadow())
28202 - kvm_x86_ops->update_cr8_intercept = NULL;
28203 + if (!cpu_has_vmx_tpr_shadow()) {
28204 + pax_open_kernel();
28205 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28206 + pax_close_kernel();
28207 + }
28208
28209 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28210 kvm_disable_largepages();
28211 @@ -2999,13 +3010,15 @@ static __init int hardware_setup(void)
28212 if (!cpu_has_vmx_apicv())
28213 enable_apicv = 0;
28214
28215 + pax_open_kernel();
28216 if (enable_apicv)
28217 - kvm_x86_ops->update_cr8_intercept = NULL;
28218 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28219 else {
28220 - kvm_x86_ops->hwapic_irr_update = NULL;
28221 - kvm_x86_ops->deliver_posted_interrupt = NULL;
28222 - kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28223 + *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28224 + *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28225 + *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28226 }
28227 + pax_close_kernel();
28228
28229 if (nested)
28230 nested_vmx_setup_ctls_msrs();
28231 @@ -4134,7 +4147,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28232
28233 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28234 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28235 +
28236 +#ifndef CONFIG_PAX_PER_CPU_PGD
28237 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28238 +#endif
28239
28240 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28241 #ifdef CONFIG_X86_64
28242 @@ -4156,7 +4172,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28243 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28244 vmx->host_idt_base = dt.address;
28245
28246 - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28247 + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28248
28249 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28250 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28251 @@ -7219,6 +7235,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28252 "jmp 2f \n\t"
28253 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28254 "2: "
28255 +
28256 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28257 + "ljmp %[cs],$3f\n\t"
28258 + "3: "
28259 +#endif
28260 +
28261 /* Save guest registers, load host registers, keep flags */
28262 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28263 "pop %0 \n\t"
28264 @@ -7271,6 +7293,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28265 #endif
28266 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28267 [wordsize]"i"(sizeof(ulong))
28268 +
28269 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28270 + ,[cs]"i"(__KERNEL_CS)
28271 +#endif
28272 +
28273 : "cc", "memory"
28274 #ifdef CONFIG_X86_64
28275 , "rax", "rbx", "rdi", "rsi"
28276 @@ -7284,7 +7311,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28277 if (debugctlmsr)
28278 update_debugctlmsr(debugctlmsr);
28279
28280 -#ifndef CONFIG_X86_64
28281 +#ifdef CONFIG_X86_32
28282 /*
28283 * The sysexit path does not restore ds/es, so we must set them to
28284 * a reasonable value ourselves.
28285 @@ -7293,8 +7320,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28286 * may be executed in interrupt context, which saves and restore segments
28287 * around it, nullifying its effect.
28288 */
28289 - loadsegment(ds, __USER_DS);
28290 - loadsegment(es, __USER_DS);
28291 + loadsegment(ds, __KERNEL_DS);
28292 + loadsegment(es, __KERNEL_DS);
28293 + loadsegment(ss, __KERNEL_DS);
28294 +
28295 +#ifdef CONFIG_PAX_KERNEXEC
28296 + loadsegment(fs, __KERNEL_PERCPU);
28297 +#endif
28298 +
28299 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28300 + __set_fs(current_thread_info()->addr_limit);
28301 +#endif
28302 +
28303 #endif
28304
28305 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28306 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28307 index 4e33b85..fa94855 100644
28308 --- a/arch/x86/kvm/x86.c
28309 +++ b/arch/x86/kvm/x86.c
28310 @@ -1791,8 +1791,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28311 {
28312 struct kvm *kvm = vcpu->kvm;
28313 int lm = is_long_mode(vcpu);
28314 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28315 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28316 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28317 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28318 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28319 : kvm->arch.xen_hvm_config.blob_size_32;
28320 u32 page_num = data & ~PAGE_MASK;
28321 @@ -2676,6 +2676,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28322 if (n < msr_list.nmsrs)
28323 goto out;
28324 r = -EFAULT;
28325 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28326 + goto out;
28327 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28328 num_msrs_to_save * sizeof(u32)))
28329 goto out;
28330 @@ -5485,7 +5487,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28331 };
28332 #endif
28333
28334 -int kvm_arch_init(void *opaque)
28335 +int kvm_arch_init(const void *opaque)
28336 {
28337 int r;
28338 struct kvm_x86_ops *ops = opaque;
28339 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28340 index bdf8532..f63c587 100644
28341 --- a/arch/x86/lguest/boot.c
28342 +++ b/arch/x86/lguest/boot.c
28343 @@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28344 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28345 * Launcher to reboot us.
28346 */
28347 -static void lguest_restart(char *reason)
28348 +static __noreturn void lguest_restart(char *reason)
28349 {
28350 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28351 + BUG();
28352 }
28353
28354 /*G:050
28355 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28356 index 00933d5..3a64af9 100644
28357 --- a/arch/x86/lib/atomic64_386_32.S
28358 +++ b/arch/x86/lib/atomic64_386_32.S
28359 @@ -48,6 +48,10 @@ BEGIN(read)
28360 movl (v), %eax
28361 movl 4(v), %edx
28362 RET_ENDP
28363 +BEGIN(read_unchecked)
28364 + movl (v), %eax
28365 + movl 4(v), %edx
28366 +RET_ENDP
28367 #undef v
28368
28369 #define v %esi
28370 @@ -55,6 +59,10 @@ BEGIN(set)
28371 movl %ebx, (v)
28372 movl %ecx, 4(v)
28373 RET_ENDP
28374 +BEGIN(set_unchecked)
28375 + movl %ebx, (v)
28376 + movl %ecx, 4(v)
28377 +RET_ENDP
28378 #undef v
28379
28380 #define v %esi
28381 @@ -70,6 +78,20 @@ RET_ENDP
28382 BEGIN(add)
28383 addl %eax, (v)
28384 adcl %edx, 4(v)
28385 +
28386 +#ifdef CONFIG_PAX_REFCOUNT
28387 + jno 0f
28388 + subl %eax, (v)
28389 + sbbl %edx, 4(v)
28390 + int $4
28391 +0:
28392 + _ASM_EXTABLE(0b, 0b)
28393 +#endif
28394 +
28395 +RET_ENDP
28396 +BEGIN(add_unchecked)
28397 + addl %eax, (v)
28398 + adcl %edx, 4(v)
28399 RET_ENDP
28400 #undef v
28401
28402 @@ -77,6 +99,24 @@ RET_ENDP
28403 BEGIN(add_return)
28404 addl (v), %eax
28405 adcl 4(v), %edx
28406 +
28407 +#ifdef CONFIG_PAX_REFCOUNT
28408 + into
28409 +1234:
28410 + _ASM_EXTABLE(1234b, 2f)
28411 +#endif
28412 +
28413 + movl %eax, (v)
28414 + movl %edx, 4(v)
28415 +
28416 +#ifdef CONFIG_PAX_REFCOUNT
28417 +2:
28418 +#endif
28419 +
28420 +RET_ENDP
28421 +BEGIN(add_return_unchecked)
28422 + addl (v), %eax
28423 + adcl 4(v), %edx
28424 movl %eax, (v)
28425 movl %edx, 4(v)
28426 RET_ENDP
28427 @@ -86,6 +126,20 @@ RET_ENDP
28428 BEGIN(sub)
28429 subl %eax, (v)
28430 sbbl %edx, 4(v)
28431 +
28432 +#ifdef CONFIG_PAX_REFCOUNT
28433 + jno 0f
28434 + addl %eax, (v)
28435 + adcl %edx, 4(v)
28436 + int $4
28437 +0:
28438 + _ASM_EXTABLE(0b, 0b)
28439 +#endif
28440 +
28441 +RET_ENDP
28442 +BEGIN(sub_unchecked)
28443 + subl %eax, (v)
28444 + sbbl %edx, 4(v)
28445 RET_ENDP
28446 #undef v
28447
28448 @@ -96,6 +150,27 @@ BEGIN(sub_return)
28449 sbbl $0, %edx
28450 addl (v), %eax
28451 adcl 4(v), %edx
28452 +
28453 +#ifdef CONFIG_PAX_REFCOUNT
28454 + into
28455 +1234:
28456 + _ASM_EXTABLE(1234b, 2f)
28457 +#endif
28458 +
28459 + movl %eax, (v)
28460 + movl %edx, 4(v)
28461 +
28462 +#ifdef CONFIG_PAX_REFCOUNT
28463 +2:
28464 +#endif
28465 +
28466 +RET_ENDP
28467 +BEGIN(sub_return_unchecked)
28468 + negl %edx
28469 + negl %eax
28470 + sbbl $0, %edx
28471 + addl (v), %eax
28472 + adcl 4(v), %edx
28473 movl %eax, (v)
28474 movl %edx, 4(v)
28475 RET_ENDP
28476 @@ -105,6 +180,20 @@ RET_ENDP
28477 BEGIN(inc)
28478 addl $1, (v)
28479 adcl $0, 4(v)
28480 +
28481 +#ifdef CONFIG_PAX_REFCOUNT
28482 + jno 0f
28483 + subl $1, (v)
28484 + sbbl $0, 4(v)
28485 + int $4
28486 +0:
28487 + _ASM_EXTABLE(0b, 0b)
28488 +#endif
28489 +
28490 +RET_ENDP
28491 +BEGIN(inc_unchecked)
28492 + addl $1, (v)
28493 + adcl $0, 4(v)
28494 RET_ENDP
28495 #undef v
28496
28497 @@ -114,6 +203,26 @@ BEGIN(inc_return)
28498 movl 4(v), %edx
28499 addl $1, %eax
28500 adcl $0, %edx
28501 +
28502 +#ifdef CONFIG_PAX_REFCOUNT
28503 + into
28504 +1234:
28505 + _ASM_EXTABLE(1234b, 2f)
28506 +#endif
28507 +
28508 + movl %eax, (v)
28509 + movl %edx, 4(v)
28510 +
28511 +#ifdef CONFIG_PAX_REFCOUNT
28512 +2:
28513 +#endif
28514 +
28515 +RET_ENDP
28516 +BEGIN(inc_return_unchecked)
28517 + movl (v), %eax
28518 + movl 4(v), %edx
28519 + addl $1, %eax
28520 + adcl $0, %edx
28521 movl %eax, (v)
28522 movl %edx, 4(v)
28523 RET_ENDP
28524 @@ -123,6 +232,20 @@ RET_ENDP
28525 BEGIN(dec)
28526 subl $1, (v)
28527 sbbl $0, 4(v)
28528 +
28529 +#ifdef CONFIG_PAX_REFCOUNT
28530 + jno 0f
28531 + addl $1, (v)
28532 + adcl $0, 4(v)
28533 + int $4
28534 +0:
28535 + _ASM_EXTABLE(0b, 0b)
28536 +#endif
28537 +
28538 +RET_ENDP
28539 +BEGIN(dec_unchecked)
28540 + subl $1, (v)
28541 + sbbl $0, 4(v)
28542 RET_ENDP
28543 #undef v
28544
28545 @@ -132,6 +255,26 @@ BEGIN(dec_return)
28546 movl 4(v), %edx
28547 subl $1, %eax
28548 sbbl $0, %edx
28549 +
28550 +#ifdef CONFIG_PAX_REFCOUNT
28551 + into
28552 +1234:
28553 + _ASM_EXTABLE(1234b, 2f)
28554 +#endif
28555 +
28556 + movl %eax, (v)
28557 + movl %edx, 4(v)
28558 +
28559 +#ifdef CONFIG_PAX_REFCOUNT
28560 +2:
28561 +#endif
28562 +
28563 +RET_ENDP
28564 +BEGIN(dec_return_unchecked)
28565 + movl (v), %eax
28566 + movl 4(v), %edx
28567 + subl $1, %eax
28568 + sbbl $0, %edx
28569 movl %eax, (v)
28570 movl %edx, 4(v)
28571 RET_ENDP
28572 @@ -143,6 +286,13 @@ BEGIN(add_unless)
28573 adcl %edx, %edi
28574 addl (v), %eax
28575 adcl 4(v), %edx
28576 +
28577 +#ifdef CONFIG_PAX_REFCOUNT
28578 + into
28579 +1234:
28580 + _ASM_EXTABLE(1234b, 2f)
28581 +#endif
28582 +
28583 cmpl %eax, %ecx
28584 je 3f
28585 1:
28586 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
28587 1:
28588 addl $1, %eax
28589 adcl $0, %edx
28590 +
28591 +#ifdef CONFIG_PAX_REFCOUNT
28592 + into
28593 +1234:
28594 + _ASM_EXTABLE(1234b, 2f)
28595 +#endif
28596 +
28597 movl %eax, (v)
28598 movl %edx, 4(v)
28599 movl $1, %eax
28600 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
28601 movl 4(v), %edx
28602 subl $1, %eax
28603 sbbl $0, %edx
28604 +
28605 +#ifdef CONFIG_PAX_REFCOUNT
28606 + into
28607 +1234:
28608 + _ASM_EXTABLE(1234b, 1f)
28609 +#endif
28610 +
28611 js 1f
28612 movl %eax, (v)
28613 movl %edx, 4(v)
28614 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
28615 index f5cc9eb..51fa319 100644
28616 --- a/arch/x86/lib/atomic64_cx8_32.S
28617 +++ b/arch/x86/lib/atomic64_cx8_32.S
28618 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
28619 CFI_STARTPROC
28620
28621 read64 %ecx
28622 + pax_force_retaddr
28623 ret
28624 CFI_ENDPROC
28625 ENDPROC(atomic64_read_cx8)
28626
28627 +ENTRY(atomic64_read_unchecked_cx8)
28628 + CFI_STARTPROC
28629 +
28630 + read64 %ecx
28631 + pax_force_retaddr
28632 + ret
28633 + CFI_ENDPROC
28634 +ENDPROC(atomic64_read_unchecked_cx8)
28635 +
28636 ENTRY(atomic64_set_cx8)
28637 CFI_STARTPROC
28638
28639 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
28640 cmpxchg8b (%esi)
28641 jne 1b
28642
28643 + pax_force_retaddr
28644 ret
28645 CFI_ENDPROC
28646 ENDPROC(atomic64_set_cx8)
28647
28648 +ENTRY(atomic64_set_unchecked_cx8)
28649 + CFI_STARTPROC
28650 +
28651 +1:
28652 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
28653 + * are atomic on 586 and newer */
28654 + cmpxchg8b (%esi)
28655 + jne 1b
28656 +
28657 + pax_force_retaddr
28658 + ret
28659 + CFI_ENDPROC
28660 +ENDPROC(atomic64_set_unchecked_cx8)
28661 +
28662 ENTRY(atomic64_xchg_cx8)
28663 CFI_STARTPROC
28664
28665 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
28666 cmpxchg8b (%esi)
28667 jne 1b
28668
28669 + pax_force_retaddr
28670 ret
28671 CFI_ENDPROC
28672 ENDPROC(atomic64_xchg_cx8)
28673
28674 -.macro addsub_return func ins insc
28675 -ENTRY(atomic64_\func\()_return_cx8)
28676 +.macro addsub_return func ins insc unchecked=""
28677 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28678 CFI_STARTPROC
28679 SAVE ebp
28680 SAVE ebx
28681 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
28682 movl %edx, %ecx
28683 \ins\()l %esi, %ebx
28684 \insc\()l %edi, %ecx
28685 +
28686 +.ifb \unchecked
28687 +#ifdef CONFIG_PAX_REFCOUNT
28688 + into
28689 +2:
28690 + _ASM_EXTABLE(2b, 3f)
28691 +#endif
28692 +.endif
28693 +
28694 LOCK_PREFIX
28695 cmpxchg8b (%ebp)
28696 jne 1b
28697 -
28698 -10:
28699 movl %ebx, %eax
28700 movl %ecx, %edx
28701 +
28702 +.ifb \unchecked
28703 +#ifdef CONFIG_PAX_REFCOUNT
28704 +3:
28705 +#endif
28706 +.endif
28707 +
28708 RESTORE edi
28709 RESTORE esi
28710 RESTORE ebx
28711 RESTORE ebp
28712 + pax_force_retaddr
28713 ret
28714 CFI_ENDPROC
28715 -ENDPROC(atomic64_\func\()_return_cx8)
28716 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28717 .endm
28718
28719 addsub_return add add adc
28720 addsub_return sub sub sbb
28721 +addsub_return add add adc _unchecked
28722 +addsub_return sub sub sbb _unchecked
28723
28724 -.macro incdec_return func ins insc
28725 -ENTRY(atomic64_\func\()_return_cx8)
28726 +.macro incdec_return func ins insc unchecked=""
28727 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28728 CFI_STARTPROC
28729 SAVE ebx
28730
28731 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
28732 movl %edx, %ecx
28733 \ins\()l $1, %ebx
28734 \insc\()l $0, %ecx
28735 +
28736 +.ifb \unchecked
28737 +#ifdef CONFIG_PAX_REFCOUNT
28738 + into
28739 +2:
28740 + _ASM_EXTABLE(2b, 3f)
28741 +#endif
28742 +.endif
28743 +
28744 LOCK_PREFIX
28745 cmpxchg8b (%esi)
28746 jne 1b
28747
28748 -10:
28749 movl %ebx, %eax
28750 movl %ecx, %edx
28751 +
28752 +.ifb \unchecked
28753 +#ifdef CONFIG_PAX_REFCOUNT
28754 +3:
28755 +#endif
28756 +.endif
28757 +
28758 RESTORE ebx
28759 + pax_force_retaddr
28760 ret
28761 CFI_ENDPROC
28762 -ENDPROC(atomic64_\func\()_return_cx8)
28763 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28764 .endm
28765
28766 incdec_return inc add adc
28767 incdec_return dec sub sbb
28768 +incdec_return inc add adc _unchecked
28769 +incdec_return dec sub sbb _unchecked
28770
28771 ENTRY(atomic64_dec_if_positive_cx8)
28772 CFI_STARTPROC
28773 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
28774 movl %edx, %ecx
28775 subl $1, %ebx
28776 sbb $0, %ecx
28777 +
28778 +#ifdef CONFIG_PAX_REFCOUNT
28779 + into
28780 +1234:
28781 + _ASM_EXTABLE(1234b, 2f)
28782 +#endif
28783 +
28784 js 2f
28785 LOCK_PREFIX
28786 cmpxchg8b (%esi)
28787 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
28788 movl %ebx, %eax
28789 movl %ecx, %edx
28790 RESTORE ebx
28791 + pax_force_retaddr
28792 ret
28793 CFI_ENDPROC
28794 ENDPROC(atomic64_dec_if_positive_cx8)
28795 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
28796 movl %edx, %ecx
28797 addl %ebp, %ebx
28798 adcl %edi, %ecx
28799 +
28800 +#ifdef CONFIG_PAX_REFCOUNT
28801 + into
28802 +1234:
28803 + _ASM_EXTABLE(1234b, 3f)
28804 +#endif
28805 +
28806 LOCK_PREFIX
28807 cmpxchg8b (%esi)
28808 jne 1b
28809 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
28810 CFI_ADJUST_CFA_OFFSET -8
28811 RESTORE ebx
28812 RESTORE ebp
28813 + pax_force_retaddr
28814 ret
28815 4:
28816 cmpl %edx, 4(%esp)
28817 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
28818 xorl %ecx, %ecx
28819 addl $1, %ebx
28820 adcl %edx, %ecx
28821 +
28822 +#ifdef CONFIG_PAX_REFCOUNT
28823 + into
28824 +1234:
28825 + _ASM_EXTABLE(1234b, 3f)
28826 +#endif
28827 +
28828 LOCK_PREFIX
28829 cmpxchg8b (%esi)
28830 jne 1b
28831 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
28832 movl $1, %eax
28833 3:
28834 RESTORE ebx
28835 + pax_force_retaddr
28836 ret
28837 CFI_ENDPROC
28838 ENDPROC(atomic64_inc_not_zero_cx8)
28839 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
28840 index e78b8ee..7e173a8 100644
28841 --- a/arch/x86/lib/checksum_32.S
28842 +++ b/arch/x86/lib/checksum_32.S
28843 @@ -29,7 +29,8 @@
28844 #include <asm/dwarf2.h>
28845 #include <asm/errno.h>
28846 #include <asm/asm.h>
28847 -
28848 +#include <asm/segment.h>
28849 +
28850 /*
28851 * computes a partial checksum, e.g. for TCP/UDP fragments
28852 */
28853 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
28854
28855 #define ARGBASE 16
28856 #define FP 12
28857 -
28858 -ENTRY(csum_partial_copy_generic)
28859 +
28860 +ENTRY(csum_partial_copy_generic_to_user)
28861 CFI_STARTPROC
28862 +
28863 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28864 + pushl_cfi %gs
28865 + popl_cfi %es
28866 + jmp csum_partial_copy_generic
28867 +#endif
28868 +
28869 +ENTRY(csum_partial_copy_generic_from_user)
28870 +
28871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28872 + pushl_cfi %gs
28873 + popl_cfi %ds
28874 +#endif
28875 +
28876 +ENTRY(csum_partial_copy_generic)
28877 subl $4,%esp
28878 CFI_ADJUST_CFA_OFFSET 4
28879 pushl_cfi %edi
28880 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
28881 jmp 4f
28882 SRC(1: movw (%esi), %bx )
28883 addl $2, %esi
28884 -DST( movw %bx, (%edi) )
28885 +DST( movw %bx, %es:(%edi) )
28886 addl $2, %edi
28887 addw %bx, %ax
28888 adcl $0, %eax
28889 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
28890 SRC(1: movl (%esi), %ebx )
28891 SRC( movl 4(%esi), %edx )
28892 adcl %ebx, %eax
28893 -DST( movl %ebx, (%edi) )
28894 +DST( movl %ebx, %es:(%edi) )
28895 adcl %edx, %eax
28896 -DST( movl %edx, 4(%edi) )
28897 +DST( movl %edx, %es:4(%edi) )
28898
28899 SRC( movl 8(%esi), %ebx )
28900 SRC( movl 12(%esi), %edx )
28901 adcl %ebx, %eax
28902 -DST( movl %ebx, 8(%edi) )
28903 +DST( movl %ebx, %es:8(%edi) )
28904 adcl %edx, %eax
28905 -DST( movl %edx, 12(%edi) )
28906 +DST( movl %edx, %es:12(%edi) )
28907
28908 SRC( movl 16(%esi), %ebx )
28909 SRC( movl 20(%esi), %edx )
28910 adcl %ebx, %eax
28911 -DST( movl %ebx, 16(%edi) )
28912 +DST( movl %ebx, %es:16(%edi) )
28913 adcl %edx, %eax
28914 -DST( movl %edx, 20(%edi) )
28915 +DST( movl %edx, %es:20(%edi) )
28916
28917 SRC( movl 24(%esi), %ebx )
28918 SRC( movl 28(%esi), %edx )
28919 adcl %ebx, %eax
28920 -DST( movl %ebx, 24(%edi) )
28921 +DST( movl %ebx, %es:24(%edi) )
28922 adcl %edx, %eax
28923 -DST( movl %edx, 28(%edi) )
28924 +DST( movl %edx, %es:28(%edi) )
28925
28926 lea 32(%esi), %esi
28927 lea 32(%edi), %edi
28928 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
28929 shrl $2, %edx # This clears CF
28930 SRC(3: movl (%esi), %ebx )
28931 adcl %ebx, %eax
28932 -DST( movl %ebx, (%edi) )
28933 +DST( movl %ebx, %es:(%edi) )
28934 lea 4(%esi), %esi
28935 lea 4(%edi), %edi
28936 dec %edx
28937 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
28938 jb 5f
28939 SRC( movw (%esi), %cx )
28940 leal 2(%esi), %esi
28941 -DST( movw %cx, (%edi) )
28942 +DST( movw %cx, %es:(%edi) )
28943 leal 2(%edi), %edi
28944 je 6f
28945 shll $16,%ecx
28946 SRC(5: movb (%esi), %cl )
28947 -DST( movb %cl, (%edi) )
28948 +DST( movb %cl, %es:(%edi) )
28949 6: addl %ecx, %eax
28950 adcl $0, %eax
28951 7:
28952 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
28953
28954 6001:
28955 movl ARGBASE+20(%esp), %ebx # src_err_ptr
28956 - movl $-EFAULT, (%ebx)
28957 + movl $-EFAULT, %ss:(%ebx)
28958
28959 # zero the complete destination - computing the rest
28960 # is too much work
28961 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
28962
28963 6002:
28964 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28965 - movl $-EFAULT,(%ebx)
28966 + movl $-EFAULT,%ss:(%ebx)
28967 jmp 5000b
28968
28969 .previous
28970
28971 + pushl_cfi %ss
28972 + popl_cfi %ds
28973 + pushl_cfi %ss
28974 + popl_cfi %es
28975 popl_cfi %ebx
28976 CFI_RESTORE ebx
28977 popl_cfi %esi
28978 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
28979 popl_cfi %ecx # equivalent to addl $4,%esp
28980 ret
28981 CFI_ENDPROC
28982 -ENDPROC(csum_partial_copy_generic)
28983 +ENDPROC(csum_partial_copy_generic_to_user)
28984
28985 #else
28986
28987 /* Version for PentiumII/PPro */
28988
28989 #define ROUND1(x) \
28990 + nop; nop; nop; \
28991 SRC(movl x(%esi), %ebx ) ; \
28992 addl %ebx, %eax ; \
28993 - DST(movl %ebx, x(%edi) ) ;
28994 + DST(movl %ebx, %es:x(%edi)) ;
28995
28996 #define ROUND(x) \
28997 + nop; nop; nop; \
28998 SRC(movl x(%esi), %ebx ) ; \
28999 adcl %ebx, %eax ; \
29000 - DST(movl %ebx, x(%edi) ) ;
29001 + DST(movl %ebx, %es:x(%edi)) ;
29002
29003 #define ARGBASE 12
29004 -
29005 -ENTRY(csum_partial_copy_generic)
29006 +
29007 +ENTRY(csum_partial_copy_generic_to_user)
29008 CFI_STARTPROC
29009 +
29010 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29011 + pushl_cfi %gs
29012 + popl_cfi %es
29013 + jmp csum_partial_copy_generic
29014 +#endif
29015 +
29016 +ENTRY(csum_partial_copy_generic_from_user)
29017 +
29018 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29019 + pushl_cfi %gs
29020 + popl_cfi %ds
29021 +#endif
29022 +
29023 +ENTRY(csum_partial_copy_generic)
29024 pushl_cfi %ebx
29025 CFI_REL_OFFSET ebx, 0
29026 pushl_cfi %edi
29027 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29028 subl %ebx, %edi
29029 lea -1(%esi),%edx
29030 andl $-32,%edx
29031 - lea 3f(%ebx,%ebx), %ebx
29032 + lea 3f(%ebx,%ebx,2), %ebx
29033 testl %esi, %esi
29034 jmp *%ebx
29035 1: addl $64,%esi
29036 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29037 jb 5f
29038 SRC( movw (%esi), %dx )
29039 leal 2(%esi), %esi
29040 -DST( movw %dx, (%edi) )
29041 +DST( movw %dx, %es:(%edi) )
29042 leal 2(%edi), %edi
29043 je 6f
29044 shll $16,%edx
29045 5:
29046 SRC( movb (%esi), %dl )
29047 -DST( movb %dl, (%edi) )
29048 +DST( movb %dl, %es:(%edi) )
29049 6: addl %edx, %eax
29050 adcl $0, %eax
29051 7:
29052 .section .fixup, "ax"
29053 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29054 - movl $-EFAULT, (%ebx)
29055 + movl $-EFAULT, %ss:(%ebx)
29056 # zero the complete destination (computing the rest is too much work)
29057 movl ARGBASE+8(%esp),%edi # dst
29058 movl ARGBASE+12(%esp),%ecx # len
29059 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29060 rep; stosb
29061 jmp 7b
29062 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29063 - movl $-EFAULT, (%ebx)
29064 + movl $-EFAULT, %ss:(%ebx)
29065 jmp 7b
29066 .previous
29067
29068 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29069 + pushl_cfi %ss
29070 + popl_cfi %ds
29071 + pushl_cfi %ss
29072 + popl_cfi %es
29073 +#endif
29074 +
29075 popl_cfi %esi
29076 CFI_RESTORE esi
29077 popl_cfi %edi
29078 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29079 CFI_RESTORE ebx
29080 ret
29081 CFI_ENDPROC
29082 -ENDPROC(csum_partial_copy_generic)
29083 +ENDPROC(csum_partial_copy_generic_to_user)
29084
29085 #undef ROUND
29086 #undef ROUND1
29087 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29088 index f2145cf..cea889d 100644
29089 --- a/arch/x86/lib/clear_page_64.S
29090 +++ b/arch/x86/lib/clear_page_64.S
29091 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29092 movl $4096/8,%ecx
29093 xorl %eax,%eax
29094 rep stosq
29095 + pax_force_retaddr
29096 ret
29097 CFI_ENDPROC
29098 ENDPROC(clear_page_c)
29099 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29100 movl $4096,%ecx
29101 xorl %eax,%eax
29102 rep stosb
29103 + pax_force_retaddr
29104 ret
29105 CFI_ENDPROC
29106 ENDPROC(clear_page_c_e)
29107 @@ -43,6 +45,7 @@ ENTRY(clear_page)
29108 leaq 64(%rdi),%rdi
29109 jnz .Lloop
29110 nop
29111 + pax_force_retaddr
29112 ret
29113 CFI_ENDPROC
29114 .Lclear_page_end:
29115 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
29116
29117 #include <asm/cpufeature.h>
29118
29119 - .section .altinstr_replacement,"ax"
29120 + .section .altinstr_replacement,"a"
29121 1: .byte 0xeb /* jmp <disp8> */
29122 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29123 2: .byte 0xeb /* jmp <disp8> */
29124 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29125 index 1e572c5..2a162cd 100644
29126 --- a/arch/x86/lib/cmpxchg16b_emu.S
29127 +++ b/arch/x86/lib/cmpxchg16b_emu.S
29128 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29129
29130 popf
29131 mov $1, %al
29132 + pax_force_retaddr
29133 ret
29134
29135 not_same:
29136 popf
29137 xor %al,%al
29138 + pax_force_retaddr
29139 ret
29140
29141 CFI_ENDPROC
29142 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29143 index 176cca6..e0d658e 100644
29144 --- a/arch/x86/lib/copy_page_64.S
29145 +++ b/arch/x86/lib/copy_page_64.S
29146 @@ -9,6 +9,7 @@ copy_page_rep:
29147 CFI_STARTPROC
29148 movl $4096/8, %ecx
29149 rep movsq
29150 + pax_force_retaddr
29151 ret
29152 CFI_ENDPROC
29153 ENDPROC(copy_page_rep)
29154 @@ -24,8 +25,8 @@ ENTRY(copy_page)
29155 CFI_ADJUST_CFA_OFFSET 2*8
29156 movq %rbx, (%rsp)
29157 CFI_REL_OFFSET rbx, 0
29158 - movq %r12, 1*8(%rsp)
29159 - CFI_REL_OFFSET r12, 1*8
29160 + movq %r13, 1*8(%rsp)
29161 + CFI_REL_OFFSET r13, 1*8
29162
29163 movl $(4096/64)-5, %ecx
29164 .p2align 4
29165 @@ -38,7 +39,7 @@ ENTRY(copy_page)
29166 movq 0x8*4(%rsi), %r9
29167 movq 0x8*5(%rsi), %r10
29168 movq 0x8*6(%rsi), %r11
29169 - movq 0x8*7(%rsi), %r12
29170 + movq 0x8*7(%rsi), %r13
29171
29172 prefetcht0 5*64(%rsi)
29173
29174 @@ -49,7 +50,7 @@ ENTRY(copy_page)
29175 movq %r9, 0x8*4(%rdi)
29176 movq %r10, 0x8*5(%rdi)
29177 movq %r11, 0x8*6(%rdi)
29178 - movq %r12, 0x8*7(%rdi)
29179 + movq %r13, 0x8*7(%rdi)
29180
29181 leaq 64 (%rsi), %rsi
29182 leaq 64 (%rdi), %rdi
29183 @@ -68,7 +69,7 @@ ENTRY(copy_page)
29184 movq 0x8*4(%rsi), %r9
29185 movq 0x8*5(%rsi), %r10
29186 movq 0x8*6(%rsi), %r11
29187 - movq 0x8*7(%rsi), %r12
29188 + movq 0x8*7(%rsi), %r13
29189
29190 movq %rax, 0x8*0(%rdi)
29191 movq %rbx, 0x8*1(%rdi)
29192 @@ -77,7 +78,7 @@ ENTRY(copy_page)
29193 movq %r9, 0x8*4(%rdi)
29194 movq %r10, 0x8*5(%rdi)
29195 movq %r11, 0x8*6(%rdi)
29196 - movq %r12, 0x8*7(%rdi)
29197 + movq %r13, 0x8*7(%rdi)
29198
29199 leaq 64(%rdi), %rdi
29200 leaq 64(%rsi), %rsi
29201 @@ -85,10 +86,11 @@ ENTRY(copy_page)
29202
29203 movq (%rsp), %rbx
29204 CFI_RESTORE rbx
29205 - movq 1*8(%rsp), %r12
29206 - CFI_RESTORE r12
29207 + movq 1*8(%rsp), %r13
29208 + CFI_RESTORE r13
29209 addq $2*8, %rsp
29210 CFI_ADJUST_CFA_OFFSET -2*8
29211 + pax_force_retaddr
29212 ret
29213 .Lcopy_page_end:
29214 CFI_ENDPROC
29215 @@ -99,7 +101,7 @@ ENDPROC(copy_page)
29216
29217 #include <asm/cpufeature.h>
29218
29219 - .section .altinstr_replacement,"ax"
29220 + .section .altinstr_replacement,"a"
29221 1: .byte 0xeb /* jmp <disp8> */
29222 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29223 2:
29224 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29225 index a30ca15..407412b 100644
29226 --- a/arch/x86/lib/copy_user_64.S
29227 +++ b/arch/x86/lib/copy_user_64.S
29228 @@ -18,31 +18,7 @@
29229 #include <asm/alternative-asm.h>
29230 #include <asm/asm.h>
29231 #include <asm/smap.h>
29232 -
29233 -/*
29234 - * By placing feature2 after feature1 in altinstructions section, we logically
29235 - * implement:
29236 - * If CPU has feature2, jmp to alt2 is used
29237 - * else if CPU has feature1, jmp to alt1 is used
29238 - * else jmp to orig is used.
29239 - */
29240 - .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29241 -0:
29242 - .byte 0xe9 /* 32bit jump */
29243 - .long \orig-1f /* by default jump to orig */
29244 -1:
29245 - .section .altinstr_replacement,"ax"
29246 -2: .byte 0xe9 /* near jump with 32bit immediate */
29247 - .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29248 -3: .byte 0xe9 /* near jump with 32bit immediate */
29249 - .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29250 - .previous
29251 -
29252 - .section .altinstructions,"a"
29253 - altinstruction_entry 0b,2b,\feature1,5,5
29254 - altinstruction_entry 0b,3b,\feature2,5,5
29255 - .previous
29256 - .endm
29257 +#include <asm/pgtable.h>
29258
29259 .macro ALIGN_DESTINATION
29260 #ifdef FIX_ALIGNMENT
29261 @@ -70,52 +46,6 @@
29262 #endif
29263 .endm
29264
29265 -/* Standard copy_to_user with segment limit checking */
29266 -ENTRY(_copy_to_user)
29267 - CFI_STARTPROC
29268 - GET_THREAD_INFO(%rax)
29269 - movq %rdi,%rcx
29270 - addq %rdx,%rcx
29271 - jc bad_to_user
29272 - cmpq TI_addr_limit(%rax),%rcx
29273 - ja bad_to_user
29274 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29275 - copy_user_generic_unrolled,copy_user_generic_string, \
29276 - copy_user_enhanced_fast_string
29277 - CFI_ENDPROC
29278 -ENDPROC(_copy_to_user)
29279 -
29280 -/* Standard copy_from_user with segment limit checking */
29281 -ENTRY(_copy_from_user)
29282 - CFI_STARTPROC
29283 - GET_THREAD_INFO(%rax)
29284 - movq %rsi,%rcx
29285 - addq %rdx,%rcx
29286 - jc bad_from_user
29287 - cmpq TI_addr_limit(%rax),%rcx
29288 - ja bad_from_user
29289 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29290 - copy_user_generic_unrolled,copy_user_generic_string, \
29291 - copy_user_enhanced_fast_string
29292 - CFI_ENDPROC
29293 -ENDPROC(_copy_from_user)
29294 -
29295 - .section .fixup,"ax"
29296 - /* must zero dest */
29297 -ENTRY(bad_from_user)
29298 -bad_from_user:
29299 - CFI_STARTPROC
29300 - movl %edx,%ecx
29301 - xorl %eax,%eax
29302 - rep
29303 - stosb
29304 -bad_to_user:
29305 - movl %edx,%eax
29306 - ret
29307 - CFI_ENDPROC
29308 -ENDPROC(bad_from_user)
29309 - .previous
29310 -
29311 /*
29312 * copy_user_generic_unrolled - memory copy with exception handling.
29313 * This version is for CPUs like P4 that don't have efficient micro
29314 @@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29315 */
29316 ENTRY(copy_user_generic_unrolled)
29317 CFI_STARTPROC
29318 + ASM_PAX_OPEN_USERLAND
29319 ASM_STAC
29320 cmpl $8,%edx
29321 jb 20f /* less then 8 bytes, go to byte copy loop */
29322 @@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29323 jnz 21b
29324 23: xor %eax,%eax
29325 ASM_CLAC
29326 + ASM_PAX_CLOSE_USERLAND
29327 + pax_force_retaddr
29328 ret
29329
29330 .section .fixup,"ax"
29331 @@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29332 */
29333 ENTRY(copy_user_generic_string)
29334 CFI_STARTPROC
29335 + ASM_PAX_OPEN_USERLAND
29336 ASM_STAC
29337 andl %edx,%edx
29338 jz 4f
29339 @@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
29340 movsb
29341 4: xorl %eax,%eax
29342 ASM_CLAC
29343 + ASM_PAX_CLOSE_USERLAND
29344 + pax_force_retaddr
29345 ret
29346
29347 .section .fixup,"ax"
29348 @@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
29349 */
29350 ENTRY(copy_user_enhanced_fast_string)
29351 CFI_STARTPROC
29352 + ASM_PAX_OPEN_USERLAND
29353 ASM_STAC
29354 andl %edx,%edx
29355 jz 2f
29356 @@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
29357 movsb
29358 2: xorl %eax,%eax
29359 ASM_CLAC
29360 + ASM_PAX_CLOSE_USERLAND
29361 + pax_force_retaddr
29362 ret
29363
29364 .section .fixup,"ax"
29365 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29366 index 6a4f43c..c70fb52 100644
29367 --- a/arch/x86/lib/copy_user_nocache_64.S
29368 +++ b/arch/x86/lib/copy_user_nocache_64.S
29369 @@ -8,6 +8,7 @@
29370
29371 #include <linux/linkage.h>
29372 #include <asm/dwarf2.h>
29373 +#include <asm/alternative-asm.h>
29374
29375 #define FIX_ALIGNMENT 1
29376
29377 @@ -16,6 +17,7 @@
29378 #include <asm/thread_info.h>
29379 #include <asm/asm.h>
29380 #include <asm/smap.h>
29381 +#include <asm/pgtable.h>
29382
29383 .macro ALIGN_DESTINATION
29384 #ifdef FIX_ALIGNMENT
29385 @@ -49,6 +51,16 @@
29386 */
29387 ENTRY(__copy_user_nocache)
29388 CFI_STARTPROC
29389 +
29390 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29391 + mov pax_user_shadow_base,%rcx
29392 + cmp %rcx,%rsi
29393 + jae 1f
29394 + add %rcx,%rsi
29395 +1:
29396 +#endif
29397 +
29398 + ASM_PAX_OPEN_USERLAND
29399 ASM_STAC
29400 cmpl $8,%edx
29401 jb 20f /* less then 8 bytes, go to byte copy loop */
29402 @@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
29403 jnz 21b
29404 23: xorl %eax,%eax
29405 ASM_CLAC
29406 + ASM_PAX_CLOSE_USERLAND
29407 sfence
29408 + pax_force_retaddr
29409 ret
29410
29411 .section .fixup,"ax"
29412 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
29413 index 2419d5f..fe52d0e 100644
29414 --- a/arch/x86/lib/csum-copy_64.S
29415 +++ b/arch/x86/lib/csum-copy_64.S
29416 @@ -9,6 +9,7 @@
29417 #include <asm/dwarf2.h>
29418 #include <asm/errno.h>
29419 #include <asm/asm.h>
29420 +#include <asm/alternative-asm.h>
29421
29422 /*
29423 * Checksum copy with exception handling.
29424 @@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
29425 CFI_ADJUST_CFA_OFFSET 7*8
29426 movq %rbx, 2*8(%rsp)
29427 CFI_REL_OFFSET rbx, 2*8
29428 - movq %r12, 3*8(%rsp)
29429 - CFI_REL_OFFSET r12, 3*8
29430 + movq %r15, 3*8(%rsp)
29431 + CFI_REL_OFFSET r15, 3*8
29432 movq %r14, 4*8(%rsp)
29433 CFI_REL_OFFSET r14, 4*8
29434 movq %r13, 5*8(%rsp)
29435 @@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
29436 movl %edx, %ecx
29437
29438 xorl %r9d, %r9d
29439 - movq %rcx, %r12
29440 + movq %rcx, %r15
29441
29442 - shrq $6, %r12
29443 + shrq $6, %r15
29444 jz .Lhandle_tail /* < 64 */
29445
29446 clc
29447
29448 /* main loop. clear in 64 byte blocks */
29449 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
29450 - /* r11: temp3, rdx: temp4, r12 loopcnt */
29451 + /* r11: temp3, rdx: temp4, r15 loopcnt */
29452 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
29453 .p2align 4
29454 .Lloop:
29455 @@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
29456 adcq %r14, %rax
29457 adcq %r13, %rax
29458
29459 - decl %r12d
29460 + decl %r15d
29461
29462 dest
29463 movq %rbx, (%rsi)
29464 @@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
29465 .Lende:
29466 movq 2*8(%rsp), %rbx
29467 CFI_RESTORE rbx
29468 - movq 3*8(%rsp), %r12
29469 - CFI_RESTORE r12
29470 + movq 3*8(%rsp), %r15
29471 + CFI_RESTORE r15
29472 movq 4*8(%rsp), %r14
29473 CFI_RESTORE r14
29474 movq 5*8(%rsp), %r13
29475 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
29476 CFI_RESTORE rbp
29477 addq $7*8, %rsp
29478 CFI_ADJUST_CFA_OFFSET -7*8
29479 + pax_force_retaddr
29480 ret
29481 CFI_RESTORE_STATE
29482
29483 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
29484 index 7609e0e..b449b98 100644
29485 --- a/arch/x86/lib/csum-wrappers_64.c
29486 +++ b/arch/x86/lib/csum-wrappers_64.c
29487 @@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
29488 len -= 2;
29489 }
29490 }
29491 + pax_open_userland();
29492 stac();
29493 - isum = csum_partial_copy_generic((__force const void *)src,
29494 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
29495 dst, len, isum, errp, NULL);
29496 clac();
29497 + pax_close_userland();
29498 if (unlikely(*errp))
29499 goto out_err;
29500
29501 @@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
29502 }
29503
29504 *errp = 0;
29505 + pax_open_userland();
29506 stac();
29507 - ret = csum_partial_copy_generic(src, (void __force *)dst,
29508 + ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
29509 len, isum, NULL, errp);
29510 clac();
29511 + pax_close_userland();
29512 return ret;
29513 }
29514 EXPORT_SYMBOL(csum_partial_copy_to_user);
29515 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
29516 index a451235..1daa956 100644
29517 --- a/arch/x86/lib/getuser.S
29518 +++ b/arch/x86/lib/getuser.S
29519 @@ -33,17 +33,40 @@
29520 #include <asm/thread_info.h>
29521 #include <asm/asm.h>
29522 #include <asm/smap.h>
29523 +#include <asm/segment.h>
29524 +#include <asm/pgtable.h>
29525 +#include <asm/alternative-asm.h>
29526 +
29527 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29528 +#define __copyuser_seg gs;
29529 +#else
29530 +#define __copyuser_seg
29531 +#endif
29532
29533 .text
29534 ENTRY(__get_user_1)
29535 CFI_STARTPROC
29536 +
29537 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29538 GET_THREAD_INFO(%_ASM_DX)
29539 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29540 jae bad_get_user
29541 ASM_STAC
29542 -1: movzbl (%_ASM_AX),%edx
29543 +
29544 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29545 + mov pax_user_shadow_base,%_ASM_DX
29546 + cmp %_ASM_DX,%_ASM_AX
29547 + jae 1234f
29548 + add %_ASM_DX,%_ASM_AX
29549 +1234:
29550 +#endif
29551 +
29552 +#endif
29553 +
29554 +1: __copyuser_seg movzbl (%_ASM_AX),%edx
29555 xor %eax,%eax
29556 ASM_CLAC
29557 + pax_force_retaddr
29558 ret
29559 CFI_ENDPROC
29560 ENDPROC(__get_user_1)
29561 @@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
29562 ENTRY(__get_user_2)
29563 CFI_STARTPROC
29564 add $1,%_ASM_AX
29565 +
29566 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29567 jc bad_get_user
29568 GET_THREAD_INFO(%_ASM_DX)
29569 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29570 jae bad_get_user
29571 ASM_STAC
29572 -2: movzwl -1(%_ASM_AX),%edx
29573 +
29574 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29575 + mov pax_user_shadow_base,%_ASM_DX
29576 + cmp %_ASM_DX,%_ASM_AX
29577 + jae 1234f
29578 + add %_ASM_DX,%_ASM_AX
29579 +1234:
29580 +#endif
29581 +
29582 +#endif
29583 +
29584 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
29585 xor %eax,%eax
29586 ASM_CLAC
29587 + pax_force_retaddr
29588 ret
29589 CFI_ENDPROC
29590 ENDPROC(__get_user_2)
29591 @@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
29592 ENTRY(__get_user_4)
29593 CFI_STARTPROC
29594 add $3,%_ASM_AX
29595 +
29596 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29597 jc bad_get_user
29598 GET_THREAD_INFO(%_ASM_DX)
29599 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29600 jae bad_get_user
29601 ASM_STAC
29602 -3: movl -3(%_ASM_AX),%edx
29603 +
29604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29605 + mov pax_user_shadow_base,%_ASM_DX
29606 + cmp %_ASM_DX,%_ASM_AX
29607 + jae 1234f
29608 + add %_ASM_DX,%_ASM_AX
29609 +1234:
29610 +#endif
29611 +
29612 +#endif
29613 +
29614 +3: __copyuser_seg movl -3(%_ASM_AX),%edx
29615 xor %eax,%eax
29616 ASM_CLAC
29617 + pax_force_retaddr
29618 ret
29619 CFI_ENDPROC
29620 ENDPROC(__get_user_4)
29621 @@ -86,10 +137,20 @@ ENTRY(__get_user_8)
29622 GET_THREAD_INFO(%_ASM_DX)
29623 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29624 jae bad_get_user
29625 +
29626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
29627 + mov pax_user_shadow_base,%_ASM_DX
29628 + cmp %_ASM_DX,%_ASM_AX
29629 + jae 1234f
29630 + add %_ASM_DX,%_ASM_AX
29631 +1234:
29632 +#endif
29633 +
29634 ASM_STAC
29635 4: movq -7(%_ASM_AX),%rdx
29636 xor %eax,%eax
29637 ASM_CLAC
29638 + pax_force_retaddr
29639 ret
29640 #else
29641 add $7,%_ASM_AX
29642 @@ -98,10 +159,11 @@ ENTRY(__get_user_8)
29643 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29644 jae bad_get_user_8
29645 ASM_STAC
29646 -4: movl -7(%_ASM_AX),%edx
29647 -5: movl -3(%_ASM_AX),%ecx
29648 +4: __copyuser_seg movl -7(%_ASM_AX),%edx
29649 +5: __copyuser_seg movl -3(%_ASM_AX),%ecx
29650 xor %eax,%eax
29651 ASM_CLAC
29652 + pax_force_retaddr
29653 ret
29654 #endif
29655 CFI_ENDPROC
29656 @@ -113,6 +175,7 @@ bad_get_user:
29657 xor %edx,%edx
29658 mov $(-EFAULT),%_ASM_AX
29659 ASM_CLAC
29660 + pax_force_retaddr
29661 ret
29662 CFI_ENDPROC
29663 END(bad_get_user)
29664 @@ -124,6 +187,7 @@ bad_get_user_8:
29665 xor %ecx,%ecx
29666 mov $(-EFAULT),%_ASM_AX
29667 ASM_CLAC
29668 + pax_force_retaddr
29669 ret
29670 CFI_ENDPROC
29671 END(bad_get_user_8)
29672 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
29673 index 54fcffe..7be149e 100644
29674 --- a/arch/x86/lib/insn.c
29675 +++ b/arch/x86/lib/insn.c
29676 @@ -20,8 +20,10 @@
29677
29678 #ifdef __KERNEL__
29679 #include <linux/string.h>
29680 +#include <asm/pgtable_types.h>
29681 #else
29682 #include <string.h>
29683 +#define ktla_ktva(addr) addr
29684 #endif
29685 #include <asm/inat.h>
29686 #include <asm/insn.h>
29687 @@ -53,8 +55,8 @@
29688 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
29689 {
29690 memset(insn, 0, sizeof(*insn));
29691 - insn->kaddr = kaddr;
29692 - insn->next_byte = kaddr;
29693 + insn->kaddr = ktla_ktva(kaddr);
29694 + insn->next_byte = ktla_ktva(kaddr);
29695 insn->x86_64 = x86_64 ? 1 : 0;
29696 insn->opnd_bytes = 4;
29697 if (x86_64)
29698 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
29699 index 05a95e7..326f2fa 100644
29700 --- a/arch/x86/lib/iomap_copy_64.S
29701 +++ b/arch/x86/lib/iomap_copy_64.S
29702 @@ -17,6 +17,7 @@
29703
29704 #include <linux/linkage.h>
29705 #include <asm/dwarf2.h>
29706 +#include <asm/alternative-asm.h>
29707
29708 /*
29709 * override generic version in lib/iomap_copy.c
29710 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
29711 CFI_STARTPROC
29712 movl %edx,%ecx
29713 rep movsd
29714 + pax_force_retaddr
29715 ret
29716 CFI_ENDPROC
29717 ENDPROC(__iowrite32_copy)
29718 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
29719 index 56313a3..0db417e 100644
29720 --- a/arch/x86/lib/memcpy_64.S
29721 +++ b/arch/x86/lib/memcpy_64.S
29722 @@ -24,7 +24,7 @@
29723 * This gets patched over the unrolled variant (below) via the
29724 * alternative instructions framework:
29725 */
29726 - .section .altinstr_replacement, "ax", @progbits
29727 + .section .altinstr_replacement, "a", @progbits
29728 .Lmemcpy_c:
29729 movq %rdi, %rax
29730 movq %rdx, %rcx
29731 @@ -33,6 +33,7 @@
29732 rep movsq
29733 movl %edx, %ecx
29734 rep movsb
29735 + pax_force_retaddr
29736 ret
29737 .Lmemcpy_e:
29738 .previous
29739 @@ -44,11 +45,12 @@
29740 * This gets patched over the unrolled variant (below) via the
29741 * alternative instructions framework:
29742 */
29743 - .section .altinstr_replacement, "ax", @progbits
29744 + .section .altinstr_replacement, "a", @progbits
29745 .Lmemcpy_c_e:
29746 movq %rdi, %rax
29747 movq %rdx, %rcx
29748 rep movsb
29749 + pax_force_retaddr
29750 ret
29751 .Lmemcpy_e_e:
29752 .previous
29753 @@ -136,6 +138,7 @@ ENTRY(memcpy)
29754 movq %r9, 1*8(%rdi)
29755 movq %r10, -2*8(%rdi, %rdx)
29756 movq %r11, -1*8(%rdi, %rdx)
29757 + pax_force_retaddr
29758 retq
29759 .p2align 4
29760 .Lless_16bytes:
29761 @@ -148,6 +151,7 @@ ENTRY(memcpy)
29762 movq -1*8(%rsi, %rdx), %r9
29763 movq %r8, 0*8(%rdi)
29764 movq %r9, -1*8(%rdi, %rdx)
29765 + pax_force_retaddr
29766 retq
29767 .p2align 4
29768 .Lless_8bytes:
29769 @@ -161,6 +165,7 @@ ENTRY(memcpy)
29770 movl -4(%rsi, %rdx), %r8d
29771 movl %ecx, (%rdi)
29772 movl %r8d, -4(%rdi, %rdx)
29773 + pax_force_retaddr
29774 retq
29775 .p2align 4
29776 .Lless_3bytes:
29777 @@ -179,6 +184,7 @@ ENTRY(memcpy)
29778 movb %cl, (%rdi)
29779
29780 .Lend:
29781 + pax_force_retaddr
29782 retq
29783 CFI_ENDPROC
29784 ENDPROC(memcpy)
29785 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
29786 index 65268a6..dd1de11 100644
29787 --- a/arch/x86/lib/memmove_64.S
29788 +++ b/arch/x86/lib/memmove_64.S
29789 @@ -202,14 +202,16 @@ ENTRY(memmove)
29790 movb (%rsi), %r11b
29791 movb %r11b, (%rdi)
29792 13:
29793 + pax_force_retaddr
29794 retq
29795 CFI_ENDPROC
29796
29797 - .section .altinstr_replacement,"ax"
29798 + .section .altinstr_replacement,"a"
29799 .Lmemmove_begin_forward_efs:
29800 /* Forward moving data. */
29801 movq %rdx, %rcx
29802 rep movsb
29803 + pax_force_retaddr
29804 retq
29805 .Lmemmove_end_forward_efs:
29806 .previous
29807 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
29808 index 2dcb380..2eb79fe 100644
29809 --- a/arch/x86/lib/memset_64.S
29810 +++ b/arch/x86/lib/memset_64.S
29811 @@ -16,7 +16,7 @@
29812 *
29813 * rax original destination
29814 */
29815 - .section .altinstr_replacement, "ax", @progbits
29816 + .section .altinstr_replacement, "a", @progbits
29817 .Lmemset_c:
29818 movq %rdi,%r9
29819 movq %rdx,%rcx
29820 @@ -30,6 +30,7 @@
29821 movl %edx,%ecx
29822 rep stosb
29823 movq %r9,%rax
29824 + pax_force_retaddr
29825 ret
29826 .Lmemset_e:
29827 .previous
29828 @@ -45,13 +46,14 @@
29829 *
29830 * rax original destination
29831 */
29832 - .section .altinstr_replacement, "ax", @progbits
29833 + .section .altinstr_replacement, "a", @progbits
29834 .Lmemset_c_e:
29835 movq %rdi,%r9
29836 movb %sil,%al
29837 movq %rdx,%rcx
29838 rep stosb
29839 movq %r9,%rax
29840 + pax_force_retaddr
29841 ret
29842 .Lmemset_e_e:
29843 .previous
29844 @@ -118,6 +120,7 @@ ENTRY(__memset)
29845
29846 .Lende:
29847 movq %r10,%rax
29848 + pax_force_retaddr
29849 ret
29850
29851 CFI_RESTORE_STATE
29852 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
29853 index c9f2d9b..e7fd2c0 100644
29854 --- a/arch/x86/lib/mmx_32.c
29855 +++ b/arch/x86/lib/mmx_32.c
29856 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29857 {
29858 void *p;
29859 int i;
29860 + unsigned long cr0;
29861
29862 if (unlikely(in_interrupt()))
29863 return __memcpy(to, from, len);
29864 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29865 kernel_fpu_begin();
29866
29867 __asm__ __volatile__ (
29868 - "1: prefetch (%0)\n" /* This set is 28 bytes */
29869 - " prefetch 64(%0)\n"
29870 - " prefetch 128(%0)\n"
29871 - " prefetch 192(%0)\n"
29872 - " prefetch 256(%0)\n"
29873 + "1: prefetch (%1)\n" /* This set is 28 bytes */
29874 + " prefetch 64(%1)\n"
29875 + " prefetch 128(%1)\n"
29876 + " prefetch 192(%1)\n"
29877 + " prefetch 256(%1)\n"
29878 "2: \n"
29879 ".section .fixup, \"ax\"\n"
29880 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29881 + "3: \n"
29882 +
29883 +#ifdef CONFIG_PAX_KERNEXEC
29884 + " movl %%cr0, %0\n"
29885 + " movl %0, %%eax\n"
29886 + " andl $0xFFFEFFFF, %%eax\n"
29887 + " movl %%eax, %%cr0\n"
29888 +#endif
29889 +
29890 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29891 +
29892 +#ifdef CONFIG_PAX_KERNEXEC
29893 + " movl %0, %%cr0\n"
29894 +#endif
29895 +
29896 " jmp 2b\n"
29897 ".previous\n"
29898 _ASM_EXTABLE(1b, 3b)
29899 - : : "r" (from));
29900 + : "=&r" (cr0) : "r" (from) : "ax");
29901
29902 for ( ; i > 5; i--) {
29903 __asm__ __volatile__ (
29904 - "1: prefetch 320(%0)\n"
29905 - "2: movq (%0), %%mm0\n"
29906 - " movq 8(%0), %%mm1\n"
29907 - " movq 16(%0), %%mm2\n"
29908 - " movq 24(%0), %%mm3\n"
29909 - " movq %%mm0, (%1)\n"
29910 - " movq %%mm1, 8(%1)\n"
29911 - " movq %%mm2, 16(%1)\n"
29912 - " movq %%mm3, 24(%1)\n"
29913 - " movq 32(%0), %%mm0\n"
29914 - " movq 40(%0), %%mm1\n"
29915 - " movq 48(%0), %%mm2\n"
29916 - " movq 56(%0), %%mm3\n"
29917 - " movq %%mm0, 32(%1)\n"
29918 - " movq %%mm1, 40(%1)\n"
29919 - " movq %%mm2, 48(%1)\n"
29920 - " movq %%mm3, 56(%1)\n"
29921 + "1: prefetch 320(%1)\n"
29922 + "2: movq (%1), %%mm0\n"
29923 + " movq 8(%1), %%mm1\n"
29924 + " movq 16(%1), %%mm2\n"
29925 + " movq 24(%1), %%mm3\n"
29926 + " movq %%mm0, (%2)\n"
29927 + " movq %%mm1, 8(%2)\n"
29928 + " movq %%mm2, 16(%2)\n"
29929 + " movq %%mm3, 24(%2)\n"
29930 + " movq 32(%1), %%mm0\n"
29931 + " movq 40(%1), %%mm1\n"
29932 + " movq 48(%1), %%mm2\n"
29933 + " movq 56(%1), %%mm3\n"
29934 + " movq %%mm0, 32(%2)\n"
29935 + " movq %%mm1, 40(%2)\n"
29936 + " movq %%mm2, 48(%2)\n"
29937 + " movq %%mm3, 56(%2)\n"
29938 ".section .fixup, \"ax\"\n"
29939 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29940 + "3:\n"
29941 +
29942 +#ifdef CONFIG_PAX_KERNEXEC
29943 + " movl %%cr0, %0\n"
29944 + " movl %0, %%eax\n"
29945 + " andl $0xFFFEFFFF, %%eax\n"
29946 + " movl %%eax, %%cr0\n"
29947 +#endif
29948 +
29949 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29950 +
29951 +#ifdef CONFIG_PAX_KERNEXEC
29952 + " movl %0, %%cr0\n"
29953 +#endif
29954 +
29955 " jmp 2b\n"
29956 ".previous\n"
29957 _ASM_EXTABLE(1b, 3b)
29958 - : : "r" (from), "r" (to) : "memory");
29959 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29960
29961 from += 64;
29962 to += 64;
29963 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
29964 static void fast_copy_page(void *to, void *from)
29965 {
29966 int i;
29967 + unsigned long cr0;
29968
29969 kernel_fpu_begin();
29970
29971 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
29972 * but that is for later. -AV
29973 */
29974 __asm__ __volatile__(
29975 - "1: prefetch (%0)\n"
29976 - " prefetch 64(%0)\n"
29977 - " prefetch 128(%0)\n"
29978 - " prefetch 192(%0)\n"
29979 - " prefetch 256(%0)\n"
29980 + "1: prefetch (%1)\n"
29981 + " prefetch 64(%1)\n"
29982 + " prefetch 128(%1)\n"
29983 + " prefetch 192(%1)\n"
29984 + " prefetch 256(%1)\n"
29985 "2: \n"
29986 ".section .fixup, \"ax\"\n"
29987 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29988 + "3: \n"
29989 +
29990 +#ifdef CONFIG_PAX_KERNEXEC
29991 + " movl %%cr0, %0\n"
29992 + " movl %0, %%eax\n"
29993 + " andl $0xFFFEFFFF, %%eax\n"
29994 + " movl %%eax, %%cr0\n"
29995 +#endif
29996 +
29997 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29998 +
29999 +#ifdef CONFIG_PAX_KERNEXEC
30000 + " movl %0, %%cr0\n"
30001 +#endif
30002 +
30003 " jmp 2b\n"
30004 ".previous\n"
30005 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
30006 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30007
30008 for (i = 0; i < (4096-320)/64; i++) {
30009 __asm__ __volatile__ (
30010 - "1: prefetch 320(%0)\n"
30011 - "2: movq (%0), %%mm0\n"
30012 - " movntq %%mm0, (%1)\n"
30013 - " movq 8(%0), %%mm1\n"
30014 - " movntq %%mm1, 8(%1)\n"
30015 - " movq 16(%0), %%mm2\n"
30016 - " movntq %%mm2, 16(%1)\n"
30017 - " movq 24(%0), %%mm3\n"
30018 - " movntq %%mm3, 24(%1)\n"
30019 - " movq 32(%0), %%mm4\n"
30020 - " movntq %%mm4, 32(%1)\n"
30021 - " movq 40(%0), %%mm5\n"
30022 - " movntq %%mm5, 40(%1)\n"
30023 - " movq 48(%0), %%mm6\n"
30024 - " movntq %%mm6, 48(%1)\n"
30025 - " movq 56(%0), %%mm7\n"
30026 - " movntq %%mm7, 56(%1)\n"
30027 + "1: prefetch 320(%1)\n"
30028 + "2: movq (%1), %%mm0\n"
30029 + " movntq %%mm0, (%2)\n"
30030 + " movq 8(%1), %%mm1\n"
30031 + " movntq %%mm1, 8(%2)\n"
30032 + " movq 16(%1), %%mm2\n"
30033 + " movntq %%mm2, 16(%2)\n"
30034 + " movq 24(%1), %%mm3\n"
30035 + " movntq %%mm3, 24(%2)\n"
30036 + " movq 32(%1), %%mm4\n"
30037 + " movntq %%mm4, 32(%2)\n"
30038 + " movq 40(%1), %%mm5\n"
30039 + " movntq %%mm5, 40(%2)\n"
30040 + " movq 48(%1), %%mm6\n"
30041 + " movntq %%mm6, 48(%2)\n"
30042 + " movq 56(%1), %%mm7\n"
30043 + " movntq %%mm7, 56(%2)\n"
30044 ".section .fixup, \"ax\"\n"
30045 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30046 + "3:\n"
30047 +
30048 +#ifdef CONFIG_PAX_KERNEXEC
30049 + " movl %%cr0, %0\n"
30050 + " movl %0, %%eax\n"
30051 + " andl $0xFFFEFFFF, %%eax\n"
30052 + " movl %%eax, %%cr0\n"
30053 +#endif
30054 +
30055 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30056 +
30057 +#ifdef CONFIG_PAX_KERNEXEC
30058 + " movl %0, %%cr0\n"
30059 +#endif
30060 +
30061 " jmp 2b\n"
30062 ".previous\n"
30063 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30064 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30065
30066 from += 64;
30067 to += 64;
30068 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30069 static void fast_copy_page(void *to, void *from)
30070 {
30071 int i;
30072 + unsigned long cr0;
30073
30074 kernel_fpu_begin();
30075
30076 __asm__ __volatile__ (
30077 - "1: prefetch (%0)\n"
30078 - " prefetch 64(%0)\n"
30079 - " prefetch 128(%0)\n"
30080 - " prefetch 192(%0)\n"
30081 - " prefetch 256(%0)\n"
30082 + "1: prefetch (%1)\n"
30083 + " prefetch 64(%1)\n"
30084 + " prefetch 128(%1)\n"
30085 + " prefetch 192(%1)\n"
30086 + " prefetch 256(%1)\n"
30087 "2: \n"
30088 ".section .fixup, \"ax\"\n"
30089 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30090 + "3: \n"
30091 +
30092 +#ifdef CONFIG_PAX_KERNEXEC
30093 + " movl %%cr0, %0\n"
30094 + " movl %0, %%eax\n"
30095 + " andl $0xFFFEFFFF, %%eax\n"
30096 + " movl %%eax, %%cr0\n"
30097 +#endif
30098 +
30099 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30100 +
30101 +#ifdef CONFIG_PAX_KERNEXEC
30102 + " movl %0, %%cr0\n"
30103 +#endif
30104 +
30105 " jmp 2b\n"
30106 ".previous\n"
30107 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
30108 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30109
30110 for (i = 0; i < 4096/64; i++) {
30111 __asm__ __volatile__ (
30112 - "1: prefetch 320(%0)\n"
30113 - "2: movq (%0), %%mm0\n"
30114 - " movq 8(%0), %%mm1\n"
30115 - " movq 16(%0), %%mm2\n"
30116 - " movq 24(%0), %%mm3\n"
30117 - " movq %%mm0, (%1)\n"
30118 - " movq %%mm1, 8(%1)\n"
30119 - " movq %%mm2, 16(%1)\n"
30120 - " movq %%mm3, 24(%1)\n"
30121 - " movq 32(%0), %%mm0\n"
30122 - " movq 40(%0), %%mm1\n"
30123 - " movq 48(%0), %%mm2\n"
30124 - " movq 56(%0), %%mm3\n"
30125 - " movq %%mm0, 32(%1)\n"
30126 - " movq %%mm1, 40(%1)\n"
30127 - " movq %%mm2, 48(%1)\n"
30128 - " movq %%mm3, 56(%1)\n"
30129 + "1: prefetch 320(%1)\n"
30130 + "2: movq (%1), %%mm0\n"
30131 + " movq 8(%1), %%mm1\n"
30132 + " movq 16(%1), %%mm2\n"
30133 + " movq 24(%1), %%mm3\n"
30134 + " movq %%mm0, (%2)\n"
30135 + " movq %%mm1, 8(%2)\n"
30136 + " movq %%mm2, 16(%2)\n"
30137 + " movq %%mm3, 24(%2)\n"
30138 + " movq 32(%1), %%mm0\n"
30139 + " movq 40(%1), %%mm1\n"
30140 + " movq 48(%1), %%mm2\n"
30141 + " movq 56(%1), %%mm3\n"
30142 + " movq %%mm0, 32(%2)\n"
30143 + " movq %%mm1, 40(%2)\n"
30144 + " movq %%mm2, 48(%2)\n"
30145 + " movq %%mm3, 56(%2)\n"
30146 ".section .fixup, \"ax\"\n"
30147 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30148 + "3:\n"
30149 +
30150 +#ifdef CONFIG_PAX_KERNEXEC
30151 + " movl %%cr0, %0\n"
30152 + " movl %0, %%eax\n"
30153 + " andl $0xFFFEFFFF, %%eax\n"
30154 + " movl %%eax, %%cr0\n"
30155 +#endif
30156 +
30157 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30158 +
30159 +#ifdef CONFIG_PAX_KERNEXEC
30160 + " movl %0, %%cr0\n"
30161 +#endif
30162 +
30163 " jmp 2b\n"
30164 ".previous\n"
30165 _ASM_EXTABLE(1b, 3b)
30166 - : : "r" (from), "r" (to) : "memory");
30167 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30168
30169 from += 64;
30170 to += 64;
30171 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30172 index f6d13ee..d789440 100644
30173 --- a/arch/x86/lib/msr-reg.S
30174 +++ b/arch/x86/lib/msr-reg.S
30175 @@ -3,6 +3,7 @@
30176 #include <asm/dwarf2.h>
30177 #include <asm/asm.h>
30178 #include <asm/msr.h>
30179 +#include <asm/alternative-asm.h>
30180
30181 #ifdef CONFIG_X86_64
30182 /*
30183 @@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30184 movl %edi, 28(%r10)
30185 popq_cfi %rbp
30186 popq_cfi %rbx
30187 + pax_force_retaddr
30188 ret
30189 3:
30190 CFI_RESTORE_STATE
30191 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30192 index fc6ba17..d4d989d 100644
30193 --- a/arch/x86/lib/putuser.S
30194 +++ b/arch/x86/lib/putuser.S
30195 @@ -16,7 +16,9 @@
30196 #include <asm/errno.h>
30197 #include <asm/asm.h>
30198 #include <asm/smap.h>
30199 -
30200 +#include <asm/segment.h>
30201 +#include <asm/pgtable.h>
30202 +#include <asm/alternative-asm.h>
30203
30204 /*
30205 * __put_user_X
30206 @@ -30,57 +32,125 @@
30207 * as they get called from within inline assembly.
30208 */
30209
30210 -#define ENTER CFI_STARTPROC ; \
30211 - GET_THREAD_INFO(%_ASM_BX)
30212 -#define EXIT ASM_CLAC ; \
30213 - ret ; \
30214 +#define ENTER CFI_STARTPROC
30215 +#define EXIT ASM_CLAC ; \
30216 + pax_force_retaddr ; \
30217 + ret ; \
30218 CFI_ENDPROC
30219
30220 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30221 +#define _DEST %_ASM_CX,%_ASM_BX
30222 +#else
30223 +#define _DEST %_ASM_CX
30224 +#endif
30225 +
30226 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30227 +#define __copyuser_seg gs;
30228 +#else
30229 +#define __copyuser_seg
30230 +#endif
30231 +
30232 .text
30233 ENTRY(__put_user_1)
30234 ENTER
30235 +
30236 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30237 + GET_THREAD_INFO(%_ASM_BX)
30238 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30239 jae bad_put_user
30240 ASM_STAC
30241 -1: movb %al,(%_ASM_CX)
30242 +
30243 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30244 + mov pax_user_shadow_base,%_ASM_BX
30245 + cmp %_ASM_BX,%_ASM_CX
30246 + jb 1234f
30247 + xor %ebx,%ebx
30248 +1234:
30249 +#endif
30250 +
30251 +#endif
30252 +
30253 +1: __copyuser_seg movb %al,(_DEST)
30254 xor %eax,%eax
30255 EXIT
30256 ENDPROC(__put_user_1)
30257
30258 ENTRY(__put_user_2)
30259 ENTER
30260 +
30261 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30262 + GET_THREAD_INFO(%_ASM_BX)
30263 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30264 sub $1,%_ASM_BX
30265 cmp %_ASM_BX,%_ASM_CX
30266 jae bad_put_user
30267 ASM_STAC
30268 -2: movw %ax,(%_ASM_CX)
30269 +
30270 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30271 + mov pax_user_shadow_base,%_ASM_BX
30272 + cmp %_ASM_BX,%_ASM_CX
30273 + jb 1234f
30274 + xor %ebx,%ebx
30275 +1234:
30276 +#endif
30277 +
30278 +#endif
30279 +
30280 +2: __copyuser_seg movw %ax,(_DEST)
30281 xor %eax,%eax
30282 EXIT
30283 ENDPROC(__put_user_2)
30284
30285 ENTRY(__put_user_4)
30286 ENTER
30287 +
30288 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30289 + GET_THREAD_INFO(%_ASM_BX)
30290 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30291 sub $3,%_ASM_BX
30292 cmp %_ASM_BX,%_ASM_CX
30293 jae bad_put_user
30294 ASM_STAC
30295 -3: movl %eax,(%_ASM_CX)
30296 +
30297 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30298 + mov pax_user_shadow_base,%_ASM_BX
30299 + cmp %_ASM_BX,%_ASM_CX
30300 + jb 1234f
30301 + xor %ebx,%ebx
30302 +1234:
30303 +#endif
30304 +
30305 +#endif
30306 +
30307 +3: __copyuser_seg movl %eax,(_DEST)
30308 xor %eax,%eax
30309 EXIT
30310 ENDPROC(__put_user_4)
30311
30312 ENTRY(__put_user_8)
30313 ENTER
30314 +
30315 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30316 + GET_THREAD_INFO(%_ASM_BX)
30317 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30318 sub $7,%_ASM_BX
30319 cmp %_ASM_BX,%_ASM_CX
30320 jae bad_put_user
30321 ASM_STAC
30322 -4: mov %_ASM_AX,(%_ASM_CX)
30323 +
30324 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30325 + mov pax_user_shadow_base,%_ASM_BX
30326 + cmp %_ASM_BX,%_ASM_CX
30327 + jb 1234f
30328 + xor %ebx,%ebx
30329 +1234:
30330 +#endif
30331 +
30332 +#endif
30333 +
30334 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
30335 #ifdef CONFIG_X86_32
30336 -5: movl %edx,4(%_ASM_CX)
30337 +5: __copyuser_seg movl %edx,4(_DEST)
30338 #endif
30339 xor %eax,%eax
30340 EXIT
30341 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
30342 index 1cad221..de671ee 100644
30343 --- a/arch/x86/lib/rwlock.S
30344 +++ b/arch/x86/lib/rwlock.S
30345 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
30346 FRAME
30347 0: LOCK_PREFIX
30348 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30349 +
30350 +#ifdef CONFIG_PAX_REFCOUNT
30351 + jno 1234f
30352 + LOCK_PREFIX
30353 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
30354 + int $4
30355 +1234:
30356 + _ASM_EXTABLE(1234b, 1234b)
30357 +#endif
30358 +
30359 1: rep; nop
30360 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
30361 jne 1b
30362 LOCK_PREFIX
30363 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
30364 +
30365 +#ifdef CONFIG_PAX_REFCOUNT
30366 + jno 1234f
30367 + LOCK_PREFIX
30368 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30369 + int $4
30370 +1234:
30371 + _ASM_EXTABLE(1234b, 1234b)
30372 +#endif
30373 +
30374 jnz 0b
30375 ENDFRAME
30376 + pax_force_retaddr
30377 ret
30378 CFI_ENDPROC
30379 END(__write_lock_failed)
30380 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
30381 FRAME
30382 0: LOCK_PREFIX
30383 READ_LOCK_SIZE(inc) (%__lock_ptr)
30384 +
30385 +#ifdef CONFIG_PAX_REFCOUNT
30386 + jno 1234f
30387 + LOCK_PREFIX
30388 + READ_LOCK_SIZE(dec) (%__lock_ptr)
30389 + int $4
30390 +1234:
30391 + _ASM_EXTABLE(1234b, 1234b)
30392 +#endif
30393 +
30394 1: rep; nop
30395 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
30396 js 1b
30397 LOCK_PREFIX
30398 READ_LOCK_SIZE(dec) (%__lock_ptr)
30399 +
30400 +#ifdef CONFIG_PAX_REFCOUNT
30401 + jno 1234f
30402 + LOCK_PREFIX
30403 + READ_LOCK_SIZE(inc) (%__lock_ptr)
30404 + int $4
30405 +1234:
30406 + _ASM_EXTABLE(1234b, 1234b)
30407 +#endif
30408 +
30409 js 0b
30410 ENDFRAME
30411 + pax_force_retaddr
30412 ret
30413 CFI_ENDPROC
30414 END(__read_lock_failed)
30415 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30416 index 5dff5f0..cadebf4 100644
30417 --- a/arch/x86/lib/rwsem.S
30418 +++ b/arch/x86/lib/rwsem.S
30419 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30420 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30421 CFI_RESTORE __ASM_REG(dx)
30422 restore_common_regs
30423 + pax_force_retaddr
30424 ret
30425 CFI_ENDPROC
30426 ENDPROC(call_rwsem_down_read_failed)
30427 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30428 movq %rax,%rdi
30429 call rwsem_down_write_failed
30430 restore_common_regs
30431 + pax_force_retaddr
30432 ret
30433 CFI_ENDPROC
30434 ENDPROC(call_rwsem_down_write_failed)
30435 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30436 movq %rax,%rdi
30437 call rwsem_wake
30438 restore_common_regs
30439 -1: ret
30440 +1: pax_force_retaddr
30441 + ret
30442 CFI_ENDPROC
30443 ENDPROC(call_rwsem_wake)
30444
30445 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30446 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30447 CFI_RESTORE __ASM_REG(dx)
30448 restore_common_regs
30449 + pax_force_retaddr
30450 ret
30451 CFI_ENDPROC
30452 ENDPROC(call_rwsem_downgrade_wake)
30453 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30454 index a63efd6..8149fbe 100644
30455 --- a/arch/x86/lib/thunk_64.S
30456 +++ b/arch/x86/lib/thunk_64.S
30457 @@ -8,6 +8,7 @@
30458 #include <linux/linkage.h>
30459 #include <asm/dwarf2.h>
30460 #include <asm/calling.h>
30461 +#include <asm/alternative-asm.h>
30462
30463 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
30464 .macro THUNK name, func, put_ret_addr_in_rdi=0
30465 @@ -15,11 +16,11 @@
30466 \name:
30467 CFI_STARTPROC
30468
30469 - /* this one pushes 9 elems, the next one would be %rIP */
30470 - SAVE_ARGS
30471 + /* this one pushes 15+1 elems, the next one would be %rIP */
30472 + SAVE_ARGS 8
30473
30474 .if \put_ret_addr_in_rdi
30475 - movq_cfi_restore 9*8, rdi
30476 + movq_cfi_restore RIP, rdi
30477 .endif
30478
30479 call \func
30480 @@ -38,8 +39,9 @@
30481
30482 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
30483 CFI_STARTPROC
30484 - SAVE_ARGS
30485 + SAVE_ARGS 8
30486 restore:
30487 - RESTORE_ARGS
30488 + RESTORE_ARGS 1,8
30489 + pax_force_retaddr
30490 ret
30491 CFI_ENDPROC
30492 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
30493 index e2f5e21..4b22130 100644
30494 --- a/arch/x86/lib/usercopy_32.c
30495 +++ b/arch/x86/lib/usercopy_32.c
30496 @@ -42,11 +42,13 @@ do { \
30497 int __d0; \
30498 might_fault(); \
30499 __asm__ __volatile__( \
30500 + __COPYUSER_SET_ES \
30501 ASM_STAC "\n" \
30502 "0: rep; stosl\n" \
30503 " movl %2,%0\n" \
30504 "1: rep; stosb\n" \
30505 "2: " ASM_CLAC "\n" \
30506 + __COPYUSER_RESTORE_ES \
30507 ".section .fixup,\"ax\"\n" \
30508 "3: lea 0(%2,%0,4),%0\n" \
30509 " jmp 2b\n" \
30510 @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
30511
30512 #ifdef CONFIG_X86_INTEL_USERCOPY
30513 static unsigned long
30514 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
30515 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
30516 {
30517 int d0, d1;
30518 __asm__ __volatile__(
30519 @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30520 " .align 2,0x90\n"
30521 "3: movl 0(%4), %%eax\n"
30522 "4: movl 4(%4), %%edx\n"
30523 - "5: movl %%eax, 0(%3)\n"
30524 - "6: movl %%edx, 4(%3)\n"
30525 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
30526 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
30527 "7: movl 8(%4), %%eax\n"
30528 "8: movl 12(%4),%%edx\n"
30529 - "9: movl %%eax, 8(%3)\n"
30530 - "10: movl %%edx, 12(%3)\n"
30531 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
30532 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
30533 "11: movl 16(%4), %%eax\n"
30534 "12: movl 20(%4), %%edx\n"
30535 - "13: movl %%eax, 16(%3)\n"
30536 - "14: movl %%edx, 20(%3)\n"
30537 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
30538 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
30539 "15: movl 24(%4), %%eax\n"
30540 "16: movl 28(%4), %%edx\n"
30541 - "17: movl %%eax, 24(%3)\n"
30542 - "18: movl %%edx, 28(%3)\n"
30543 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
30544 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
30545 "19: movl 32(%4), %%eax\n"
30546 "20: movl 36(%4), %%edx\n"
30547 - "21: movl %%eax, 32(%3)\n"
30548 - "22: movl %%edx, 36(%3)\n"
30549 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
30550 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
30551 "23: movl 40(%4), %%eax\n"
30552 "24: movl 44(%4), %%edx\n"
30553 - "25: movl %%eax, 40(%3)\n"
30554 - "26: movl %%edx, 44(%3)\n"
30555 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
30556 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
30557 "27: movl 48(%4), %%eax\n"
30558 "28: movl 52(%4), %%edx\n"
30559 - "29: movl %%eax, 48(%3)\n"
30560 - "30: movl %%edx, 52(%3)\n"
30561 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
30562 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
30563 "31: movl 56(%4), %%eax\n"
30564 "32: movl 60(%4), %%edx\n"
30565 - "33: movl %%eax, 56(%3)\n"
30566 - "34: movl %%edx, 60(%3)\n"
30567 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
30568 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
30569 " addl $-64, %0\n"
30570 " addl $64, %4\n"
30571 " addl $64, %3\n"
30572 @@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30573 " shrl $2, %0\n"
30574 " andl $3, %%eax\n"
30575 " cld\n"
30576 + __COPYUSER_SET_ES
30577 "99: rep; movsl\n"
30578 "36: movl %%eax, %0\n"
30579 "37: rep; movsb\n"
30580 "100:\n"
30581 + __COPYUSER_RESTORE_ES
30582 + ".section .fixup,\"ax\"\n"
30583 + "101: lea 0(%%eax,%0,4),%0\n"
30584 + " jmp 100b\n"
30585 + ".previous\n"
30586 + _ASM_EXTABLE(1b,100b)
30587 + _ASM_EXTABLE(2b,100b)
30588 + _ASM_EXTABLE(3b,100b)
30589 + _ASM_EXTABLE(4b,100b)
30590 + _ASM_EXTABLE(5b,100b)
30591 + _ASM_EXTABLE(6b,100b)
30592 + _ASM_EXTABLE(7b,100b)
30593 + _ASM_EXTABLE(8b,100b)
30594 + _ASM_EXTABLE(9b,100b)
30595 + _ASM_EXTABLE(10b,100b)
30596 + _ASM_EXTABLE(11b,100b)
30597 + _ASM_EXTABLE(12b,100b)
30598 + _ASM_EXTABLE(13b,100b)
30599 + _ASM_EXTABLE(14b,100b)
30600 + _ASM_EXTABLE(15b,100b)
30601 + _ASM_EXTABLE(16b,100b)
30602 + _ASM_EXTABLE(17b,100b)
30603 + _ASM_EXTABLE(18b,100b)
30604 + _ASM_EXTABLE(19b,100b)
30605 + _ASM_EXTABLE(20b,100b)
30606 + _ASM_EXTABLE(21b,100b)
30607 + _ASM_EXTABLE(22b,100b)
30608 + _ASM_EXTABLE(23b,100b)
30609 + _ASM_EXTABLE(24b,100b)
30610 + _ASM_EXTABLE(25b,100b)
30611 + _ASM_EXTABLE(26b,100b)
30612 + _ASM_EXTABLE(27b,100b)
30613 + _ASM_EXTABLE(28b,100b)
30614 + _ASM_EXTABLE(29b,100b)
30615 + _ASM_EXTABLE(30b,100b)
30616 + _ASM_EXTABLE(31b,100b)
30617 + _ASM_EXTABLE(32b,100b)
30618 + _ASM_EXTABLE(33b,100b)
30619 + _ASM_EXTABLE(34b,100b)
30620 + _ASM_EXTABLE(35b,100b)
30621 + _ASM_EXTABLE(36b,100b)
30622 + _ASM_EXTABLE(37b,100b)
30623 + _ASM_EXTABLE(99b,101b)
30624 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
30625 + : "1"(to), "2"(from), "0"(size)
30626 + : "eax", "edx", "memory");
30627 + return size;
30628 +}
30629 +
30630 +static unsigned long
30631 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
30632 +{
30633 + int d0, d1;
30634 + __asm__ __volatile__(
30635 + " .align 2,0x90\n"
30636 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
30637 + " cmpl $67, %0\n"
30638 + " jbe 3f\n"
30639 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
30640 + " .align 2,0x90\n"
30641 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
30642 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
30643 + "5: movl %%eax, 0(%3)\n"
30644 + "6: movl %%edx, 4(%3)\n"
30645 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
30646 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
30647 + "9: movl %%eax, 8(%3)\n"
30648 + "10: movl %%edx, 12(%3)\n"
30649 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
30650 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
30651 + "13: movl %%eax, 16(%3)\n"
30652 + "14: movl %%edx, 20(%3)\n"
30653 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
30654 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
30655 + "17: movl %%eax, 24(%3)\n"
30656 + "18: movl %%edx, 28(%3)\n"
30657 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
30658 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
30659 + "21: movl %%eax, 32(%3)\n"
30660 + "22: movl %%edx, 36(%3)\n"
30661 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
30662 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
30663 + "25: movl %%eax, 40(%3)\n"
30664 + "26: movl %%edx, 44(%3)\n"
30665 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
30666 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
30667 + "29: movl %%eax, 48(%3)\n"
30668 + "30: movl %%edx, 52(%3)\n"
30669 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
30670 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
30671 + "33: movl %%eax, 56(%3)\n"
30672 + "34: movl %%edx, 60(%3)\n"
30673 + " addl $-64, %0\n"
30674 + " addl $64, %4\n"
30675 + " addl $64, %3\n"
30676 + " cmpl $63, %0\n"
30677 + " ja 1b\n"
30678 + "35: movl %0, %%eax\n"
30679 + " shrl $2, %0\n"
30680 + " andl $3, %%eax\n"
30681 + " cld\n"
30682 + "99: rep; "__copyuser_seg" movsl\n"
30683 + "36: movl %%eax, %0\n"
30684 + "37: rep; "__copyuser_seg" movsb\n"
30685 + "100:\n"
30686 ".section .fixup,\"ax\"\n"
30687 "101: lea 0(%%eax,%0,4),%0\n"
30688 " jmp 100b\n"
30689 @@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30690 int d0, d1;
30691 __asm__ __volatile__(
30692 " .align 2,0x90\n"
30693 - "0: movl 32(%4), %%eax\n"
30694 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30695 " cmpl $67, %0\n"
30696 " jbe 2f\n"
30697 - "1: movl 64(%4), %%eax\n"
30698 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30699 " .align 2,0x90\n"
30700 - "2: movl 0(%4), %%eax\n"
30701 - "21: movl 4(%4), %%edx\n"
30702 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30703 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30704 " movl %%eax, 0(%3)\n"
30705 " movl %%edx, 4(%3)\n"
30706 - "3: movl 8(%4), %%eax\n"
30707 - "31: movl 12(%4),%%edx\n"
30708 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30709 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30710 " movl %%eax, 8(%3)\n"
30711 " movl %%edx, 12(%3)\n"
30712 - "4: movl 16(%4), %%eax\n"
30713 - "41: movl 20(%4), %%edx\n"
30714 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30715 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30716 " movl %%eax, 16(%3)\n"
30717 " movl %%edx, 20(%3)\n"
30718 - "10: movl 24(%4), %%eax\n"
30719 - "51: movl 28(%4), %%edx\n"
30720 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30721 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30722 " movl %%eax, 24(%3)\n"
30723 " movl %%edx, 28(%3)\n"
30724 - "11: movl 32(%4), %%eax\n"
30725 - "61: movl 36(%4), %%edx\n"
30726 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30727 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30728 " movl %%eax, 32(%3)\n"
30729 " movl %%edx, 36(%3)\n"
30730 - "12: movl 40(%4), %%eax\n"
30731 - "71: movl 44(%4), %%edx\n"
30732 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30733 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30734 " movl %%eax, 40(%3)\n"
30735 " movl %%edx, 44(%3)\n"
30736 - "13: movl 48(%4), %%eax\n"
30737 - "81: movl 52(%4), %%edx\n"
30738 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30739 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30740 " movl %%eax, 48(%3)\n"
30741 " movl %%edx, 52(%3)\n"
30742 - "14: movl 56(%4), %%eax\n"
30743 - "91: movl 60(%4), %%edx\n"
30744 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30745 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30746 " movl %%eax, 56(%3)\n"
30747 " movl %%edx, 60(%3)\n"
30748 " addl $-64, %0\n"
30749 @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30750 " shrl $2, %0\n"
30751 " andl $3, %%eax\n"
30752 " cld\n"
30753 - "6: rep; movsl\n"
30754 + "6: rep; "__copyuser_seg" movsl\n"
30755 " movl %%eax,%0\n"
30756 - "7: rep; movsb\n"
30757 + "7: rep; "__copyuser_seg" movsb\n"
30758 "8:\n"
30759 ".section .fixup,\"ax\"\n"
30760 "9: lea 0(%%eax,%0,4),%0\n"
30761 @@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30762
30763 __asm__ __volatile__(
30764 " .align 2,0x90\n"
30765 - "0: movl 32(%4), %%eax\n"
30766 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30767 " cmpl $67, %0\n"
30768 " jbe 2f\n"
30769 - "1: movl 64(%4), %%eax\n"
30770 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30771 " .align 2,0x90\n"
30772 - "2: movl 0(%4), %%eax\n"
30773 - "21: movl 4(%4), %%edx\n"
30774 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30775 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30776 " movnti %%eax, 0(%3)\n"
30777 " movnti %%edx, 4(%3)\n"
30778 - "3: movl 8(%4), %%eax\n"
30779 - "31: movl 12(%4),%%edx\n"
30780 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30781 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30782 " movnti %%eax, 8(%3)\n"
30783 " movnti %%edx, 12(%3)\n"
30784 - "4: movl 16(%4), %%eax\n"
30785 - "41: movl 20(%4), %%edx\n"
30786 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30787 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30788 " movnti %%eax, 16(%3)\n"
30789 " movnti %%edx, 20(%3)\n"
30790 - "10: movl 24(%4), %%eax\n"
30791 - "51: movl 28(%4), %%edx\n"
30792 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30793 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30794 " movnti %%eax, 24(%3)\n"
30795 " movnti %%edx, 28(%3)\n"
30796 - "11: movl 32(%4), %%eax\n"
30797 - "61: movl 36(%4), %%edx\n"
30798 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30799 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30800 " movnti %%eax, 32(%3)\n"
30801 " movnti %%edx, 36(%3)\n"
30802 - "12: movl 40(%4), %%eax\n"
30803 - "71: movl 44(%4), %%edx\n"
30804 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30805 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30806 " movnti %%eax, 40(%3)\n"
30807 " movnti %%edx, 44(%3)\n"
30808 - "13: movl 48(%4), %%eax\n"
30809 - "81: movl 52(%4), %%edx\n"
30810 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30811 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30812 " movnti %%eax, 48(%3)\n"
30813 " movnti %%edx, 52(%3)\n"
30814 - "14: movl 56(%4), %%eax\n"
30815 - "91: movl 60(%4), %%edx\n"
30816 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30817 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30818 " movnti %%eax, 56(%3)\n"
30819 " movnti %%edx, 60(%3)\n"
30820 " addl $-64, %0\n"
30821 @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30822 " shrl $2, %0\n"
30823 " andl $3, %%eax\n"
30824 " cld\n"
30825 - "6: rep; movsl\n"
30826 + "6: rep; "__copyuser_seg" movsl\n"
30827 " movl %%eax,%0\n"
30828 - "7: rep; movsb\n"
30829 + "7: rep; "__copyuser_seg" movsb\n"
30830 "8:\n"
30831 ".section .fixup,\"ax\"\n"
30832 "9: lea 0(%%eax,%0,4),%0\n"
30833 @@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
30834
30835 __asm__ __volatile__(
30836 " .align 2,0x90\n"
30837 - "0: movl 32(%4), %%eax\n"
30838 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30839 " cmpl $67, %0\n"
30840 " jbe 2f\n"
30841 - "1: movl 64(%4), %%eax\n"
30842 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30843 " .align 2,0x90\n"
30844 - "2: movl 0(%4), %%eax\n"
30845 - "21: movl 4(%4), %%edx\n"
30846 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30847 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30848 " movnti %%eax, 0(%3)\n"
30849 " movnti %%edx, 4(%3)\n"
30850 - "3: movl 8(%4), %%eax\n"
30851 - "31: movl 12(%4),%%edx\n"
30852 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30853 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30854 " movnti %%eax, 8(%3)\n"
30855 " movnti %%edx, 12(%3)\n"
30856 - "4: movl 16(%4), %%eax\n"
30857 - "41: movl 20(%4), %%edx\n"
30858 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30859 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30860 " movnti %%eax, 16(%3)\n"
30861 " movnti %%edx, 20(%3)\n"
30862 - "10: movl 24(%4), %%eax\n"
30863 - "51: movl 28(%4), %%edx\n"
30864 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30865 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30866 " movnti %%eax, 24(%3)\n"
30867 " movnti %%edx, 28(%3)\n"
30868 - "11: movl 32(%4), %%eax\n"
30869 - "61: movl 36(%4), %%edx\n"
30870 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30871 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30872 " movnti %%eax, 32(%3)\n"
30873 " movnti %%edx, 36(%3)\n"
30874 - "12: movl 40(%4), %%eax\n"
30875 - "71: movl 44(%4), %%edx\n"
30876 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30877 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30878 " movnti %%eax, 40(%3)\n"
30879 " movnti %%edx, 44(%3)\n"
30880 - "13: movl 48(%4), %%eax\n"
30881 - "81: movl 52(%4), %%edx\n"
30882 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30883 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30884 " movnti %%eax, 48(%3)\n"
30885 " movnti %%edx, 52(%3)\n"
30886 - "14: movl 56(%4), %%eax\n"
30887 - "91: movl 60(%4), %%edx\n"
30888 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30889 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30890 " movnti %%eax, 56(%3)\n"
30891 " movnti %%edx, 60(%3)\n"
30892 " addl $-64, %0\n"
30893 @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
30894 " shrl $2, %0\n"
30895 " andl $3, %%eax\n"
30896 " cld\n"
30897 - "6: rep; movsl\n"
30898 + "6: rep; "__copyuser_seg" movsl\n"
30899 " movl %%eax,%0\n"
30900 - "7: rep; movsb\n"
30901 + "7: rep; "__copyuser_seg" movsb\n"
30902 "8:\n"
30903 ".section .fixup,\"ax\"\n"
30904 "9: lea 0(%%eax,%0,4),%0\n"
30905 @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
30906 */
30907 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
30908 unsigned long size);
30909 -unsigned long __copy_user_intel(void __user *to, const void *from,
30910 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
30911 + unsigned long size);
30912 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
30913 unsigned long size);
30914 unsigned long __copy_user_zeroing_intel_nocache(void *to,
30915 const void __user *from, unsigned long size);
30916 #endif /* CONFIG_X86_INTEL_USERCOPY */
30917
30918 /* Generic arbitrary sized copy. */
30919 -#define __copy_user(to, from, size) \
30920 +#define __copy_user(to, from, size, prefix, set, restore) \
30921 do { \
30922 int __d0, __d1, __d2; \
30923 __asm__ __volatile__( \
30924 + set \
30925 " cmp $7,%0\n" \
30926 " jbe 1f\n" \
30927 " movl %1,%0\n" \
30928 " negl %0\n" \
30929 " andl $7,%0\n" \
30930 " subl %0,%3\n" \
30931 - "4: rep; movsb\n" \
30932 + "4: rep; "prefix"movsb\n" \
30933 " movl %3,%0\n" \
30934 " shrl $2,%0\n" \
30935 " andl $3,%3\n" \
30936 " .align 2,0x90\n" \
30937 - "0: rep; movsl\n" \
30938 + "0: rep; "prefix"movsl\n" \
30939 " movl %3,%0\n" \
30940 - "1: rep; movsb\n" \
30941 + "1: rep; "prefix"movsb\n" \
30942 "2:\n" \
30943 + restore \
30944 ".section .fixup,\"ax\"\n" \
30945 "5: addl %3,%0\n" \
30946 " jmp 2b\n" \
30947 @@ -538,14 +650,14 @@ do { \
30948 " negl %0\n" \
30949 " andl $7,%0\n" \
30950 " subl %0,%3\n" \
30951 - "4: rep; movsb\n" \
30952 + "4: rep; "__copyuser_seg"movsb\n" \
30953 " movl %3,%0\n" \
30954 " shrl $2,%0\n" \
30955 " andl $3,%3\n" \
30956 " .align 2,0x90\n" \
30957 - "0: rep; movsl\n" \
30958 + "0: rep; "__copyuser_seg"movsl\n" \
30959 " movl %3,%0\n" \
30960 - "1: rep; movsb\n" \
30961 + "1: rep; "__copyuser_seg"movsb\n" \
30962 "2:\n" \
30963 ".section .fixup,\"ax\"\n" \
30964 "5: addl %3,%0\n" \
30965 @@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
30966 {
30967 stac();
30968 if (movsl_is_ok(to, from, n))
30969 - __copy_user(to, from, n);
30970 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
30971 else
30972 - n = __copy_user_intel(to, from, n);
30973 + n = __generic_copy_to_user_intel(to, from, n);
30974 clac();
30975 return n;
30976 }
30977 @@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
30978 {
30979 stac();
30980 if (movsl_is_ok(to, from, n))
30981 - __copy_user(to, from, n);
30982 + __copy_user(to, from, n, __copyuser_seg, "", "");
30983 else
30984 - n = __copy_user_intel((void __user *)to,
30985 - (const void *)from, n);
30986 + n = __generic_copy_from_user_intel(to, from, n);
30987 clac();
30988 return n;
30989 }
30990 @@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
30991 if (n > 64 && cpu_has_xmm2)
30992 n = __copy_user_intel_nocache(to, from, n);
30993 else
30994 - __copy_user(to, from, n);
30995 + __copy_user(to, from, n, __copyuser_seg, "", "");
30996 #else
30997 - __copy_user(to, from, n);
30998 + __copy_user(to, from, n, __copyuser_seg, "", "");
30999 #endif
31000 clac();
31001 return n;
31002 }
31003 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31004
31005 -/**
31006 - * copy_to_user: - Copy a block of data into user space.
31007 - * @to: Destination address, in user space.
31008 - * @from: Source address, in kernel space.
31009 - * @n: Number of bytes to copy.
31010 - *
31011 - * Context: User context only. This function may sleep.
31012 - *
31013 - * Copy data from kernel space to user space.
31014 - *
31015 - * Returns number of bytes that could not be copied.
31016 - * On success, this will be zero.
31017 - */
31018 -unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31019 +#ifdef CONFIG_PAX_MEMORY_UDEREF
31020 +void __set_fs(mm_segment_t x)
31021 {
31022 - if (access_ok(VERIFY_WRITE, to, n))
31023 - n = __copy_to_user(to, from, n);
31024 - return n;
31025 + switch (x.seg) {
31026 + case 0:
31027 + loadsegment(gs, 0);
31028 + break;
31029 + case TASK_SIZE_MAX:
31030 + loadsegment(gs, __USER_DS);
31031 + break;
31032 + case -1UL:
31033 + loadsegment(gs, __KERNEL_DS);
31034 + break;
31035 + default:
31036 + BUG();
31037 + }
31038 }
31039 -EXPORT_SYMBOL(_copy_to_user);
31040 +EXPORT_SYMBOL(__set_fs);
31041
31042 -/**
31043 - * copy_from_user: - Copy a block of data from user space.
31044 - * @to: Destination address, in kernel space.
31045 - * @from: Source address, in user space.
31046 - * @n: Number of bytes to copy.
31047 - *
31048 - * Context: User context only. This function may sleep.
31049 - *
31050 - * Copy data from user space to kernel space.
31051 - *
31052 - * Returns number of bytes that could not be copied.
31053 - * On success, this will be zero.
31054 - *
31055 - * If some data could not be copied, this function will pad the copied
31056 - * data to the requested size using zero bytes.
31057 - */
31058 -unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31059 +void set_fs(mm_segment_t x)
31060 {
31061 - if (access_ok(VERIFY_READ, from, n))
31062 - n = __copy_from_user(to, from, n);
31063 - else
31064 - memset(to, 0, n);
31065 - return n;
31066 + current_thread_info()->addr_limit = x;
31067 + __set_fs(x);
31068 }
31069 -EXPORT_SYMBOL(_copy_from_user);
31070 +EXPORT_SYMBOL(set_fs);
31071 +#endif
31072 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31073 index c905e89..01ab928 100644
31074 --- a/arch/x86/lib/usercopy_64.c
31075 +++ b/arch/x86/lib/usercopy_64.c
31076 @@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31077 might_fault();
31078 /* no memory constraint because it doesn't change any memory gcc knows
31079 about */
31080 + pax_open_userland();
31081 stac();
31082 asm volatile(
31083 " testq %[size8],%[size8]\n"
31084 @@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31085 _ASM_EXTABLE(0b,3b)
31086 _ASM_EXTABLE(1b,2b)
31087 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31088 - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31089 + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31090 [zero] "r" (0UL), [eight] "r" (8UL));
31091 clac();
31092 + pax_close_userland();
31093 return size;
31094 }
31095 EXPORT_SYMBOL(__clear_user);
31096 @@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31097 }
31098 EXPORT_SYMBOL(clear_user);
31099
31100 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31101 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31102 {
31103 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31104 - return copy_user_generic((__force void *)to, (__force void *)from, len);
31105 - }
31106 - return len;
31107 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31108 + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31109 + return len;
31110 }
31111 EXPORT_SYMBOL(copy_in_user);
31112
31113 @@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31114 * it is not necessary to optimize tail handling.
31115 */
31116 __visible unsigned long
31117 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31118 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31119 {
31120 char c;
31121 unsigned zero_len;
31122
31123 + clac();
31124 + pax_close_userland();
31125 for (; len; --len, to++) {
31126 if (__get_user_nocheck(c, from++, sizeof(char)))
31127 break;
31128 @@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31129 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31130 if (__put_user_nocheck(c, to++, sizeof(char)))
31131 break;
31132 - clac();
31133 return len;
31134 }
31135 diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31136 index 6a19ad9..1c48f9a 100644
31137 --- a/arch/x86/mm/Makefile
31138 +++ b/arch/x86/mm/Makefile
31139 @@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31140 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31141
31142 obj-$(CONFIG_MEMTEST) += memtest.o
31143 +
31144 +quote:="
31145 +obj-$(CONFIG_X86_64) += uderef_64.o
31146 +CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31147 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31148 index 903ec1e..c4166b2 100644
31149 --- a/arch/x86/mm/extable.c
31150 +++ b/arch/x86/mm/extable.c
31151 @@ -6,12 +6,24 @@
31152 static inline unsigned long
31153 ex_insn_addr(const struct exception_table_entry *x)
31154 {
31155 - return (unsigned long)&x->insn + x->insn;
31156 + unsigned long reloc = 0;
31157 +
31158 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31159 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31160 +#endif
31161 +
31162 + return (unsigned long)&x->insn + x->insn + reloc;
31163 }
31164 static inline unsigned long
31165 ex_fixup_addr(const struct exception_table_entry *x)
31166 {
31167 - return (unsigned long)&x->fixup + x->fixup;
31168 + unsigned long reloc = 0;
31169 +
31170 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31171 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31172 +#endif
31173 +
31174 + return (unsigned long)&x->fixup + x->fixup + reloc;
31175 }
31176
31177 int fixup_exception(struct pt_regs *regs)
31178 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31179 unsigned long new_ip;
31180
31181 #ifdef CONFIG_PNPBIOS
31182 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31183 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31184 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31185 extern u32 pnp_bios_is_utter_crap;
31186 pnp_bios_is_utter_crap = 1;
31187 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31188 i += 4;
31189 p->fixup -= i;
31190 i += 4;
31191 +
31192 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31193 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31194 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31195 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31196 +#endif
31197 +
31198 }
31199 }
31200
31201 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31202 index 6dea040..31e52ff 100644
31203 --- a/arch/x86/mm/fault.c
31204 +++ b/arch/x86/mm/fault.c
31205 @@ -14,11 +14,18 @@
31206 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31207 #include <linux/prefetch.h> /* prefetchw */
31208 #include <linux/context_tracking.h> /* exception_enter(), ... */
31209 +#include <linux/unistd.h>
31210 +#include <linux/compiler.h>
31211
31212 #include <asm/traps.h> /* dotraplinkage, ... */
31213 #include <asm/pgalloc.h> /* pgd_*(), ... */
31214 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31215 #include <asm/fixmap.h> /* VSYSCALL_START */
31216 +#include <asm/tlbflush.h>
31217 +
31218 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31219 +#include <asm/stacktrace.h>
31220 +#endif
31221
31222 #define CREATE_TRACE_POINTS
31223 #include <asm/trace/exceptions.h>
31224 @@ -59,7 +66,7 @@ static inline int __kprobes kprobes_fault(struct pt_regs *regs)
31225 int ret = 0;
31226
31227 /* kprobe_running() needs smp_processor_id() */
31228 - if (kprobes_built_in() && !user_mode_vm(regs)) {
31229 + if (kprobes_built_in() && !user_mode(regs)) {
31230 preempt_disable();
31231 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31232 ret = 1;
31233 @@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31234 return !instr_lo || (instr_lo>>1) == 1;
31235 case 0x00:
31236 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31237 - if (probe_kernel_address(instr, opcode))
31238 + if (user_mode(regs)) {
31239 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31240 + return 0;
31241 + } else if (probe_kernel_address(instr, opcode))
31242 return 0;
31243
31244 *prefetch = (instr_lo == 0xF) &&
31245 @@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31246 while (instr < max_instr) {
31247 unsigned char opcode;
31248
31249 - if (probe_kernel_address(instr, opcode))
31250 + if (user_mode(regs)) {
31251 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31252 + break;
31253 + } else if (probe_kernel_address(instr, opcode))
31254 break;
31255
31256 instr++;
31257 @@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31258 force_sig_info(si_signo, &info, tsk);
31259 }
31260
31261 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31262 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31263 +#endif
31264 +
31265 +#ifdef CONFIG_PAX_EMUTRAMP
31266 +static int pax_handle_fetch_fault(struct pt_regs *regs);
31267 +#endif
31268 +
31269 +#ifdef CONFIG_PAX_PAGEEXEC
31270 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31271 +{
31272 + pgd_t *pgd;
31273 + pud_t *pud;
31274 + pmd_t *pmd;
31275 +
31276 + pgd = pgd_offset(mm, address);
31277 + if (!pgd_present(*pgd))
31278 + return NULL;
31279 + pud = pud_offset(pgd, address);
31280 + if (!pud_present(*pud))
31281 + return NULL;
31282 + pmd = pmd_offset(pud, address);
31283 + if (!pmd_present(*pmd))
31284 + return NULL;
31285 + return pmd;
31286 +}
31287 +#endif
31288 +
31289 DEFINE_SPINLOCK(pgd_lock);
31290 LIST_HEAD(pgd_list);
31291
31292 @@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31293 for (address = VMALLOC_START & PMD_MASK;
31294 address >= TASK_SIZE && address < FIXADDR_TOP;
31295 address += PMD_SIZE) {
31296 +
31297 +#ifdef CONFIG_PAX_PER_CPU_PGD
31298 + unsigned long cpu;
31299 +#else
31300 struct page *page;
31301 +#endif
31302
31303 spin_lock(&pgd_lock);
31304 +
31305 +#ifdef CONFIG_PAX_PER_CPU_PGD
31306 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31307 + pgd_t *pgd = get_cpu_pgd(cpu, user);
31308 + pmd_t *ret;
31309 +
31310 + ret = vmalloc_sync_one(pgd, address);
31311 + if (!ret)
31312 + break;
31313 + pgd = get_cpu_pgd(cpu, kernel);
31314 +#else
31315 list_for_each_entry(page, &pgd_list, lru) {
31316 + pgd_t *pgd;
31317 spinlock_t *pgt_lock;
31318 pmd_t *ret;
31319
31320 @@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31321 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31322
31323 spin_lock(pgt_lock);
31324 - ret = vmalloc_sync_one(page_address(page), address);
31325 + pgd = page_address(page);
31326 +#endif
31327 +
31328 + ret = vmalloc_sync_one(pgd, address);
31329 +
31330 +#ifndef CONFIG_PAX_PER_CPU_PGD
31331 spin_unlock(pgt_lock);
31332 +#endif
31333
31334 if (!ret)
31335 break;
31336 @@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
31337 * an interrupt in the middle of a task switch..
31338 */
31339 pgd_paddr = read_cr3();
31340 +
31341 +#ifdef CONFIG_PAX_PER_CPU_PGD
31342 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31343 + vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31344 +#endif
31345 +
31346 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31347 if (!pmd_k)
31348 return -1;
31349 @@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
31350 * happen within a race in page table update. In the later
31351 * case just flush:
31352 */
31353 - pgd = pgd_offset(current->active_mm, address);
31354 +
31355 pgd_ref = pgd_offset_k(address);
31356 if (pgd_none(*pgd_ref))
31357 return -1;
31358
31359 +#ifdef CONFIG_PAX_PER_CPU_PGD
31360 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31361 + pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31362 + if (pgd_none(*pgd)) {
31363 + set_pgd(pgd, *pgd_ref);
31364 + arch_flush_lazy_mmu_mode();
31365 + } else {
31366 + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31367 + }
31368 + pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31369 +#else
31370 + pgd = pgd_offset(current->active_mm, address);
31371 +#endif
31372 +
31373 if (pgd_none(*pgd)) {
31374 set_pgd(pgd, *pgd_ref);
31375 arch_flush_lazy_mmu_mode();
31376 @@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31377 static int is_errata100(struct pt_regs *regs, unsigned long address)
31378 {
31379 #ifdef CONFIG_X86_64
31380 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31381 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31382 return 1;
31383 #endif
31384 return 0;
31385 @@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31386 }
31387
31388 static const char nx_warning[] = KERN_CRIT
31389 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31390 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31391
31392 static void
31393 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31394 @@ -582,15 +666,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31395 if (!oops_may_print())
31396 return;
31397
31398 - if (error_code & PF_INSTR) {
31399 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31400 unsigned int level;
31401
31402 pte_t *pte = lookup_address(address, &level);
31403
31404 if (pte && pte_present(*pte) && !pte_exec(*pte))
31405 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31406 + printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31407 }
31408
31409 +#ifdef CONFIG_PAX_KERNEXEC
31410 + if (init_mm.start_code <= address && address < init_mm.end_code) {
31411 + if (current->signal->curr_ip)
31412 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31413 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
31414 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31415 + else
31416 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31417 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31418 + }
31419 +#endif
31420 +
31421 printk(KERN_ALERT "BUG: unable to handle kernel ");
31422 if (address < PAGE_SIZE)
31423 printk(KERN_CONT "NULL pointer dereference");
31424 @@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31425 return;
31426 }
31427 #endif
31428 +
31429 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31430 + if (pax_is_fetch_fault(regs, error_code, address)) {
31431 +
31432 +#ifdef CONFIG_PAX_EMUTRAMP
31433 + switch (pax_handle_fetch_fault(regs)) {
31434 + case 2:
31435 + return;
31436 + }
31437 +#endif
31438 +
31439 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31440 + do_group_exit(SIGKILL);
31441 + }
31442 +#endif
31443 +
31444 /* Kernel addresses are always protection faults: */
31445 if (address >= TASK_SIZE)
31446 error_code |= PF_PROT;
31447 @@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
31448 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
31449 printk(KERN_ERR
31450 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
31451 - tsk->comm, tsk->pid, address);
31452 + tsk->comm, task_pid_nr(tsk), address);
31453 code = BUS_MCEERR_AR;
31454 }
31455 #endif
31456 @@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
31457 return 1;
31458 }
31459
31460 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31461 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
31462 +{
31463 + pte_t *pte;
31464 + pmd_t *pmd;
31465 + spinlock_t *ptl;
31466 + unsigned char pte_mask;
31467 +
31468 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
31469 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
31470 + return 0;
31471 +
31472 + /* PaX: it's our fault, let's handle it if we can */
31473 +
31474 + /* PaX: take a look at read faults before acquiring any locks */
31475 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
31476 + /* instruction fetch attempt from a protected page in user mode */
31477 + up_read(&mm->mmap_sem);
31478 +
31479 +#ifdef CONFIG_PAX_EMUTRAMP
31480 + switch (pax_handle_fetch_fault(regs)) {
31481 + case 2:
31482 + return 1;
31483 + }
31484 +#endif
31485 +
31486 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31487 + do_group_exit(SIGKILL);
31488 + }
31489 +
31490 + pmd = pax_get_pmd(mm, address);
31491 + if (unlikely(!pmd))
31492 + return 0;
31493 +
31494 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
31495 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
31496 + pte_unmap_unlock(pte, ptl);
31497 + return 0;
31498 + }
31499 +
31500 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
31501 + /* write attempt to a protected page in user mode */
31502 + pte_unmap_unlock(pte, ptl);
31503 + return 0;
31504 + }
31505 +
31506 +#ifdef CONFIG_SMP
31507 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
31508 +#else
31509 + if (likely(address > get_limit(regs->cs)))
31510 +#endif
31511 + {
31512 + set_pte(pte, pte_mkread(*pte));
31513 + __flush_tlb_one(address);
31514 + pte_unmap_unlock(pte, ptl);
31515 + up_read(&mm->mmap_sem);
31516 + return 1;
31517 + }
31518 +
31519 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
31520 +
31521 + /*
31522 + * PaX: fill DTLB with user rights and retry
31523 + */
31524 + __asm__ __volatile__ (
31525 + "orb %2,(%1)\n"
31526 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
31527 +/*
31528 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
31529 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
31530 + * page fault when examined during a TLB load attempt. this is true not only
31531 + * for PTEs holding a non-present entry but also present entries that will
31532 + * raise a page fault (such as those set up by PaX, or the copy-on-write
31533 + * mechanism). in effect it means that we do *not* need to flush the TLBs
31534 + * for our target pages since their PTEs are simply not in the TLBs at all.
31535 +
31536 + * the best thing in omitting it is that we gain around 15-20% speed in the
31537 + * fast path of the page fault handler and can get rid of tracing since we
31538 + * can no longer flush unintended entries.
31539 + */
31540 + "invlpg (%0)\n"
31541 +#endif
31542 + __copyuser_seg"testb $0,(%0)\n"
31543 + "xorb %3,(%1)\n"
31544 + :
31545 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
31546 + : "memory", "cc");
31547 + pte_unmap_unlock(pte, ptl);
31548 + up_read(&mm->mmap_sem);
31549 + return 1;
31550 +}
31551 +#endif
31552 +
31553 /*
31554 * Handle a spurious fault caused by a stale TLB entry.
31555 *
31556 @@ -976,6 +1181,9 @@ int show_unhandled_signals = 1;
31557 static inline int
31558 access_error(unsigned long error_code, struct vm_area_struct *vma)
31559 {
31560 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
31561 + return 1;
31562 +
31563 if (error_code & PF_WRITE) {
31564 /* write, present and write, not present: */
31565 if (unlikely(!(vma->vm_flags & VM_WRITE)))
31566 @@ -1010,7 +1218,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
31567 if (error_code & PF_USER)
31568 return false;
31569
31570 - if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
31571 + if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
31572 return false;
31573
31574 return true;
31575 @@ -1037,6 +1245,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31576 /* Get the faulting address: */
31577 address = read_cr2();
31578
31579 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31580 + if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
31581 + if (!search_exception_tables(regs->ip)) {
31582 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31583 + bad_area_nosemaphore(regs, error_code, address);
31584 + return;
31585 + }
31586 + if (address < pax_user_shadow_base) {
31587 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31588 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
31589 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
31590 + } else
31591 + address -= pax_user_shadow_base;
31592 + }
31593 +#endif
31594 +
31595 /*
31596 * Detect and handle instructions that would cause a page fault for
31597 * both a tracked kernel page and a userspace page.
31598 @@ -1114,7 +1338,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31599 * User-mode registers count as a user access even for any
31600 * potential system fault or CPU buglet:
31601 */
31602 - if (user_mode_vm(regs)) {
31603 + if (user_mode(regs)) {
31604 local_irq_enable();
31605 error_code |= PF_USER;
31606 flags |= FAULT_FLAG_USER;
31607 @@ -1161,6 +1385,11 @@ retry:
31608 might_sleep();
31609 }
31610
31611 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31612 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
31613 + return;
31614 +#endif
31615 +
31616 vma = find_vma(mm, address);
31617 if (unlikely(!vma)) {
31618 bad_area(regs, error_code, address);
31619 @@ -1172,18 +1401,24 @@ retry:
31620 bad_area(regs, error_code, address);
31621 return;
31622 }
31623 - if (error_code & PF_USER) {
31624 - /*
31625 - * Accessing the stack below %sp is always a bug.
31626 - * The large cushion allows instructions like enter
31627 - * and pusha to work. ("enter $65535, $31" pushes
31628 - * 32 pointers and then decrements %sp by 65535.)
31629 - */
31630 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
31631 - bad_area(regs, error_code, address);
31632 - return;
31633 - }
31634 + /*
31635 + * Accessing the stack below %sp is always a bug.
31636 + * The large cushion allows instructions like enter
31637 + * and pusha to work. ("enter $65535, $31" pushes
31638 + * 32 pointers and then decrements %sp by 65535.)
31639 + */
31640 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
31641 + bad_area(regs, error_code, address);
31642 + return;
31643 }
31644 +
31645 +#ifdef CONFIG_PAX_SEGMEXEC
31646 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
31647 + bad_area(regs, error_code, address);
31648 + return;
31649 + }
31650 +#endif
31651 +
31652 if (unlikely(expand_stack(vma, address))) {
31653 bad_area(regs, error_code, address);
31654 return;
31655 @@ -1277,3 +1512,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
31656 __do_page_fault(regs, error_code);
31657 exception_exit(prev_state);
31658 }
31659 +
31660 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31661 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
31662 +{
31663 + struct mm_struct *mm = current->mm;
31664 + unsigned long ip = regs->ip;
31665 +
31666 + if (v8086_mode(regs))
31667 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
31668 +
31669 +#ifdef CONFIG_PAX_PAGEEXEC
31670 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
31671 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
31672 + return true;
31673 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
31674 + return true;
31675 + return false;
31676 + }
31677 +#endif
31678 +
31679 +#ifdef CONFIG_PAX_SEGMEXEC
31680 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
31681 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
31682 + return true;
31683 + return false;
31684 + }
31685 +#endif
31686 +
31687 + return false;
31688 +}
31689 +#endif
31690 +
31691 +#ifdef CONFIG_PAX_EMUTRAMP
31692 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
31693 +{
31694 + int err;
31695 +
31696 + do { /* PaX: libffi trampoline emulation */
31697 + unsigned char mov, jmp;
31698 + unsigned int addr1, addr2;
31699 +
31700 +#ifdef CONFIG_X86_64
31701 + if ((regs->ip + 9) >> 32)
31702 + break;
31703 +#endif
31704 +
31705 + err = get_user(mov, (unsigned char __user *)regs->ip);
31706 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31707 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31708 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31709 +
31710 + if (err)
31711 + break;
31712 +
31713 + if (mov == 0xB8 && jmp == 0xE9) {
31714 + regs->ax = addr1;
31715 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31716 + return 2;
31717 + }
31718 + } while (0);
31719 +
31720 + do { /* PaX: gcc trampoline emulation #1 */
31721 + unsigned char mov1, mov2;
31722 + unsigned short jmp;
31723 + unsigned int addr1, addr2;
31724 +
31725 +#ifdef CONFIG_X86_64
31726 + if ((regs->ip + 11) >> 32)
31727 + break;
31728 +#endif
31729 +
31730 + err = get_user(mov1, (unsigned char __user *)regs->ip);
31731 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31732 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
31733 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31734 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
31735 +
31736 + if (err)
31737 + break;
31738 +
31739 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
31740 + regs->cx = addr1;
31741 + regs->ax = addr2;
31742 + regs->ip = addr2;
31743 + return 2;
31744 + }
31745 + } while (0);
31746 +
31747 + do { /* PaX: gcc trampoline emulation #2 */
31748 + unsigned char mov, jmp;
31749 + unsigned int addr1, addr2;
31750 +
31751 +#ifdef CONFIG_X86_64
31752 + if ((regs->ip + 9) >> 32)
31753 + break;
31754 +#endif
31755 +
31756 + err = get_user(mov, (unsigned char __user *)regs->ip);
31757 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31758 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31759 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31760 +
31761 + if (err)
31762 + break;
31763 +
31764 + if (mov == 0xB9 && jmp == 0xE9) {
31765 + regs->cx = addr1;
31766 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31767 + return 2;
31768 + }
31769 + } while (0);
31770 +
31771 + return 1; /* PaX in action */
31772 +}
31773 +
31774 +#ifdef CONFIG_X86_64
31775 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
31776 +{
31777 + int err;
31778 +
31779 + do { /* PaX: libffi trampoline emulation */
31780 + unsigned short mov1, mov2, jmp1;
31781 + unsigned char stcclc, jmp2;
31782 + unsigned long addr1, addr2;
31783 +
31784 + err = get_user(mov1, (unsigned short __user *)regs->ip);
31785 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31786 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31787 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31788 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
31789 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
31790 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
31791 +
31792 + if (err)
31793 + break;
31794 +
31795 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31796 + regs->r11 = addr1;
31797 + regs->r10 = addr2;
31798 + if (stcclc == 0xF8)
31799 + regs->flags &= ~X86_EFLAGS_CF;
31800 + else
31801 + regs->flags |= X86_EFLAGS_CF;
31802 + regs->ip = addr1;
31803 + return 2;
31804 + }
31805 + } while (0);
31806 +
31807 + do { /* PaX: gcc trampoline emulation #1 */
31808 + unsigned short mov1, mov2, jmp1;
31809 + unsigned char jmp2;
31810 + unsigned int addr1;
31811 + unsigned long addr2;
31812 +
31813 + err = get_user(mov1, (unsigned short __user *)regs->ip);
31814 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
31815 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
31816 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
31817 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
31818 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
31819 +
31820 + if (err)
31821 + break;
31822 +
31823 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31824 + regs->r11 = addr1;
31825 + regs->r10 = addr2;
31826 + regs->ip = addr1;
31827 + return 2;
31828 + }
31829 + } while (0);
31830 +
31831 + do { /* PaX: gcc trampoline emulation #2 */
31832 + unsigned short mov1, mov2, jmp1;
31833 + unsigned char jmp2;
31834 + unsigned long addr1, addr2;
31835 +
31836 + err = get_user(mov1, (unsigned short __user *)regs->ip);
31837 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31838 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31839 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31840 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
31841 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
31842 +
31843 + if (err)
31844 + break;
31845 +
31846 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31847 + regs->r11 = addr1;
31848 + regs->r10 = addr2;
31849 + regs->ip = addr1;
31850 + return 2;
31851 + }
31852 + } while (0);
31853 +
31854 + return 1; /* PaX in action */
31855 +}
31856 +#endif
31857 +
31858 +/*
31859 + * PaX: decide what to do with offenders (regs->ip = fault address)
31860 + *
31861 + * returns 1 when task should be killed
31862 + * 2 when gcc trampoline was detected
31863 + */
31864 +static int pax_handle_fetch_fault(struct pt_regs *regs)
31865 +{
31866 + if (v8086_mode(regs))
31867 + return 1;
31868 +
31869 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
31870 + return 1;
31871 +
31872 +#ifdef CONFIG_X86_32
31873 + return pax_handle_fetch_fault_32(regs);
31874 +#else
31875 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
31876 + return pax_handle_fetch_fault_32(regs);
31877 + else
31878 + return pax_handle_fetch_fault_64(regs);
31879 +#endif
31880 +}
31881 +#endif
31882 +
31883 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31884 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
31885 +{
31886 + long i;
31887 +
31888 + printk(KERN_ERR "PAX: bytes at PC: ");
31889 + for (i = 0; i < 20; i++) {
31890 + unsigned char c;
31891 + if (get_user(c, (unsigned char __force_user *)pc+i))
31892 + printk(KERN_CONT "?? ");
31893 + else
31894 + printk(KERN_CONT "%02x ", c);
31895 + }
31896 + printk("\n");
31897 +
31898 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
31899 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
31900 + unsigned long c;
31901 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
31902 +#ifdef CONFIG_X86_32
31903 + printk(KERN_CONT "???????? ");
31904 +#else
31905 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
31906 + printk(KERN_CONT "???????? ???????? ");
31907 + else
31908 + printk(KERN_CONT "???????????????? ");
31909 +#endif
31910 + } else {
31911 +#ifdef CONFIG_X86_64
31912 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
31913 + printk(KERN_CONT "%08x ", (unsigned int)c);
31914 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
31915 + } else
31916 +#endif
31917 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
31918 + }
31919 + }
31920 + printk("\n");
31921 +}
31922 +#endif
31923 +
31924 +/**
31925 + * probe_kernel_write(): safely attempt to write to a location
31926 + * @dst: address to write to
31927 + * @src: pointer to the data that shall be written
31928 + * @size: size of the data chunk
31929 + *
31930 + * Safely write to address @dst from the buffer at @src. If a kernel fault
31931 + * happens, handle that and return -EFAULT.
31932 + */
31933 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
31934 +{
31935 + long ret;
31936 + mm_segment_t old_fs = get_fs();
31937 +
31938 + set_fs(KERNEL_DS);
31939 + pagefault_disable();
31940 + pax_open_kernel();
31941 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
31942 + pax_close_kernel();
31943 + pagefault_enable();
31944 + set_fs(old_fs);
31945 +
31946 + return ret ? -EFAULT : 0;
31947 +}
31948 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
31949 index 0596e8e..5626789 100644
31950 --- a/arch/x86/mm/gup.c
31951 +++ b/arch/x86/mm/gup.c
31952 @@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
31953 addr = start;
31954 len = (unsigned long) nr_pages << PAGE_SHIFT;
31955 end = start + len;
31956 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31957 + if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
31958 (void __user *)start, len)))
31959 return 0;
31960
31961 @@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
31962 goto slow_irqon;
31963 #endif
31964
31965 + if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
31966 + (void __user *)start, len)))
31967 + return 0;
31968 +
31969 /*
31970 * XXX: batch / limit 'nr', to avoid large irq off latency
31971 * needs some instrumenting to determine the common sizes used by
31972 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
31973 index 4500142..53a363c 100644
31974 --- a/arch/x86/mm/highmem_32.c
31975 +++ b/arch/x86/mm/highmem_32.c
31976 @@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
31977 idx = type + KM_TYPE_NR*smp_processor_id();
31978 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31979 BUG_ON(!pte_none(*(kmap_pte-idx)));
31980 +
31981 + pax_open_kernel();
31982 set_pte(kmap_pte-idx, mk_pte(page, prot));
31983 + pax_close_kernel();
31984 +
31985 arch_flush_lazy_mmu_mode();
31986
31987 return (void *)vaddr;
31988 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
31989 index 9d980d8..6bbfacb 100644
31990 --- a/arch/x86/mm/hugetlbpage.c
31991 +++ b/arch/x86/mm/hugetlbpage.c
31992 @@ -92,23 +92,30 @@ int pmd_huge_support(void)
31993 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
31994 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
31995 unsigned long addr, unsigned long len,
31996 - unsigned long pgoff, unsigned long flags)
31997 + unsigned long pgoff, unsigned long flags, unsigned long offset)
31998 {
31999 struct hstate *h = hstate_file(file);
32000 struct vm_unmapped_area_info info;
32001 -
32002 +
32003 info.flags = 0;
32004 info.length = len;
32005 info.low_limit = TASK_UNMAPPED_BASE;
32006 +
32007 +#ifdef CONFIG_PAX_RANDMMAP
32008 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32009 + info.low_limit += current->mm->delta_mmap;
32010 +#endif
32011 +
32012 info.high_limit = TASK_SIZE;
32013 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32014 info.align_offset = 0;
32015 + info.threadstack_offset = offset;
32016 return vm_unmapped_area(&info);
32017 }
32018
32019 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32020 unsigned long addr0, unsigned long len,
32021 - unsigned long pgoff, unsigned long flags)
32022 + unsigned long pgoff, unsigned long flags, unsigned long offset)
32023 {
32024 struct hstate *h = hstate_file(file);
32025 struct vm_unmapped_area_info info;
32026 @@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32027 info.high_limit = current->mm->mmap_base;
32028 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32029 info.align_offset = 0;
32030 + info.threadstack_offset = offset;
32031 addr = vm_unmapped_area(&info);
32032
32033 /*
32034 @@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32035 VM_BUG_ON(addr != -ENOMEM);
32036 info.flags = 0;
32037 info.low_limit = TASK_UNMAPPED_BASE;
32038 +
32039 +#ifdef CONFIG_PAX_RANDMMAP
32040 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32041 + info.low_limit += current->mm->delta_mmap;
32042 +#endif
32043 +
32044 info.high_limit = TASK_SIZE;
32045 addr = vm_unmapped_area(&info);
32046 }
32047 @@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32048 struct hstate *h = hstate_file(file);
32049 struct mm_struct *mm = current->mm;
32050 struct vm_area_struct *vma;
32051 + unsigned long pax_task_size = TASK_SIZE;
32052 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32053
32054 if (len & ~huge_page_mask(h))
32055 return -EINVAL;
32056 - if (len > TASK_SIZE)
32057 +
32058 +#ifdef CONFIG_PAX_SEGMEXEC
32059 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
32060 + pax_task_size = SEGMEXEC_TASK_SIZE;
32061 +#endif
32062 +
32063 + pax_task_size -= PAGE_SIZE;
32064 +
32065 + if (len > pax_task_size)
32066 return -ENOMEM;
32067
32068 if (flags & MAP_FIXED) {
32069 @@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32070 return addr;
32071 }
32072
32073 +#ifdef CONFIG_PAX_RANDMMAP
32074 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32075 +#endif
32076 +
32077 if (addr) {
32078 addr = ALIGN(addr, huge_page_size(h));
32079 vma = find_vma(mm, addr);
32080 - if (TASK_SIZE - len >= addr &&
32081 - (!vma || addr + len <= vma->vm_start))
32082 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32083 return addr;
32084 }
32085 if (mm->get_unmapped_area == arch_get_unmapped_area)
32086 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32087 - pgoff, flags);
32088 + pgoff, flags, offset);
32089 else
32090 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32091 - pgoff, flags);
32092 + pgoff, flags, offset);
32093 }
32094
32095 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
32096 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32097 index f971306..e83e0f6 100644
32098 --- a/arch/x86/mm/init.c
32099 +++ b/arch/x86/mm/init.c
32100 @@ -4,6 +4,7 @@
32101 #include <linux/swap.h>
32102 #include <linux/memblock.h>
32103 #include <linux/bootmem.h> /* for max_low_pfn */
32104 +#include <linux/tboot.h>
32105
32106 #include <asm/cacheflush.h>
32107 #include <asm/e820.h>
32108 @@ -17,6 +18,8 @@
32109 #include <asm/proto.h>
32110 #include <asm/dma.h> /* for MAX_DMA_PFN */
32111 #include <asm/microcode.h>
32112 +#include <asm/desc.h>
32113 +#include <asm/bios_ebda.h>
32114
32115 #include "mm_internal.h"
32116
32117 @@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32118 early_ioremap_page_table_range_init();
32119 #endif
32120
32121 +#ifdef CONFIG_PAX_PER_CPU_PGD
32122 + clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32123 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32124 + KERNEL_PGD_PTRS);
32125 + clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32126 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32127 + KERNEL_PGD_PTRS);
32128 + load_cr3(get_cpu_pgd(0, kernel));
32129 +#else
32130 load_cr3(swapper_pg_dir);
32131 +#endif
32132 +
32133 __flush_tlb_all();
32134
32135 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32136 @@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32137 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32138 * mmio resources as well as potential bios/acpi data regions.
32139 */
32140 +
32141 +#ifdef CONFIG_GRKERNSEC_KMEM
32142 +static unsigned int ebda_start __read_only;
32143 +static unsigned int ebda_end __read_only;
32144 +#endif
32145 +
32146 int devmem_is_allowed(unsigned long pagenr)
32147 {
32148 - if (pagenr < 256)
32149 +#ifdef CONFIG_GRKERNSEC_KMEM
32150 + /* allow BDA */
32151 + if (!pagenr)
32152 return 1;
32153 + /* allow EBDA */
32154 + if (pagenr >= ebda_start && pagenr < ebda_end)
32155 + return 1;
32156 + /* if tboot is in use, allow access to its hardcoded serial log range */
32157 + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32158 + return 1;
32159 +#else
32160 + if (!pagenr)
32161 + return 1;
32162 +#ifdef CONFIG_VM86
32163 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32164 + return 1;
32165 +#endif
32166 +#endif
32167 +
32168 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32169 + return 1;
32170 +#ifdef CONFIG_GRKERNSEC_KMEM
32171 + /* throw out everything else below 1MB */
32172 + if (pagenr <= 256)
32173 + return 0;
32174 +#endif
32175 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32176 return 0;
32177 if (!page_is_ram(pagenr))
32178 @@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32179 #endif
32180 }
32181
32182 +#ifdef CONFIG_GRKERNSEC_KMEM
32183 +static inline void gr_init_ebda(void)
32184 +{
32185 + unsigned int ebda_addr;
32186 + unsigned int ebda_size = 0;
32187 +
32188 + ebda_addr = get_bios_ebda();
32189 + if (ebda_addr) {
32190 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32191 + ebda_size <<= 10;
32192 + }
32193 + if (ebda_addr && ebda_size) {
32194 + ebda_start = ebda_addr >> PAGE_SHIFT;
32195 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32196 + } else {
32197 + ebda_start = 0x9f000 >> PAGE_SHIFT;
32198 + ebda_end = 0xa0000 >> PAGE_SHIFT;
32199 + }
32200 +}
32201 +#else
32202 +static inline void gr_init_ebda(void) { }
32203 +#endif
32204 +
32205 void free_initmem(void)
32206 {
32207 +#ifdef CONFIG_PAX_KERNEXEC
32208 +#ifdef CONFIG_X86_32
32209 + /* PaX: limit KERNEL_CS to actual size */
32210 + unsigned long addr, limit;
32211 + struct desc_struct d;
32212 + int cpu;
32213 +#else
32214 + pgd_t *pgd;
32215 + pud_t *pud;
32216 + pmd_t *pmd;
32217 + unsigned long addr, end;
32218 +#endif
32219 +#endif
32220 +
32221 + gr_init_ebda();
32222 +
32223 +#ifdef CONFIG_PAX_KERNEXEC
32224 +#ifdef CONFIG_X86_32
32225 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32226 + limit = (limit - 1UL) >> PAGE_SHIFT;
32227 +
32228 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32229 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32230 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32231 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32232 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32233 + }
32234 +
32235 + /* PaX: make KERNEL_CS read-only */
32236 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32237 + if (!paravirt_enabled())
32238 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32239 +/*
32240 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32241 + pgd = pgd_offset_k(addr);
32242 + pud = pud_offset(pgd, addr);
32243 + pmd = pmd_offset(pud, addr);
32244 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32245 + }
32246 +*/
32247 +#ifdef CONFIG_X86_PAE
32248 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32249 +/*
32250 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32251 + pgd = pgd_offset_k(addr);
32252 + pud = pud_offset(pgd, addr);
32253 + pmd = pmd_offset(pud, addr);
32254 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32255 + }
32256 +*/
32257 +#endif
32258 +
32259 +#ifdef CONFIG_MODULES
32260 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32261 +#endif
32262 +
32263 +#else
32264 + /* PaX: make kernel code/rodata read-only, rest non-executable */
32265 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32266 + pgd = pgd_offset_k(addr);
32267 + pud = pud_offset(pgd, addr);
32268 + pmd = pmd_offset(pud, addr);
32269 + if (!pmd_present(*pmd))
32270 + continue;
32271 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32272 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32273 + else
32274 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32275 + }
32276 +
32277 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32278 + end = addr + KERNEL_IMAGE_SIZE;
32279 + for (; addr < end; addr += PMD_SIZE) {
32280 + pgd = pgd_offset_k(addr);
32281 + pud = pud_offset(pgd, addr);
32282 + pmd = pmd_offset(pud, addr);
32283 + if (!pmd_present(*pmd))
32284 + continue;
32285 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32286 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32287 + }
32288 +#endif
32289 +
32290 + flush_tlb_all();
32291 +#endif
32292 +
32293 free_init_pages("unused kernel",
32294 (unsigned long)(&__init_begin),
32295 (unsigned long)(&__init_end));
32296 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32297 index 4287f1f..3b99c71 100644
32298 --- a/arch/x86/mm/init_32.c
32299 +++ b/arch/x86/mm/init_32.c
32300 @@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32301 bool __read_mostly __vmalloc_start_set = false;
32302
32303 /*
32304 - * Creates a middle page table and puts a pointer to it in the
32305 - * given global directory entry. This only returns the gd entry
32306 - * in non-PAE compilation mode, since the middle layer is folded.
32307 - */
32308 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
32309 -{
32310 - pud_t *pud;
32311 - pmd_t *pmd_table;
32312 -
32313 -#ifdef CONFIG_X86_PAE
32314 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32315 - pmd_table = (pmd_t *)alloc_low_page();
32316 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32317 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32318 - pud = pud_offset(pgd, 0);
32319 - BUG_ON(pmd_table != pmd_offset(pud, 0));
32320 -
32321 - return pmd_table;
32322 - }
32323 -#endif
32324 - pud = pud_offset(pgd, 0);
32325 - pmd_table = pmd_offset(pud, 0);
32326 -
32327 - return pmd_table;
32328 -}
32329 -
32330 -/*
32331 * Create a page table and place a pointer to it in a middle page
32332 * directory entry:
32333 */
32334 @@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32335 pte_t *page_table = (pte_t *)alloc_low_page();
32336
32337 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32338 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32339 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32340 +#else
32341 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32342 +#endif
32343 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32344 }
32345
32346 return pte_offset_kernel(pmd, 0);
32347 }
32348
32349 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
32350 +{
32351 + pud_t *pud;
32352 + pmd_t *pmd_table;
32353 +
32354 + pud = pud_offset(pgd, 0);
32355 + pmd_table = pmd_offset(pud, 0);
32356 +
32357 + return pmd_table;
32358 +}
32359 +
32360 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32361 {
32362 int pgd_idx = pgd_index(vaddr);
32363 @@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32364 int pgd_idx, pmd_idx;
32365 unsigned long vaddr;
32366 pgd_t *pgd;
32367 + pud_t *pud;
32368 pmd_t *pmd;
32369 pte_t *pte = NULL;
32370 unsigned long count = page_table_range_init_count(start, end);
32371 @@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32372 pgd = pgd_base + pgd_idx;
32373
32374 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32375 - pmd = one_md_table_init(pgd);
32376 - pmd = pmd + pmd_index(vaddr);
32377 + pud = pud_offset(pgd, vaddr);
32378 + pmd = pmd_offset(pud, vaddr);
32379 +
32380 +#ifdef CONFIG_X86_PAE
32381 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32382 +#endif
32383 +
32384 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32385 pmd++, pmd_idx++) {
32386 pte = page_table_kmap_check(one_page_table_init(pmd),
32387 @@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32388 }
32389 }
32390
32391 -static inline int is_kernel_text(unsigned long addr)
32392 +static inline int is_kernel_text(unsigned long start, unsigned long end)
32393 {
32394 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32395 - return 1;
32396 - return 0;
32397 + if ((start > ktla_ktva((unsigned long)_etext) ||
32398 + end <= ktla_ktva((unsigned long)_stext)) &&
32399 + (start > ktla_ktva((unsigned long)_einittext) ||
32400 + end <= ktla_ktva((unsigned long)_sinittext)) &&
32401 +
32402 +#ifdef CONFIG_ACPI_SLEEP
32403 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32404 +#endif
32405 +
32406 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32407 + return 0;
32408 + return 1;
32409 }
32410
32411 /*
32412 @@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32413 unsigned long last_map_addr = end;
32414 unsigned long start_pfn, end_pfn;
32415 pgd_t *pgd_base = swapper_pg_dir;
32416 - int pgd_idx, pmd_idx, pte_ofs;
32417 + unsigned int pgd_idx, pmd_idx, pte_ofs;
32418 unsigned long pfn;
32419 pgd_t *pgd;
32420 + pud_t *pud;
32421 pmd_t *pmd;
32422 pte_t *pte;
32423 unsigned pages_2m, pages_4k;
32424 @@ -291,8 +295,13 @@ repeat:
32425 pfn = start_pfn;
32426 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32427 pgd = pgd_base + pgd_idx;
32428 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32429 - pmd = one_md_table_init(pgd);
32430 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32431 + pud = pud_offset(pgd, 0);
32432 + pmd = pmd_offset(pud, 0);
32433 +
32434 +#ifdef CONFIG_X86_PAE
32435 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32436 +#endif
32437
32438 if (pfn >= end_pfn)
32439 continue;
32440 @@ -304,14 +313,13 @@ repeat:
32441 #endif
32442 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32443 pmd++, pmd_idx++) {
32444 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32445 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32446
32447 /*
32448 * Map with big pages if possible, otherwise
32449 * create normal page tables:
32450 */
32451 if (use_pse) {
32452 - unsigned int addr2;
32453 pgprot_t prot = PAGE_KERNEL_LARGE;
32454 /*
32455 * first pass will use the same initial
32456 @@ -322,11 +330,7 @@ repeat:
32457 _PAGE_PSE);
32458
32459 pfn &= PMD_MASK >> PAGE_SHIFT;
32460 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
32461 - PAGE_OFFSET + PAGE_SIZE-1;
32462 -
32463 - if (is_kernel_text(addr) ||
32464 - is_kernel_text(addr2))
32465 + if (is_kernel_text(address, address + PMD_SIZE))
32466 prot = PAGE_KERNEL_LARGE_EXEC;
32467
32468 pages_2m++;
32469 @@ -343,7 +347,7 @@ repeat:
32470 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32471 pte += pte_ofs;
32472 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
32473 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
32474 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
32475 pgprot_t prot = PAGE_KERNEL;
32476 /*
32477 * first pass will use the same initial
32478 @@ -351,7 +355,7 @@ repeat:
32479 */
32480 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
32481
32482 - if (is_kernel_text(addr))
32483 + if (is_kernel_text(address, address + PAGE_SIZE))
32484 prot = PAGE_KERNEL_EXEC;
32485
32486 pages_4k++;
32487 @@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
32488
32489 pud = pud_offset(pgd, va);
32490 pmd = pmd_offset(pud, va);
32491 - if (!pmd_present(*pmd))
32492 + if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
32493 break;
32494
32495 /* should not be large page here */
32496 @@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
32497
32498 static void __init pagetable_init(void)
32499 {
32500 - pgd_t *pgd_base = swapper_pg_dir;
32501 -
32502 - permanent_kmaps_init(pgd_base);
32503 + permanent_kmaps_init(swapper_pg_dir);
32504 }
32505
32506 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
32507 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
32508 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32509
32510 /* user-defined highmem size */
32511 @@ -787,10 +789,10 @@ void __init mem_init(void)
32512 ((unsigned long)&__init_end -
32513 (unsigned long)&__init_begin) >> 10,
32514
32515 - (unsigned long)&_etext, (unsigned long)&_edata,
32516 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
32517 + (unsigned long)&_sdata, (unsigned long)&_edata,
32518 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
32519
32520 - (unsigned long)&_text, (unsigned long)&_etext,
32521 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
32522 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
32523
32524 /*
32525 @@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
32526 if (!kernel_set_to_readonly)
32527 return;
32528
32529 + start = ktla_ktva(start);
32530 pr_debug("Set kernel text: %lx - %lx for read write\n",
32531 start, start+size);
32532
32533 @@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
32534 if (!kernel_set_to_readonly)
32535 return;
32536
32537 + start = ktla_ktva(start);
32538 pr_debug("Set kernel text: %lx - %lx for read only\n",
32539 start, start+size);
32540
32541 @@ -922,6 +926,7 @@ void mark_rodata_ro(void)
32542 unsigned long start = PFN_ALIGN(_text);
32543 unsigned long size = PFN_ALIGN(_etext) - start;
32544
32545 + start = ktla_ktva(start);
32546 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
32547 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
32548 size >> 10);
32549 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
32550 index 104d56a..62ba13f1 100644
32551 --- a/arch/x86/mm/init_64.c
32552 +++ b/arch/x86/mm/init_64.c
32553 @@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
32554 * around without checking the pgd every time.
32555 */
32556
32557 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
32558 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
32559 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32560
32561 int force_personality32;
32562 @@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32563
32564 for (address = start; address <= end; address += PGDIR_SIZE) {
32565 const pgd_t *pgd_ref = pgd_offset_k(address);
32566 +
32567 +#ifdef CONFIG_PAX_PER_CPU_PGD
32568 + unsigned long cpu;
32569 +#else
32570 struct page *page;
32571 +#endif
32572
32573 if (pgd_none(*pgd_ref))
32574 continue;
32575
32576 spin_lock(&pgd_lock);
32577 +
32578 +#ifdef CONFIG_PAX_PER_CPU_PGD
32579 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32580 + pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
32581 +
32582 + if (pgd_none(*pgd))
32583 + set_pgd(pgd, *pgd_ref);
32584 + else
32585 + BUG_ON(pgd_page_vaddr(*pgd)
32586 + != pgd_page_vaddr(*pgd_ref));
32587 + pgd = pgd_offset_cpu(cpu, kernel, address);
32588 +#else
32589 list_for_each_entry(page, &pgd_list, lru) {
32590 pgd_t *pgd;
32591 spinlock_t *pgt_lock;
32592 @@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32593 /* the pgt_lock only for Xen */
32594 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32595 spin_lock(pgt_lock);
32596 +#endif
32597
32598 if (pgd_none(*pgd))
32599 set_pgd(pgd, *pgd_ref);
32600 @@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32601 BUG_ON(pgd_page_vaddr(*pgd)
32602 != pgd_page_vaddr(*pgd_ref));
32603
32604 +#ifndef CONFIG_PAX_PER_CPU_PGD
32605 spin_unlock(pgt_lock);
32606 +#endif
32607 +
32608 }
32609 spin_unlock(&pgd_lock);
32610 }
32611 @@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
32612 {
32613 if (pgd_none(*pgd)) {
32614 pud_t *pud = (pud_t *)spp_getpage();
32615 - pgd_populate(&init_mm, pgd, pud);
32616 + pgd_populate_kernel(&init_mm, pgd, pud);
32617 if (pud != pud_offset(pgd, 0))
32618 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
32619 pud, pud_offset(pgd, 0));
32620 @@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
32621 {
32622 if (pud_none(*pud)) {
32623 pmd_t *pmd = (pmd_t *) spp_getpage();
32624 - pud_populate(&init_mm, pud, pmd);
32625 + pud_populate_kernel(&init_mm, pud, pmd);
32626 if (pmd != pmd_offset(pud, 0))
32627 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
32628 pmd, pmd_offset(pud, 0));
32629 @@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
32630 pmd = fill_pmd(pud, vaddr);
32631 pte = fill_pte(pmd, vaddr);
32632
32633 + pax_open_kernel();
32634 set_pte(pte, new_pte);
32635 + pax_close_kernel();
32636
32637 /*
32638 * It's enough to flush this one mapping.
32639 @@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
32640 pgd = pgd_offset_k((unsigned long)__va(phys));
32641 if (pgd_none(*pgd)) {
32642 pud = (pud_t *) spp_getpage();
32643 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
32644 - _PAGE_USER));
32645 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
32646 }
32647 pud = pud_offset(pgd, (unsigned long)__va(phys));
32648 if (pud_none(*pud)) {
32649 pmd = (pmd_t *) spp_getpage();
32650 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
32651 - _PAGE_USER));
32652 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
32653 }
32654 pmd = pmd_offset(pud, phys);
32655 BUG_ON(!pmd_none(*pmd));
32656 @@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
32657 prot);
32658
32659 spin_lock(&init_mm.page_table_lock);
32660 - pud_populate(&init_mm, pud, pmd);
32661 + pud_populate_kernel(&init_mm, pud, pmd);
32662 spin_unlock(&init_mm.page_table_lock);
32663 }
32664 __flush_tlb_all();
32665 @@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
32666 page_size_mask);
32667
32668 spin_lock(&init_mm.page_table_lock);
32669 - pgd_populate(&init_mm, pgd, pud);
32670 + pgd_populate_kernel(&init_mm, pgd, pud);
32671 spin_unlock(&init_mm.page_table_lock);
32672 pgd_changed = true;
32673 }
32674 @@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
32675 static struct vm_area_struct gate_vma = {
32676 .vm_start = VSYSCALL_START,
32677 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
32678 - .vm_page_prot = PAGE_READONLY_EXEC,
32679 - .vm_flags = VM_READ | VM_EXEC
32680 + .vm_page_prot = PAGE_READONLY,
32681 + .vm_flags = VM_READ
32682 };
32683
32684 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
32685 @@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
32686
32687 const char *arch_vma_name(struct vm_area_struct *vma)
32688 {
32689 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
32690 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
32691 return "[vdso]";
32692 if (vma == &gate_vma)
32693 return "[vsyscall]";
32694 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
32695 index 7b179b4..6bd17777 100644
32696 --- a/arch/x86/mm/iomap_32.c
32697 +++ b/arch/x86/mm/iomap_32.c
32698 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
32699 type = kmap_atomic_idx_push();
32700 idx = type + KM_TYPE_NR * smp_processor_id();
32701 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32702 +
32703 + pax_open_kernel();
32704 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
32705 + pax_close_kernel();
32706 +
32707 arch_flush_lazy_mmu_mode();
32708
32709 return (void *)vaddr;
32710 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
32711 index 799580c..72f9fe0 100644
32712 --- a/arch/x86/mm/ioremap.c
32713 +++ b/arch/x86/mm/ioremap.c
32714 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
32715 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
32716 int is_ram = page_is_ram(pfn);
32717
32718 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
32719 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
32720 return NULL;
32721 WARN_ON_ONCE(is_ram);
32722 }
32723 @@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
32724 *
32725 * Caller must ensure there is only one unmapping for the same pointer.
32726 */
32727 -void iounmap(volatile void __iomem *addr)
32728 +void iounmap(const volatile void __iomem *addr)
32729 {
32730 struct vm_struct *p, *o;
32731
32732 @@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32733
32734 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
32735 if (page_is_ram(start >> PAGE_SHIFT))
32736 +#ifdef CONFIG_HIGHMEM
32737 + if ((start >> PAGE_SHIFT) < max_low_pfn)
32738 +#endif
32739 return __va(phys);
32740
32741 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
32742 @@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32743 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
32744 {
32745 if (page_is_ram(phys >> PAGE_SHIFT))
32746 +#ifdef CONFIG_HIGHMEM
32747 + if ((phys >> PAGE_SHIFT) < max_low_pfn)
32748 +#endif
32749 return;
32750
32751 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
32752 @@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
32753 early_param("early_ioremap_debug", early_ioremap_debug_setup);
32754
32755 static __initdata int after_paging_init;
32756 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
32757 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
32758
32759 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
32760 {
32761 @@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
32762 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
32763
32764 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
32765 - memset(bm_pte, 0, sizeof(bm_pte));
32766 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
32767 + pmd_populate_user(&init_mm, pmd, bm_pte);
32768
32769 /*
32770 * The boot-ioremap range spans multiple pmds, for which
32771 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
32772 index d87dd6d..bf3fa66 100644
32773 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
32774 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
32775 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
32776 * memory (e.g. tracked pages)? For now, we need this to avoid
32777 * invoking kmemcheck for PnP BIOS calls.
32778 */
32779 - if (regs->flags & X86_VM_MASK)
32780 + if (v8086_mode(regs))
32781 return false;
32782 - if (regs->cs != __KERNEL_CS)
32783 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
32784 return false;
32785
32786 pte = kmemcheck_pte_lookup(address);
32787 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
32788 index 25e7e13..1964579 100644
32789 --- a/arch/x86/mm/mmap.c
32790 +++ b/arch/x86/mm/mmap.c
32791 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
32792 * Leave an at least ~128 MB hole with possible stack randomization.
32793 */
32794 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
32795 -#define MAX_GAP (TASK_SIZE/6*5)
32796 +#define MAX_GAP (pax_task_size/6*5)
32797
32798 static int mmap_is_legacy(void)
32799 {
32800 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
32801 return rnd << PAGE_SHIFT;
32802 }
32803
32804 -static unsigned long mmap_base(void)
32805 +static unsigned long mmap_base(struct mm_struct *mm)
32806 {
32807 unsigned long gap = rlimit(RLIMIT_STACK);
32808 + unsigned long pax_task_size = TASK_SIZE;
32809 +
32810 +#ifdef CONFIG_PAX_SEGMEXEC
32811 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
32812 + pax_task_size = SEGMEXEC_TASK_SIZE;
32813 +#endif
32814
32815 if (gap < MIN_GAP)
32816 gap = MIN_GAP;
32817 else if (gap > MAX_GAP)
32818 gap = MAX_GAP;
32819
32820 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
32821 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
32822 }
32823
32824 /*
32825 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
32826 * does, but not when emulating X86_32
32827 */
32828 -static unsigned long mmap_legacy_base(void)
32829 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
32830 {
32831 - if (mmap_is_ia32())
32832 + if (mmap_is_ia32()) {
32833 +
32834 +#ifdef CONFIG_PAX_SEGMEXEC
32835 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
32836 + return SEGMEXEC_TASK_UNMAPPED_BASE;
32837 + else
32838 +#endif
32839 +
32840 return TASK_UNMAPPED_BASE;
32841 - else
32842 + } else
32843 return TASK_UNMAPPED_BASE + mmap_rnd();
32844 }
32845
32846 @@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
32847 */
32848 void arch_pick_mmap_layout(struct mm_struct *mm)
32849 {
32850 - mm->mmap_legacy_base = mmap_legacy_base();
32851 - mm->mmap_base = mmap_base();
32852 + mm->mmap_legacy_base = mmap_legacy_base(mm);
32853 + mm->mmap_base = mmap_base(mm);
32854 +
32855 +#ifdef CONFIG_PAX_RANDMMAP
32856 + if (mm->pax_flags & MF_PAX_RANDMMAP) {
32857 + mm->mmap_legacy_base += mm->delta_mmap;
32858 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
32859 + }
32860 +#endif
32861
32862 if (mmap_is_legacy()) {
32863 mm->mmap_base = mm->mmap_legacy_base;
32864 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
32865 index 0057a7a..95c7edd 100644
32866 --- a/arch/x86/mm/mmio-mod.c
32867 +++ b/arch/x86/mm/mmio-mod.c
32868 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
32869 break;
32870 default:
32871 {
32872 - unsigned char *ip = (unsigned char *)instptr;
32873 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
32874 my_trace->opcode = MMIO_UNKNOWN_OP;
32875 my_trace->width = 0;
32876 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
32877 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
32878 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32879 void __iomem *addr)
32880 {
32881 - static atomic_t next_id;
32882 + static atomic_unchecked_t next_id;
32883 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
32884 /* These are page-unaligned. */
32885 struct mmiotrace_map map = {
32886 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32887 .private = trace
32888 },
32889 .phys = offset,
32890 - .id = atomic_inc_return(&next_id)
32891 + .id = atomic_inc_return_unchecked(&next_id)
32892 };
32893 map.map_id = trace->id;
32894
32895 @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
32896 ioremap_trace_core(offset, size, addr);
32897 }
32898
32899 -static void iounmap_trace_core(volatile void __iomem *addr)
32900 +static void iounmap_trace_core(const volatile void __iomem *addr)
32901 {
32902 struct mmiotrace_map map = {
32903 .phys = 0,
32904 @@ -328,7 +328,7 @@ not_enabled:
32905 }
32906 }
32907
32908 -void mmiotrace_iounmap(volatile void __iomem *addr)
32909 +void mmiotrace_iounmap(const volatile void __iomem *addr)
32910 {
32911 might_sleep();
32912 if (is_enabled()) /* recheck and proper locking in *_core() */
32913 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
32914 index 24aec58..c39fe8b 100644
32915 --- a/arch/x86/mm/numa.c
32916 +++ b/arch/x86/mm/numa.c
32917 @@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
32918 return true;
32919 }
32920
32921 -static int __init numa_register_memblks(struct numa_meminfo *mi)
32922 +static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
32923 {
32924 unsigned long uninitialized_var(pfn_align);
32925 int i, nid;
32926 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
32927 index d0b1773..4c3327c 100644
32928 --- a/arch/x86/mm/pageattr-test.c
32929 +++ b/arch/x86/mm/pageattr-test.c
32930 @@ -36,7 +36,7 @@ enum {
32931
32932 static int pte_testbit(pte_t pte)
32933 {
32934 - return pte_flags(pte) & _PAGE_UNUSED1;
32935 + return pte_flags(pte) & _PAGE_CPA_TEST;
32936 }
32937
32938 struct split_state {
32939 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
32940 index bb32480..75f2f5e 100644
32941 --- a/arch/x86/mm/pageattr.c
32942 +++ b/arch/x86/mm/pageattr.c
32943 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32944 */
32945 #ifdef CONFIG_PCI_BIOS
32946 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
32947 - pgprot_val(forbidden) |= _PAGE_NX;
32948 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32949 #endif
32950
32951 /*
32952 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32953 * Does not cover __inittext since that is gone later on. On
32954 * 64bit we do not enforce !NX on the low mapping
32955 */
32956 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
32957 - pgprot_val(forbidden) |= _PAGE_NX;
32958 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
32959 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32960
32961 +#ifdef CONFIG_DEBUG_RODATA
32962 /*
32963 * The .rodata section needs to be read-only. Using the pfn
32964 * catches all aliases.
32965 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32966 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
32967 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
32968 pgprot_val(forbidden) |= _PAGE_RW;
32969 +#endif
32970
32971 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
32972 /*
32973 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32974 }
32975 #endif
32976
32977 +#ifdef CONFIG_PAX_KERNEXEC
32978 + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
32979 + pgprot_val(forbidden) |= _PAGE_RW;
32980 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32981 + }
32982 +#endif
32983 +
32984 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
32985
32986 return prot;
32987 @@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
32988 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
32989 {
32990 /* change init_mm */
32991 + pax_open_kernel();
32992 set_pte_atomic(kpte, pte);
32993 +
32994 #ifdef CONFIG_X86_32
32995 if (!SHARED_KERNEL_PMD) {
32996 +
32997 +#ifdef CONFIG_PAX_PER_CPU_PGD
32998 + unsigned long cpu;
32999 +#else
33000 struct page *page;
33001 +#endif
33002
33003 +#ifdef CONFIG_PAX_PER_CPU_PGD
33004 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33005 + pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33006 +#else
33007 list_for_each_entry(page, &pgd_list, lru) {
33008 - pgd_t *pgd;
33009 + pgd_t *pgd = (pgd_t *)page_address(page);
33010 +#endif
33011 +
33012 pud_t *pud;
33013 pmd_t *pmd;
33014
33015 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
33016 + pgd += pgd_index(address);
33017 pud = pud_offset(pgd, address);
33018 pmd = pmd_offset(pud, address);
33019 set_pte_atomic((pte_t *)pmd, pte);
33020 }
33021 }
33022 #endif
33023 + pax_close_kernel();
33024 }
33025
33026 static int
33027 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33028 index 6574388..87e9bef 100644
33029 --- a/arch/x86/mm/pat.c
33030 +++ b/arch/x86/mm/pat.c
33031 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33032
33033 if (!entry) {
33034 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33035 - current->comm, current->pid, start, end - 1);
33036 + current->comm, task_pid_nr(current), start, end - 1);
33037 return -EINVAL;
33038 }
33039
33040 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33041
33042 while (cursor < to) {
33043 if (!devmem_is_allowed(pfn)) {
33044 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33045 - current->comm, from, to - 1);
33046 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33047 + current->comm, from, to - 1, cursor);
33048 return 0;
33049 }
33050 cursor += PAGE_SIZE;
33051 @@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33052 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33053 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33054 "for [mem %#010Lx-%#010Lx]\n",
33055 - current->comm, current->pid,
33056 + current->comm, task_pid_nr(current),
33057 cattr_name(flags),
33058 base, (unsigned long long)(base + size-1));
33059 return -EINVAL;
33060 @@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33061 flags = lookup_memtype(paddr);
33062 if (want_flags != flags) {
33063 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33064 - current->comm, current->pid,
33065 + current->comm, task_pid_nr(current),
33066 cattr_name(want_flags),
33067 (unsigned long long)paddr,
33068 (unsigned long long)(paddr + size - 1),
33069 @@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33070 free_memtype(paddr, paddr + size);
33071 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33072 " for [mem %#010Lx-%#010Lx], got %s\n",
33073 - current->comm, current->pid,
33074 + current->comm, task_pid_nr(current),
33075 cattr_name(want_flags),
33076 (unsigned long long)paddr,
33077 (unsigned long long)(paddr + size - 1),
33078 diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33079 index 415f6c4..d319983 100644
33080 --- a/arch/x86/mm/pat_rbtree.c
33081 +++ b/arch/x86/mm/pat_rbtree.c
33082 @@ -160,7 +160,7 @@ success:
33083
33084 failure:
33085 printk(KERN_INFO "%s:%d conflicting memory types "
33086 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33087 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33088 end, cattr_name(found_type), cattr_name(match->type));
33089 return -EBUSY;
33090 }
33091 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33092 index 9f0614d..92ae64a 100644
33093 --- a/arch/x86/mm/pf_in.c
33094 +++ b/arch/x86/mm/pf_in.c
33095 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33096 int i;
33097 enum reason_type rv = OTHERS;
33098
33099 - p = (unsigned char *)ins_addr;
33100 + p = (unsigned char *)ktla_ktva(ins_addr);
33101 p += skip_prefix(p, &prf);
33102 p += get_opcode(p, &opcode);
33103
33104 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33105 struct prefix_bits prf;
33106 int i;
33107
33108 - p = (unsigned char *)ins_addr;
33109 + p = (unsigned char *)ktla_ktva(ins_addr);
33110 p += skip_prefix(p, &prf);
33111 p += get_opcode(p, &opcode);
33112
33113 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33114 struct prefix_bits prf;
33115 int i;
33116
33117 - p = (unsigned char *)ins_addr;
33118 + p = (unsigned char *)ktla_ktva(ins_addr);
33119 p += skip_prefix(p, &prf);
33120 p += get_opcode(p, &opcode);
33121
33122 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33123 struct prefix_bits prf;
33124 int i;
33125
33126 - p = (unsigned char *)ins_addr;
33127 + p = (unsigned char *)ktla_ktva(ins_addr);
33128 p += skip_prefix(p, &prf);
33129 p += get_opcode(p, &opcode);
33130 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33131 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33132 struct prefix_bits prf;
33133 int i;
33134
33135 - p = (unsigned char *)ins_addr;
33136 + p = (unsigned char *)ktla_ktva(ins_addr);
33137 p += skip_prefix(p, &prf);
33138 p += get_opcode(p, &opcode);
33139 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33140 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33141 index c96314a..433b127 100644
33142 --- a/arch/x86/mm/pgtable.c
33143 +++ b/arch/x86/mm/pgtable.c
33144 @@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33145 list_del(&page->lru);
33146 }
33147
33148 -#define UNSHARED_PTRS_PER_PGD \
33149 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33150 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33151 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33152
33153 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33154 +{
33155 + unsigned int count = USER_PGD_PTRS;
33156
33157 + if (!pax_user_shadow_base)
33158 + return;
33159 +
33160 + while (count--)
33161 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33162 +}
33163 +#endif
33164 +
33165 +#ifdef CONFIG_PAX_PER_CPU_PGD
33166 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33167 +{
33168 + unsigned int count = USER_PGD_PTRS;
33169 +
33170 + while (count--) {
33171 + pgd_t pgd;
33172 +
33173 +#ifdef CONFIG_X86_64
33174 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33175 +#else
33176 + pgd = *src++;
33177 +#endif
33178 +
33179 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33180 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33181 +#endif
33182 +
33183 + *dst++ = pgd;
33184 + }
33185 +
33186 +}
33187 +#endif
33188 +
33189 +#ifdef CONFIG_X86_64
33190 +#define pxd_t pud_t
33191 +#define pyd_t pgd_t
33192 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33193 +#define pgtable_pxd_page_ctor(page) true
33194 +#define pgtable_pxd_page_dtor(page)
33195 +#define pxd_free(mm, pud) pud_free((mm), (pud))
33196 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33197 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
33198 +#define PYD_SIZE PGDIR_SIZE
33199 +#else
33200 +#define pxd_t pmd_t
33201 +#define pyd_t pud_t
33202 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33203 +#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33204 +#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33205 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
33206 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33207 +#define pyd_offset(mm, address) pud_offset((mm), (address))
33208 +#define PYD_SIZE PUD_SIZE
33209 +#endif
33210 +
33211 +#ifdef CONFIG_PAX_PER_CPU_PGD
33212 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33213 +static inline void pgd_dtor(pgd_t *pgd) {}
33214 +#else
33215 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33216 {
33217 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33218 @@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33219 pgd_list_del(pgd);
33220 spin_unlock(&pgd_lock);
33221 }
33222 +#endif
33223
33224 /*
33225 * List of all pgd's needed for non-PAE so it can invalidate entries
33226 @@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33227 * -- nyc
33228 */
33229
33230 -#ifdef CONFIG_X86_PAE
33231 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33232 /*
33233 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33234 * updating the top-level pagetable entries to guarantee the
33235 @@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33236 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33237 * and initialize the kernel pmds here.
33238 */
33239 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33240 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33241
33242 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33243 {
33244 @@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33245 */
33246 flush_tlb_mm(mm);
33247 }
33248 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33249 +#define PREALLOCATED_PXDS USER_PGD_PTRS
33250 #else /* !CONFIG_X86_PAE */
33251
33252 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33253 -#define PREALLOCATED_PMDS 0
33254 +#define PREALLOCATED_PXDS 0
33255
33256 #endif /* CONFIG_X86_PAE */
33257
33258 -static void free_pmds(pmd_t *pmds[])
33259 +static void free_pxds(pxd_t *pxds[])
33260 {
33261 int i;
33262
33263 - for(i = 0; i < PREALLOCATED_PMDS; i++)
33264 - if (pmds[i]) {
33265 - pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33266 - free_page((unsigned long)pmds[i]);
33267 + for(i = 0; i < PREALLOCATED_PXDS; i++)
33268 + if (pxds[i]) {
33269 + pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33270 + free_page((unsigned long)pxds[i]);
33271 }
33272 }
33273
33274 -static int preallocate_pmds(pmd_t *pmds[])
33275 +static int preallocate_pxds(pxd_t *pxds[])
33276 {
33277 int i;
33278 bool failed = false;
33279
33280 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
33281 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33282 - if (!pmd)
33283 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
33284 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33285 + if (!pxd)
33286 failed = true;
33287 - if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33288 - free_page((unsigned long)pmd);
33289 - pmd = NULL;
33290 + if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33291 + free_page((unsigned long)pxd);
33292 + pxd = NULL;
33293 failed = true;
33294 }
33295 - pmds[i] = pmd;
33296 + pxds[i] = pxd;
33297 }
33298
33299 if (failed) {
33300 - free_pmds(pmds);
33301 + free_pxds(pxds);
33302 return -ENOMEM;
33303 }
33304
33305 @@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33306 * preallocate which never got a corresponding vma will need to be
33307 * freed manually.
33308 */
33309 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33310 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33311 {
33312 int i;
33313
33314 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
33315 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
33316 pgd_t pgd = pgdp[i];
33317
33318 if (pgd_val(pgd) != 0) {
33319 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33320 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33321
33322 - pgdp[i] = native_make_pgd(0);
33323 + set_pgd(pgdp + i, native_make_pgd(0));
33324
33325 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33326 - pmd_free(mm, pmd);
33327 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33328 + pxd_free(mm, pxd);
33329 }
33330 }
33331 }
33332
33333 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33334 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33335 {
33336 - pud_t *pud;
33337 + pyd_t *pyd;
33338 int i;
33339
33340 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33341 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33342 return;
33343
33344 - pud = pud_offset(pgd, 0);
33345 -
33346 - for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33347 - pmd_t *pmd = pmds[i];
33348 +#ifdef CONFIG_X86_64
33349 + pyd = pyd_offset(mm, 0L);
33350 +#else
33351 + pyd = pyd_offset(pgd, 0L);
33352 +#endif
33353
33354 + for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33355 + pxd_t *pxd = pxds[i];
33356 if (i >= KERNEL_PGD_BOUNDARY)
33357 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33358 - sizeof(pmd_t) * PTRS_PER_PMD);
33359 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33360 + sizeof(pxd_t) * PTRS_PER_PMD);
33361
33362 - pud_populate(mm, pud, pmd);
33363 + pyd_populate(mm, pyd, pxd);
33364 }
33365 }
33366
33367 pgd_t *pgd_alloc(struct mm_struct *mm)
33368 {
33369 pgd_t *pgd;
33370 - pmd_t *pmds[PREALLOCATED_PMDS];
33371 + pxd_t *pxds[PREALLOCATED_PXDS];
33372
33373 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33374
33375 @@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33376
33377 mm->pgd = pgd;
33378
33379 - if (preallocate_pmds(pmds) != 0)
33380 + if (preallocate_pxds(pxds) != 0)
33381 goto out_free_pgd;
33382
33383 if (paravirt_pgd_alloc(mm) != 0)
33384 - goto out_free_pmds;
33385 + goto out_free_pxds;
33386
33387 /*
33388 * Make sure that pre-populating the pmds is atomic with
33389 @@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33390 spin_lock(&pgd_lock);
33391
33392 pgd_ctor(mm, pgd);
33393 - pgd_prepopulate_pmd(mm, pgd, pmds);
33394 + pgd_prepopulate_pxd(mm, pgd, pxds);
33395
33396 spin_unlock(&pgd_lock);
33397
33398 return pgd;
33399
33400 -out_free_pmds:
33401 - free_pmds(pmds);
33402 +out_free_pxds:
33403 + free_pxds(pxds);
33404 out_free_pgd:
33405 free_page((unsigned long)pgd);
33406 out:
33407 @@ -313,7 +380,7 @@ out:
33408
33409 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33410 {
33411 - pgd_mop_up_pmds(mm, pgd);
33412 + pgd_mop_up_pxds(mm, pgd);
33413 pgd_dtor(pgd);
33414 paravirt_pgd_free(mm, pgd);
33415 free_page((unsigned long)pgd);
33416 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33417 index a69bcb8..19068ab 100644
33418 --- a/arch/x86/mm/pgtable_32.c
33419 +++ b/arch/x86/mm/pgtable_32.c
33420 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33421 return;
33422 }
33423 pte = pte_offset_kernel(pmd, vaddr);
33424 +
33425 + pax_open_kernel();
33426 if (pte_val(pteval))
33427 set_pte_at(&init_mm, vaddr, pte, pteval);
33428 else
33429 pte_clear(&init_mm, vaddr, pte);
33430 + pax_close_kernel();
33431
33432 /*
33433 * It's enough to flush this one mapping.
33434 diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
33435 index e666cbb..61788c45 100644
33436 --- a/arch/x86/mm/physaddr.c
33437 +++ b/arch/x86/mm/physaddr.c
33438 @@ -10,7 +10,7 @@
33439 #ifdef CONFIG_X86_64
33440
33441 #ifdef CONFIG_DEBUG_VIRTUAL
33442 -unsigned long __phys_addr(unsigned long x)
33443 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33444 {
33445 unsigned long y = x - __START_KERNEL_map;
33446
33447 @@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
33448 #else
33449
33450 #ifdef CONFIG_DEBUG_VIRTUAL
33451 -unsigned long __phys_addr(unsigned long x)
33452 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33453 {
33454 unsigned long phys_addr = x - PAGE_OFFSET;
33455 /* VMALLOC_* aren't constants */
33456 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
33457 index 90555bf..f5f1828 100644
33458 --- a/arch/x86/mm/setup_nx.c
33459 +++ b/arch/x86/mm/setup_nx.c
33460 @@ -5,8 +5,10 @@
33461 #include <asm/pgtable.h>
33462 #include <asm/proto.h>
33463
33464 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33465 static int disable_nx;
33466
33467 +#ifndef CONFIG_PAX_PAGEEXEC
33468 /*
33469 * noexec = on|off
33470 *
33471 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
33472 return 0;
33473 }
33474 early_param("noexec", noexec_setup);
33475 +#endif
33476 +
33477 +#endif
33478
33479 void x86_configure_nx(void)
33480 {
33481 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33482 if (cpu_has_nx && !disable_nx)
33483 __supported_pte_mask |= _PAGE_NX;
33484 else
33485 +#endif
33486 __supported_pte_mask &= ~_PAGE_NX;
33487 }
33488
33489 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
33490 index ae699b3..f1b2ad2 100644
33491 --- a/arch/x86/mm/tlb.c
33492 +++ b/arch/x86/mm/tlb.c
33493 @@ -48,7 +48,11 @@ void leave_mm(int cpu)
33494 BUG();
33495 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
33496 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
33497 +
33498 +#ifndef CONFIG_PAX_PER_CPU_PGD
33499 load_cr3(swapper_pg_dir);
33500 +#endif
33501 +
33502 }
33503 }
33504 EXPORT_SYMBOL_GPL(leave_mm);
33505 diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
33506 new file mode 100644
33507 index 0000000..dace51c
33508 --- /dev/null
33509 +++ b/arch/x86/mm/uderef_64.c
33510 @@ -0,0 +1,37 @@
33511 +#include <linux/mm.h>
33512 +#include <asm/pgtable.h>
33513 +#include <asm/uaccess.h>
33514 +
33515 +#ifdef CONFIG_PAX_MEMORY_UDEREF
33516 +/* PaX: due to the special call convention these functions must
33517 + * - remain leaf functions under all configurations,
33518 + * - never be called directly, only dereferenced from the wrappers.
33519 + */
33520 +void __pax_open_userland(void)
33521 +{
33522 + unsigned int cpu;
33523 +
33524 + if (unlikely(!segment_eq(get_fs(), USER_DS)))
33525 + return;
33526 +
33527 + cpu = raw_get_cpu();
33528 + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
33529 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
33530 + raw_put_cpu_no_resched();
33531 +}
33532 +EXPORT_SYMBOL(__pax_open_userland);
33533 +
33534 +void __pax_close_userland(void)
33535 +{
33536 + unsigned int cpu;
33537 +
33538 + if (unlikely(!segment_eq(get_fs(), USER_DS)))
33539 + return;
33540 +
33541 + cpu = raw_get_cpu();
33542 + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
33543 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
33544 + raw_put_cpu_no_resched();
33545 +}
33546 +EXPORT_SYMBOL(__pax_close_userland);
33547 +#endif
33548 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
33549 index 877b9a1..f746de8 100644
33550 --- a/arch/x86/net/bpf_jit.S
33551 +++ b/arch/x86/net/bpf_jit.S
33552 @@ -9,6 +9,7 @@
33553 */
33554 #include <linux/linkage.h>
33555 #include <asm/dwarf2.h>
33556 +#include <asm/alternative-asm.h>
33557
33558 /*
33559 * Calling convention :
33560 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
33561 jle bpf_slow_path_word
33562 mov (SKBDATA,%rsi),%eax
33563 bswap %eax /* ntohl() */
33564 + pax_force_retaddr
33565 ret
33566
33567 sk_load_half:
33568 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
33569 jle bpf_slow_path_half
33570 movzwl (SKBDATA,%rsi),%eax
33571 rol $8,%ax # ntohs()
33572 + pax_force_retaddr
33573 ret
33574
33575 sk_load_byte:
33576 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
33577 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
33578 jle bpf_slow_path_byte
33579 movzbl (SKBDATA,%rsi),%eax
33580 + pax_force_retaddr
33581 ret
33582
33583 /**
33584 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
33585 movzbl (SKBDATA,%rsi),%ebx
33586 and $15,%bl
33587 shl $2,%bl
33588 + pax_force_retaddr
33589 ret
33590
33591 /* rsi contains offset and can be scratched */
33592 @@ -109,6 +114,7 @@ bpf_slow_path_word:
33593 js bpf_error
33594 mov -12(%rbp),%eax
33595 bswap %eax
33596 + pax_force_retaddr
33597 ret
33598
33599 bpf_slow_path_half:
33600 @@ -117,12 +123,14 @@ bpf_slow_path_half:
33601 mov -12(%rbp),%ax
33602 rol $8,%ax
33603 movzwl %ax,%eax
33604 + pax_force_retaddr
33605 ret
33606
33607 bpf_slow_path_byte:
33608 bpf_slow_path_common(1)
33609 js bpf_error
33610 movzbl -12(%rbp),%eax
33611 + pax_force_retaddr
33612 ret
33613
33614 bpf_slow_path_byte_msh:
33615 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
33616 and $15,%al
33617 shl $2,%al
33618 xchg %eax,%ebx
33619 + pax_force_retaddr
33620 ret
33621
33622 #define sk_negative_common(SIZE) \
33623 @@ -140,7 +149,7 @@ bpf_slow_path_byte_msh:
33624 push %r9; \
33625 push SKBDATA; \
33626 /* rsi already has offset */ \
33627 - mov $SIZE,%ecx; /* size */ \
33628 + mov $SIZE,%edx; /* size */ \
33629 call bpf_internal_load_pointer_neg_helper; \
33630 test %rax,%rax; \
33631 pop SKBDATA; \
33632 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
33633 sk_negative_common(4)
33634 mov (%rax), %eax
33635 bswap %eax
33636 + pax_force_retaddr
33637 ret
33638
33639 bpf_slow_path_half_neg:
33640 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
33641 mov (%rax),%ax
33642 rol $8,%ax
33643 movzwl %ax,%eax
33644 + pax_force_retaddr
33645 ret
33646
33647 bpf_slow_path_byte_neg:
33648 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
33649 .globl sk_load_byte_negative_offset
33650 sk_negative_common(1)
33651 movzbl (%rax), %eax
33652 + pax_force_retaddr
33653 ret
33654
33655 bpf_slow_path_byte_msh_neg:
33656 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
33657 and $15,%al
33658 shl $2,%al
33659 xchg %eax,%ebx
33660 + pax_force_retaddr
33661 ret
33662
33663 bpf_error:
33664 @@ -197,4 +210,5 @@ bpf_error:
33665 xor %eax,%eax
33666 mov -8(%rbp),%rbx
33667 leaveq
33668 + pax_force_retaddr
33669 ret
33670 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
33671 index 4ed75dd..3cf24f0b 100644
33672 --- a/arch/x86/net/bpf_jit_comp.c
33673 +++ b/arch/x86/net/bpf_jit_comp.c
33674 @@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
33675 return ptr + len;
33676 }
33677
33678 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33679 +#define MAX_INSTR_CODE_SIZE 96
33680 +#else
33681 +#define MAX_INSTR_CODE_SIZE 64
33682 +#endif
33683 +
33684 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
33685
33686 #define EMIT1(b1) EMIT(b1, 1)
33687 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
33688 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
33689 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
33690 +
33691 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33692 +/* original constant will appear in ecx */
33693 +#define DILUTE_CONST_SEQUENCE(_off, _key) \
33694 +do { \
33695 + /* mov ecx, randkey */ \
33696 + EMIT1(0xb9); \
33697 + EMIT(_key, 4); \
33698 + /* xor ecx, randkey ^ off */ \
33699 + EMIT2(0x81, 0xf1); \
33700 + EMIT((_key) ^ (_off), 4); \
33701 +} while (0)
33702 +
33703 +#define EMIT1_off32(b1, _off) \
33704 +do { \
33705 + switch (b1) { \
33706 + case 0x05: /* add eax, imm32 */ \
33707 + case 0x2d: /* sub eax, imm32 */ \
33708 + case 0x25: /* and eax, imm32 */ \
33709 + case 0x0d: /* or eax, imm32 */ \
33710 + case 0xb8: /* mov eax, imm32 */ \
33711 + case 0x35: /* xor eax, imm32 */ \
33712 + case 0x3d: /* cmp eax, imm32 */ \
33713 + case 0xa9: /* test eax, imm32 */ \
33714 + DILUTE_CONST_SEQUENCE(_off, randkey); \
33715 + EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
33716 + break; \
33717 + case 0xbb: /* mov ebx, imm32 */ \
33718 + DILUTE_CONST_SEQUENCE(_off, randkey); \
33719 + /* mov ebx, ecx */ \
33720 + EMIT2(0x89, 0xcb); \
33721 + break; \
33722 + case 0xbe: /* mov esi, imm32 */ \
33723 + DILUTE_CONST_SEQUENCE(_off, randkey); \
33724 + /* mov esi, ecx */ \
33725 + EMIT2(0x89, 0xce); \
33726 + break; \
33727 + case 0xe8: /* call rel imm32, always to known funcs */ \
33728 + EMIT1(b1); \
33729 + EMIT(_off, 4); \
33730 + break; \
33731 + case 0xe9: /* jmp rel imm32 */ \
33732 + EMIT1(b1); \
33733 + EMIT(_off, 4); \
33734 + /* prevent fall-through, we're not called if off = 0 */ \
33735 + EMIT(0xcccccccc, 4); \
33736 + EMIT(0xcccccccc, 4); \
33737 + break; \
33738 + default: \
33739 + BUILD_BUG(); \
33740 + } \
33741 +} while (0)
33742 +
33743 +#define EMIT2_off32(b1, b2, _off) \
33744 +do { \
33745 + if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
33746 + EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
33747 + EMIT(randkey, 4); \
33748 + EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
33749 + EMIT((_off) - randkey, 4); \
33750 + } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
33751 + DILUTE_CONST_SEQUENCE(_off, randkey); \
33752 + /* imul eax, ecx */ \
33753 + EMIT3(0x0f, 0xaf, 0xc1); \
33754 + } else { \
33755 + BUILD_BUG(); \
33756 + } \
33757 +} while (0)
33758 +#else
33759 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
33760 +#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
33761 +#endif
33762
33763 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
33764 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
33765 @@ -91,6 +168,24 @@ do { \
33766 #define X86_JBE 0x76
33767 #define X86_JA 0x77
33768
33769 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33770 +#define APPEND_FLOW_VERIFY() \
33771 +do { \
33772 + /* mov ecx, randkey */ \
33773 + EMIT1(0xb9); \
33774 + EMIT(randkey, 4); \
33775 + /* cmp ecx, randkey */ \
33776 + EMIT2(0x81, 0xf9); \
33777 + EMIT(randkey, 4); \
33778 + /* jz after 8 int 3s */ \
33779 + EMIT2(0x74, 0x08); \
33780 + EMIT(0xcccccccc, 4); \
33781 + EMIT(0xcccccccc, 4); \
33782 +} while (0)
33783 +#else
33784 +#define APPEND_FLOW_VERIFY() do { } while (0)
33785 +#endif
33786 +
33787 #define EMIT_COND_JMP(op, offset) \
33788 do { \
33789 if (is_near(offset)) \
33790 @@ -98,6 +193,7 @@ do { \
33791 else { \
33792 EMIT2(0x0f, op + 0x10); \
33793 EMIT(offset, 4); /* jxx .+off32 */ \
33794 + APPEND_FLOW_VERIFY(); \
33795 } \
33796 } while (0)
33797
33798 @@ -145,55 +241,54 @@ static int pkt_type_offset(void)
33799 return -1;
33800 }
33801
33802 -struct bpf_binary_header {
33803 - unsigned int pages;
33804 - /* Note : for security reasons, bpf code will follow a randomly
33805 - * sized amount of int3 instructions
33806 - */
33807 - u8 image[];
33808 -};
33809 -
33810 -static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
33811 +/* Note : for security reasons, bpf code will follow a randomly
33812 + * sized amount of int3 instructions
33813 + */
33814 +static u8 *bpf_alloc_binary(unsigned int proglen,
33815 u8 **image_ptr)
33816 {
33817 unsigned int sz, hole;
33818 - struct bpf_binary_header *header;
33819 + u8 *header;
33820
33821 /* Most of BPF filters are really small,
33822 * but if some of them fill a page, allow at least
33823 * 128 extra bytes to insert a random section of int3
33824 */
33825 - sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
33826 - header = module_alloc(sz);
33827 + sz = round_up(proglen + 128, PAGE_SIZE);
33828 + header = module_alloc_exec(sz);
33829 if (!header)
33830 return NULL;
33831
33832 + pax_open_kernel();
33833 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
33834 + pax_close_kernel();
33835
33836 - header->pages = sz / PAGE_SIZE;
33837 - hole = sz - (proglen + sizeof(*header));
33838 + hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
33839
33840 /* insert a random number of int3 instructions before BPF code */
33841 - *image_ptr = &header->image[prandom_u32() % hole];
33842 + *image_ptr = &header[prandom_u32() % hole];
33843 return header;
33844 }
33845
33846 void bpf_jit_compile(struct sk_filter *fp)
33847 {
33848 - u8 temp[64];
33849 + u8 temp[MAX_INSTR_CODE_SIZE];
33850 u8 *prog;
33851 unsigned int proglen, oldproglen = 0;
33852 int ilen, i;
33853 int t_offset, f_offset;
33854 u8 t_op, f_op, seen = 0, pass;
33855 u8 *image = NULL;
33856 - struct bpf_binary_header *header = NULL;
33857 + u8 *header = NULL;
33858 u8 *func;
33859 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
33860 unsigned int cleanup_addr; /* epilogue code offset */
33861 unsigned int *addrs;
33862 const struct sock_filter *filter = fp->insns;
33863 int flen = fp->len;
33864 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33865 + unsigned int randkey;
33866 +#endif
33867
33868 if (!bpf_jit_enable)
33869 return;
33870 @@ -203,10 +298,10 @@ void bpf_jit_compile(struct sk_filter *fp)
33871 return;
33872
33873 /* Before first pass, make a rough estimation of addrs[]
33874 - * each bpf instruction is translated to less than 64 bytes
33875 + * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
33876 */
33877 for (proglen = 0, i = 0; i < flen; i++) {
33878 - proglen += 64;
33879 + proglen += MAX_INSTR_CODE_SIZE;
33880 addrs[i] = proglen;
33881 }
33882 cleanup_addr = proglen; /* epilogue address */
33883 @@ -285,6 +380,10 @@ void bpf_jit_compile(struct sk_filter *fp)
33884 for (i = 0; i < flen; i++) {
33885 unsigned int K = filter[i].k;
33886
33887 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33888 + randkey = prandom_u32();
33889 +#endif
33890 +
33891 switch (filter[i].code) {
33892 case BPF_S_ALU_ADD_X: /* A += X; */
33893 seen |= SEEN_XREG;
33894 @@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
33895 case BPF_S_ALU_MUL_K: /* A *= K */
33896 if (is_imm8(K))
33897 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
33898 - else {
33899 - EMIT2(0x69, 0xc0); /* imul imm32,%eax */
33900 - EMIT(K, 4);
33901 - }
33902 + else
33903 + EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
33904 break;
33905 case BPF_S_ALU_DIV_X: /* A /= X; */
33906 seen |= SEEN_XREG;
33907 @@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33908 break;
33909 }
33910 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33911 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33912 + DILUTE_CONST_SEQUENCE(K, randkey);
33913 +#else
33914 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33915 +#endif
33916 EMIT2(0xf7, 0xf1); /* div %ecx */
33917 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
33918 break;
33919 @@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33920 if (K == 1)
33921 break;
33922 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33923 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33924 + DILUTE_CONST_SEQUENCE(K, randkey);
33925 +#else
33926 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33927 +#endif
33928 EMIT2(0xf7, 0xf1); /* div %ecx */
33929 break;
33930 case BPF_S_ALU_AND_X:
33931 @@ -643,8 +748,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
33932 if (is_imm8(K)) {
33933 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
33934 } else {
33935 - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
33936 - EMIT(K, 4);
33937 + EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
33938 }
33939 } else {
33940 EMIT2(0x89,0xde); /* mov %ebx,%esi */
33941 @@ -734,10 +838,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33942 if (unlikely(proglen + ilen > oldproglen)) {
33943 pr_err("bpb_jit_compile fatal error\n");
33944 kfree(addrs);
33945 - module_free(NULL, header);
33946 + module_free_exec(NULL, image);
33947 return;
33948 }
33949 + pax_open_kernel();
33950 memcpy(image + proglen, temp, ilen);
33951 + pax_close_kernel();
33952 }
33953 proglen += ilen;
33954 addrs[i] = proglen;
33955 @@ -770,7 +876,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33956
33957 if (image) {
33958 bpf_flush_icache(header, image + proglen);
33959 - set_memory_ro((unsigned long)header, header->pages);
33960 fp->bpf_func = (void *)image;
33961 }
33962 out:
33963 @@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
33964 {
33965 struct sk_filter *fp = container_of(work, struct sk_filter, work);
33966 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
33967 - struct bpf_binary_header *header = (void *)addr;
33968
33969 - set_memory_rw(addr, header->pages);
33970 - module_free(NULL, header);
33971 + set_memory_rw(addr, 1);
33972 + module_free_exec(NULL, (void *)addr);
33973 kfree(fp);
33974 }
33975
33976 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
33977 index 5d04be5..2beeaa2 100644
33978 --- a/arch/x86/oprofile/backtrace.c
33979 +++ b/arch/x86/oprofile/backtrace.c
33980 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
33981 struct stack_frame_ia32 *fp;
33982 unsigned long bytes;
33983
33984 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33985 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33986 if (bytes != 0)
33987 return NULL;
33988
33989 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
33990 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
33991
33992 oprofile_add_trace(bufhead[0].return_address);
33993
33994 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
33995 struct stack_frame bufhead[2];
33996 unsigned long bytes;
33997
33998 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33999 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34000 if (bytes != 0)
34001 return NULL;
34002
34003 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34004 {
34005 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34006
34007 - if (!user_mode_vm(regs)) {
34008 + if (!user_mode(regs)) {
34009 unsigned long stack = kernel_stack_pointer(regs);
34010 if (depth)
34011 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34012 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34013 index 6890d84..1dad1f1 100644
34014 --- a/arch/x86/oprofile/nmi_int.c
34015 +++ b/arch/x86/oprofile/nmi_int.c
34016 @@ -23,6 +23,7 @@
34017 #include <asm/nmi.h>
34018 #include <asm/msr.h>
34019 #include <asm/apic.h>
34020 +#include <asm/pgtable.h>
34021
34022 #include "op_counter.h"
34023 #include "op_x86_model.h"
34024 @@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34025 if (ret)
34026 return ret;
34027
34028 - if (!model->num_virt_counters)
34029 - model->num_virt_counters = model->num_counters;
34030 + if (!model->num_virt_counters) {
34031 + pax_open_kernel();
34032 + *(unsigned int *)&model->num_virt_counters = model->num_counters;
34033 + pax_close_kernel();
34034 + }
34035
34036 mux_init(ops);
34037
34038 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34039 index 50d86c0..7985318 100644
34040 --- a/arch/x86/oprofile/op_model_amd.c
34041 +++ b/arch/x86/oprofile/op_model_amd.c
34042 @@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34043 num_counters = AMD64_NUM_COUNTERS;
34044 }
34045
34046 - op_amd_spec.num_counters = num_counters;
34047 - op_amd_spec.num_controls = num_counters;
34048 - op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34049 + pax_open_kernel();
34050 + *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34051 + *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34052 + *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34053 + pax_close_kernel();
34054
34055 return 0;
34056 }
34057 diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34058 index d90528e..0127e2b 100644
34059 --- a/arch/x86/oprofile/op_model_ppro.c
34060 +++ b/arch/x86/oprofile/op_model_ppro.c
34061 @@ -19,6 +19,7 @@
34062 #include <asm/msr.h>
34063 #include <asm/apic.h>
34064 #include <asm/nmi.h>
34065 +#include <asm/pgtable.h>
34066
34067 #include "op_x86_model.h"
34068 #include "op_counter.h"
34069 @@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34070
34071 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34072
34073 - op_arch_perfmon_spec.num_counters = num_counters;
34074 - op_arch_perfmon_spec.num_controls = num_counters;
34075 + pax_open_kernel();
34076 + *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34077 + *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34078 + pax_close_kernel();
34079 }
34080
34081 static int arch_perfmon_init(struct oprofile_operations *ignore)
34082 diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34083 index 71e8a67..6a313bb 100644
34084 --- a/arch/x86/oprofile/op_x86_model.h
34085 +++ b/arch/x86/oprofile/op_x86_model.h
34086 @@ -52,7 +52,7 @@ struct op_x86_model_spec {
34087 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34088 struct op_msrs const * const msrs);
34089 #endif
34090 -};
34091 +} __do_const;
34092
34093 struct op_counter_config;
34094
34095 diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34096 index 51384ca..a25f51e 100644
34097 --- a/arch/x86/pci/intel_mid_pci.c
34098 +++ b/arch/x86/pci/intel_mid_pci.c
34099 @@ -241,7 +241,7 @@ int __init intel_mid_pci_init(void)
34100 pr_info("Intel MID platform detected, using MID PCI ops\n");
34101 pci_mmcfg_late_init();
34102 pcibios_enable_irq = intel_mid_pci_irq_enable;
34103 - pci_root_ops = intel_mid_pci_ops;
34104 + memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34105 pci_soc_mode = 1;
34106 /* Continue with standard init */
34107 return 1;
34108 diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34109 index 372e9b8..e775a6c 100644
34110 --- a/arch/x86/pci/irq.c
34111 +++ b/arch/x86/pci/irq.c
34112 @@ -50,7 +50,7 @@ struct irq_router {
34113 struct irq_router_handler {
34114 u16 vendor;
34115 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34116 -};
34117 +} __do_const;
34118
34119 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34120 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
34121 @@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34122 return 0;
34123 }
34124
34125 -static __initdata struct irq_router_handler pirq_routers[] = {
34126 +static __initconst const struct irq_router_handler pirq_routers[] = {
34127 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34128 { PCI_VENDOR_ID_AL, ali_router_probe },
34129 { PCI_VENDOR_ID_ITE, ite_router_probe },
34130 @@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
34131 static void __init pirq_find_router(struct irq_router *r)
34132 {
34133 struct irq_routing_table *rt = pirq_table;
34134 - struct irq_router_handler *h;
34135 + const struct irq_router_handler *h;
34136
34137 #ifdef CONFIG_PCI_BIOS
34138 if (!rt->signature) {
34139 @@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34140 return 0;
34141 }
34142
34143 -static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34144 +static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34145 {
34146 .callback = fix_broken_hp_bios_irq9,
34147 .ident = "HP Pavilion N5400 Series Laptop",
34148 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34149 index c77b24a..c979855 100644
34150 --- a/arch/x86/pci/pcbios.c
34151 +++ b/arch/x86/pci/pcbios.c
34152 @@ -79,7 +79,7 @@ union bios32 {
34153 static struct {
34154 unsigned long address;
34155 unsigned short segment;
34156 -} bios32_indirect = { 0, __KERNEL_CS };
34157 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
34158
34159 /*
34160 * Returns the entry point for the given service, NULL on error
34161 @@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
34162 unsigned long length; /* %ecx */
34163 unsigned long entry; /* %edx */
34164 unsigned long flags;
34165 + struct desc_struct d, *gdt;
34166
34167 local_irq_save(flags);
34168 - __asm__("lcall *(%%edi); cld"
34169 +
34170 + gdt = get_cpu_gdt_table(smp_processor_id());
34171 +
34172 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34173 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34174 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34175 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34176 +
34177 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34178 : "=a" (return_code),
34179 "=b" (address),
34180 "=c" (length),
34181 "=d" (entry)
34182 : "0" (service),
34183 "1" (0),
34184 - "D" (&bios32_indirect));
34185 + "D" (&bios32_indirect),
34186 + "r"(__PCIBIOS_DS)
34187 + : "memory");
34188 +
34189 + pax_open_kernel();
34190 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34191 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34192 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34193 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34194 + pax_close_kernel();
34195 +
34196 local_irq_restore(flags);
34197
34198 switch (return_code) {
34199 - case 0:
34200 - return address + entry;
34201 - case 0x80: /* Not present */
34202 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34203 - return 0;
34204 - default: /* Shouldn't happen */
34205 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34206 - service, return_code);
34207 + case 0: {
34208 + int cpu;
34209 + unsigned char flags;
34210 +
34211 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34212 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34213 + printk(KERN_WARNING "bios32_service: not valid\n");
34214 return 0;
34215 + }
34216 + address = address + PAGE_OFFSET;
34217 + length += 16UL; /* some BIOSs underreport this... */
34218 + flags = 4;
34219 + if (length >= 64*1024*1024) {
34220 + length >>= PAGE_SHIFT;
34221 + flags |= 8;
34222 + }
34223 +
34224 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34225 + gdt = get_cpu_gdt_table(cpu);
34226 + pack_descriptor(&d, address, length, 0x9b, flags);
34227 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34228 + pack_descriptor(&d, address, length, 0x93, flags);
34229 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34230 + }
34231 + return entry;
34232 + }
34233 + case 0x80: /* Not present */
34234 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34235 + return 0;
34236 + default: /* Shouldn't happen */
34237 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34238 + service, return_code);
34239 + return 0;
34240 }
34241 }
34242
34243 static struct {
34244 unsigned long address;
34245 unsigned short segment;
34246 -} pci_indirect = { 0, __KERNEL_CS };
34247 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34248
34249 -static int pci_bios_present;
34250 +static int pci_bios_present __read_only;
34251
34252 static int check_pcibios(void)
34253 {
34254 @@ -131,11 +174,13 @@ static int check_pcibios(void)
34255 unsigned long flags, pcibios_entry;
34256
34257 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34258 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34259 + pci_indirect.address = pcibios_entry;
34260
34261 local_irq_save(flags);
34262 - __asm__(
34263 - "lcall *(%%edi); cld\n\t"
34264 + __asm__("movw %w6, %%ds\n\t"
34265 + "lcall *%%ss:(%%edi); cld\n\t"
34266 + "push %%ss\n\t"
34267 + "pop %%ds\n\t"
34268 "jc 1f\n\t"
34269 "xor %%ah, %%ah\n"
34270 "1:"
34271 @@ -144,7 +189,8 @@ static int check_pcibios(void)
34272 "=b" (ebx),
34273 "=c" (ecx)
34274 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34275 - "D" (&pci_indirect)
34276 + "D" (&pci_indirect),
34277 + "r" (__PCIBIOS_DS)
34278 : "memory");
34279 local_irq_restore(flags);
34280
34281 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34282
34283 switch (len) {
34284 case 1:
34285 - __asm__("lcall *(%%esi); cld\n\t"
34286 + __asm__("movw %w6, %%ds\n\t"
34287 + "lcall *%%ss:(%%esi); cld\n\t"
34288 + "push %%ss\n\t"
34289 + "pop %%ds\n\t"
34290 "jc 1f\n\t"
34291 "xor %%ah, %%ah\n"
34292 "1:"
34293 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34294 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34295 "b" (bx),
34296 "D" ((long)reg),
34297 - "S" (&pci_indirect));
34298 + "S" (&pci_indirect),
34299 + "r" (__PCIBIOS_DS));
34300 /*
34301 * Zero-extend the result beyond 8 bits, do not trust the
34302 * BIOS having done it:
34303 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34304 *value &= 0xff;
34305 break;
34306 case 2:
34307 - __asm__("lcall *(%%esi); cld\n\t"
34308 + __asm__("movw %w6, %%ds\n\t"
34309 + "lcall *%%ss:(%%esi); cld\n\t"
34310 + "push %%ss\n\t"
34311 + "pop %%ds\n\t"
34312 "jc 1f\n\t"
34313 "xor %%ah, %%ah\n"
34314 "1:"
34315 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34316 : "1" (PCIBIOS_READ_CONFIG_WORD),
34317 "b" (bx),
34318 "D" ((long)reg),
34319 - "S" (&pci_indirect));
34320 + "S" (&pci_indirect),
34321 + "r" (__PCIBIOS_DS));
34322 /*
34323 * Zero-extend the result beyond 16 bits, do not trust the
34324 * BIOS having done it:
34325 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34326 *value &= 0xffff;
34327 break;
34328 case 4:
34329 - __asm__("lcall *(%%esi); cld\n\t"
34330 + __asm__("movw %w6, %%ds\n\t"
34331 + "lcall *%%ss:(%%esi); cld\n\t"
34332 + "push %%ss\n\t"
34333 + "pop %%ds\n\t"
34334 "jc 1f\n\t"
34335 "xor %%ah, %%ah\n"
34336 "1:"
34337 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34338 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34339 "b" (bx),
34340 "D" ((long)reg),
34341 - "S" (&pci_indirect));
34342 + "S" (&pci_indirect),
34343 + "r" (__PCIBIOS_DS));
34344 break;
34345 }
34346
34347 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34348
34349 switch (len) {
34350 case 1:
34351 - __asm__("lcall *(%%esi); cld\n\t"
34352 + __asm__("movw %w6, %%ds\n\t"
34353 + "lcall *%%ss:(%%esi); cld\n\t"
34354 + "push %%ss\n\t"
34355 + "pop %%ds\n\t"
34356 "jc 1f\n\t"
34357 "xor %%ah, %%ah\n"
34358 "1:"
34359 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34360 "c" (value),
34361 "b" (bx),
34362 "D" ((long)reg),
34363 - "S" (&pci_indirect));
34364 + "S" (&pci_indirect),
34365 + "r" (__PCIBIOS_DS));
34366 break;
34367 case 2:
34368 - __asm__("lcall *(%%esi); cld\n\t"
34369 + __asm__("movw %w6, %%ds\n\t"
34370 + "lcall *%%ss:(%%esi); cld\n\t"
34371 + "push %%ss\n\t"
34372 + "pop %%ds\n\t"
34373 "jc 1f\n\t"
34374 "xor %%ah, %%ah\n"
34375 "1:"
34376 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34377 "c" (value),
34378 "b" (bx),
34379 "D" ((long)reg),
34380 - "S" (&pci_indirect));
34381 + "S" (&pci_indirect),
34382 + "r" (__PCIBIOS_DS));
34383 break;
34384 case 4:
34385 - __asm__("lcall *(%%esi); cld\n\t"
34386 + __asm__("movw %w6, %%ds\n\t"
34387 + "lcall *%%ss:(%%esi); cld\n\t"
34388 + "push %%ss\n\t"
34389 + "pop %%ds\n\t"
34390 "jc 1f\n\t"
34391 "xor %%ah, %%ah\n"
34392 "1:"
34393 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34394 "c" (value),
34395 "b" (bx),
34396 "D" ((long)reg),
34397 - "S" (&pci_indirect));
34398 + "S" (&pci_indirect),
34399 + "r" (__PCIBIOS_DS));
34400 break;
34401 }
34402
34403 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34404
34405 DBG("PCI: Fetching IRQ routing table... ");
34406 __asm__("push %%es\n\t"
34407 + "movw %w8, %%ds\n\t"
34408 "push %%ds\n\t"
34409 "pop %%es\n\t"
34410 - "lcall *(%%esi); cld\n\t"
34411 + "lcall *%%ss:(%%esi); cld\n\t"
34412 "pop %%es\n\t"
34413 + "push %%ss\n\t"
34414 + "pop %%ds\n"
34415 "jc 1f\n\t"
34416 "xor %%ah, %%ah\n"
34417 "1:"
34418 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34419 "1" (0),
34420 "D" ((long) &opt),
34421 "S" (&pci_indirect),
34422 - "m" (opt)
34423 + "m" (opt),
34424 + "r" (__PCIBIOS_DS)
34425 : "memory");
34426 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34427 if (ret & 0xff00)
34428 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34429 {
34430 int ret;
34431
34432 - __asm__("lcall *(%%esi); cld\n\t"
34433 + __asm__("movw %w5, %%ds\n\t"
34434 + "lcall *%%ss:(%%esi); cld\n\t"
34435 + "push %%ss\n\t"
34436 + "pop %%ds\n"
34437 "jc 1f\n\t"
34438 "xor %%ah, %%ah\n"
34439 "1:"
34440 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34441 : "0" (PCIBIOS_SET_PCI_HW_INT),
34442 "b" ((dev->bus->number << 8) | dev->devfn),
34443 "c" ((irq << 8) | (pin + 10)),
34444 - "S" (&pci_indirect));
34445 + "S" (&pci_indirect),
34446 + "r" (__PCIBIOS_DS));
34447 return !(ret & 0xff00);
34448 }
34449 EXPORT_SYMBOL(pcibios_set_irq_routing);
34450 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34451 index 40e4469..d915bf9 100644
34452 --- a/arch/x86/platform/efi/efi_32.c
34453 +++ b/arch/x86/platform/efi/efi_32.c
34454 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
34455 {
34456 struct desc_ptr gdt_descr;
34457
34458 +#ifdef CONFIG_PAX_KERNEXEC
34459 + struct desc_struct d;
34460 +#endif
34461 +
34462 local_irq_save(efi_rt_eflags);
34463
34464 load_cr3(initial_page_table);
34465 __flush_tlb_all();
34466
34467 +#ifdef CONFIG_PAX_KERNEXEC
34468 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34469 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34470 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34471 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34472 +#endif
34473 +
34474 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34475 gdt_descr.size = GDT_SIZE - 1;
34476 load_gdt(&gdt_descr);
34477 @@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
34478 {
34479 struct desc_ptr gdt_descr;
34480
34481 +#ifdef CONFIG_PAX_KERNEXEC
34482 + struct desc_struct d;
34483 +
34484 + memset(&d, 0, sizeof d);
34485 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34486 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34487 +#endif
34488 +
34489 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34490 gdt_descr.size = GDT_SIZE - 1;
34491 load_gdt(&gdt_descr);
34492
34493 +#ifdef CONFIG_PAX_PER_CPU_PGD
34494 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34495 +#else
34496 load_cr3(swapper_pg_dir);
34497 +#endif
34498 +
34499 __flush_tlb_all();
34500
34501 local_irq_restore(efi_rt_eflags);
34502 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34503 index 39a0e7f1..872396e 100644
34504 --- a/arch/x86/platform/efi/efi_64.c
34505 +++ b/arch/x86/platform/efi/efi_64.c
34506 @@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
34507 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34508 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34509 }
34510 +
34511 +#ifdef CONFIG_PAX_PER_CPU_PGD
34512 + load_cr3(swapper_pg_dir);
34513 +#endif
34514 +
34515 __flush_tlb_all();
34516 }
34517
34518 @@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
34519 for (pgd = 0; pgd < n_pgds; pgd++)
34520 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34521 kfree(save_pgd);
34522 +
34523 +#ifdef CONFIG_PAX_PER_CPU_PGD
34524 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34525 +#endif
34526 +
34527 __flush_tlb_all();
34528 local_irq_restore(efi_flags);
34529 early_code_mapping_set_exec(0);
34530 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34531 index fbe66e6..eae5e38 100644
34532 --- a/arch/x86/platform/efi/efi_stub_32.S
34533 +++ b/arch/x86/platform/efi/efi_stub_32.S
34534 @@ -6,7 +6,9 @@
34535 */
34536
34537 #include <linux/linkage.h>
34538 +#include <linux/init.h>
34539 #include <asm/page_types.h>
34540 +#include <asm/segment.h>
34541
34542 /*
34543 * efi_call_phys(void *, ...) is a function with variable parameters.
34544 @@ -20,7 +22,7 @@
34545 * service functions will comply with gcc calling convention, too.
34546 */
34547
34548 -.text
34549 +__INIT
34550 ENTRY(efi_call_phys)
34551 /*
34552 * 0. The function can only be called in Linux kernel. So CS has been
34553 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34554 * The mapping of lower virtual memory has been created in prelog and
34555 * epilog.
34556 */
34557 - movl $1f, %edx
34558 - subl $__PAGE_OFFSET, %edx
34559 - jmp *%edx
34560 +#ifdef CONFIG_PAX_KERNEXEC
34561 + movl $(__KERNEXEC_EFI_DS), %edx
34562 + mov %edx, %ds
34563 + mov %edx, %es
34564 + mov %edx, %ss
34565 + addl $2f,(1f)
34566 + ljmp *(1f)
34567 +
34568 +__INITDATA
34569 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34570 +.previous
34571 +
34572 +2:
34573 + subl $2b,(1b)
34574 +#else
34575 + jmp 1f-__PAGE_OFFSET
34576 1:
34577 +#endif
34578
34579 /*
34580 * 2. Now on the top of stack is the return
34581 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34582 * parameter 2, ..., param n. To make things easy, we save the return
34583 * address of efi_call_phys in a global variable.
34584 */
34585 - popl %edx
34586 - movl %edx, saved_return_addr
34587 - /* get the function pointer into ECX*/
34588 - popl %ecx
34589 - movl %ecx, efi_rt_function_ptr
34590 - movl $2f, %edx
34591 - subl $__PAGE_OFFSET, %edx
34592 - pushl %edx
34593 + popl (saved_return_addr)
34594 + popl (efi_rt_function_ptr)
34595
34596 /*
34597 * 3. Clear PG bit in %CR0.
34598 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34599 /*
34600 * 5. Call the physical function.
34601 */
34602 - jmp *%ecx
34603 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
34604
34605 -2:
34606 /*
34607 * 6. After EFI runtime service returns, control will return to
34608 * following instruction. We'd better readjust stack pointer first.
34609 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34610 movl %cr0, %edx
34611 orl $0x80000000, %edx
34612 movl %edx, %cr0
34613 - jmp 1f
34614 -1:
34615 +
34616 /*
34617 * 8. Now restore the virtual mode from flat mode by
34618 * adding EIP with PAGE_OFFSET.
34619 */
34620 - movl $1f, %edx
34621 - jmp *%edx
34622 +#ifdef CONFIG_PAX_KERNEXEC
34623 + movl $(__KERNEL_DS), %edx
34624 + mov %edx, %ds
34625 + mov %edx, %es
34626 + mov %edx, %ss
34627 + ljmp $(__KERNEL_CS),$1f
34628 +#else
34629 + jmp 1f+__PAGE_OFFSET
34630 +#endif
34631 1:
34632
34633 /*
34634 * 9. Balance the stack. And because EAX contain the return value,
34635 * we'd better not clobber it.
34636 */
34637 - leal efi_rt_function_ptr, %edx
34638 - movl (%edx), %ecx
34639 - pushl %ecx
34640 + pushl (efi_rt_function_ptr)
34641
34642 /*
34643 - * 10. Push the saved return address onto the stack and return.
34644 + * 10. Return to the saved return address.
34645 */
34646 - leal saved_return_addr, %edx
34647 - movl (%edx), %ecx
34648 - pushl %ecx
34649 - ret
34650 + jmpl *(saved_return_addr)
34651 ENDPROC(efi_call_phys)
34652 .previous
34653
34654 -.data
34655 +__INITDATA
34656 saved_return_addr:
34657 .long 0
34658 efi_rt_function_ptr:
34659 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34660 index 4c07cca..2c8427d 100644
34661 --- a/arch/x86/platform/efi/efi_stub_64.S
34662 +++ b/arch/x86/platform/efi/efi_stub_64.S
34663 @@ -7,6 +7,7 @@
34664 */
34665
34666 #include <linux/linkage.h>
34667 +#include <asm/alternative-asm.h>
34668
34669 #define SAVE_XMM \
34670 mov %rsp, %rax; \
34671 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
34672 call *%rdi
34673 addq $32, %rsp
34674 RESTORE_XMM
34675 + pax_force_retaddr 0, 1
34676 ret
34677 ENDPROC(efi_call0)
34678
34679 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
34680 call *%rdi
34681 addq $32, %rsp
34682 RESTORE_XMM
34683 + pax_force_retaddr 0, 1
34684 ret
34685 ENDPROC(efi_call1)
34686
34687 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
34688 call *%rdi
34689 addq $32, %rsp
34690 RESTORE_XMM
34691 + pax_force_retaddr 0, 1
34692 ret
34693 ENDPROC(efi_call2)
34694
34695 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
34696 call *%rdi
34697 addq $32, %rsp
34698 RESTORE_XMM
34699 + pax_force_retaddr 0, 1
34700 ret
34701 ENDPROC(efi_call3)
34702
34703 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
34704 call *%rdi
34705 addq $32, %rsp
34706 RESTORE_XMM
34707 + pax_force_retaddr 0, 1
34708 ret
34709 ENDPROC(efi_call4)
34710
34711 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
34712 call *%rdi
34713 addq $48, %rsp
34714 RESTORE_XMM
34715 + pax_force_retaddr 0, 1
34716 ret
34717 ENDPROC(efi_call5)
34718
34719 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
34720 call *%rdi
34721 addq $48, %rsp
34722 RESTORE_XMM
34723 + pax_force_retaddr 0, 1
34724 ret
34725 ENDPROC(efi_call6)
34726 diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34727 index f90e290..435f0dd 100644
34728 --- a/arch/x86/platform/intel-mid/intel-mid.c
34729 +++ b/arch/x86/platform/intel-mid/intel-mid.c
34730 @@ -65,9 +65,10 @@ static void intel_mid_power_off(void)
34731 {
34732 }
34733
34734 -static void intel_mid_reboot(void)
34735 +static void __noreturn intel_mid_reboot(void)
34736 {
34737 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34738 + BUG();
34739 }
34740
34741 static unsigned long __init intel_mid_calibrate_tsc(void)
34742 diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
34743 index d6ee929..3637cb5 100644
34744 --- a/arch/x86/platform/olpc/olpc_dt.c
34745 +++ b/arch/x86/platform/olpc/olpc_dt.c
34746 @@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
34747 return res;
34748 }
34749
34750 -static struct of_pdt_ops prom_olpc_ops __initdata = {
34751 +static struct of_pdt_ops prom_olpc_ops __initconst = {
34752 .nextprop = olpc_dt_nextprop,
34753 .getproplen = olpc_dt_getproplen,
34754 .getproperty = olpc_dt_getproperty,
34755 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
34756 index 424f4c9..f2a2988 100644
34757 --- a/arch/x86/power/cpu.c
34758 +++ b/arch/x86/power/cpu.c
34759 @@ -137,11 +137,8 @@ static void do_fpu_end(void)
34760 static void fix_processor_context(void)
34761 {
34762 int cpu = smp_processor_id();
34763 - struct tss_struct *t = &per_cpu(init_tss, cpu);
34764 -#ifdef CONFIG_X86_64
34765 - struct desc_struct *desc = get_cpu_gdt_table(cpu);
34766 - tss_desc tss;
34767 -#endif
34768 + struct tss_struct *t = init_tss + cpu;
34769 +
34770 set_tss_desc(cpu, t); /*
34771 * This just modifies memory; should not be
34772 * necessary. But... This is necessary, because
34773 @@ -150,10 +147,6 @@ static void fix_processor_context(void)
34774 */
34775
34776 #ifdef CONFIG_X86_64
34777 - memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
34778 - tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
34779 - write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
34780 -
34781 syscall_init(); /* This sets MSR_*STAR and related */
34782 #endif
34783 load_TR_desc(); /* This does ltr */
34784 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
34785 index a44f457..9140171 100644
34786 --- a/arch/x86/realmode/init.c
34787 +++ b/arch/x86/realmode/init.c
34788 @@ -70,7 +70,13 @@ void __init setup_real_mode(void)
34789 __va(real_mode_header->trampoline_header);
34790
34791 #ifdef CONFIG_X86_32
34792 - trampoline_header->start = __pa_symbol(startup_32_smp);
34793 + trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
34794 +
34795 +#ifdef CONFIG_PAX_KERNEXEC
34796 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
34797 +#endif
34798 +
34799 + trampoline_header->boot_cs = __BOOT_CS;
34800 trampoline_header->gdt_limit = __BOOT_DS + 7;
34801 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
34802 #else
34803 @@ -86,7 +92,7 @@ void __init setup_real_mode(void)
34804 *trampoline_cr4_features = read_cr4();
34805
34806 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
34807 - trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
34808 + trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
34809 trampoline_pgd[511] = init_level4_pgt[511].pgd;
34810 #endif
34811 }
34812 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
34813 index 9cac825..4890b25 100644
34814 --- a/arch/x86/realmode/rm/Makefile
34815 +++ b/arch/x86/realmode/rm/Makefile
34816 @@ -79,5 +79,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
34817 $(call cc-option, -fno-unit-at-a-time)) \
34818 $(call cc-option, -fno-stack-protector) \
34819 $(call cc-option, -mpreferred-stack-boundary=2)
34820 +ifdef CONSTIFY_PLUGIN
34821 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
34822 +endif
34823 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
34824 GCOV_PROFILE := n
34825 diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
34826 index a28221d..93c40f1 100644
34827 --- a/arch/x86/realmode/rm/header.S
34828 +++ b/arch/x86/realmode/rm/header.S
34829 @@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
34830 #endif
34831 /* APM/BIOS reboot */
34832 .long pa_machine_real_restart_asm
34833 -#ifdef CONFIG_X86_64
34834 +#ifdef CONFIG_X86_32
34835 + .long __KERNEL_CS
34836 +#else
34837 .long __KERNEL32_CS
34838 #endif
34839 END(real_mode_header)
34840 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
34841 index c1b2791..f9e31c7 100644
34842 --- a/arch/x86/realmode/rm/trampoline_32.S
34843 +++ b/arch/x86/realmode/rm/trampoline_32.S
34844 @@ -25,6 +25,12 @@
34845 #include <asm/page_types.h>
34846 #include "realmode.h"
34847
34848 +#ifdef CONFIG_PAX_KERNEXEC
34849 +#define ta(X) (X)
34850 +#else
34851 +#define ta(X) (pa_ ## X)
34852 +#endif
34853 +
34854 .text
34855 .code16
34856
34857 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
34858
34859 cli # We should be safe anyway
34860
34861 - movl tr_start, %eax # where we need to go
34862 -
34863 movl $0xA5A5A5A5, trampoline_status
34864 # write marker for master knows we're running
34865
34866 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
34867 movw $1, %dx # protected mode (PE) bit
34868 lmsw %dx # into protected mode
34869
34870 - ljmpl $__BOOT_CS, $pa_startup_32
34871 + ljmpl *(trampoline_header)
34872
34873 .section ".text32","ax"
34874 .code32
34875 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
34876 .balign 8
34877 GLOBAL(trampoline_header)
34878 tr_start: .space 4
34879 - tr_gdt_pad: .space 2
34880 + tr_boot_cs: .space 2
34881 tr_gdt: .space 6
34882 END(trampoline_header)
34883
34884 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
34885 index bb360dc..d0fd8f8 100644
34886 --- a/arch/x86/realmode/rm/trampoline_64.S
34887 +++ b/arch/x86/realmode/rm/trampoline_64.S
34888 @@ -94,6 +94,7 @@ ENTRY(startup_32)
34889 movl %edx, %gs
34890
34891 movl pa_tr_cr4, %eax
34892 + andl $~X86_CR4_PCIDE, %eax
34893 movl %eax, %cr4 # Enable PAE mode
34894
34895 # Setup trampoline 4 level pagetables
34896 @@ -107,7 +108,7 @@ ENTRY(startup_32)
34897 wrmsr
34898
34899 # Enable paging and in turn activate Long Mode
34900 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
34901 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
34902 movl %eax, %cr0
34903
34904 /*
34905 diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
34906 index e812034..c747134 100644
34907 --- a/arch/x86/tools/Makefile
34908 +++ b/arch/x86/tools/Makefile
34909 @@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
34910
34911 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
34912
34913 -HOST_EXTRACFLAGS += -I$(srctree)/tools/include
34914 +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
34915 hostprogs-y += relocs
34916 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
34917 relocs: $(obj)/relocs
34918 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
34919 index f7bab68..b6d9886 100644
34920 --- a/arch/x86/tools/relocs.c
34921 +++ b/arch/x86/tools/relocs.c
34922 @@ -1,5 +1,7 @@
34923 /* This is included from relocs_32/64.c */
34924
34925 +#include "../../../include/generated/autoconf.h"
34926 +
34927 #define ElfW(type) _ElfW(ELF_BITS, type)
34928 #define _ElfW(bits, type) __ElfW(bits, type)
34929 #define __ElfW(bits, type) Elf##bits##_##type
34930 @@ -11,6 +13,7 @@
34931 #define Elf_Sym ElfW(Sym)
34932
34933 static Elf_Ehdr ehdr;
34934 +static Elf_Phdr *phdr;
34935
34936 struct relocs {
34937 uint32_t *offset;
34938 @@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
34939 }
34940 }
34941
34942 +static void read_phdrs(FILE *fp)
34943 +{
34944 + unsigned int i;
34945 +
34946 + phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
34947 + if (!phdr) {
34948 + die("Unable to allocate %d program headers\n",
34949 + ehdr.e_phnum);
34950 + }
34951 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
34952 + die("Seek to %d failed: %s\n",
34953 + ehdr.e_phoff, strerror(errno));
34954 + }
34955 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
34956 + die("Cannot read ELF program headers: %s\n",
34957 + strerror(errno));
34958 + }
34959 + for(i = 0; i < ehdr.e_phnum; i++) {
34960 + phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
34961 + phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
34962 + phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
34963 + phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
34964 + phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
34965 + phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
34966 + phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
34967 + phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
34968 + }
34969 +
34970 +}
34971 +
34972 static void read_shdrs(FILE *fp)
34973 {
34974 - int i;
34975 + unsigned int i;
34976 Elf_Shdr shdr;
34977
34978 secs = calloc(ehdr.e_shnum, sizeof(struct section));
34979 @@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
34980
34981 static void read_strtabs(FILE *fp)
34982 {
34983 - int i;
34984 + unsigned int i;
34985 for (i = 0; i < ehdr.e_shnum; i++) {
34986 struct section *sec = &secs[i];
34987 if (sec->shdr.sh_type != SHT_STRTAB) {
34988 @@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
34989
34990 static void read_symtabs(FILE *fp)
34991 {
34992 - int i,j;
34993 + unsigned int i,j;
34994 for (i = 0; i < ehdr.e_shnum; i++) {
34995 struct section *sec = &secs[i];
34996 if (sec->shdr.sh_type != SHT_SYMTAB) {
34997 @@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
34998 }
34999
35000
35001 -static void read_relocs(FILE *fp)
35002 +static void read_relocs(FILE *fp, int use_real_mode)
35003 {
35004 - int i,j;
35005 + unsigned int i,j;
35006 + uint32_t base;
35007 +
35008 for (i = 0; i < ehdr.e_shnum; i++) {
35009 struct section *sec = &secs[i];
35010 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35011 @@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
35012 die("Cannot read symbol table: %s\n",
35013 strerror(errno));
35014 }
35015 + base = 0;
35016 +
35017 +#ifdef CONFIG_X86_32
35018 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35019 + if (phdr[j].p_type != PT_LOAD )
35020 + continue;
35021 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35022 + continue;
35023 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35024 + break;
35025 + }
35026 +#endif
35027 +
35028 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35029 Elf_Rel *rel = &sec->reltab[j];
35030 - rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35031 + rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35032 rel->r_info = elf_xword_to_cpu(rel->r_info);
35033 #if (SHT_REL_TYPE == SHT_RELA)
35034 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35035 @@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
35036
35037 static void print_absolute_symbols(void)
35038 {
35039 - int i;
35040 + unsigned int i;
35041 const char *format;
35042
35043 if (ELF_BITS == 64)
35044 @@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
35045 for (i = 0; i < ehdr.e_shnum; i++) {
35046 struct section *sec = &secs[i];
35047 char *sym_strtab;
35048 - int j;
35049 + unsigned int j;
35050
35051 if (sec->shdr.sh_type != SHT_SYMTAB) {
35052 continue;
35053 @@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
35054
35055 static void print_absolute_relocs(void)
35056 {
35057 - int i, printed = 0;
35058 + unsigned int i, printed = 0;
35059 const char *format;
35060
35061 if (ELF_BITS == 64)
35062 @@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
35063 struct section *sec_applies, *sec_symtab;
35064 char *sym_strtab;
35065 Elf_Sym *sh_symtab;
35066 - int j;
35067 + unsigned int j;
35068 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35069 continue;
35070 }
35071 @@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35072 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35073 Elf_Sym *sym, const char *symname))
35074 {
35075 - int i;
35076 + unsigned int i;
35077 /* Walk through the relocations */
35078 for (i = 0; i < ehdr.e_shnum; i++) {
35079 char *sym_strtab;
35080 Elf_Sym *sh_symtab;
35081 struct section *sec_applies, *sec_symtab;
35082 - int j;
35083 + unsigned int j;
35084 struct section *sec = &secs[i];
35085
35086 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35087 @@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35088 {
35089 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35090 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35091 + char *sym_strtab = sec->link->link->strtab;
35092 +
35093 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35094 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35095 + return 0;
35096 +
35097 +#ifdef CONFIG_PAX_KERNEXEC
35098 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35099 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35100 + return 0;
35101 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35102 + return 0;
35103 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35104 + return 0;
35105 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35106 + return 0;
35107 +#endif
35108
35109 switch (r_type) {
35110 case R_386_NONE:
35111 @@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35112
35113 static void emit_relocs(int as_text, int use_real_mode)
35114 {
35115 - int i;
35116 + unsigned int i;
35117 int (*write_reloc)(uint32_t, FILE *) = write32;
35118 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35119 const char *symname);
35120 @@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35121 {
35122 regex_init(use_real_mode);
35123 read_ehdr(fp);
35124 + read_phdrs(fp);
35125 read_shdrs(fp);
35126 read_strtabs(fp);
35127 read_symtabs(fp);
35128 - read_relocs(fp);
35129 + read_relocs(fp, use_real_mode);
35130 if (ELF_BITS == 64)
35131 percpu_init();
35132 if (show_absolute_syms) {
35133 diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35134 index 80ffa5b..a33bd15 100644
35135 --- a/arch/x86/um/tls_32.c
35136 +++ b/arch/x86/um/tls_32.c
35137 @@ -260,7 +260,7 @@ out:
35138 if (unlikely(task == current &&
35139 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35140 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35141 - "without flushed TLS.", current->pid);
35142 + "without flushed TLS.", task_pid_nr(current));
35143 }
35144
35145 return 0;
35146 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35147 index fd14be1..e3c79c0 100644
35148 --- a/arch/x86/vdso/Makefile
35149 +++ b/arch/x86/vdso/Makefile
35150 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
35151 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35152 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35153
35154 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
35155 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
35156 GCOV_PROFILE := n
35157
35158 #
35159 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35160 index d6bfb87..876ee18 100644
35161 --- a/arch/x86/vdso/vdso32-setup.c
35162 +++ b/arch/x86/vdso/vdso32-setup.c
35163 @@ -25,6 +25,7 @@
35164 #include <asm/tlbflush.h>
35165 #include <asm/vdso.h>
35166 #include <asm/proto.h>
35167 +#include <asm/mman.h>
35168
35169 enum {
35170 VDSO_DISABLED = 0,
35171 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
35172 void enable_sep_cpu(void)
35173 {
35174 int cpu = get_cpu();
35175 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
35176 + struct tss_struct *tss = init_tss + cpu;
35177
35178 if (!boot_cpu_has(X86_FEATURE_SEP)) {
35179 put_cpu();
35180 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
35181 gate_vma.vm_start = FIXADDR_USER_START;
35182 gate_vma.vm_end = FIXADDR_USER_END;
35183 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35184 - gate_vma.vm_page_prot = __P101;
35185 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35186
35187 return 0;
35188 }
35189 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35190 if (compat)
35191 addr = VDSO_HIGH_BASE;
35192 else {
35193 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
35194 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
35195 if (IS_ERR_VALUE(addr)) {
35196 ret = addr;
35197 goto up_fail;
35198 }
35199 }
35200
35201 - current->mm->context.vdso = (void *)addr;
35202 + current->mm->context.vdso = addr;
35203
35204 if (compat_uses_vma || !compat) {
35205 /*
35206 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35207 }
35208
35209 current_thread_info()->sysenter_return =
35210 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
35211 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
35212
35213 up_fail:
35214 if (ret)
35215 - current->mm->context.vdso = NULL;
35216 + current->mm->context.vdso = 0;
35217
35218 up_write(&mm->mmap_sem);
35219
35220 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
35221
35222 const char *arch_vma_name(struct vm_area_struct *vma)
35223 {
35224 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
35225 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
35226 return "[vdso]";
35227 +
35228 +#ifdef CONFIG_PAX_SEGMEXEC
35229 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
35230 + return "[vdso]";
35231 +#endif
35232 +
35233 return NULL;
35234 }
35235
35236 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
35237 * Check to see if the corresponding task was created in compat vdso
35238 * mode.
35239 */
35240 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
35241 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
35242 return &gate_vma;
35243 return NULL;
35244 }
35245 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35246 index 431e875..cbb23f3 100644
35247 --- a/arch/x86/vdso/vma.c
35248 +++ b/arch/x86/vdso/vma.c
35249 @@ -16,8 +16,6 @@
35250 #include <asm/vdso.h>
35251 #include <asm/page.h>
35252
35253 -unsigned int __read_mostly vdso_enabled = 1;
35254 -
35255 extern char vdso_start[], vdso_end[];
35256 extern unsigned short vdso_sync_cpuid;
35257
35258 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
35259 * unaligned here as a result of stack start randomization.
35260 */
35261 addr = PAGE_ALIGN(addr);
35262 - addr = align_vdso_addr(addr);
35263
35264 return addr;
35265 }
35266 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
35267 unsigned size)
35268 {
35269 struct mm_struct *mm = current->mm;
35270 - unsigned long addr;
35271 + unsigned long addr = 0;
35272 int ret;
35273
35274 - if (!vdso_enabled)
35275 - return 0;
35276 -
35277 down_write(&mm->mmap_sem);
35278 +
35279 +#ifdef CONFIG_PAX_RANDMMAP
35280 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
35281 +#endif
35282 +
35283 addr = vdso_addr(mm->start_stack, size);
35284 + addr = align_vdso_addr(addr);
35285 addr = get_unmapped_area(NULL, addr, size, 0, 0);
35286 if (IS_ERR_VALUE(addr)) {
35287 ret = addr;
35288 goto up_fail;
35289 }
35290
35291 - current->mm->context.vdso = (void *)addr;
35292 + mm->context.vdso = addr;
35293
35294 ret = install_special_mapping(mm, addr, size,
35295 VM_READ|VM_EXEC|
35296 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
35297 pages);
35298 - if (ret) {
35299 - current->mm->context.vdso = NULL;
35300 - goto up_fail;
35301 - }
35302 + if (ret)
35303 + mm->context.vdso = 0;
35304
35305 up_fail:
35306 up_write(&mm->mmap_sem);
35307 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35308 vdsox32_size);
35309 }
35310 #endif
35311 -
35312 -static __init int vdso_setup(char *s)
35313 -{
35314 - vdso_enabled = simple_strtoul(s, NULL, 0);
35315 - return 0;
35316 -}
35317 -__setup("vdso=", vdso_setup);
35318 diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35319 index 1a3c765..3d2e8d1 100644
35320 --- a/arch/x86/xen/Kconfig
35321 +++ b/arch/x86/xen/Kconfig
35322 @@ -9,6 +9,7 @@ config XEN
35323 select XEN_HAVE_PVMMU
35324 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
35325 depends on X86_TSC
35326 + depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35327 help
35328 This is the Linux Xen port. Enabling this will allow the
35329 kernel to boot in a paravirtualized environment under the
35330 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35331 index fa6ade7..73da73a5 100644
35332 --- a/arch/x86/xen/enlighten.c
35333 +++ b/arch/x86/xen/enlighten.c
35334 @@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35335
35336 struct shared_info xen_dummy_shared_info;
35337
35338 -void *xen_initial_gdt;
35339 -
35340 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35341 __read_mostly int xen_have_vector_callback;
35342 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35343 @@ -541,8 +539,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35344 {
35345 unsigned long va = dtr->address;
35346 unsigned int size = dtr->size + 1;
35347 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35348 - unsigned long frames[pages];
35349 + unsigned long frames[65536 / PAGE_SIZE];
35350 int f;
35351
35352 /*
35353 @@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35354 {
35355 unsigned long va = dtr->address;
35356 unsigned int size = dtr->size + 1;
35357 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35358 - unsigned long frames[pages];
35359 + unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35360 int f;
35361
35362 /*
35363 @@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35364 * 8-byte entries, or 16 4k pages..
35365 */
35366
35367 - BUG_ON(size > 65536);
35368 + BUG_ON(size > GDT_SIZE);
35369 BUG_ON(va & ~PAGE_MASK);
35370
35371 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35372 @@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35373 return 0;
35374 }
35375
35376 -static void set_xen_basic_apic_ops(void)
35377 +static void __init set_xen_basic_apic_ops(void)
35378 {
35379 apic->read = xen_apic_read;
35380 apic->write = xen_apic_write;
35381 @@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35382 #endif
35383 };
35384
35385 -static void xen_reboot(int reason)
35386 +static __noreturn void xen_reboot(int reason)
35387 {
35388 struct sched_shutdown r = { .reason = reason };
35389
35390 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35391 - BUG();
35392 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35393 + BUG();
35394 }
35395
35396 -static void xen_restart(char *msg)
35397 +static __noreturn void xen_restart(char *msg)
35398 {
35399 xen_reboot(SHUTDOWN_reboot);
35400 }
35401
35402 -static void xen_emergency_restart(void)
35403 +static __noreturn void xen_emergency_restart(void)
35404 {
35405 xen_reboot(SHUTDOWN_reboot);
35406 }
35407
35408 -static void xen_machine_halt(void)
35409 +static __noreturn void xen_machine_halt(void)
35410 {
35411 xen_reboot(SHUTDOWN_poweroff);
35412 }
35413
35414 -static void xen_machine_power_off(void)
35415 +static __noreturn void xen_machine_power_off(void)
35416 {
35417 if (pm_power_off)
35418 pm_power_off();
35419 @@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(void)
35420 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35421
35422 /* Work out if we support NX */
35423 - x86_configure_nx();
35424 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35425 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35426 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35427 + unsigned l, h;
35428 +
35429 + __supported_pte_mask |= _PAGE_NX;
35430 + rdmsr(MSR_EFER, l, h);
35431 + l |= EFER_NX;
35432 + wrmsr(MSR_EFER, l, h);
35433 + }
35434 +#endif
35435
35436 xen_setup_features();
35437
35438 @@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(void)
35439
35440 machine_ops = xen_machine_ops;
35441
35442 - /*
35443 - * The only reliable way to retain the initial address of the
35444 - * percpu gdt_page is to remember it here, so we can go and
35445 - * mark it RW later, when the initial percpu area is freed.
35446 - */
35447 - xen_initial_gdt = &per_cpu(gdt_page, 0);
35448 -
35449 xen_smp_init();
35450
35451 #ifdef CONFIG_ACPI_NUMA
35452 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35453 index 3c76c3d..7327d91 100644
35454 --- a/arch/x86/xen/mmu.c
35455 +++ b/arch/x86/xen/mmu.c
35456 @@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
35457 /* Assume pteval_t is equivalent to all the other *val_t types. */
35458 static pteval_t pte_mfn_to_pfn(pteval_t val)
35459 {
35460 - if (pteval_present(val)) {
35461 + if (val & _PAGE_PRESENT) {
35462 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35463 unsigned long pfn = mfn_to_pfn(mfn);
35464
35465 @@ -379,9 +379,9 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35466 return val;
35467 }
35468
35469 -static pteval_t pte_pfn_to_mfn(pteval_t val)
35470 +static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35471 {
35472 - if (pteval_present(val)) {
35473 + if (val & _PAGE_PRESENT) {
35474 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35475 pteval_t flags = val & PTE_FLAGS_MASK;
35476 unsigned long mfn;
35477 @@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35478 /* L3_k[510] -> level2_kernel_pgt
35479 * L3_i[511] -> level2_fixmap_pgt */
35480 convert_pfn_mfn(level3_kernel_pgt);
35481 + convert_pfn_mfn(level3_vmalloc_start_pgt);
35482 + convert_pfn_mfn(level3_vmalloc_end_pgt);
35483 + convert_pfn_mfn(level3_vmemmap_pgt);
35484
35485 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35486 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
35487 @@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35488 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35489 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35490 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35491 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35492 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35493 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35494 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35495 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35496 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35497 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35498 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35499
35500 @@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
35501 pv_mmu_ops.set_pud = xen_set_pud;
35502 #if PAGETABLE_LEVELS == 4
35503 pv_mmu_ops.set_pgd = xen_set_pgd;
35504 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35505 #endif
35506
35507 /* This will work as long as patching hasn't happened yet
35508 @@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35509 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35510 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35511 .set_pgd = xen_set_pgd_hyper,
35512 + .set_pgd_batched = xen_set_pgd_hyper,
35513
35514 .alloc_pud = xen_alloc_pmd_init,
35515 .release_pud = xen_release_pmd_init,
35516 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35517 index c36b325..b0f1518 100644
35518 --- a/arch/x86/xen/smp.c
35519 +++ b/arch/x86/xen/smp.c
35520 @@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35521 native_smp_prepare_boot_cpu();
35522
35523 if (xen_pv_domain()) {
35524 - /* We've switched to the "real" per-cpu gdt, so make sure the
35525 - old memory can be recycled */
35526 - make_lowmem_page_readwrite(xen_initial_gdt);
35527 -
35528 #ifdef CONFIG_X86_32
35529 /*
35530 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35531 * expects __USER_DS
35532 */
35533 - loadsegment(ds, __USER_DS);
35534 - loadsegment(es, __USER_DS);
35535 + loadsegment(ds, __KERNEL_DS);
35536 + loadsegment(es, __KERNEL_DS);
35537 #endif
35538
35539 xen_filter_cpu_maps();
35540 @@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35541 ctxt->user_regs.ss = __KERNEL_DS;
35542 #ifdef CONFIG_X86_32
35543 ctxt->user_regs.fs = __KERNEL_PERCPU;
35544 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35545 + savesegment(gs, ctxt->user_regs.gs);
35546 #else
35547 ctxt->gs_base_kernel = per_cpu_offset(cpu);
35548 #endif
35549 @@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35550
35551 {
35552 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35553 - ctxt->user_regs.ds = __USER_DS;
35554 - ctxt->user_regs.es = __USER_DS;
35555 + ctxt->user_regs.ds = __KERNEL_DS;
35556 + ctxt->user_regs.es = __KERNEL_DS;
35557
35558 xen_copy_trap_info(ctxt->trap_ctxt);
35559
35560 @@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35561 int rc;
35562
35563 per_cpu(current_task, cpu) = idle;
35564 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
35565 #ifdef CONFIG_X86_32
35566 irq_ctx_init(cpu);
35567 #else
35568 clear_tsk_thread_flag(idle, TIF_FORK);
35569 - per_cpu(kernel_stack, cpu) =
35570 - (unsigned long)task_stack_page(idle) -
35571 - KERNEL_STACK_OFFSET + THREAD_SIZE;
35572 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35573 #endif
35574 xen_setup_runstate_info(cpu);
35575 xen_setup_timer(cpu);
35576 @@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35577
35578 void __init xen_smp_init(void)
35579 {
35580 - smp_ops = xen_smp_ops;
35581 + memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35582 xen_fill_possible_map();
35583 }
35584
35585 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35586 index 33ca6e4..0ded929 100644
35587 --- a/arch/x86/xen/xen-asm_32.S
35588 +++ b/arch/x86/xen/xen-asm_32.S
35589 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
35590 ESP_OFFSET=4 # bytes pushed onto stack
35591
35592 /*
35593 - * Store vcpu_info pointer for easy access. Do it this way to
35594 - * avoid having to reload %fs
35595 + * Store vcpu_info pointer for easy access.
35596 */
35597 #ifdef CONFIG_SMP
35598 - GET_THREAD_INFO(%eax)
35599 - movl %ss:TI_cpu(%eax), %eax
35600 - movl %ss:__per_cpu_offset(,%eax,4), %eax
35601 - mov %ss:xen_vcpu(%eax), %eax
35602 + push %fs
35603 + mov $(__KERNEL_PERCPU), %eax
35604 + mov %eax, %fs
35605 + mov PER_CPU_VAR(xen_vcpu), %eax
35606 + pop %fs
35607 #else
35608 movl %ss:xen_vcpu, %eax
35609 #endif
35610 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35611 index 7faed58..ba4427c 100644
35612 --- a/arch/x86/xen/xen-head.S
35613 +++ b/arch/x86/xen/xen-head.S
35614 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
35615 #ifdef CONFIG_X86_32
35616 mov %esi,xen_start_info
35617 mov $init_thread_union+THREAD_SIZE,%esp
35618 +#ifdef CONFIG_SMP
35619 + movl $cpu_gdt_table,%edi
35620 + movl $__per_cpu_load,%eax
35621 + movw %ax,__KERNEL_PERCPU + 2(%edi)
35622 + rorl $16,%eax
35623 + movb %al,__KERNEL_PERCPU + 4(%edi)
35624 + movb %ah,__KERNEL_PERCPU + 7(%edi)
35625 + movl $__per_cpu_end - 1,%eax
35626 + subl $__per_cpu_start,%eax
35627 + movw %ax,__KERNEL_PERCPU + 0(%edi)
35628 +#endif
35629 #else
35630 mov %rsi,xen_start_info
35631 mov $init_thread_union+THREAD_SIZE,%rsp
35632 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35633 index 95f8c61..611d6e8 100644
35634 --- a/arch/x86/xen/xen-ops.h
35635 +++ b/arch/x86/xen/xen-ops.h
35636 @@ -10,8 +10,6 @@
35637 extern const char xen_hypervisor_callback[];
35638 extern const char xen_failsafe_callback[];
35639
35640 -extern void *xen_initial_gdt;
35641 -
35642 struct trap_info;
35643 void xen_copy_trap_info(struct trap_info *traps);
35644
35645 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35646 index 525bd3d..ef888b1 100644
35647 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
35648 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35649 @@ -119,9 +119,9 @@
35650 ----------------------------------------------------------------------*/
35651
35652 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35653 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35654 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35655 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35656 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35657
35658 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35659 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35660 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35661 index 2f33760..835e50a 100644
35662 --- a/arch/xtensa/variants/fsf/include/variant/core.h
35663 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
35664 @@ -11,6 +11,7 @@
35665 #ifndef _XTENSA_CORE_H
35666 #define _XTENSA_CORE_H
35667
35668 +#include <linux/const.h>
35669
35670 /****************************************************************************
35671 Parameters Useful for Any Code, USER or PRIVILEGED
35672 @@ -112,9 +113,9 @@
35673 ----------------------------------------------------------------------*/
35674
35675 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35676 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35677 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35678 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35679 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35680
35681 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35682 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
35683 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
35684 index af00795..2bb8105 100644
35685 --- a/arch/xtensa/variants/s6000/include/variant/core.h
35686 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
35687 @@ -11,6 +11,7 @@
35688 #ifndef _XTENSA_CORE_CONFIGURATION_H
35689 #define _XTENSA_CORE_CONFIGURATION_H
35690
35691 +#include <linux/const.h>
35692
35693 /****************************************************************************
35694 Parameters Useful for Any Code, USER or PRIVILEGED
35695 @@ -118,9 +119,9 @@
35696 ----------------------------------------------------------------------*/
35697
35698 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35699 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35700 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35701 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35702 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35703
35704 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
35705 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
35706 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
35707 index 4e491d9..c8e18e4 100644
35708 --- a/block/blk-cgroup.c
35709 +++ b/block/blk-cgroup.c
35710 @@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
35711 static struct cgroup_subsys_state *
35712 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35713 {
35714 - static atomic64_t id_seq = ATOMIC64_INIT(0);
35715 + static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
35716 struct blkcg *blkcg;
35717
35718 if (!parent_css) {
35719 @@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35720
35721 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
35722 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
35723 - blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
35724 + blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
35725 done:
35726 spin_lock_init(&blkcg->lock);
35727 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
35728 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
35729 index 1855bf5..af12b06 100644
35730 --- a/block/blk-iopoll.c
35731 +++ b/block/blk-iopoll.c
35732 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
35733 }
35734 EXPORT_SYMBOL(blk_iopoll_complete);
35735
35736 -static void blk_iopoll_softirq(struct softirq_action *h)
35737 +static __latent_entropy void blk_iopoll_softirq(void)
35738 {
35739 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
35740 int rearm = 0, budget = blk_iopoll_budget;
35741 diff --git a/block/blk-map.c b/block/blk-map.c
35742 index 623e1cd..ca1e109 100644
35743 --- a/block/blk-map.c
35744 +++ b/block/blk-map.c
35745 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
35746 if (!len || !kbuf)
35747 return -EINVAL;
35748
35749 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
35750 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
35751 if (do_copy)
35752 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
35753 else
35754 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
35755 index 57790c1..5e988dd 100644
35756 --- a/block/blk-softirq.c
35757 +++ b/block/blk-softirq.c
35758 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
35759 * Softirq action handler - move entries to local list and loop over them
35760 * while passing them to the queue registered handler.
35761 */
35762 -static void blk_done_softirq(struct softirq_action *h)
35763 +static __latent_entropy void blk_done_softirq(void)
35764 {
35765 struct list_head *cpu_list, local_list;
35766
35767 diff --git a/block/bsg.c b/block/bsg.c
35768 index 420a5a9..23834aa 100644
35769 --- a/block/bsg.c
35770 +++ b/block/bsg.c
35771 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
35772 struct sg_io_v4 *hdr, struct bsg_device *bd,
35773 fmode_t has_write_perm)
35774 {
35775 + unsigned char tmpcmd[sizeof(rq->__cmd)];
35776 + unsigned char *cmdptr;
35777 +
35778 if (hdr->request_len > BLK_MAX_CDB) {
35779 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
35780 if (!rq->cmd)
35781 return -ENOMEM;
35782 - }
35783 + cmdptr = rq->cmd;
35784 + } else
35785 + cmdptr = tmpcmd;
35786
35787 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
35788 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
35789 hdr->request_len))
35790 return -EFAULT;
35791
35792 + if (cmdptr != rq->cmd)
35793 + memcpy(rq->cmd, cmdptr, hdr->request_len);
35794 +
35795 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
35796 if (blk_verify_command(rq->cmd, has_write_perm))
35797 return -EPERM;
35798 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
35799 index fbd5a67..f24fd95 100644
35800 --- a/block/compat_ioctl.c
35801 +++ b/block/compat_ioctl.c
35802 @@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
35803 cgc = compat_alloc_user_space(sizeof(*cgc));
35804 cgc32 = compat_ptr(arg);
35805
35806 - if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
35807 + if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
35808 get_user(data, &cgc32->buffer) ||
35809 put_user(compat_ptr(data), &cgc->buffer) ||
35810 copy_in_user(&cgc->buflen, &cgc32->buflen,
35811 @@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
35812 err |= __get_user(f->spec1, &uf->spec1);
35813 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
35814 err |= __get_user(name, &uf->name);
35815 - f->name = compat_ptr(name);
35816 + f->name = (void __force_kernel *)compat_ptr(name);
35817 if (err) {
35818 err = -EFAULT;
35819 goto out;
35820 diff --git a/block/genhd.c b/block/genhd.c
35821 index 791f419..89f21c4 100644
35822 --- a/block/genhd.c
35823 +++ b/block/genhd.c
35824 @@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
35825
35826 /*
35827 * Register device numbers dev..(dev+range-1)
35828 - * range must be nonzero
35829 + * Noop if @range is zero.
35830 * The hash chain is sorted on range, so that subranges can override.
35831 */
35832 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
35833 struct kobject *(*probe)(dev_t, int *, void *),
35834 int (*lock)(dev_t, void *), void *data)
35835 {
35836 - kobj_map(bdev_map, devt, range, module, probe, lock, data);
35837 + if (range)
35838 + kobj_map(bdev_map, devt, range, module, probe, lock, data);
35839 }
35840
35841 EXPORT_SYMBOL(blk_register_region);
35842
35843 +/* undo blk_register_region(), noop if @range is zero */
35844 void blk_unregister_region(dev_t devt, unsigned long range)
35845 {
35846 - kobj_unmap(bdev_map, devt, range);
35847 + if (range)
35848 + kobj_unmap(bdev_map, devt, range);
35849 }
35850
35851 EXPORT_SYMBOL(blk_unregister_region);
35852 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
35853 index dc51f46..d5446a8 100644
35854 --- a/block/partitions/efi.c
35855 +++ b/block/partitions/efi.c
35856 @@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
35857 if (!gpt)
35858 return NULL;
35859
35860 + if (!le32_to_cpu(gpt->num_partition_entries))
35861 + return NULL;
35862 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
35863 + if (!pte)
35864 + return NULL;
35865 +
35866 count = le32_to_cpu(gpt->num_partition_entries) *
35867 le32_to_cpu(gpt->sizeof_partition_entry);
35868 - if (!count)
35869 - return NULL;
35870 - pte = kmalloc(count, GFP_KERNEL);
35871 - if (!pte)
35872 - return NULL;
35873 -
35874 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
35875 (u8 *) pte, count) < count) {
35876 kfree(pte);
35877 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
35878 index 625e3e4..b5339f9 100644
35879 --- a/block/scsi_ioctl.c
35880 +++ b/block/scsi_ioctl.c
35881 @@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
35882 return put_user(0, p);
35883 }
35884
35885 -static int sg_get_timeout(struct request_queue *q)
35886 +static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
35887 {
35888 return jiffies_to_clock_t(q->sg_timeout);
35889 }
35890 @@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
35891 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
35892 struct sg_io_hdr *hdr, fmode_t mode)
35893 {
35894 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
35895 + unsigned char tmpcmd[sizeof(rq->__cmd)];
35896 + unsigned char *cmdptr;
35897 +
35898 + if (rq->cmd != rq->__cmd)
35899 + cmdptr = rq->cmd;
35900 + else
35901 + cmdptr = tmpcmd;
35902 +
35903 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
35904 return -EFAULT;
35905 +
35906 + if (cmdptr != rq->cmd)
35907 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
35908 +
35909 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
35910 return -EPERM;
35911
35912 @@ -415,6 +427,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35913 int err;
35914 unsigned int in_len, out_len, bytes, opcode, cmdlen;
35915 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
35916 + unsigned char tmpcmd[sizeof(rq->__cmd)];
35917 + unsigned char *cmdptr;
35918
35919 if (!sic)
35920 return -EINVAL;
35921 @@ -448,9 +462,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35922 */
35923 err = -EFAULT;
35924 rq->cmd_len = cmdlen;
35925 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
35926 +
35927 + if (rq->cmd != rq->__cmd)
35928 + cmdptr = rq->cmd;
35929 + else
35930 + cmdptr = tmpcmd;
35931 +
35932 + if (copy_from_user(cmdptr, sic->data, cmdlen))
35933 goto error;
35934
35935 + if (rq->cmd != cmdptr)
35936 + memcpy(rq->cmd, cmdptr, cmdlen);
35937 +
35938 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
35939 goto error;
35940
35941 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
35942 index 7bdd61b..afec999 100644
35943 --- a/crypto/cryptd.c
35944 +++ b/crypto/cryptd.c
35945 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
35946
35947 struct cryptd_blkcipher_request_ctx {
35948 crypto_completion_t complete;
35949 -};
35950 +} __no_const;
35951
35952 struct cryptd_hash_ctx {
35953 struct crypto_shash *child;
35954 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
35955
35956 struct cryptd_aead_request_ctx {
35957 crypto_completion_t complete;
35958 -};
35959 +} __no_const;
35960
35961 static void cryptd_queue_worker(struct work_struct *work);
35962
35963 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
35964 index f8c920c..ab2cb5a 100644
35965 --- a/crypto/pcrypt.c
35966 +++ b/crypto/pcrypt.c
35967 @@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
35968 int ret;
35969
35970 pinst->kobj.kset = pcrypt_kset;
35971 - ret = kobject_add(&pinst->kobj, NULL, name);
35972 + ret = kobject_add(&pinst->kobj, NULL, "%s", name);
35973 if (!ret)
35974 kobject_uevent(&pinst->kobj, KOBJ_ADD);
35975
35976 diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
35977 index 15dddc1..b61cf0c 100644
35978 --- a/drivers/acpi/acpica/hwxfsleep.c
35979 +++ b/drivers/acpi/acpica/hwxfsleep.c
35980 @@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
35981 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
35982
35983 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
35984 - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35985 - acpi_hw_extended_sleep},
35986 - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35987 - acpi_hw_extended_wake_prep},
35988 - {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
35989 + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35990 + .extended_function = acpi_hw_extended_sleep},
35991 + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35992 + .extended_function = acpi_hw_extended_wake_prep},
35993 + {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
35994 + .extended_function = acpi_hw_extended_wake}
35995 };
35996
35997 /*
35998 diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
35999 index 21ba34a..cb05966 100644
36000 --- a/drivers/acpi/apei/apei-internal.h
36001 +++ b/drivers/acpi/apei/apei-internal.h
36002 @@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36003 struct apei_exec_ins_type {
36004 u32 flags;
36005 apei_exec_ins_func_t run;
36006 -};
36007 +} __do_const;
36008
36009 struct apei_exec_context {
36010 u32 ip;
36011 diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36012 index a30bc31..b91c4d5 100644
36013 --- a/drivers/acpi/apei/ghes.c
36014 +++ b/drivers/acpi/apei/ghes.c
36015 @@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
36016 const struct acpi_hest_generic *generic,
36017 const struct acpi_generic_status *estatus)
36018 {
36019 - static atomic_t seqno;
36020 + static atomic_unchecked_t seqno;
36021 unsigned int curr_seqno;
36022 char pfx_seq[64];
36023
36024 @@ -509,7 +509,7 @@ static void __ghes_print_estatus(const char *pfx,
36025 else
36026 pfx = KERN_ERR;
36027 }
36028 - curr_seqno = atomic_inc_return(&seqno);
36029 + curr_seqno = atomic_inc_return_unchecked(&seqno);
36030 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36031 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36032 pfx_seq, generic->header.source_id);
36033 diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36034 index a83e3c6..c3d617f 100644
36035 --- a/drivers/acpi/bgrt.c
36036 +++ b/drivers/acpi/bgrt.c
36037 @@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36038 if (!bgrt_image)
36039 return -ENODEV;
36040
36041 - bin_attr_image.private = bgrt_image;
36042 - bin_attr_image.size = bgrt_image_size;
36043 + pax_open_kernel();
36044 + *(void **)&bin_attr_image.private = bgrt_image;
36045 + *(size_t *)&bin_attr_image.size = bgrt_image_size;
36046 + pax_close_kernel();
36047
36048 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36049 if (!bgrt_kobj)
36050 diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36051 index 078c4f7..410e272 100644
36052 --- a/drivers/acpi/blacklist.c
36053 +++ b/drivers/acpi/blacklist.c
36054 @@ -52,7 +52,7 @@ struct acpi_blacklist_item {
36055 u32 is_critical_error;
36056 };
36057
36058 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36059 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36060
36061 /*
36062 * POLICY: If *anything* doesn't work, put it on the blacklist.
36063 @@ -164,7 +164,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36064 return 0;
36065 }
36066
36067 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36068 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36069 {
36070 .callback = dmi_disable_osi_vista,
36071 .ident = "Fujitsu Siemens",
36072 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36073 index 12b62f2..dc2aac8 100644
36074 --- a/drivers/acpi/custom_method.c
36075 +++ b/drivers/acpi/custom_method.c
36076 @@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36077 struct acpi_table_header table;
36078 acpi_status status;
36079
36080 +#ifdef CONFIG_GRKERNSEC_KMEM
36081 + return -EPERM;
36082 +#endif
36083 +
36084 if (!(*ppos)) {
36085 /* parse the table header to get the table length */
36086 if (count <= sizeof(struct acpi_table_header))
36087 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36088 index 644516d..643937e 100644
36089 --- a/drivers/acpi/processor_idle.c
36090 +++ b/drivers/acpi/processor_idle.c
36091 @@ -963,7 +963,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36092 {
36093 int i, count = CPUIDLE_DRIVER_STATE_START;
36094 struct acpi_processor_cx *cx;
36095 - struct cpuidle_state *state;
36096 + cpuidle_state_no_const *state;
36097 struct cpuidle_driver *drv = &acpi_idle_driver;
36098
36099 if (!pr->flags.power_setup_done)
36100 diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36101 index 6dbc3ca..b8b59a0 100644
36102 --- a/drivers/acpi/sysfs.c
36103 +++ b/drivers/acpi/sysfs.c
36104 @@ -425,11 +425,11 @@ static u32 num_counters;
36105 static struct attribute **all_attrs;
36106 static u32 acpi_gpe_count;
36107
36108 -static struct attribute_group interrupt_stats_attr_group = {
36109 +static attribute_group_no_const interrupt_stats_attr_group = {
36110 .name = "interrupts",
36111 };
36112
36113 -static struct kobj_attribute *counter_attrs;
36114 +static kobj_attribute_no_const *counter_attrs;
36115
36116 static void delete_gpe_attr_array(void)
36117 {
36118 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36119 index c482f8c..c832240 100644
36120 --- a/drivers/ata/libahci.c
36121 +++ b/drivers/ata/libahci.c
36122 @@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
36123 }
36124 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36125
36126 -static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36127 +static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36128 struct ata_taskfile *tf, int is_cmd, u16 flags,
36129 unsigned long timeout_msec)
36130 {
36131 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36132 index 8cb2522..a815e54 100644
36133 --- a/drivers/ata/libata-core.c
36134 +++ b/drivers/ata/libata-core.c
36135 @@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36136 static void ata_dev_xfermask(struct ata_device *dev);
36137 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36138
36139 -atomic_t ata_print_id = ATOMIC_INIT(0);
36140 +atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36141
36142 struct ata_force_param {
36143 const char *name;
36144 @@ -4851,7 +4851,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36145 struct ata_port *ap;
36146 unsigned int tag;
36147
36148 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36149 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36150 ap = qc->ap;
36151
36152 qc->flags = 0;
36153 @@ -4867,7 +4867,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36154 struct ata_port *ap;
36155 struct ata_link *link;
36156
36157 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36158 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36159 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36160 ap = qc->ap;
36161 link = qc->dev->link;
36162 @@ -5986,6 +5986,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36163 return;
36164
36165 spin_lock(&lock);
36166 + pax_open_kernel();
36167
36168 for (cur = ops->inherits; cur; cur = cur->inherits) {
36169 void **inherit = (void **)cur;
36170 @@ -5999,8 +6000,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36171 if (IS_ERR(*pp))
36172 *pp = NULL;
36173
36174 - ops->inherits = NULL;
36175 + *(struct ata_port_operations **)&ops->inherits = NULL;
36176
36177 + pax_close_kernel();
36178 spin_unlock(&lock);
36179 }
36180
36181 @@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36182
36183 /* give ports names and add SCSI hosts */
36184 for (i = 0; i < host->n_ports; i++) {
36185 - host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36186 + host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36187 host->ports[i]->local_port_no = i + 1;
36188 }
36189
36190 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36191 index ef8567d..8bdbd03 100644
36192 --- a/drivers/ata/libata-scsi.c
36193 +++ b/drivers/ata/libata-scsi.c
36194 @@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *ap)
36195
36196 if (rc)
36197 return rc;
36198 - ap->print_id = atomic_inc_return(&ata_print_id);
36199 + ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36200 return 0;
36201 }
36202 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36203 diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36204 index 45b5ab3..98446b8 100644
36205 --- a/drivers/ata/libata.h
36206 +++ b/drivers/ata/libata.h
36207 @@ -53,7 +53,7 @@ enum {
36208 ATA_DNXFER_QUIET = (1 << 31),
36209 };
36210
36211 -extern atomic_t ata_print_id;
36212 +extern atomic_unchecked_t ata_print_id;
36213 extern int atapi_passthru16;
36214 extern int libata_fua;
36215 extern int libata_noacpi;
36216 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36217 index 73492dd..ca2bff5 100644
36218 --- a/drivers/ata/pata_arasan_cf.c
36219 +++ b/drivers/ata/pata_arasan_cf.c
36220 @@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36221 /* Handle platform specific quirks */
36222 if (quirk) {
36223 if (quirk & CF_BROKEN_PIO) {
36224 - ap->ops->set_piomode = NULL;
36225 + pax_open_kernel();
36226 + *(void **)&ap->ops->set_piomode = NULL;
36227 + pax_close_kernel();
36228 ap->pio_mask = 0;
36229 }
36230 if (quirk & CF_BROKEN_MWDMA)
36231 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36232 index f9b983a..887b9d8 100644
36233 --- a/drivers/atm/adummy.c
36234 +++ b/drivers/atm/adummy.c
36235 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36236 vcc->pop(vcc, skb);
36237 else
36238 dev_kfree_skb_any(skb);
36239 - atomic_inc(&vcc->stats->tx);
36240 + atomic_inc_unchecked(&vcc->stats->tx);
36241
36242 return 0;
36243 }
36244 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36245 index 62a7607..cc4be104 100644
36246 --- a/drivers/atm/ambassador.c
36247 +++ b/drivers/atm/ambassador.c
36248 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36249 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36250
36251 // VC layer stats
36252 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36253 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36254
36255 // free the descriptor
36256 kfree (tx_descr);
36257 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36258 dump_skb ("<<<", vc, skb);
36259
36260 // VC layer stats
36261 - atomic_inc(&atm_vcc->stats->rx);
36262 + atomic_inc_unchecked(&atm_vcc->stats->rx);
36263 __net_timestamp(skb);
36264 // end of our responsibility
36265 atm_vcc->push (atm_vcc, skb);
36266 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36267 } else {
36268 PRINTK (KERN_INFO, "dropped over-size frame");
36269 // should we count this?
36270 - atomic_inc(&atm_vcc->stats->rx_drop);
36271 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36272 }
36273
36274 } else {
36275 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36276 }
36277
36278 if (check_area (skb->data, skb->len)) {
36279 - atomic_inc(&atm_vcc->stats->tx_err);
36280 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36281 return -ENOMEM; // ?
36282 }
36283
36284 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36285 index 0e3f8f9..765a7a5 100644
36286 --- a/drivers/atm/atmtcp.c
36287 +++ b/drivers/atm/atmtcp.c
36288 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36289 if (vcc->pop) vcc->pop(vcc,skb);
36290 else dev_kfree_skb(skb);
36291 if (dev_data) return 0;
36292 - atomic_inc(&vcc->stats->tx_err);
36293 + atomic_inc_unchecked(&vcc->stats->tx_err);
36294 return -ENOLINK;
36295 }
36296 size = skb->len+sizeof(struct atmtcp_hdr);
36297 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36298 if (!new_skb) {
36299 if (vcc->pop) vcc->pop(vcc,skb);
36300 else dev_kfree_skb(skb);
36301 - atomic_inc(&vcc->stats->tx_err);
36302 + atomic_inc_unchecked(&vcc->stats->tx_err);
36303 return -ENOBUFS;
36304 }
36305 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36306 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36307 if (vcc->pop) vcc->pop(vcc,skb);
36308 else dev_kfree_skb(skb);
36309 out_vcc->push(out_vcc,new_skb);
36310 - atomic_inc(&vcc->stats->tx);
36311 - atomic_inc(&out_vcc->stats->rx);
36312 + atomic_inc_unchecked(&vcc->stats->tx);
36313 + atomic_inc_unchecked(&out_vcc->stats->rx);
36314 return 0;
36315 }
36316
36317 @@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36318 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
36319 read_unlock(&vcc_sklist_lock);
36320 if (!out_vcc) {
36321 - atomic_inc(&vcc->stats->tx_err);
36322 + atomic_inc_unchecked(&vcc->stats->tx_err);
36323 goto done;
36324 }
36325 skb_pull(skb,sizeof(struct atmtcp_hdr));
36326 @@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36327 __net_timestamp(new_skb);
36328 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36329 out_vcc->push(out_vcc,new_skb);
36330 - atomic_inc(&vcc->stats->tx);
36331 - atomic_inc(&out_vcc->stats->rx);
36332 + atomic_inc_unchecked(&vcc->stats->tx);
36333 + atomic_inc_unchecked(&out_vcc->stats->rx);
36334 done:
36335 if (vcc->pop) vcc->pop(vcc,skb);
36336 else dev_kfree_skb(skb);
36337 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36338 index b1955ba..b179940 100644
36339 --- a/drivers/atm/eni.c
36340 +++ b/drivers/atm/eni.c
36341 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36342 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36343 vcc->dev->number);
36344 length = 0;
36345 - atomic_inc(&vcc->stats->rx_err);
36346 + atomic_inc_unchecked(&vcc->stats->rx_err);
36347 }
36348 else {
36349 length = ATM_CELL_SIZE-1; /* no HEC */
36350 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36351 size);
36352 }
36353 eff = length = 0;
36354 - atomic_inc(&vcc->stats->rx_err);
36355 + atomic_inc_unchecked(&vcc->stats->rx_err);
36356 }
36357 else {
36358 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36359 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36360 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36361 vcc->dev->number,vcc->vci,length,size << 2,descr);
36362 length = eff = 0;
36363 - atomic_inc(&vcc->stats->rx_err);
36364 + atomic_inc_unchecked(&vcc->stats->rx_err);
36365 }
36366 }
36367 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36368 @@ -767,7 +767,7 @@ rx_dequeued++;
36369 vcc->push(vcc,skb);
36370 pushed++;
36371 }
36372 - atomic_inc(&vcc->stats->rx);
36373 + atomic_inc_unchecked(&vcc->stats->rx);
36374 }
36375 wake_up(&eni_dev->rx_wait);
36376 }
36377 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
36378 PCI_DMA_TODEVICE);
36379 if (vcc->pop) vcc->pop(vcc,skb);
36380 else dev_kfree_skb_irq(skb);
36381 - atomic_inc(&vcc->stats->tx);
36382 + atomic_inc_unchecked(&vcc->stats->tx);
36383 wake_up(&eni_dev->tx_wait);
36384 dma_complete++;
36385 }
36386 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36387 index b41c948..a002b17 100644
36388 --- a/drivers/atm/firestream.c
36389 +++ b/drivers/atm/firestream.c
36390 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36391 }
36392 }
36393
36394 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36395 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36396
36397 fs_dprintk (FS_DEBUG_TXMEM, "i");
36398 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36399 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36400 #endif
36401 skb_put (skb, qe->p1 & 0xffff);
36402 ATM_SKB(skb)->vcc = atm_vcc;
36403 - atomic_inc(&atm_vcc->stats->rx);
36404 + atomic_inc_unchecked(&atm_vcc->stats->rx);
36405 __net_timestamp(skb);
36406 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36407 atm_vcc->push (atm_vcc, skb);
36408 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36409 kfree (pe);
36410 }
36411 if (atm_vcc)
36412 - atomic_inc(&atm_vcc->stats->rx_drop);
36413 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36414 break;
36415 case 0x1f: /* Reassembly abort: no buffers. */
36416 /* Silently increment error counter. */
36417 if (atm_vcc)
36418 - atomic_inc(&atm_vcc->stats->rx_drop);
36419 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36420 break;
36421 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36422 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36423 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36424 index 204814e..cede831 100644
36425 --- a/drivers/atm/fore200e.c
36426 +++ b/drivers/atm/fore200e.c
36427 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36428 #endif
36429 /* check error condition */
36430 if (*entry->status & STATUS_ERROR)
36431 - atomic_inc(&vcc->stats->tx_err);
36432 + atomic_inc_unchecked(&vcc->stats->tx_err);
36433 else
36434 - atomic_inc(&vcc->stats->tx);
36435 + atomic_inc_unchecked(&vcc->stats->tx);
36436 }
36437 }
36438
36439 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36440 if (skb == NULL) {
36441 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36442
36443 - atomic_inc(&vcc->stats->rx_drop);
36444 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36445 return -ENOMEM;
36446 }
36447
36448 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36449
36450 dev_kfree_skb_any(skb);
36451
36452 - atomic_inc(&vcc->stats->rx_drop);
36453 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36454 return -ENOMEM;
36455 }
36456
36457 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36458
36459 vcc->push(vcc, skb);
36460 - atomic_inc(&vcc->stats->rx);
36461 + atomic_inc_unchecked(&vcc->stats->rx);
36462
36463 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36464
36465 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36466 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36467 fore200e->atm_dev->number,
36468 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36469 - atomic_inc(&vcc->stats->rx_err);
36470 + atomic_inc_unchecked(&vcc->stats->rx_err);
36471 }
36472 }
36473
36474 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36475 goto retry_here;
36476 }
36477
36478 - atomic_inc(&vcc->stats->tx_err);
36479 + atomic_inc_unchecked(&vcc->stats->tx_err);
36480
36481 fore200e->tx_sat++;
36482 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36483 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36484 index 8557adc..3fb5d55 100644
36485 --- a/drivers/atm/he.c
36486 +++ b/drivers/atm/he.c
36487 @@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36488
36489 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36490 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36491 - atomic_inc(&vcc->stats->rx_drop);
36492 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36493 goto return_host_buffers;
36494 }
36495
36496 @@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36497 RBRQ_LEN_ERR(he_dev->rbrq_head)
36498 ? "LEN_ERR" : "",
36499 vcc->vpi, vcc->vci);
36500 - atomic_inc(&vcc->stats->rx_err);
36501 + atomic_inc_unchecked(&vcc->stats->rx_err);
36502 goto return_host_buffers;
36503 }
36504
36505 @@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36506 vcc->push(vcc, skb);
36507 spin_lock(&he_dev->global_lock);
36508
36509 - atomic_inc(&vcc->stats->rx);
36510 + atomic_inc_unchecked(&vcc->stats->rx);
36511
36512 return_host_buffers:
36513 ++pdus_assembled;
36514 @@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36515 tpd->vcc->pop(tpd->vcc, tpd->skb);
36516 else
36517 dev_kfree_skb_any(tpd->skb);
36518 - atomic_inc(&tpd->vcc->stats->tx_err);
36519 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36520 }
36521 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36522 return;
36523 @@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36524 vcc->pop(vcc, skb);
36525 else
36526 dev_kfree_skb_any(skb);
36527 - atomic_inc(&vcc->stats->tx_err);
36528 + atomic_inc_unchecked(&vcc->stats->tx_err);
36529 return -EINVAL;
36530 }
36531
36532 @@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36533 vcc->pop(vcc, skb);
36534 else
36535 dev_kfree_skb_any(skb);
36536 - atomic_inc(&vcc->stats->tx_err);
36537 + atomic_inc_unchecked(&vcc->stats->tx_err);
36538 return -EINVAL;
36539 }
36540 #endif
36541 @@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36542 vcc->pop(vcc, skb);
36543 else
36544 dev_kfree_skb_any(skb);
36545 - atomic_inc(&vcc->stats->tx_err);
36546 + atomic_inc_unchecked(&vcc->stats->tx_err);
36547 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36548 return -ENOMEM;
36549 }
36550 @@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36551 vcc->pop(vcc, skb);
36552 else
36553 dev_kfree_skb_any(skb);
36554 - atomic_inc(&vcc->stats->tx_err);
36555 + atomic_inc_unchecked(&vcc->stats->tx_err);
36556 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36557 return -ENOMEM;
36558 }
36559 @@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36560 __enqueue_tpd(he_dev, tpd, cid);
36561 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36562
36563 - atomic_inc(&vcc->stats->tx);
36564 + atomic_inc_unchecked(&vcc->stats->tx);
36565
36566 return 0;
36567 }
36568 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36569 index 1dc0519..1aadaf7 100644
36570 --- a/drivers/atm/horizon.c
36571 +++ b/drivers/atm/horizon.c
36572 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36573 {
36574 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36575 // VC layer stats
36576 - atomic_inc(&vcc->stats->rx);
36577 + atomic_inc_unchecked(&vcc->stats->rx);
36578 __net_timestamp(skb);
36579 // end of our responsibility
36580 vcc->push (vcc, skb);
36581 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36582 dev->tx_iovec = NULL;
36583
36584 // VC layer stats
36585 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36586 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36587
36588 // free the skb
36589 hrz_kfree_skb (skb);
36590 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36591 index 1bdf104..9dc44b1 100644
36592 --- a/drivers/atm/idt77252.c
36593 +++ b/drivers/atm/idt77252.c
36594 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36595 else
36596 dev_kfree_skb(skb);
36597
36598 - atomic_inc(&vcc->stats->tx);
36599 + atomic_inc_unchecked(&vcc->stats->tx);
36600 }
36601
36602 atomic_dec(&scq->used);
36603 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36604 if ((sb = dev_alloc_skb(64)) == NULL) {
36605 printk("%s: Can't allocate buffers for aal0.\n",
36606 card->name);
36607 - atomic_add(i, &vcc->stats->rx_drop);
36608 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
36609 break;
36610 }
36611 if (!atm_charge(vcc, sb->truesize)) {
36612 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36613 card->name);
36614 - atomic_add(i - 1, &vcc->stats->rx_drop);
36615 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36616 dev_kfree_skb(sb);
36617 break;
36618 }
36619 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36620 ATM_SKB(sb)->vcc = vcc;
36621 __net_timestamp(sb);
36622 vcc->push(vcc, sb);
36623 - atomic_inc(&vcc->stats->rx);
36624 + atomic_inc_unchecked(&vcc->stats->rx);
36625
36626 cell += ATM_CELL_PAYLOAD;
36627 }
36628 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36629 "(CDC: %08x)\n",
36630 card->name, len, rpp->len, readl(SAR_REG_CDC));
36631 recycle_rx_pool_skb(card, rpp);
36632 - atomic_inc(&vcc->stats->rx_err);
36633 + atomic_inc_unchecked(&vcc->stats->rx_err);
36634 return;
36635 }
36636 if (stat & SAR_RSQE_CRC) {
36637 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36638 recycle_rx_pool_skb(card, rpp);
36639 - atomic_inc(&vcc->stats->rx_err);
36640 + atomic_inc_unchecked(&vcc->stats->rx_err);
36641 return;
36642 }
36643 if (skb_queue_len(&rpp->queue) > 1) {
36644 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36645 RXPRINTK("%s: Can't alloc RX skb.\n",
36646 card->name);
36647 recycle_rx_pool_skb(card, rpp);
36648 - atomic_inc(&vcc->stats->rx_err);
36649 + atomic_inc_unchecked(&vcc->stats->rx_err);
36650 return;
36651 }
36652 if (!atm_charge(vcc, skb->truesize)) {
36653 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36654 __net_timestamp(skb);
36655
36656 vcc->push(vcc, skb);
36657 - atomic_inc(&vcc->stats->rx);
36658 + atomic_inc_unchecked(&vcc->stats->rx);
36659
36660 return;
36661 }
36662 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36663 __net_timestamp(skb);
36664
36665 vcc->push(vcc, skb);
36666 - atomic_inc(&vcc->stats->rx);
36667 + atomic_inc_unchecked(&vcc->stats->rx);
36668
36669 if (skb->truesize > SAR_FB_SIZE_3)
36670 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36671 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36672 if (vcc->qos.aal != ATM_AAL0) {
36673 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36674 card->name, vpi, vci);
36675 - atomic_inc(&vcc->stats->rx_drop);
36676 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36677 goto drop;
36678 }
36679
36680 if ((sb = dev_alloc_skb(64)) == NULL) {
36681 printk("%s: Can't allocate buffers for AAL0.\n",
36682 card->name);
36683 - atomic_inc(&vcc->stats->rx_err);
36684 + atomic_inc_unchecked(&vcc->stats->rx_err);
36685 goto drop;
36686 }
36687
36688 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
36689 ATM_SKB(sb)->vcc = vcc;
36690 __net_timestamp(sb);
36691 vcc->push(vcc, sb);
36692 - atomic_inc(&vcc->stats->rx);
36693 + atomic_inc_unchecked(&vcc->stats->rx);
36694
36695 drop:
36696 skb_pull(queue, 64);
36697 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36698
36699 if (vc == NULL) {
36700 printk("%s: NULL connection in send().\n", card->name);
36701 - atomic_inc(&vcc->stats->tx_err);
36702 + atomic_inc_unchecked(&vcc->stats->tx_err);
36703 dev_kfree_skb(skb);
36704 return -EINVAL;
36705 }
36706 if (!test_bit(VCF_TX, &vc->flags)) {
36707 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
36708 - atomic_inc(&vcc->stats->tx_err);
36709 + atomic_inc_unchecked(&vcc->stats->tx_err);
36710 dev_kfree_skb(skb);
36711 return -EINVAL;
36712 }
36713 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36714 break;
36715 default:
36716 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
36717 - atomic_inc(&vcc->stats->tx_err);
36718 + atomic_inc_unchecked(&vcc->stats->tx_err);
36719 dev_kfree_skb(skb);
36720 return -EINVAL;
36721 }
36722
36723 if (skb_shinfo(skb)->nr_frags != 0) {
36724 printk("%s: No scatter-gather yet.\n", card->name);
36725 - atomic_inc(&vcc->stats->tx_err);
36726 + atomic_inc_unchecked(&vcc->stats->tx_err);
36727 dev_kfree_skb(skb);
36728 return -EINVAL;
36729 }
36730 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36731
36732 err = queue_skb(card, vc, skb, oam);
36733 if (err) {
36734 - atomic_inc(&vcc->stats->tx_err);
36735 + atomic_inc_unchecked(&vcc->stats->tx_err);
36736 dev_kfree_skb(skb);
36737 return err;
36738 }
36739 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
36740 skb = dev_alloc_skb(64);
36741 if (!skb) {
36742 printk("%s: Out of memory in send_oam().\n", card->name);
36743 - atomic_inc(&vcc->stats->tx_err);
36744 + atomic_inc_unchecked(&vcc->stats->tx_err);
36745 return -ENOMEM;
36746 }
36747 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
36748 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
36749 index 4217f29..88f547a 100644
36750 --- a/drivers/atm/iphase.c
36751 +++ b/drivers/atm/iphase.c
36752 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
36753 status = (u_short) (buf_desc_ptr->desc_mode);
36754 if (status & (RX_CER | RX_PTE | RX_OFL))
36755 {
36756 - atomic_inc(&vcc->stats->rx_err);
36757 + atomic_inc_unchecked(&vcc->stats->rx_err);
36758 IF_ERR(printk("IA: bad packet, dropping it");)
36759 if (status & RX_CER) {
36760 IF_ERR(printk(" cause: packet CRC error\n");)
36761 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
36762 len = dma_addr - buf_addr;
36763 if (len > iadev->rx_buf_sz) {
36764 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
36765 - atomic_inc(&vcc->stats->rx_err);
36766 + atomic_inc_unchecked(&vcc->stats->rx_err);
36767 goto out_free_desc;
36768 }
36769
36770 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36771 ia_vcc = INPH_IA_VCC(vcc);
36772 if (ia_vcc == NULL)
36773 {
36774 - atomic_inc(&vcc->stats->rx_err);
36775 + atomic_inc_unchecked(&vcc->stats->rx_err);
36776 atm_return(vcc, skb->truesize);
36777 dev_kfree_skb_any(skb);
36778 goto INCR_DLE;
36779 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36780 if ((length > iadev->rx_buf_sz) || (length >
36781 (skb->len - sizeof(struct cpcs_trailer))))
36782 {
36783 - atomic_inc(&vcc->stats->rx_err);
36784 + atomic_inc_unchecked(&vcc->stats->rx_err);
36785 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
36786 length, skb->len);)
36787 atm_return(vcc, skb->truesize);
36788 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36789
36790 IF_RX(printk("rx_dle_intr: skb push");)
36791 vcc->push(vcc,skb);
36792 - atomic_inc(&vcc->stats->rx);
36793 + atomic_inc_unchecked(&vcc->stats->rx);
36794 iadev->rx_pkt_cnt++;
36795 }
36796 INCR_DLE:
36797 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
36798 {
36799 struct k_sonet_stats *stats;
36800 stats = &PRIV(_ia_dev[board])->sonet_stats;
36801 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
36802 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
36803 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
36804 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
36805 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
36806 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
36807 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
36808 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
36809 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
36810 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
36811 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
36812 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
36813 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
36814 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
36815 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
36816 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
36817 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
36818 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
36819 }
36820 ia_cmds.status = 0;
36821 break;
36822 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36823 if ((desc == 0) || (desc > iadev->num_tx_desc))
36824 {
36825 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
36826 - atomic_inc(&vcc->stats->tx);
36827 + atomic_inc_unchecked(&vcc->stats->tx);
36828 if (vcc->pop)
36829 vcc->pop(vcc, skb);
36830 else
36831 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36832 ATM_DESC(skb) = vcc->vci;
36833 skb_queue_tail(&iadev->tx_dma_q, skb);
36834
36835 - atomic_inc(&vcc->stats->tx);
36836 + atomic_inc_unchecked(&vcc->stats->tx);
36837 iadev->tx_pkt_cnt++;
36838 /* Increment transaction counter */
36839 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
36840
36841 #if 0
36842 /* add flow control logic */
36843 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
36844 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
36845 if (iavcc->vc_desc_cnt > 10) {
36846 vcc->tx_quota = vcc->tx_quota * 3 / 4;
36847 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
36848 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
36849 index fa7d701..1e404c7 100644
36850 --- a/drivers/atm/lanai.c
36851 +++ b/drivers/atm/lanai.c
36852 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
36853 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
36854 lanai_endtx(lanai, lvcc);
36855 lanai_free_skb(lvcc->tx.atmvcc, skb);
36856 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
36857 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
36858 }
36859
36860 /* Try to fill the buffer - don't call unless there is backlog */
36861 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
36862 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
36863 __net_timestamp(skb);
36864 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
36865 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
36866 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
36867 out:
36868 lvcc->rx.buf.ptr = end;
36869 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
36870 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36871 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
36872 "vcc %d\n", lanai->number, (unsigned int) s, vci);
36873 lanai->stats.service_rxnotaal5++;
36874 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36875 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36876 return 0;
36877 }
36878 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
36879 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36880 int bytes;
36881 read_unlock(&vcc_sklist_lock);
36882 DPRINTK("got trashed rx pdu on vci %d\n", vci);
36883 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36884 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36885 lvcc->stats.x.aal5.service_trash++;
36886 bytes = (SERVICE_GET_END(s) * 16) -
36887 (((unsigned long) lvcc->rx.buf.ptr) -
36888 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36889 }
36890 if (s & SERVICE_STREAM) {
36891 read_unlock(&vcc_sklist_lock);
36892 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36893 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36894 lvcc->stats.x.aal5.service_stream++;
36895 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
36896 "PDU on VCI %d!\n", lanai->number, vci);
36897 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36898 return 0;
36899 }
36900 DPRINTK("got rx crc error on vci %d\n", vci);
36901 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36902 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36903 lvcc->stats.x.aal5.service_rxcrc++;
36904 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
36905 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
36906 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
36907 index 5aca5f4..ce3a6b0 100644
36908 --- a/drivers/atm/nicstar.c
36909 +++ b/drivers/atm/nicstar.c
36910 @@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36911 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
36912 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
36913 card->index);
36914 - atomic_inc(&vcc->stats->tx_err);
36915 + atomic_inc_unchecked(&vcc->stats->tx_err);
36916 dev_kfree_skb_any(skb);
36917 return -EINVAL;
36918 }
36919 @@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36920 if (!vc->tx) {
36921 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
36922 card->index);
36923 - atomic_inc(&vcc->stats->tx_err);
36924 + atomic_inc_unchecked(&vcc->stats->tx_err);
36925 dev_kfree_skb_any(skb);
36926 return -EINVAL;
36927 }
36928 @@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36929 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
36930 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
36931 card->index);
36932 - atomic_inc(&vcc->stats->tx_err);
36933 + atomic_inc_unchecked(&vcc->stats->tx_err);
36934 dev_kfree_skb_any(skb);
36935 return -EINVAL;
36936 }
36937
36938 if (skb_shinfo(skb)->nr_frags != 0) {
36939 printk("nicstar%d: No scatter-gather yet.\n", card->index);
36940 - atomic_inc(&vcc->stats->tx_err);
36941 + atomic_inc_unchecked(&vcc->stats->tx_err);
36942 dev_kfree_skb_any(skb);
36943 return -EINVAL;
36944 }
36945 @@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36946 }
36947
36948 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
36949 - atomic_inc(&vcc->stats->tx_err);
36950 + atomic_inc_unchecked(&vcc->stats->tx_err);
36951 dev_kfree_skb_any(skb);
36952 return -EIO;
36953 }
36954 - atomic_inc(&vcc->stats->tx);
36955 + atomic_inc_unchecked(&vcc->stats->tx);
36956
36957 return 0;
36958 }
36959 @@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36960 printk
36961 ("nicstar%d: Can't allocate buffers for aal0.\n",
36962 card->index);
36963 - atomic_add(i, &vcc->stats->rx_drop);
36964 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
36965 break;
36966 }
36967 if (!atm_charge(vcc, sb->truesize)) {
36968 RXPRINTK
36969 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
36970 card->index);
36971 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36972 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36973 dev_kfree_skb_any(sb);
36974 break;
36975 }
36976 @@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36977 ATM_SKB(sb)->vcc = vcc;
36978 __net_timestamp(sb);
36979 vcc->push(vcc, sb);
36980 - atomic_inc(&vcc->stats->rx);
36981 + atomic_inc_unchecked(&vcc->stats->rx);
36982 cell += ATM_CELL_PAYLOAD;
36983 }
36984
36985 @@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36986 if (iovb == NULL) {
36987 printk("nicstar%d: Out of iovec buffers.\n",
36988 card->index);
36989 - atomic_inc(&vcc->stats->rx_drop);
36990 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36991 recycle_rx_buf(card, skb);
36992 return;
36993 }
36994 @@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36995 small or large buffer itself. */
36996 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
36997 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
36998 - atomic_inc(&vcc->stats->rx_err);
36999 + atomic_inc_unchecked(&vcc->stats->rx_err);
37000 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37001 NS_MAX_IOVECS);
37002 NS_PRV_IOVCNT(iovb) = 0;
37003 @@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37004 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37005 card->index);
37006 which_list(card, skb);
37007 - atomic_inc(&vcc->stats->rx_err);
37008 + atomic_inc_unchecked(&vcc->stats->rx_err);
37009 recycle_rx_buf(card, skb);
37010 vc->rx_iov = NULL;
37011 recycle_iov_buf(card, iovb);
37012 @@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37013 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37014 card->index);
37015 which_list(card, skb);
37016 - atomic_inc(&vcc->stats->rx_err);
37017 + atomic_inc_unchecked(&vcc->stats->rx_err);
37018 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37019 NS_PRV_IOVCNT(iovb));
37020 vc->rx_iov = NULL;
37021 @@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37022 printk(" - PDU size mismatch.\n");
37023 else
37024 printk(".\n");
37025 - atomic_inc(&vcc->stats->rx_err);
37026 + atomic_inc_unchecked(&vcc->stats->rx_err);
37027 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37028 NS_PRV_IOVCNT(iovb));
37029 vc->rx_iov = NULL;
37030 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37031 /* skb points to a small buffer */
37032 if (!atm_charge(vcc, skb->truesize)) {
37033 push_rxbufs(card, skb);
37034 - atomic_inc(&vcc->stats->rx_drop);
37035 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37036 } else {
37037 skb_put(skb, len);
37038 dequeue_sm_buf(card, skb);
37039 @@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37040 ATM_SKB(skb)->vcc = vcc;
37041 __net_timestamp(skb);
37042 vcc->push(vcc, skb);
37043 - atomic_inc(&vcc->stats->rx);
37044 + atomic_inc_unchecked(&vcc->stats->rx);
37045 }
37046 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37047 struct sk_buff *sb;
37048 @@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37049 if (len <= NS_SMBUFSIZE) {
37050 if (!atm_charge(vcc, sb->truesize)) {
37051 push_rxbufs(card, sb);
37052 - atomic_inc(&vcc->stats->rx_drop);
37053 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37054 } else {
37055 skb_put(sb, len);
37056 dequeue_sm_buf(card, sb);
37057 @@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37058 ATM_SKB(sb)->vcc = vcc;
37059 __net_timestamp(sb);
37060 vcc->push(vcc, sb);
37061 - atomic_inc(&vcc->stats->rx);
37062 + atomic_inc_unchecked(&vcc->stats->rx);
37063 }
37064
37065 push_rxbufs(card, skb);
37066 @@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37067
37068 if (!atm_charge(vcc, skb->truesize)) {
37069 push_rxbufs(card, skb);
37070 - atomic_inc(&vcc->stats->rx_drop);
37071 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37072 } else {
37073 dequeue_lg_buf(card, skb);
37074 #ifdef NS_USE_DESTRUCTORS
37075 @@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37076 ATM_SKB(skb)->vcc = vcc;
37077 __net_timestamp(skb);
37078 vcc->push(vcc, skb);
37079 - atomic_inc(&vcc->stats->rx);
37080 + atomic_inc_unchecked(&vcc->stats->rx);
37081 }
37082
37083 push_rxbufs(card, sb);
37084 @@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37085 printk
37086 ("nicstar%d: Out of huge buffers.\n",
37087 card->index);
37088 - atomic_inc(&vcc->stats->rx_drop);
37089 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37090 recycle_iovec_rx_bufs(card,
37091 (struct iovec *)
37092 iovb->data,
37093 @@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37094 card->hbpool.count++;
37095 } else
37096 dev_kfree_skb_any(hb);
37097 - atomic_inc(&vcc->stats->rx_drop);
37098 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37099 } else {
37100 /* Copy the small buffer to the huge buffer */
37101 sb = (struct sk_buff *)iov->iov_base;
37102 @@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37103 #endif /* NS_USE_DESTRUCTORS */
37104 __net_timestamp(hb);
37105 vcc->push(vcc, hb);
37106 - atomic_inc(&vcc->stats->rx);
37107 + atomic_inc_unchecked(&vcc->stats->rx);
37108 }
37109 }
37110
37111 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37112 index 32784d1..4a8434a 100644
37113 --- a/drivers/atm/solos-pci.c
37114 +++ b/drivers/atm/solos-pci.c
37115 @@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
37116 }
37117 atm_charge(vcc, skb->truesize);
37118 vcc->push(vcc, skb);
37119 - atomic_inc(&vcc->stats->rx);
37120 + atomic_inc_unchecked(&vcc->stats->rx);
37121 break;
37122
37123 case PKT_STATUS:
37124 @@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37125 vcc = SKB_CB(oldskb)->vcc;
37126
37127 if (vcc) {
37128 - atomic_inc(&vcc->stats->tx);
37129 + atomic_inc_unchecked(&vcc->stats->tx);
37130 solos_pop(vcc, oldskb);
37131 } else {
37132 dev_kfree_skb_irq(oldskb);
37133 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37134 index 0215934..ce9f5b1 100644
37135 --- a/drivers/atm/suni.c
37136 +++ b/drivers/atm/suni.c
37137 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37138
37139
37140 #define ADD_LIMITED(s,v) \
37141 - atomic_add((v),&stats->s); \
37142 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37143 + atomic_add_unchecked((v),&stats->s); \
37144 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37145
37146
37147 static void suni_hz(unsigned long from_timer)
37148 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37149 index 5120a96..e2572bd 100644
37150 --- a/drivers/atm/uPD98402.c
37151 +++ b/drivers/atm/uPD98402.c
37152 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37153 struct sonet_stats tmp;
37154 int error = 0;
37155
37156 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37157 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37158 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37159 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37160 if (zero && !error) {
37161 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37162
37163
37164 #define ADD_LIMITED(s,v) \
37165 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37166 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37167 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37168 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37169 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37170 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37171
37172
37173 static void stat_event(struct atm_dev *dev)
37174 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37175 if (reason & uPD98402_INT_PFM) stat_event(dev);
37176 if (reason & uPD98402_INT_PCO) {
37177 (void) GET(PCOCR); /* clear interrupt cause */
37178 - atomic_add(GET(HECCT),
37179 + atomic_add_unchecked(GET(HECCT),
37180 &PRIV(dev)->sonet_stats.uncorr_hcs);
37181 }
37182 if ((reason & uPD98402_INT_RFO) &&
37183 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37184 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37185 uPD98402_INT_LOS),PIMR); /* enable them */
37186 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37187 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37188 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37189 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37190 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37191 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37192 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37193 return 0;
37194 }
37195
37196 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37197 index 969c3c2..9b72956 100644
37198 --- a/drivers/atm/zatm.c
37199 +++ b/drivers/atm/zatm.c
37200 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37201 }
37202 if (!size) {
37203 dev_kfree_skb_irq(skb);
37204 - if (vcc) atomic_inc(&vcc->stats->rx_err);
37205 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37206 continue;
37207 }
37208 if (!atm_charge(vcc,skb->truesize)) {
37209 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37210 skb->len = size;
37211 ATM_SKB(skb)->vcc = vcc;
37212 vcc->push(vcc,skb);
37213 - atomic_inc(&vcc->stats->rx);
37214 + atomic_inc_unchecked(&vcc->stats->rx);
37215 }
37216 zout(pos & 0xffff,MTA(mbx));
37217 #if 0 /* probably a stupid idea */
37218 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37219 skb_queue_head(&zatm_vcc->backlog,skb);
37220 break;
37221 }
37222 - atomic_inc(&vcc->stats->tx);
37223 + atomic_inc_unchecked(&vcc->stats->tx);
37224 wake_up(&zatm_vcc->tx_wait);
37225 }
37226
37227 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37228 index 73f6c29..b0c0e13 100644
37229 --- a/drivers/base/bus.c
37230 +++ b/drivers/base/bus.c
37231 @@ -1115,7 +1115,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37232 return -EINVAL;
37233
37234 mutex_lock(&subsys->p->mutex);
37235 - list_add_tail(&sif->node, &subsys->p->interfaces);
37236 + pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37237 if (sif->add_dev) {
37238 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37239 while ((dev = subsys_dev_iter_next(&iter)))
37240 @@ -1140,7 +1140,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37241 subsys = sif->subsys;
37242
37243 mutex_lock(&subsys->p->mutex);
37244 - list_del_init(&sif->node);
37245 + pax_list_del_init((struct list_head *)&sif->node);
37246 if (sif->remove_dev) {
37247 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37248 while ((dev = subsys_dev_iter_next(&iter)))
37249 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37250 index 0f38201..6c2b444 100644
37251 --- a/drivers/base/devtmpfs.c
37252 +++ b/drivers/base/devtmpfs.c
37253 @@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37254 if (!thread)
37255 return 0;
37256
37257 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37258 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37259 if (err)
37260 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37261 else
37262 @@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37263 *err = sys_unshare(CLONE_NEWNS);
37264 if (*err)
37265 goto out;
37266 - *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37267 + *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37268 if (*err)
37269 goto out;
37270 - sys_chdir("/.."); /* will traverse into overmounted root */
37271 - sys_chroot(".");
37272 + sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37273 + sys_chroot((char __force_user *)".");
37274 complete(&setup_done);
37275 while (1) {
37276 spin_lock(&req_lock);
37277 diff --git a/drivers/base/node.c b/drivers/base/node.c
37278 index bc9f43b..29703b8 100644
37279 --- a/drivers/base/node.c
37280 +++ b/drivers/base/node.c
37281 @@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37282 struct node_attr {
37283 struct device_attribute attr;
37284 enum node_states state;
37285 -};
37286 +} __do_const;
37287
37288 static ssize_t show_node_state(struct device *dev,
37289 struct device_attribute *attr, char *buf)
37290 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37291 index bfb8955..4ebff34 100644
37292 --- a/drivers/base/power/domain.c
37293 +++ b/drivers/base/power/domain.c
37294 @@ -1809,9 +1809,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
37295
37296 if (dev->power.subsys_data->domain_data) {
37297 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
37298 - gpd_data->ops = (struct gpd_dev_ops){ NULL };
37299 + memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
37300 if (clear_td)
37301 - gpd_data->td = (struct gpd_timing_data){ 0 };
37302 + memset(&gpd_data->td, 0, sizeof(gpd_data->td));
37303
37304 if (--gpd_data->refcount == 0) {
37305 dev->power.subsys_data->domain_data = NULL;
37306 @@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37307 {
37308 struct cpuidle_driver *cpuidle_drv;
37309 struct gpd_cpu_data *cpu_data;
37310 - struct cpuidle_state *idle_state;
37311 + cpuidle_state_no_const *idle_state;
37312 int ret = 0;
37313
37314 if (IS_ERR_OR_NULL(genpd) || state < 0)
37315 @@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37316 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37317 {
37318 struct gpd_cpu_data *cpu_data;
37319 - struct cpuidle_state *idle_state;
37320 + cpuidle_state_no_const *idle_state;
37321 int ret = 0;
37322
37323 if (IS_ERR_OR_NULL(genpd))
37324 diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37325 index 03e089a..0e9560c 100644
37326 --- a/drivers/base/power/sysfs.c
37327 +++ b/drivers/base/power/sysfs.c
37328 @@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37329 return -EIO;
37330 }
37331 }
37332 - return sprintf(buf, p);
37333 + return sprintf(buf, "%s", p);
37334 }
37335
37336 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
37337 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37338 index 2d56f41..8830f19 100644
37339 --- a/drivers/base/power/wakeup.c
37340 +++ b/drivers/base/power/wakeup.c
37341 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
37342 * They need to be modified together atomically, so it's better to use one
37343 * atomic variable to hold them both.
37344 */
37345 -static atomic_t combined_event_count = ATOMIC_INIT(0);
37346 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37347
37348 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37349 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37350
37351 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37352 {
37353 - unsigned int comb = atomic_read(&combined_event_count);
37354 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
37355
37356 *cnt = (comb >> IN_PROGRESS_BITS);
37357 *inpr = comb & MAX_IN_PROGRESS;
37358 @@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37359 ws->start_prevent_time = ws->last_time;
37360
37361 /* Increment the counter of events in progress. */
37362 - cec = atomic_inc_return(&combined_event_count);
37363 + cec = atomic_inc_return_unchecked(&combined_event_count);
37364
37365 trace_wakeup_source_activate(ws->name, cec);
37366 }
37367 @@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37368 * Increment the counter of registered wakeup events and decrement the
37369 * couter of wakeup events in progress simultaneously.
37370 */
37371 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37372 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37373 trace_wakeup_source_deactivate(ws->name, cec);
37374
37375 split_counters(&cnt, &inpr);
37376 diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37377 index e8d11b6..7b1b36f 100644
37378 --- a/drivers/base/syscore.c
37379 +++ b/drivers/base/syscore.c
37380 @@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37381 void register_syscore_ops(struct syscore_ops *ops)
37382 {
37383 mutex_lock(&syscore_ops_lock);
37384 - list_add_tail(&ops->node, &syscore_ops_list);
37385 + pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37386 mutex_unlock(&syscore_ops_lock);
37387 }
37388 EXPORT_SYMBOL_GPL(register_syscore_ops);
37389 @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37390 void unregister_syscore_ops(struct syscore_ops *ops)
37391 {
37392 mutex_lock(&syscore_ops_lock);
37393 - list_del(&ops->node);
37394 + pax_list_del((struct list_head *)&ops->node);
37395 mutex_unlock(&syscore_ops_lock);
37396 }
37397 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37398 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37399 index b35fc4f..c902870 100644
37400 --- a/drivers/block/cciss.c
37401 +++ b/drivers/block/cciss.c
37402 @@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
37403 while (!list_empty(&h->reqQ)) {
37404 c = list_entry(h->reqQ.next, CommandList_struct, list);
37405 /* can't do anything if fifo is full */
37406 - if ((h->access.fifo_full(h))) {
37407 + if ((h->access->fifo_full(h))) {
37408 dev_warn(&h->pdev->dev, "fifo full\n");
37409 break;
37410 }
37411 @@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
37412 h->Qdepth--;
37413
37414 /* Tell the controller execute command */
37415 - h->access.submit_command(h, c);
37416 + h->access->submit_command(h, c);
37417
37418 /* Put job onto the completed Q */
37419 addQ(&h->cmpQ, c);
37420 @@ -3447,17 +3447,17 @@ startio:
37421
37422 static inline unsigned long get_next_completion(ctlr_info_t *h)
37423 {
37424 - return h->access.command_completed(h);
37425 + return h->access->command_completed(h);
37426 }
37427
37428 static inline int interrupt_pending(ctlr_info_t *h)
37429 {
37430 - return h->access.intr_pending(h);
37431 + return h->access->intr_pending(h);
37432 }
37433
37434 static inline long interrupt_not_for_us(ctlr_info_t *h)
37435 {
37436 - return ((h->access.intr_pending(h) == 0) ||
37437 + return ((h->access->intr_pending(h) == 0) ||
37438 (h->interrupts_enabled == 0));
37439 }
37440
37441 @@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
37442 u32 a;
37443
37444 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37445 - return h->access.command_completed(h);
37446 + return h->access->command_completed(h);
37447
37448 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37449 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37450 @@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37451 trans_support & CFGTBL_Trans_use_short_tags);
37452
37453 /* Change the access methods to the performant access methods */
37454 - h->access = SA5_performant_access;
37455 + h->access = &SA5_performant_access;
37456 h->transMethod = CFGTBL_Trans_Performant;
37457
37458 return;
37459 @@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37460 if (prod_index < 0)
37461 return -ENODEV;
37462 h->product_name = products[prod_index].product_name;
37463 - h->access = *(products[prod_index].access);
37464 + h->access = products[prod_index].access;
37465
37466 if (cciss_board_disabled(h)) {
37467 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37468 @@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
37469 }
37470
37471 /* make sure the board interrupts are off */
37472 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
37473 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
37474 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37475 if (rc)
37476 goto clean2;
37477 @@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
37478 * fake ones to scoop up any residual completions.
37479 */
37480 spin_lock_irqsave(&h->lock, flags);
37481 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
37482 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
37483 spin_unlock_irqrestore(&h->lock, flags);
37484 free_irq(h->intr[h->intr_mode], h);
37485 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37486 @@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
37487 dev_info(&h->pdev->dev, "Board READY.\n");
37488 dev_info(&h->pdev->dev,
37489 "Waiting for stale completions to drain.\n");
37490 - h->access.set_intr_mask(h, CCISS_INTR_ON);
37491 + h->access->set_intr_mask(h, CCISS_INTR_ON);
37492 msleep(10000);
37493 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
37494 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
37495
37496 rc = controller_reset_failed(h->cfgtable);
37497 if (rc)
37498 @@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
37499 cciss_scsi_setup(h);
37500
37501 /* Turn the interrupts on so we can service requests */
37502 - h->access.set_intr_mask(h, CCISS_INTR_ON);
37503 + h->access->set_intr_mask(h, CCISS_INTR_ON);
37504
37505 /* Get the firmware version */
37506 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37507 @@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37508 kfree(flush_buf);
37509 if (return_code != IO_OK)
37510 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37511 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
37512 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
37513 free_irq(h->intr[h->intr_mode], h);
37514 }
37515
37516 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37517 index 7fda30e..2f27946 100644
37518 --- a/drivers/block/cciss.h
37519 +++ b/drivers/block/cciss.h
37520 @@ -101,7 +101,7 @@ struct ctlr_info
37521 /* information about each logical volume */
37522 drive_info_struct *drv[CISS_MAX_LUN];
37523
37524 - struct access_method access;
37525 + struct access_method *access;
37526
37527 /* queue and queue Info */
37528 struct list_head reqQ;
37529 @@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37530 }
37531
37532 static struct access_method SA5_access = {
37533 - SA5_submit_command,
37534 - SA5_intr_mask,
37535 - SA5_fifo_full,
37536 - SA5_intr_pending,
37537 - SA5_completed,
37538 + .submit_command = SA5_submit_command,
37539 + .set_intr_mask = SA5_intr_mask,
37540 + .fifo_full = SA5_fifo_full,
37541 + .intr_pending = SA5_intr_pending,
37542 + .command_completed = SA5_completed,
37543 };
37544
37545 static struct access_method SA5B_access = {
37546 - SA5_submit_command,
37547 - SA5B_intr_mask,
37548 - SA5_fifo_full,
37549 - SA5B_intr_pending,
37550 - SA5_completed,
37551 + .submit_command = SA5_submit_command,
37552 + .set_intr_mask = SA5B_intr_mask,
37553 + .fifo_full = SA5_fifo_full,
37554 + .intr_pending = SA5B_intr_pending,
37555 + .command_completed = SA5_completed,
37556 };
37557
37558 static struct access_method SA5_performant_access = {
37559 - SA5_submit_command,
37560 - SA5_performant_intr_mask,
37561 - SA5_fifo_full,
37562 - SA5_performant_intr_pending,
37563 - SA5_performant_completed,
37564 + .submit_command = SA5_submit_command,
37565 + .set_intr_mask = SA5_performant_intr_mask,
37566 + .fifo_full = SA5_fifo_full,
37567 + .intr_pending = SA5_performant_intr_pending,
37568 + .command_completed = SA5_performant_completed,
37569 };
37570
37571 struct board_type {
37572 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37573 index 2b94403..fd6ad1f 100644
37574 --- a/drivers/block/cpqarray.c
37575 +++ b/drivers/block/cpqarray.c
37576 @@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37577 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37578 goto Enomem4;
37579 }
37580 - hba[i]->access.set_intr_mask(hba[i], 0);
37581 + hba[i]->access->set_intr_mask(hba[i], 0);
37582 if (request_irq(hba[i]->intr, do_ida_intr,
37583 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37584 {
37585 @@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37586 add_timer(&hba[i]->timer);
37587
37588 /* Enable IRQ now that spinlock and rate limit timer are set up */
37589 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37590 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37591
37592 for(j=0; j<NWD; j++) {
37593 struct gendisk *disk = ida_gendisk[i][j];
37594 @@ -694,7 +694,7 @@ DBGINFO(
37595 for(i=0; i<NR_PRODUCTS; i++) {
37596 if (board_id == products[i].board_id) {
37597 c->product_name = products[i].product_name;
37598 - c->access = *(products[i].access);
37599 + c->access = products[i].access;
37600 break;
37601 }
37602 }
37603 @@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37604 hba[ctlr]->intr = intr;
37605 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37606 hba[ctlr]->product_name = products[j].product_name;
37607 - hba[ctlr]->access = *(products[j].access);
37608 + hba[ctlr]->access = products[j].access;
37609 hba[ctlr]->ctlr = ctlr;
37610 hba[ctlr]->board_id = board_id;
37611 hba[ctlr]->pci_dev = NULL; /* not PCI */
37612 @@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37613
37614 while((c = h->reqQ) != NULL) {
37615 /* Can't do anything if we're busy */
37616 - if (h->access.fifo_full(h) == 0)
37617 + if (h->access->fifo_full(h) == 0)
37618 return;
37619
37620 /* Get the first entry from the request Q */
37621 @@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37622 h->Qdepth--;
37623
37624 /* Tell the controller to do our bidding */
37625 - h->access.submit_command(h, c);
37626 + h->access->submit_command(h, c);
37627
37628 /* Get onto the completion Q */
37629 addQ(&h->cmpQ, c);
37630 @@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37631 unsigned long flags;
37632 __u32 a,a1;
37633
37634 - istat = h->access.intr_pending(h);
37635 + istat = h->access->intr_pending(h);
37636 /* Is this interrupt for us? */
37637 if (istat == 0)
37638 return IRQ_NONE;
37639 @@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37640 */
37641 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37642 if (istat & FIFO_NOT_EMPTY) {
37643 - while((a = h->access.command_completed(h))) {
37644 + while((a = h->access->command_completed(h))) {
37645 a1 = a; a &= ~3;
37646 if ((c = h->cmpQ) == NULL)
37647 {
37648 @@ -1448,11 +1448,11 @@ static int sendcmd(
37649 /*
37650 * Disable interrupt
37651 */
37652 - info_p->access.set_intr_mask(info_p, 0);
37653 + info_p->access->set_intr_mask(info_p, 0);
37654 /* Make sure there is room in the command FIFO */
37655 /* Actually it should be completely empty at this time. */
37656 for (i = 200000; i > 0; i--) {
37657 - temp = info_p->access.fifo_full(info_p);
37658 + temp = info_p->access->fifo_full(info_p);
37659 if (temp != 0) {
37660 break;
37661 }
37662 @@ -1465,7 +1465,7 @@ DBG(
37663 /*
37664 * Send the cmd
37665 */
37666 - info_p->access.submit_command(info_p, c);
37667 + info_p->access->submit_command(info_p, c);
37668 complete = pollcomplete(ctlr);
37669
37670 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37671 @@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37672 * we check the new geometry. Then turn interrupts back on when
37673 * we're done.
37674 */
37675 - host->access.set_intr_mask(host, 0);
37676 + host->access->set_intr_mask(host, 0);
37677 getgeometry(ctlr);
37678 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
37679 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
37680
37681 for(i=0; i<NWD; i++) {
37682 struct gendisk *disk = ida_gendisk[ctlr][i];
37683 @@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
37684 /* Wait (up to 2 seconds) for a command to complete */
37685
37686 for (i = 200000; i > 0; i--) {
37687 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
37688 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
37689 if (done == 0) {
37690 udelay(10); /* a short fixed delay */
37691 } else
37692 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
37693 index be73e9d..7fbf140 100644
37694 --- a/drivers/block/cpqarray.h
37695 +++ b/drivers/block/cpqarray.h
37696 @@ -99,7 +99,7 @@ struct ctlr_info {
37697 drv_info_t drv[NWD];
37698 struct proc_dir_entry *proc;
37699
37700 - struct access_method access;
37701 + struct access_method *access;
37702
37703 cmdlist_t *reqQ;
37704 cmdlist_t *cmpQ;
37705 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
37706 index 0e06f0c..c47b81d 100644
37707 --- a/drivers/block/drbd/drbd_int.h
37708 +++ b/drivers/block/drbd/drbd_int.h
37709 @@ -582,7 +582,7 @@ struct drbd_epoch {
37710 struct drbd_tconn *tconn;
37711 struct list_head list;
37712 unsigned int barrier_nr;
37713 - atomic_t epoch_size; /* increased on every request added. */
37714 + atomic_unchecked_t epoch_size; /* increased on every request added. */
37715 atomic_t active; /* increased on every req. added, and dec on every finished. */
37716 unsigned long flags;
37717 };
37718 @@ -1022,7 +1022,7 @@ struct drbd_conf {
37719 unsigned int al_tr_number;
37720 int al_tr_cycle;
37721 wait_queue_head_t seq_wait;
37722 - atomic_t packet_seq;
37723 + atomic_unchecked_t packet_seq;
37724 unsigned int peer_seq;
37725 spinlock_t peer_seq_lock;
37726 unsigned int minor;
37727 @@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
37728 char __user *uoptval;
37729 int err;
37730
37731 - uoptval = (char __user __force *)optval;
37732 + uoptval = (char __force_user *)optval;
37733
37734 set_fs(KERNEL_DS);
37735 if (level == SOL_SOCKET)
37736 diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
37737 index 89c497c..9c736ae 100644
37738 --- a/drivers/block/drbd/drbd_interval.c
37739 +++ b/drivers/block/drbd/drbd_interval.c
37740 @@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
37741 }
37742
37743 static const struct rb_augment_callbacks augment_callbacks = {
37744 - augment_propagate,
37745 - augment_copy,
37746 - augment_rotate,
37747 + .propagate = augment_propagate,
37748 + .copy = augment_copy,
37749 + .rotate = augment_rotate,
37750 };
37751
37752 /**
37753 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
37754 index 9e3818b..7b64c92 100644
37755 --- a/drivers/block/drbd/drbd_main.c
37756 +++ b/drivers/block/drbd/drbd_main.c
37757 @@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
37758 p->sector = sector;
37759 p->block_id = block_id;
37760 p->blksize = blksize;
37761 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37762 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37763 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
37764 }
37765
37766 @@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
37767 return -EIO;
37768 p->sector = cpu_to_be64(req->i.sector);
37769 p->block_id = (unsigned long)req;
37770 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37771 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37772 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
37773 if (mdev->state.conn >= C_SYNC_SOURCE &&
37774 mdev->state.conn <= C_PAUSED_SYNC_T)
37775 @@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
37776 {
37777 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
37778
37779 - if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
37780 - conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
37781 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
37782 + conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
37783 kfree(tconn->current_epoch);
37784
37785 idr_destroy(&tconn->volumes);
37786 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
37787 index c706d50..5e1b472 100644
37788 --- a/drivers/block/drbd/drbd_nl.c
37789 +++ b/drivers/block/drbd/drbd_nl.c
37790 @@ -3440,7 +3440,7 @@ out:
37791
37792 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37793 {
37794 - static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37795 + static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37796 struct sk_buff *msg;
37797 struct drbd_genlmsghdr *d_out;
37798 unsigned seq;
37799 @@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37800 return;
37801 }
37802
37803 - seq = atomic_inc_return(&drbd_genl_seq);
37804 + seq = atomic_inc_return_unchecked(&drbd_genl_seq);
37805 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
37806 if (!msg)
37807 goto failed;
37808 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
37809 index 6fa6673..b7f97e9 100644
37810 --- a/drivers/block/drbd/drbd_receiver.c
37811 +++ b/drivers/block/drbd/drbd_receiver.c
37812 @@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
37813 {
37814 int err;
37815
37816 - atomic_set(&mdev->packet_seq, 0);
37817 + atomic_set_unchecked(&mdev->packet_seq, 0);
37818 mdev->peer_seq = 0;
37819
37820 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
37821 @@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37822 do {
37823 next_epoch = NULL;
37824
37825 - epoch_size = atomic_read(&epoch->epoch_size);
37826 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
37827
37828 switch (ev & ~EV_CLEANUP) {
37829 case EV_PUT:
37830 @@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37831 rv = FE_DESTROYED;
37832 } else {
37833 epoch->flags = 0;
37834 - atomic_set(&epoch->epoch_size, 0);
37835 + atomic_set_unchecked(&epoch->epoch_size, 0);
37836 /* atomic_set(&epoch->active, 0); is already zero */
37837 if (rv == FE_STILL_LIVE)
37838 rv = FE_RECYCLED;
37839 @@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37840 conn_wait_active_ee_empty(tconn);
37841 drbd_flush(tconn);
37842
37843 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
37844 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37845 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
37846 if (epoch)
37847 break;
37848 @@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37849 }
37850
37851 epoch->flags = 0;
37852 - atomic_set(&epoch->epoch_size, 0);
37853 + atomic_set_unchecked(&epoch->epoch_size, 0);
37854 atomic_set(&epoch->active, 0);
37855
37856 spin_lock(&tconn->epoch_lock);
37857 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
37858 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37859 list_add(&epoch->list, &tconn->current_epoch->list);
37860 tconn->current_epoch = epoch;
37861 tconn->epochs++;
37862 @@ -2163,7 +2163,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37863
37864 err = wait_for_and_update_peer_seq(mdev, peer_seq);
37865 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
37866 - atomic_inc(&tconn->current_epoch->epoch_size);
37867 + atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
37868 err2 = drbd_drain_block(mdev, pi->size);
37869 if (!err)
37870 err = err2;
37871 @@ -2197,7 +2197,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37872
37873 spin_lock(&tconn->epoch_lock);
37874 peer_req->epoch = tconn->current_epoch;
37875 - atomic_inc(&peer_req->epoch->epoch_size);
37876 + atomic_inc_unchecked(&peer_req->epoch->epoch_size);
37877 atomic_inc(&peer_req->epoch->active);
37878 spin_unlock(&tconn->epoch_lock);
37879
37880 @@ -4344,7 +4344,7 @@ struct data_cmd {
37881 int expect_payload;
37882 size_t pkt_size;
37883 int (*fn)(struct drbd_tconn *, struct packet_info *);
37884 -};
37885 +} __do_const;
37886
37887 static struct data_cmd drbd_cmd_handler[] = {
37888 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
37889 @@ -4464,7 +4464,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
37890 if (!list_empty(&tconn->current_epoch->list))
37891 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
37892 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
37893 - atomic_set(&tconn->current_epoch->epoch_size, 0);
37894 + atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
37895 tconn->send.seen_any_write_yet = false;
37896
37897 conn_info(tconn, "Connection closed\n");
37898 @@ -5220,7 +5220,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
37899 struct asender_cmd {
37900 size_t pkt_size;
37901 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
37902 -};
37903 +} __do_const;
37904
37905 static struct asender_cmd asender_tbl[] = {
37906 [P_PING] = { 0, got_Ping },
37907 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
37908 index c8dac73..1800093 100644
37909 --- a/drivers/block/loop.c
37910 +++ b/drivers/block/loop.c
37911 @@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
37912
37913 file_start_write(file);
37914 set_fs(get_ds());
37915 - bw = file->f_op->write(file, buf, len, &pos);
37916 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
37917 set_fs(old_fs);
37918 file_end_write(file);
37919 if (likely(bw == len))
37920 diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
37921 index 83a598e..2de5ce3 100644
37922 --- a/drivers/block/null_blk.c
37923 +++ b/drivers/block/null_blk.c
37924 @@ -407,14 +407,24 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
37925 return 0;
37926 }
37927
37928 -static struct blk_mq_ops null_mq_ops = {
37929 - .queue_rq = null_queue_rq,
37930 - .map_queue = blk_mq_map_queue,
37931 +static struct blk_mq_ops null_mq_single_ops = {
37932 + .queue_rq = null_queue_rq,
37933 + .map_queue = blk_mq_map_queue,
37934 .init_hctx = null_init_hctx,
37935 + .alloc_hctx = blk_mq_alloc_single_hw_queue,
37936 + .free_hctx = blk_mq_free_single_hw_queue,
37937 +};
37938 +
37939 +static struct blk_mq_ops null_mq_per_node_ops = {
37940 + .queue_rq = null_queue_rq,
37941 + .map_queue = blk_mq_map_queue,
37942 + .init_hctx = null_init_hctx,
37943 + .alloc_hctx = null_alloc_hctx,
37944 + .free_hctx = null_free_hctx,
37945 };
37946
37947 static struct blk_mq_reg null_mq_reg = {
37948 - .ops = &null_mq_ops,
37949 + .ops = &null_mq_single_ops,
37950 .queue_depth = 64,
37951 .cmd_size = sizeof(struct nullb_cmd),
37952 .flags = BLK_MQ_F_SHOULD_MERGE,
37953 @@ -545,13 +555,8 @@ static int null_add_dev(void)
37954 null_mq_reg.queue_depth = hw_queue_depth;
37955 null_mq_reg.nr_hw_queues = submit_queues;
37956
37957 - if (use_per_node_hctx) {
37958 - null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
37959 - null_mq_reg.ops->free_hctx = null_free_hctx;
37960 - } else {
37961 - null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
37962 - null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
37963 - }
37964 + if (use_per_node_hctx)
37965 + null_mq_reg.ops = &null_mq_per_node_ops;
37966
37967 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
37968 } else if (queue_mode == NULL_Q_BIO) {
37969 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
37970 index ff8668c..f62167a 100644
37971 --- a/drivers/block/pktcdvd.c
37972 +++ b/drivers/block/pktcdvd.c
37973 @@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
37974
37975 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
37976 {
37977 - return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
37978 + return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
37979 }
37980
37981 /*
37982 @@ -1883,7 +1883,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
37983 return -EROFS;
37984 }
37985 pd->settings.fp = ti.fp;
37986 - pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
37987 + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
37988
37989 if (ti.nwa_v) {
37990 pd->nwa = be32_to_cpu(ti.next_writable);
37991 diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
37992 index e5565fb..71be10b4 100644
37993 --- a/drivers/block/smart1,2.h
37994 +++ b/drivers/block/smart1,2.h
37995 @@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
37996 }
37997
37998 static struct access_method smart4_access = {
37999 - smart4_submit_command,
38000 - smart4_intr_mask,
38001 - smart4_fifo_full,
38002 - smart4_intr_pending,
38003 - smart4_completed,
38004 + .submit_command = smart4_submit_command,
38005 + .set_intr_mask = smart4_intr_mask,
38006 + .fifo_full = smart4_fifo_full,
38007 + .intr_pending = smart4_intr_pending,
38008 + .command_completed = smart4_completed,
38009 };
38010
38011 /*
38012 @@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38013 }
38014
38015 static struct access_method smart2_access = {
38016 - smart2_submit_command,
38017 - smart2_intr_mask,
38018 - smart2_fifo_full,
38019 - smart2_intr_pending,
38020 - smart2_completed,
38021 + .submit_command = smart2_submit_command,
38022 + .set_intr_mask = smart2_intr_mask,
38023 + .fifo_full = smart2_fifo_full,
38024 + .intr_pending = smart2_intr_pending,
38025 + .command_completed = smart2_completed,
38026 };
38027
38028 /*
38029 @@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38030 }
38031
38032 static struct access_method smart2e_access = {
38033 - smart2e_submit_command,
38034 - smart2e_intr_mask,
38035 - smart2e_fifo_full,
38036 - smart2e_intr_pending,
38037 - smart2e_completed,
38038 + .submit_command = smart2e_submit_command,
38039 + .set_intr_mask = smart2e_intr_mask,
38040 + .fifo_full = smart2e_fifo_full,
38041 + .intr_pending = smart2e_intr_pending,
38042 + .command_completed = smart2e_completed,
38043 };
38044
38045 /*
38046 @@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38047 }
38048
38049 static struct access_method smart1_access = {
38050 - smart1_submit_command,
38051 - smart1_intr_mask,
38052 - smart1_fifo_full,
38053 - smart1_intr_pending,
38054 - smart1_completed,
38055 + .submit_command = smart1_submit_command,
38056 + .set_intr_mask = smart1_intr_mask,
38057 + .fifo_full = smart1_fifo_full,
38058 + .intr_pending = smart1_intr_pending,
38059 + .command_completed = smart1_completed,
38060 };
38061 diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38062 index f038dba..bb74c08 100644
38063 --- a/drivers/bluetooth/btwilink.c
38064 +++ b/drivers/bluetooth/btwilink.c
38065 @@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38066
38067 static int bt_ti_probe(struct platform_device *pdev)
38068 {
38069 - static struct ti_st *hst;
38070 + struct ti_st *hst;
38071 struct hci_dev *hdev;
38072 int err;
38073
38074 diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
38075 index b6739cb..962fd35 100644
38076 --- a/drivers/bus/arm-cci.c
38077 +++ b/drivers/bus/arm-cci.c
38078 @@ -979,7 +979,7 @@ static int cci_probe(void)
38079
38080 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
38081
38082 - ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
38083 + ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
38084 if (!ports)
38085 return -ENOMEM;
38086
38087 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38088 index 8a3aff7..d7538c2 100644
38089 --- a/drivers/cdrom/cdrom.c
38090 +++ b/drivers/cdrom/cdrom.c
38091 @@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38092 ENSURE(reset, CDC_RESET);
38093 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38094 cdi->mc_flags = 0;
38095 - cdo->n_minors = 0;
38096 cdi->options = CDO_USE_FFLAGS;
38097
38098 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
38099 @@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38100 else
38101 cdi->cdda_method = CDDA_OLD;
38102
38103 - if (!cdo->generic_packet)
38104 - cdo->generic_packet = cdrom_dummy_generic_packet;
38105 + if (!cdo->generic_packet) {
38106 + pax_open_kernel();
38107 + *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38108 + pax_close_kernel();
38109 + }
38110
38111 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38112 mutex_lock(&cdrom_mutex);
38113 @@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38114 if (cdi->exit)
38115 cdi->exit(cdi);
38116
38117 - cdi->ops->n_minors--;
38118 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38119 }
38120
38121 @@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38122 */
38123 nr = nframes;
38124 do {
38125 - cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38126 + cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38127 if (cgc.buffer)
38128 break;
38129
38130 @@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38131 struct cdrom_device_info *cdi;
38132 int ret;
38133
38134 - ret = scnprintf(info + *pos, max_size - *pos, header);
38135 + ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38136 if (!ret)
38137 return 1;
38138
38139 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38140 index 5980cb9..6d7bd7e 100644
38141 --- a/drivers/cdrom/gdrom.c
38142 +++ b/drivers/cdrom/gdrom.c
38143 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38144 .audio_ioctl = gdrom_audio_ioctl,
38145 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38146 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38147 - .n_minors = 1,
38148 };
38149
38150 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38151 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38152 index fa3243d..8c98297 100644
38153 --- a/drivers/char/Kconfig
38154 +++ b/drivers/char/Kconfig
38155 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38156
38157 config DEVKMEM
38158 bool "/dev/kmem virtual device support"
38159 - default y
38160 + default n
38161 + depends on !GRKERNSEC_KMEM
38162 help
38163 Say Y here if you want to support the /dev/kmem device. The
38164 /dev/kmem device is rarely used, but can be used for certain
38165 @@ -576,6 +577,7 @@ config DEVPORT
38166 bool
38167 depends on !M68K
38168 depends on ISA || PCI
38169 + depends on !GRKERNSEC_KMEM
38170 default y
38171
38172 source "drivers/s390/char/Kconfig"
38173 diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38174 index a48e05b..6bac831 100644
38175 --- a/drivers/char/agp/compat_ioctl.c
38176 +++ b/drivers/char/agp/compat_ioctl.c
38177 @@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38178 return -ENOMEM;
38179 }
38180
38181 - if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38182 + if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38183 sizeof(*usegment) * ureserve.seg_count)) {
38184 kfree(usegment);
38185 kfree(ksegment);
38186 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38187 index 1b19239..b87b143 100644
38188 --- a/drivers/char/agp/frontend.c
38189 +++ b/drivers/char/agp/frontend.c
38190 @@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38191 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38192 return -EFAULT;
38193
38194 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38195 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38196 return -EFAULT;
38197
38198 client = agp_find_client_by_pid(reserve.pid);
38199 @@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38200 if (segment == NULL)
38201 return -ENOMEM;
38202
38203 - if (copy_from_user(segment, (void __user *) reserve.seg_list,
38204 + if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38205 sizeof(struct agp_segment) * reserve.seg_count)) {
38206 kfree(segment);
38207 return -EFAULT;
38208 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38209 index 4f94375..413694e 100644
38210 --- a/drivers/char/genrtc.c
38211 +++ b/drivers/char/genrtc.c
38212 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38213 switch (cmd) {
38214
38215 case RTC_PLL_GET:
38216 + memset(&pll, 0, sizeof(pll));
38217 if (get_rtc_pll(&pll))
38218 return -EINVAL;
38219 else
38220 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38221 index 5d9c31d..c94ccb5 100644
38222 --- a/drivers/char/hpet.c
38223 +++ b/drivers/char/hpet.c
38224 @@ -578,7 +578,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38225 }
38226
38227 static int
38228 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38229 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38230 struct hpet_info *info)
38231 {
38232 struct hpet_timer __iomem *timer;
38233 diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
38234 index 86fe45c..c0ea948 100644
38235 --- a/drivers/char/hw_random/intel-rng.c
38236 +++ b/drivers/char/hw_random/intel-rng.c
38237 @@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
38238
38239 if (no_fwh_detect)
38240 return -ENODEV;
38241 - printk(warning);
38242 + printk("%s", warning);
38243 return -EBUSY;
38244 }
38245
38246 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38247 index ec4e10f..f2a763b 100644
38248 --- a/drivers/char/ipmi/ipmi_msghandler.c
38249 +++ b/drivers/char/ipmi/ipmi_msghandler.c
38250 @@ -420,7 +420,7 @@ struct ipmi_smi {
38251 struct proc_dir_entry *proc_dir;
38252 char proc_dir_name[10];
38253
38254 - atomic_t stats[IPMI_NUM_STATS];
38255 + atomic_unchecked_t stats[IPMI_NUM_STATS];
38256
38257 /*
38258 * run_to_completion duplicate of smb_info, smi_info
38259 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
38260
38261
38262 #define ipmi_inc_stat(intf, stat) \
38263 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38264 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38265 #define ipmi_get_stat(intf, stat) \
38266 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38267 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38268
38269 static int is_lan_addr(struct ipmi_addr *addr)
38270 {
38271 @@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38272 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38273 init_waitqueue_head(&intf->waitq);
38274 for (i = 0; i < IPMI_NUM_STATS; i++)
38275 - atomic_set(&intf->stats[i], 0);
38276 + atomic_set_unchecked(&intf->stats[i], 0);
38277
38278 intf->proc_dir = NULL;
38279
38280 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38281 index 15e4a60..b046093 100644
38282 --- a/drivers/char/ipmi/ipmi_si_intf.c
38283 +++ b/drivers/char/ipmi/ipmi_si_intf.c
38284 @@ -280,7 +280,7 @@ struct smi_info {
38285 unsigned char slave_addr;
38286
38287 /* Counters and things for the proc filesystem. */
38288 - atomic_t stats[SI_NUM_STATS];
38289 + atomic_unchecked_t stats[SI_NUM_STATS];
38290
38291 struct task_struct *thread;
38292
38293 @@ -289,9 +289,9 @@ struct smi_info {
38294 };
38295
38296 #define smi_inc_stat(smi, stat) \
38297 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38298 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38299 #define smi_get_stat(smi, stat) \
38300 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38301 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38302
38303 #define SI_MAX_PARMS 4
38304
38305 @@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info *new_smi)
38306 atomic_set(&new_smi->req_events, 0);
38307 new_smi->run_to_completion = 0;
38308 for (i = 0; i < SI_NUM_STATS; i++)
38309 - atomic_set(&new_smi->stats[i], 0);
38310 + atomic_set_unchecked(&new_smi->stats[i], 0);
38311
38312 new_smi->interrupt_disabled = 1;
38313 atomic_set(&new_smi->stop_operation, 0);
38314 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38315 index f895a8c..2bc9147 100644
38316 --- a/drivers/char/mem.c
38317 +++ b/drivers/char/mem.c
38318 @@ -18,6 +18,7 @@
38319 #include <linux/raw.h>
38320 #include <linux/tty.h>
38321 #include <linux/capability.h>
38322 +#include <linux/security.h>
38323 #include <linux/ptrace.h>
38324 #include <linux/device.h>
38325 #include <linux/highmem.h>
38326 @@ -37,6 +38,10 @@
38327
38328 #define DEVPORT_MINOR 4
38329
38330 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38331 +extern const struct file_operations grsec_fops;
38332 +#endif
38333 +
38334 static inline unsigned long size_inside_page(unsigned long start,
38335 unsigned long size)
38336 {
38337 @@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38338
38339 while (cursor < to) {
38340 if (!devmem_is_allowed(pfn)) {
38341 +#ifdef CONFIG_GRKERNSEC_KMEM
38342 + gr_handle_mem_readwrite(from, to);
38343 +#else
38344 printk(KERN_INFO
38345 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38346 current->comm, from, to);
38347 +#endif
38348 return 0;
38349 }
38350 cursor += PAGE_SIZE;
38351 @@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38352 }
38353 return 1;
38354 }
38355 +#elif defined(CONFIG_GRKERNSEC_KMEM)
38356 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38357 +{
38358 + return 0;
38359 +}
38360 #else
38361 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38362 {
38363 @@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38364
38365 while (count > 0) {
38366 unsigned long remaining;
38367 + char *temp;
38368
38369 sz = size_inside_page(p, count);
38370
38371 @@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38372 if (!ptr)
38373 return -EFAULT;
38374
38375 - remaining = copy_to_user(buf, ptr, sz);
38376 +#ifdef CONFIG_PAX_USERCOPY
38377 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38378 + if (!temp) {
38379 + unxlate_dev_mem_ptr(p, ptr);
38380 + return -ENOMEM;
38381 + }
38382 + memcpy(temp, ptr, sz);
38383 +#else
38384 + temp = ptr;
38385 +#endif
38386 +
38387 + remaining = copy_to_user(buf, temp, sz);
38388 +
38389 +#ifdef CONFIG_PAX_USERCOPY
38390 + kfree(temp);
38391 +#endif
38392 +
38393 unxlate_dev_mem_ptr(p, ptr);
38394 if (remaining)
38395 return -EFAULT;
38396 @@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38397 size_t count, loff_t *ppos)
38398 {
38399 unsigned long p = *ppos;
38400 - ssize_t low_count, read, sz;
38401 + ssize_t low_count, read, sz, err = 0;
38402 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38403 - int err = 0;
38404
38405 read = 0;
38406 if (p < (unsigned long) high_memory) {
38407 @@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38408 }
38409 #endif
38410 while (low_count > 0) {
38411 + char *temp;
38412 +
38413 sz = size_inside_page(p, low_count);
38414
38415 /*
38416 @@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38417 */
38418 kbuf = xlate_dev_kmem_ptr((char *)p);
38419
38420 - if (copy_to_user(buf, kbuf, sz))
38421 +#ifdef CONFIG_PAX_USERCOPY
38422 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38423 + if (!temp)
38424 + return -ENOMEM;
38425 + memcpy(temp, kbuf, sz);
38426 +#else
38427 + temp = kbuf;
38428 +#endif
38429 +
38430 + err = copy_to_user(buf, temp, sz);
38431 +
38432 +#ifdef CONFIG_PAX_USERCOPY
38433 + kfree(temp);
38434 +#endif
38435 +
38436 + if (err)
38437 return -EFAULT;
38438 buf += sz;
38439 p += sz;
38440 @@ -822,6 +869,9 @@ static const struct memdev {
38441 #ifdef CONFIG_PRINTK
38442 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38443 #endif
38444 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38445 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38446 +#endif
38447 };
38448
38449 static int memory_open(struct inode *inode, struct file *filp)
38450 @@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
38451 continue;
38452
38453 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38454 - NULL, devlist[minor].name);
38455 + NULL, "%s", devlist[minor].name);
38456 }
38457
38458 return tty_init();
38459 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38460 index 9df78e2..01ba9ae 100644
38461 --- a/drivers/char/nvram.c
38462 +++ b/drivers/char/nvram.c
38463 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38464
38465 spin_unlock_irq(&rtc_lock);
38466
38467 - if (copy_to_user(buf, contents, tmp - contents))
38468 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38469 return -EFAULT;
38470
38471 *ppos = i;
38472 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38473 index d39cca6..8c1e269 100644
38474 --- a/drivers/char/pcmcia/synclink_cs.c
38475 +++ b/drivers/char/pcmcia/synclink_cs.c
38476 @@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38477
38478 if (debug_level >= DEBUG_LEVEL_INFO)
38479 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38480 - __FILE__, __LINE__, info->device_name, port->count);
38481 + __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38482
38483 - WARN_ON(!port->count);
38484 + WARN_ON(!atomic_read(&port->count));
38485
38486 if (tty_port_close_start(port, tty, filp) == 0)
38487 goto cleanup;
38488 @@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38489 cleanup:
38490 if (debug_level >= DEBUG_LEVEL_INFO)
38491 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38492 - tty->driver->name, port->count);
38493 + tty->driver->name, atomic_read(&port->count));
38494 }
38495
38496 /* Wait until the transmitter is empty.
38497 @@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38498
38499 if (debug_level >= DEBUG_LEVEL_INFO)
38500 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38501 - __FILE__, __LINE__, tty->driver->name, port->count);
38502 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38503
38504 /* If port is closing, signal caller to try again */
38505 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
38506 @@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38507 goto cleanup;
38508 }
38509 spin_lock(&port->lock);
38510 - port->count++;
38511 + atomic_inc(&port->count);
38512 spin_unlock(&port->lock);
38513 spin_unlock_irqrestore(&info->netlock, flags);
38514
38515 - if (port->count == 1) {
38516 + if (atomic_read(&port->count) == 1) {
38517 /* 1st open on this device, init hardware */
38518 retval = startup(info, tty);
38519 if (retval < 0)
38520 @@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38521 unsigned short new_crctype;
38522
38523 /* return error if TTY interface open */
38524 - if (info->port.count)
38525 + if (atomic_read(&info->port.count))
38526 return -EBUSY;
38527
38528 switch (encoding)
38529 @@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
38530
38531 /* arbitrate between network and tty opens */
38532 spin_lock_irqsave(&info->netlock, flags);
38533 - if (info->port.count != 0 || info->netcount != 0) {
38534 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38535 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38536 spin_unlock_irqrestore(&info->netlock, flags);
38537 return -EBUSY;
38538 @@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38539 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38540
38541 /* return error if TTY interface open */
38542 - if (info->port.count)
38543 + if (atomic_read(&info->port.count))
38544 return -EBUSY;
38545
38546 if (cmd != SIOCWANDEV)
38547 diff --git a/drivers/char/random.c b/drivers/char/random.c
38548 index 429b75b..a7f4145 100644
38549 --- a/drivers/char/random.c
38550 +++ b/drivers/char/random.c
38551 @@ -270,10 +270,17 @@
38552 /*
38553 * Configuration information
38554 */
38555 +#ifdef CONFIG_GRKERNSEC_RANDNET
38556 +#define INPUT_POOL_SHIFT 14
38557 +#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
38558 +#define OUTPUT_POOL_SHIFT 12
38559 +#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
38560 +#else
38561 #define INPUT_POOL_SHIFT 12
38562 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
38563 #define OUTPUT_POOL_SHIFT 10
38564 #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
38565 +#endif
38566 #define SEC_XFER_SIZE 512
38567 #define EXTRACT_SIZE 10
38568
38569 @@ -284,9 +291,6 @@
38570 /*
38571 * To allow fractional bits to be tracked, the entropy_count field is
38572 * denominated in units of 1/8th bits.
38573 - *
38574 - * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38575 - * credit_entropy_bits() needs to be 64 bits wide.
38576 */
38577 #define ENTROPY_SHIFT 3
38578 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38579 @@ -361,12 +365,19 @@ static struct poolinfo {
38580 #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
38581 int tap1, tap2, tap3, tap4, tap5;
38582 } poolinfo_table[] = {
38583 +#ifdef CONFIG_GRKERNSEC_RANDNET
38584 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
38585 + { S(512), 411, 308, 208, 104, 1 },
38586 + /* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 -- 105 */
38587 + { S(128), 104, 76, 51, 25, 1 },
38588 +#else
38589 /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
38590 /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
38591 { S(128), 104, 76, 51, 25, 1 },
38592 /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
38593 /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
38594 { S(32), 26, 19, 14, 7, 1 },
38595 +#endif
38596 #if 0
38597 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
38598 { S(2048), 1638, 1231, 819, 411, 1 },
38599 @@ -433,9 +444,9 @@ struct entropy_store {
38600 };
38601
38602 static void push_to_pool(struct work_struct *work);
38603 -static __u32 input_pool_data[INPUT_POOL_WORDS];
38604 -static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38605 -static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38606 +static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38607 +static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38608 +static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38609
38610 static struct entropy_store input_pool = {
38611 .poolinfo = &poolinfo_table[0],
38612 @@ -524,8 +535,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
38613 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
38614 }
38615
38616 - ACCESS_ONCE(r->input_rotate) = input_rotate;
38617 - ACCESS_ONCE(r->add_ptr) = i;
38618 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
38619 + ACCESS_ONCE_RW(r->add_ptr) = i;
38620 smp_wmb();
38621
38622 if (out)
38623 @@ -632,7 +643,7 @@ retry:
38624 /* The +2 corresponds to the /4 in the denominator */
38625
38626 do {
38627 - unsigned int anfrac = min(pnfrac, pool_size/2);
38628 + u64 anfrac = min(pnfrac, pool_size/2);
38629 unsigned int add =
38630 ((pool_size - entropy_count)*anfrac*3) >> s;
38631
38632 @@ -1151,7 +1162,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38633
38634 extract_buf(r, tmp);
38635 i = min_t(int, nbytes, EXTRACT_SIZE);
38636 - if (copy_to_user(buf, tmp, i)) {
38637 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38638 ret = -EFAULT;
38639 break;
38640 }
38641 @@ -1507,7 +1518,7 @@ EXPORT_SYMBOL(generate_random_uuid);
38642 #include <linux/sysctl.h>
38643
38644 static int min_read_thresh = 8, min_write_thresh;
38645 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
38646 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
38647 static int max_write_thresh = INPUT_POOL_WORDS * 32;
38648 static char sysctl_bootid[16];
38649
38650 @@ -1523,7 +1534,7 @@ static char sysctl_bootid[16];
38651 static int proc_do_uuid(struct ctl_table *table, int write,
38652 void __user *buffer, size_t *lenp, loff_t *ppos)
38653 {
38654 - struct ctl_table fake_table;
38655 + ctl_table_no_const fake_table;
38656 unsigned char buf[64], tmp_uuid[16], *uuid;
38657
38658 uuid = table->data;
38659 @@ -1553,7 +1564,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38660 static int proc_do_entropy(ctl_table *table, int write,
38661 void __user *buffer, size_t *lenp, loff_t *ppos)
38662 {
38663 - ctl_table fake_table;
38664 + ctl_table_no_const fake_table;
38665 int entropy_count;
38666
38667 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
38668 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38669 index 7cc1fe22..b602d6b 100644
38670 --- a/drivers/char/sonypi.c
38671 +++ b/drivers/char/sonypi.c
38672 @@ -54,6 +54,7 @@
38673
38674 #include <asm/uaccess.h>
38675 #include <asm/io.h>
38676 +#include <asm/local.h>
38677
38678 #include <linux/sonypi.h>
38679
38680 @@ -490,7 +491,7 @@ static struct sonypi_device {
38681 spinlock_t fifo_lock;
38682 wait_queue_head_t fifo_proc_list;
38683 struct fasync_struct *fifo_async;
38684 - int open_count;
38685 + local_t open_count;
38686 int model;
38687 struct input_dev *input_jog_dev;
38688 struct input_dev *input_key_dev;
38689 @@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
38690 static int sonypi_misc_release(struct inode *inode, struct file *file)
38691 {
38692 mutex_lock(&sonypi_device.lock);
38693 - sonypi_device.open_count--;
38694 + local_dec(&sonypi_device.open_count);
38695 mutex_unlock(&sonypi_device.lock);
38696 return 0;
38697 }
38698 @@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
38699 {
38700 mutex_lock(&sonypi_device.lock);
38701 /* Flush input queue on first open */
38702 - if (!sonypi_device.open_count)
38703 + if (!local_read(&sonypi_device.open_count))
38704 kfifo_reset(&sonypi_device.fifo);
38705 - sonypi_device.open_count++;
38706 + local_inc(&sonypi_device.open_count);
38707 mutex_unlock(&sonypi_device.lock);
38708
38709 return 0;
38710 diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
38711 index 64420b3..5c40b56 100644
38712 --- a/drivers/char/tpm/tpm_acpi.c
38713 +++ b/drivers/char/tpm/tpm_acpi.c
38714 @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
38715 virt = acpi_os_map_memory(start, len);
38716 if (!virt) {
38717 kfree(log->bios_event_log);
38718 + log->bios_event_log = NULL;
38719 printk("%s: ERROR - Unable to map memory\n", __func__);
38720 return -EIO;
38721 }
38722
38723 - memcpy_fromio(log->bios_event_log, virt, len);
38724 + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
38725
38726 acpi_os_unmap_memory(virt, len);
38727 return 0;
38728 diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
38729 index 59f7cb2..bac8b6d 100644
38730 --- a/drivers/char/tpm/tpm_eventlog.c
38731 +++ b/drivers/char/tpm/tpm_eventlog.c
38732 @@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
38733 event = addr;
38734
38735 if ((event->event_type == 0 && event->event_size == 0) ||
38736 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
38737 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
38738 return NULL;
38739
38740 return addr;
38741 @@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
38742 return NULL;
38743
38744 if ((event->event_type == 0 && event->event_size == 0) ||
38745 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
38746 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
38747 return NULL;
38748
38749 (*pos)++;
38750 @@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
38751 int i;
38752
38753 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
38754 - seq_putc(m, data[i]);
38755 + if (!seq_putc(m, data[i]))
38756 + return -EFAULT;
38757
38758 return 0;
38759 }
38760 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
38761 index feea87c..18aefff 100644
38762 --- a/drivers/char/virtio_console.c
38763 +++ b/drivers/char/virtio_console.c
38764 @@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
38765 if (to_user) {
38766 ssize_t ret;
38767
38768 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
38769 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
38770 if (ret)
38771 return -EFAULT;
38772 } else {
38773 @@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
38774 if (!port_has_data(port) && !port->host_connected)
38775 return 0;
38776
38777 - return fill_readbuf(port, ubuf, count, true);
38778 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
38779 }
38780
38781 static int wait_port_writable(struct port *port, bool nonblock)
38782 diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
38783 index a33f46f..a720eed 100644
38784 --- a/drivers/clk/clk-composite.c
38785 +++ b/drivers/clk/clk-composite.c
38786 @@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
38787 struct clk *clk;
38788 struct clk_init_data init;
38789 struct clk_composite *composite;
38790 - struct clk_ops *clk_composite_ops;
38791 + clk_ops_no_const *clk_composite_ops;
38792
38793 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
38794 if (!composite) {
38795 diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
38796 index 81dd31a..ef5c542 100644
38797 --- a/drivers/clk/socfpga/clk.c
38798 +++ b/drivers/clk/socfpga/clk.c
38799 @@ -22,6 +22,7 @@
38800 #include <linux/clk-provider.h>
38801 #include <linux/io.h>
38802 #include <linux/of.h>
38803 +#include <asm/pgtable.h>
38804
38805 /* Clock Manager offsets */
38806 #define CLKMGR_CTRL 0x0
38807 @@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
38808 streq(clk_name, "periph_pll") ||
38809 streq(clk_name, "sdram_pll")) {
38810 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
38811 - clk_pll_ops.enable = clk_gate_ops.enable;
38812 - clk_pll_ops.disable = clk_gate_ops.disable;
38813 + pax_open_kernel();
38814 + *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
38815 + *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
38816 + pax_close_kernel();
38817 }
38818
38819 clk = clk_register(NULL, &socfpga_clk->hw.hw);
38820 @@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
38821 return parent_rate / div;
38822 }
38823
38824 -static struct clk_ops gateclk_ops = {
38825 +static clk_ops_no_const gateclk_ops __read_only = {
38826 .recalc_rate = socfpga_clk_recalc_rate,
38827 .get_parent = socfpga_clk_get_parent,
38828 .set_parent = socfpga_clk_set_parent,
38829 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
38830 index caf41eb..223d27a 100644
38831 --- a/drivers/cpufreq/acpi-cpufreq.c
38832 +++ b/drivers/cpufreq/acpi-cpufreq.c
38833 @@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
38834 return sprintf(buf, "%u\n", boost_enabled);
38835 }
38836
38837 -static struct global_attr global_boost = __ATTR(boost, 0644,
38838 +static global_attr_no_const global_boost = __ATTR(boost, 0644,
38839 show_global_boost,
38840 store_global_boost);
38841
38842 @@ -693,8 +693,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38843 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
38844 per_cpu(acfreq_data, cpu) = data;
38845
38846 - if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
38847 - acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38848 + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
38849 + pax_open_kernel();
38850 + *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38851 + pax_close_kernel();
38852 + }
38853
38854 result = acpi_processor_register_performance(data->acpi_data, cpu);
38855 if (result)
38856 @@ -827,7 +830,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38857 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
38858 break;
38859 case ACPI_ADR_SPACE_FIXED_HARDWARE:
38860 - acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38861 + pax_open_kernel();
38862 + *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38863 + pax_close_kernel();
38864 break;
38865 default:
38866 break;
38867 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
38868 index 99a443e..8cb6f02 100644
38869 --- a/drivers/cpufreq/cpufreq.c
38870 +++ b/drivers/cpufreq/cpufreq.c
38871 @@ -1878,7 +1878,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
38872 #endif
38873
38874 mutex_lock(&cpufreq_governor_mutex);
38875 - list_del(&governor->governor_list);
38876 + pax_list_del(&governor->governor_list);
38877 mutex_unlock(&cpufreq_governor_mutex);
38878 return;
38879 }
38880 @@ -2108,7 +2108,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
38881 return NOTIFY_OK;
38882 }
38883
38884 -static struct notifier_block __refdata cpufreq_cpu_notifier = {
38885 +static struct notifier_block cpufreq_cpu_notifier = {
38886 .notifier_call = cpufreq_cpu_callback,
38887 };
38888
38889 @@ -2141,8 +2141,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
38890
38891 pr_debug("trying to register driver %s\n", driver_data->name);
38892
38893 - if (driver_data->setpolicy)
38894 - driver_data->flags |= CPUFREQ_CONST_LOOPS;
38895 + if (driver_data->setpolicy) {
38896 + pax_open_kernel();
38897 + *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
38898 + pax_close_kernel();
38899 + }
38900
38901 write_lock_irqsave(&cpufreq_driver_lock, flags);
38902 if (cpufreq_driver) {
38903 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
38904 index e6be635..f8a90dc 100644
38905 --- a/drivers/cpufreq/cpufreq_governor.c
38906 +++ b/drivers/cpufreq/cpufreq_governor.c
38907 @@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38908 struct dbs_data *dbs_data;
38909 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
38910 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
38911 - struct od_ops *od_ops = NULL;
38912 + const struct od_ops *od_ops = NULL;
38913 struct od_dbs_tuners *od_tuners = NULL;
38914 struct cs_dbs_tuners *cs_tuners = NULL;
38915 struct cpu_dbs_common_info *cpu_cdbs;
38916 @@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38917
38918 if ((cdata->governor == GOV_CONSERVATIVE) &&
38919 (!policy->governor->initialized)) {
38920 - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38921 + const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38922
38923 cpufreq_register_notifier(cs_ops->notifier_block,
38924 CPUFREQ_TRANSITION_NOTIFIER);
38925 @@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38926
38927 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
38928 (policy->governor->initialized == 1)) {
38929 - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38930 + const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38931
38932 cpufreq_unregister_notifier(cs_ops->notifier_block,
38933 CPUFREQ_TRANSITION_NOTIFIER);
38934 diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
38935 index b5f2b86..daa801b 100644
38936 --- a/drivers/cpufreq/cpufreq_governor.h
38937 +++ b/drivers/cpufreq/cpufreq_governor.h
38938 @@ -205,7 +205,7 @@ struct common_dbs_data {
38939 void (*exit)(struct dbs_data *dbs_data);
38940
38941 /* Governor specific ops, see below */
38942 - void *gov_ops;
38943 + const void *gov_ops;
38944 };
38945
38946 /* Governor Per policy data */
38947 @@ -225,7 +225,7 @@ struct od_ops {
38948 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
38949 unsigned int freq_next, unsigned int relation);
38950 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
38951 -};
38952 +} __no_const;
38953
38954 struct cs_ops {
38955 struct notifier_block *notifier_block;
38956 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
38957 index 18d4091..434be15 100644
38958 --- a/drivers/cpufreq/cpufreq_ondemand.c
38959 +++ b/drivers/cpufreq/cpufreq_ondemand.c
38960 @@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
38961
38962 define_get_cpu_dbs_routines(od_cpu_dbs_info);
38963
38964 -static struct od_ops od_ops = {
38965 +static struct od_ops od_ops __read_only = {
38966 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
38967 .powersave_bias_target = generic_powersave_bias_target,
38968 .freq_increase = dbs_freq_increase,
38969 @@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
38970 (struct cpufreq_policy *, unsigned int, unsigned int),
38971 unsigned int powersave_bias)
38972 {
38973 - od_ops.powersave_bias_target = f;
38974 + pax_open_kernel();
38975 + *(void **)&od_ops.powersave_bias_target = f;
38976 + pax_close_kernel();
38977 od_set_powersave_bias(powersave_bias);
38978 }
38979 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
38980
38981 void od_unregister_powersave_bias_handler(void)
38982 {
38983 - od_ops.powersave_bias_target = generic_powersave_bias_target;
38984 + pax_open_kernel();
38985 + *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
38986 + pax_close_kernel();
38987 od_set_powersave_bias(0);
38988 }
38989 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
38990 diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
38991 index 4cf0d28..5830372 100644
38992 --- a/drivers/cpufreq/cpufreq_stats.c
38993 +++ b/drivers/cpufreq/cpufreq_stats.c
38994 @@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
38995 }
38996
38997 /* priority=1 so this will get called before cpufreq_remove_dev */
38998 -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
38999 +static struct notifier_block cpufreq_stat_cpu_notifier = {
39000 .notifier_call = cpufreq_stat_cpu_callback,
39001 .priority = 1,
39002 };
39003 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39004 index b687df8..ae733fc 100644
39005 --- a/drivers/cpufreq/intel_pstate.c
39006 +++ b/drivers/cpufreq/intel_pstate.c
39007 @@ -123,10 +123,10 @@ struct pstate_funcs {
39008 struct cpu_defaults {
39009 struct pstate_adjust_policy pid_policy;
39010 struct pstate_funcs funcs;
39011 -};
39012 +} __do_const;
39013
39014 static struct pstate_adjust_policy pid_params;
39015 -static struct pstate_funcs pstate_funcs;
39016 +static struct pstate_funcs *pstate_funcs;
39017
39018 struct perf_limits {
39019 int no_turbo;
39020 @@ -517,7 +517,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39021
39022 cpu->pstate.current_pstate = pstate;
39023
39024 - pstate_funcs.set(cpu, pstate);
39025 + pstate_funcs->set(cpu, pstate);
39026 }
39027
39028 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
39029 @@ -539,12 +539,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39030 {
39031 sprintf(cpu->name, "Intel 2nd generation core");
39032
39033 - cpu->pstate.min_pstate = pstate_funcs.get_min();
39034 - cpu->pstate.max_pstate = pstate_funcs.get_max();
39035 - cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39036 + cpu->pstate.min_pstate = pstate_funcs->get_min();
39037 + cpu->pstate.max_pstate = pstate_funcs->get_max();
39038 + cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39039
39040 - if (pstate_funcs.get_vid)
39041 - pstate_funcs.get_vid(cpu);
39042 + if (pstate_funcs->get_vid)
39043 + pstate_funcs->get_vid(cpu);
39044
39045 /*
39046 * goto max pstate so we don't slow up boot if we are built-in if we are
39047 @@ -808,9 +808,9 @@ static int intel_pstate_msrs_not_valid(void)
39048 rdmsrl(MSR_IA32_APERF, aperf);
39049 rdmsrl(MSR_IA32_MPERF, mperf);
39050
39051 - if (!pstate_funcs.get_max() ||
39052 - !pstate_funcs.get_min() ||
39053 - !pstate_funcs.get_turbo())
39054 + if (!pstate_funcs->get_max() ||
39055 + !pstate_funcs->get_min() ||
39056 + !pstate_funcs->get_turbo())
39057 return -ENODEV;
39058
39059 rdmsrl(MSR_IA32_APERF, tmp);
39060 @@ -824,7 +824,7 @@ static int intel_pstate_msrs_not_valid(void)
39061 return 0;
39062 }
39063
39064 -static void copy_pid_params(struct pstate_adjust_policy *policy)
39065 +static void copy_pid_params(const struct pstate_adjust_policy *policy)
39066 {
39067 pid_params.sample_rate_ms = policy->sample_rate_ms;
39068 pid_params.p_gain_pct = policy->p_gain_pct;
39069 @@ -836,11 +836,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39070
39071 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39072 {
39073 - pstate_funcs.get_max = funcs->get_max;
39074 - pstate_funcs.get_min = funcs->get_min;
39075 - pstate_funcs.get_turbo = funcs->get_turbo;
39076 - pstate_funcs.set = funcs->set;
39077 - pstate_funcs.get_vid = funcs->get_vid;
39078 + pstate_funcs = funcs;
39079 }
39080
39081 #if IS_ENABLED(CONFIG_ACPI)
39082 diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39083 index 3d1cba9..0ab21d2 100644
39084 --- a/drivers/cpufreq/p4-clockmod.c
39085 +++ b/drivers/cpufreq/p4-clockmod.c
39086 @@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39087 case 0x0F: /* Core Duo */
39088 case 0x16: /* Celeron Core */
39089 case 0x1C: /* Atom */
39090 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39091 + pax_open_kernel();
39092 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39093 + pax_close_kernel();
39094 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39095 case 0x0D: /* Pentium M (Dothan) */
39096 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39097 + pax_open_kernel();
39098 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39099 + pax_close_kernel();
39100 /* fall through */
39101 case 0x09: /* Pentium M (Banias) */
39102 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39103 @@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39104
39105 /* on P-4s, the TSC runs with constant frequency independent whether
39106 * throttling is active or not. */
39107 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39108 + pax_open_kernel();
39109 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39110 + pax_close_kernel();
39111
39112 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39113 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39114 diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39115 index 724ffbd..ad83692 100644
39116 --- a/drivers/cpufreq/sparc-us3-cpufreq.c
39117 +++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39118 @@ -18,14 +18,12 @@
39119 #include <asm/head.h>
39120 #include <asm/timer.h>
39121
39122 -static struct cpufreq_driver *cpufreq_us3_driver;
39123 -
39124 struct us3_freq_percpu_info {
39125 struct cpufreq_frequency_table table[4];
39126 };
39127
39128 /* Indexed by cpu number. */
39129 -static struct us3_freq_percpu_info *us3_freq_table;
39130 +static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39131
39132 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39133 * in the Safari config register.
39134 @@ -156,14 +154,26 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39135
39136 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39137 {
39138 - if (cpufreq_us3_driver) {
39139 - cpufreq_frequency_table_put_attr(policy->cpu);
39140 - us3_freq_target(policy, 0);
39141 - }
39142 + cpufreq_frequency_table_put_attr(policy->cpu);
39143 + us3_freq_target(policy, 0);
39144
39145 return 0;
39146 }
39147
39148 +static int __init us3_freq_init(void);
39149 +static void __exit us3_freq_exit(void);
39150 +
39151 +static struct cpufreq_driver cpufreq_us3_driver = {
39152 + .init = us3_freq_cpu_init,
39153 + .verify = cpufreq_generic_frequency_table_verify,
39154 + .target_index = us3_freq_target,
39155 + .get = us3_freq_get,
39156 + .exit = us3_freq_cpu_exit,
39157 + .owner = THIS_MODULE,
39158 + .name = "UltraSPARC-III",
39159 +
39160 +};
39161 +
39162 static int __init us3_freq_init(void)
39163 {
39164 unsigned long manuf, impl, ver;
39165 @@ -180,55 +190,15 @@ static int __init us3_freq_init(void)
39166 (impl == CHEETAH_IMPL ||
39167 impl == CHEETAH_PLUS_IMPL ||
39168 impl == JAGUAR_IMPL ||
39169 - impl == PANTHER_IMPL)) {
39170 - struct cpufreq_driver *driver;
39171 -
39172 - ret = -ENOMEM;
39173 - driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39174 - if (!driver)
39175 - goto err_out;
39176 -
39177 - us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39178 - GFP_KERNEL);
39179 - if (!us3_freq_table)
39180 - goto err_out;
39181 -
39182 - driver->init = us3_freq_cpu_init;
39183 - driver->verify = cpufreq_generic_frequency_table_verify;
39184 - driver->target_index = us3_freq_target;
39185 - driver->get = us3_freq_get;
39186 - driver->exit = us3_freq_cpu_exit;
39187 - strcpy(driver->name, "UltraSPARC-III");
39188 -
39189 - cpufreq_us3_driver = driver;
39190 - ret = cpufreq_register_driver(driver);
39191 - if (ret)
39192 - goto err_out;
39193 -
39194 - return 0;
39195 -
39196 -err_out:
39197 - if (driver) {
39198 - kfree(driver);
39199 - cpufreq_us3_driver = NULL;
39200 - }
39201 - kfree(us3_freq_table);
39202 - us3_freq_table = NULL;
39203 - return ret;
39204 - }
39205 + impl == PANTHER_IMPL))
39206 + return cpufreq_register_driver(&cpufreq_us3_driver);
39207
39208 return -ENODEV;
39209 }
39210
39211 static void __exit us3_freq_exit(void)
39212 {
39213 - if (cpufreq_us3_driver) {
39214 - cpufreq_unregister_driver(cpufreq_us3_driver);
39215 - kfree(cpufreq_us3_driver);
39216 - cpufreq_us3_driver = NULL;
39217 - kfree(us3_freq_table);
39218 - us3_freq_table = NULL;
39219 - }
39220 + cpufreq_unregister_driver(&cpufreq_us3_driver);
39221 }
39222
39223 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39224 diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39225 index 4e1daca..e707b61 100644
39226 --- a/drivers/cpufreq/speedstep-centrino.c
39227 +++ b/drivers/cpufreq/speedstep-centrino.c
39228 @@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39229 !cpu_has(cpu, X86_FEATURE_EST))
39230 return -ENODEV;
39231
39232 - if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39233 - centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39234 + if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39235 + pax_open_kernel();
39236 + *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39237 + pax_close_kernel();
39238 + }
39239
39240 if (policy->cpu != 0)
39241 return -ENODEV;
39242 diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39243 index 06dbe7c..c2c8671 100644
39244 --- a/drivers/cpuidle/driver.c
39245 +++ b/drivers/cpuidle/driver.c
39246 @@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_device *dev,
39247
39248 static void poll_idle_init(struct cpuidle_driver *drv)
39249 {
39250 - struct cpuidle_state *state = &drv->states[0];
39251 + cpuidle_state_no_const *state = &drv->states[0];
39252
39253 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39254 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39255 diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39256 index ca89412..a7b9c49 100644
39257 --- a/drivers/cpuidle/governor.c
39258 +++ b/drivers/cpuidle/governor.c
39259 @@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39260 mutex_lock(&cpuidle_lock);
39261 if (__cpuidle_find_governor(gov->name) == NULL) {
39262 ret = 0;
39263 - list_add_tail(&gov->governor_list, &cpuidle_governors);
39264 + pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39265 if (!cpuidle_curr_governor ||
39266 cpuidle_curr_governor->rating < gov->rating)
39267 cpuidle_switch_governor(gov);
39268 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39269 index e918b6d..f87ea80 100644
39270 --- a/drivers/cpuidle/sysfs.c
39271 +++ b/drivers/cpuidle/sysfs.c
39272 @@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39273 NULL
39274 };
39275
39276 -static struct attribute_group cpuidle_attr_group = {
39277 +static attribute_group_no_const cpuidle_attr_group = {
39278 .attrs = cpuidle_default_attrs,
39279 .name = "cpuidle",
39280 };
39281 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39282 index 12fea3e..1e28f47 100644
39283 --- a/drivers/crypto/hifn_795x.c
39284 +++ b/drivers/crypto/hifn_795x.c
39285 @@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39286 MODULE_PARM_DESC(hifn_pll_ref,
39287 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39288
39289 -static atomic_t hifn_dev_number;
39290 +static atomic_unchecked_t hifn_dev_number;
39291
39292 #define ACRYPTO_OP_DECRYPT 0
39293 #define ACRYPTO_OP_ENCRYPT 1
39294 @@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39295 goto err_out_disable_pci_device;
39296
39297 snprintf(name, sizeof(name), "hifn%d",
39298 - atomic_inc_return(&hifn_dev_number)-1);
39299 + atomic_inc_return_unchecked(&hifn_dev_number)-1);
39300
39301 err = pci_request_regions(pdev, name);
39302 if (err)
39303 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39304 index a0b2f7e..1b6f028 100644
39305 --- a/drivers/devfreq/devfreq.c
39306 +++ b/drivers/devfreq/devfreq.c
39307 @@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39308 goto err_out;
39309 }
39310
39311 - list_add(&governor->node, &devfreq_governor_list);
39312 + pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39313
39314 list_for_each_entry(devfreq, &devfreq_list, node) {
39315 int ret = 0;
39316 @@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39317 }
39318 }
39319
39320 - list_del(&governor->node);
39321 + pax_list_del((struct list_head *)&governor->node);
39322 err_out:
39323 mutex_unlock(&devfreq_list_lock);
39324
39325 diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39326 index 0d765c0..60b7480 100644
39327 --- a/drivers/dma/sh/shdmac.c
39328 +++ b/drivers/dma/sh/shdmac.c
39329 @@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39330 return ret;
39331 }
39332
39333 -static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39334 +static struct notifier_block sh_dmae_nmi_notifier = {
39335 .notifier_call = sh_dmae_nmi_handler,
39336
39337 /* Run before NMI debug handler and KGDB */
39338 diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39339 index 1026743..80b081c 100644
39340 --- a/drivers/edac/edac_device.c
39341 +++ b/drivers/edac/edac_device.c
39342 @@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39343 */
39344 int edac_device_alloc_index(void)
39345 {
39346 - static atomic_t device_indexes = ATOMIC_INIT(0);
39347 + static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39348
39349 - return atomic_inc_return(&device_indexes) - 1;
39350 + return atomic_inc_return_unchecked(&device_indexes) - 1;
39351 }
39352 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39353
39354 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39355 index e5bdf21..b8f9055 100644
39356 --- a/drivers/edac/edac_mc_sysfs.c
39357 +++ b/drivers/edac/edac_mc_sysfs.c
39358 @@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
39359 struct dev_ch_attribute {
39360 struct device_attribute attr;
39361 int channel;
39362 -};
39363 +} __do_const;
39364
39365 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39366 struct dev_ch_attribute dev_attr_legacy_##_name = \
39367 @@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39368 }
39369
39370 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39371 + pax_open_kernel();
39372 if (mci->get_sdram_scrub_rate) {
39373 - dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39374 - dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39375 + *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39376 + *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39377 }
39378 if (mci->set_sdram_scrub_rate) {
39379 - dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39380 - dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39381 + *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39382 + *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39383 }
39384 + pax_close_kernel();
39385 err = device_create_file(&mci->dev,
39386 &dev_attr_sdram_scrub_rate);
39387 if (err) {
39388 diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39389 index 2cf44b4d..6dd2dc7 100644
39390 --- a/drivers/edac/edac_pci.c
39391 +++ b/drivers/edac/edac_pci.c
39392 @@ -29,7 +29,7 @@
39393
39394 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39395 static LIST_HEAD(edac_pci_list);
39396 -static atomic_t pci_indexes = ATOMIC_INIT(0);
39397 +static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39398
39399 /*
39400 * edac_pci_alloc_ctl_info
39401 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39402 */
39403 int edac_pci_alloc_index(void)
39404 {
39405 - return atomic_inc_return(&pci_indexes) - 1;
39406 + return atomic_inc_return_unchecked(&pci_indexes) - 1;
39407 }
39408 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39409
39410 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39411 index e8658e4..22746d6 100644
39412 --- a/drivers/edac/edac_pci_sysfs.c
39413 +++ b/drivers/edac/edac_pci_sysfs.c
39414 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39415 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39416 static int edac_pci_poll_msec = 1000; /* one second workq period */
39417
39418 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
39419 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39420 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39421 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39422
39423 static struct kobject *edac_pci_top_main_kobj;
39424 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39425 @@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
39426 void *value;
39427 ssize_t(*show) (void *, char *);
39428 ssize_t(*store) (void *, const char *, size_t);
39429 -};
39430 +} __do_const;
39431
39432 /* Set of show/store abstract level functions for PCI Parity object */
39433 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39434 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39435 edac_printk(KERN_CRIT, EDAC_PCI,
39436 "Signaled System Error on %s\n",
39437 pci_name(dev));
39438 - atomic_inc(&pci_nonparity_count);
39439 + atomic_inc_unchecked(&pci_nonparity_count);
39440 }
39441
39442 if (status & (PCI_STATUS_PARITY)) {
39443 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39444 "Master Data Parity Error on %s\n",
39445 pci_name(dev));
39446
39447 - atomic_inc(&pci_parity_count);
39448 + atomic_inc_unchecked(&pci_parity_count);
39449 }
39450
39451 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39452 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39453 "Detected Parity Error on %s\n",
39454 pci_name(dev));
39455
39456 - atomic_inc(&pci_parity_count);
39457 + atomic_inc_unchecked(&pci_parity_count);
39458 }
39459 }
39460
39461 @@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39462 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39463 "Signaled System Error on %s\n",
39464 pci_name(dev));
39465 - atomic_inc(&pci_nonparity_count);
39466 + atomic_inc_unchecked(&pci_nonparity_count);
39467 }
39468
39469 if (status & (PCI_STATUS_PARITY)) {
39470 @@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39471 "Master Data Parity Error on "
39472 "%s\n", pci_name(dev));
39473
39474 - atomic_inc(&pci_parity_count);
39475 + atomic_inc_unchecked(&pci_parity_count);
39476 }
39477
39478 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39479 @@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39480 "Detected Parity Error on %s\n",
39481 pci_name(dev));
39482
39483 - atomic_inc(&pci_parity_count);
39484 + atomic_inc_unchecked(&pci_parity_count);
39485 }
39486 }
39487 }
39488 @@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
39489 if (!check_pci_errors)
39490 return;
39491
39492 - before_count = atomic_read(&pci_parity_count);
39493 + before_count = atomic_read_unchecked(&pci_parity_count);
39494
39495 /* scan all PCI devices looking for a Parity Error on devices and
39496 * bridges.
39497 @@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
39498 /* Only if operator has selected panic on PCI Error */
39499 if (edac_pci_get_panic_on_pe()) {
39500 /* If the count is different 'after' from 'before' */
39501 - if (before_count != atomic_read(&pci_parity_count))
39502 + if (before_count != atomic_read_unchecked(&pci_parity_count))
39503 panic("EDAC: PCI Parity Error");
39504 }
39505 }
39506 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39507 index 51b7e3a..aa8a3e8 100644
39508 --- a/drivers/edac/mce_amd.h
39509 +++ b/drivers/edac/mce_amd.h
39510 @@ -77,7 +77,7 @@ struct amd_decoder_ops {
39511 bool (*mc0_mce)(u16, u8);
39512 bool (*mc1_mce)(u16, u8);
39513 bool (*mc2_mce)(u16, u8);
39514 -};
39515 +} __no_const;
39516
39517 void amd_report_gart_errors(bool);
39518 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39519 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39520 index 57ea7f4..af06b76 100644
39521 --- a/drivers/firewire/core-card.c
39522 +++ b/drivers/firewire/core-card.c
39523 @@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39524 const struct fw_card_driver *driver,
39525 struct device *device)
39526 {
39527 - static atomic_t index = ATOMIC_INIT(-1);
39528 + static atomic_unchecked_t index = ATOMIC_INIT(-1);
39529
39530 - card->index = atomic_inc_return(&index);
39531 + card->index = atomic_inc_return_unchecked(&index);
39532 card->driver = driver;
39533 card->device = device;
39534 card->current_tlabel = 0;
39535 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39536
39537 void fw_core_remove_card(struct fw_card *card)
39538 {
39539 - struct fw_card_driver dummy_driver = dummy_driver_template;
39540 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
39541
39542 card->driver->update_phy_reg(card, 4,
39543 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39544 diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39545 index 2c6d5e1..a2cca6b 100644
39546 --- a/drivers/firewire/core-device.c
39547 +++ b/drivers/firewire/core-device.c
39548 @@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39549 struct config_rom_attribute {
39550 struct device_attribute attr;
39551 u32 key;
39552 -};
39553 +} __do_const;
39554
39555 static ssize_t show_immediate(struct device *dev,
39556 struct device_attribute *dattr, char *buf)
39557 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39558 index 0e79951..b180217 100644
39559 --- a/drivers/firewire/core-transaction.c
39560 +++ b/drivers/firewire/core-transaction.c
39561 @@ -38,6 +38,7 @@
39562 #include <linux/timer.h>
39563 #include <linux/types.h>
39564 #include <linux/workqueue.h>
39565 +#include <linux/sched.h>
39566
39567 #include <asm/byteorder.h>
39568
39569 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39570 index 515a42c..5ecf3ba 100644
39571 --- a/drivers/firewire/core.h
39572 +++ b/drivers/firewire/core.h
39573 @@ -111,6 +111,7 @@ struct fw_card_driver {
39574
39575 int (*stop_iso)(struct fw_iso_context *ctx);
39576 };
39577 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39578
39579 void fw_card_initialize(struct fw_card *card,
39580 const struct fw_card_driver *driver, struct device *device);
39581 diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
39582 index 94a58a0..f5eba42 100644
39583 --- a/drivers/firmware/dmi-id.c
39584 +++ b/drivers/firmware/dmi-id.c
39585 @@ -16,7 +16,7 @@
39586 struct dmi_device_attribute{
39587 struct device_attribute dev_attr;
39588 int field;
39589 -};
39590 +} __do_const;
39591 #define to_dmi_dev_attr(_dev_attr) \
39592 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
39593
39594 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
39595 index c7e81ff..94a7401 100644
39596 --- a/drivers/firmware/dmi_scan.c
39597 +++ b/drivers/firmware/dmi_scan.c
39598 @@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
39599 if (buf == NULL)
39600 return -1;
39601
39602 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
39603 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
39604
39605 iounmap(buf);
39606 return 0;
39607 diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
39608 index 1491dd4..aa910db 100644
39609 --- a/drivers/firmware/efi/cper.c
39610 +++ b/drivers/firmware/efi/cper.c
39611 @@ -41,12 +41,12 @@
39612 */
39613 u64 cper_next_record_id(void)
39614 {
39615 - static atomic64_t seq;
39616 + static atomic64_unchecked_t seq;
39617
39618 - if (!atomic64_read(&seq))
39619 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
39620 + if (!atomic64_read_unchecked(&seq))
39621 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
39622
39623 - return atomic64_inc_return(&seq);
39624 + return atomic64_inc_return_unchecked(&seq);
39625 }
39626 EXPORT_SYMBOL_GPL(cper_next_record_id);
39627
39628 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
39629 index 2e2fbde..7676c8b 100644
39630 --- a/drivers/firmware/efi/efi.c
39631 +++ b/drivers/firmware/efi/efi.c
39632 @@ -81,14 +81,16 @@ static struct attribute_group efi_subsys_attr_group = {
39633 };
39634
39635 static struct efivars generic_efivars;
39636 -static struct efivar_operations generic_ops;
39637 +static efivar_operations_no_const generic_ops __read_only;
39638
39639 static int generic_ops_register(void)
39640 {
39641 - generic_ops.get_variable = efi.get_variable;
39642 - generic_ops.set_variable = efi.set_variable;
39643 - generic_ops.get_next_variable = efi.get_next_variable;
39644 - generic_ops.query_variable_store = efi_query_variable_store;
39645 + pax_open_kernel();
39646 + *(void **)&generic_ops.get_variable = efi.get_variable;
39647 + *(void **)&generic_ops.set_variable = efi.set_variable;
39648 + *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
39649 + *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
39650 + pax_close_kernel();
39651
39652 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
39653 }
39654 diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
39655 index 3dc2482..7bd2f61 100644
39656 --- a/drivers/firmware/efi/efivars.c
39657 +++ b/drivers/firmware/efi/efivars.c
39658 @@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
39659 static int
39660 create_efivars_bin_attributes(void)
39661 {
39662 - struct bin_attribute *attr;
39663 + bin_attribute_no_const *attr;
39664 int error;
39665
39666 /* new_var */
39667 diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
39668 index 2a90ba6..07f3733 100644
39669 --- a/drivers/firmware/google/memconsole.c
39670 +++ b/drivers/firmware/google/memconsole.c
39671 @@ -147,7 +147,9 @@ static int __init memconsole_init(void)
39672 if (!found_memconsole())
39673 return -ENODEV;
39674
39675 - memconsole_bin_attr.size = memconsole_length;
39676 + pax_open_kernel();
39677 + *(size_t *)&memconsole_bin_attr.size = memconsole_length;
39678 + pax_close_kernel();
39679
39680 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
39681
39682 diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
39683 index ec19036..8ffafc2 100644
39684 --- a/drivers/gpio/gpio-em.c
39685 +++ b/drivers/gpio/gpio-em.c
39686 @@ -257,7 +257,7 @@ static int em_gio_probe(struct platform_device *pdev)
39687 struct em_gio_priv *p;
39688 struct resource *io[2], *irq[2];
39689 struct gpio_chip *gpio_chip;
39690 - struct irq_chip *irq_chip;
39691 + irq_chip_no_const *irq_chip;
39692 const char *name = dev_name(&pdev->dev);
39693 int ret;
39694
39695 diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
39696 index 814addb..0937d7f 100644
39697 --- a/drivers/gpio/gpio-ich.c
39698 +++ b/drivers/gpio/gpio-ich.c
39699 @@ -71,7 +71,7 @@ struct ichx_desc {
39700 /* Some chipsets have quirks, let these use their own request/get */
39701 int (*request)(struct gpio_chip *chip, unsigned offset);
39702 int (*get)(struct gpio_chip *chip, unsigned offset);
39703 -};
39704 +} __do_const;
39705
39706 static struct {
39707 spinlock_t lock;
39708 diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
39709 index 8b7e719..dc089dc 100644
39710 --- a/drivers/gpio/gpio-rcar.c
39711 +++ b/drivers/gpio/gpio-rcar.c
39712 @@ -316,7 +316,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
39713 struct gpio_rcar_priv *p;
39714 struct resource *io, *irq;
39715 struct gpio_chip *gpio_chip;
39716 - struct irq_chip *irq_chip;
39717 + irq_chip_no_const *irq_chip;
39718 const char *name = dev_name(&pdev->dev);
39719 int ret;
39720
39721 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
39722 index 9902732..64b62dd 100644
39723 --- a/drivers/gpio/gpio-vr41xx.c
39724 +++ b/drivers/gpio/gpio-vr41xx.c
39725 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
39726 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
39727 maskl, pendl, maskh, pendh);
39728
39729 - atomic_inc(&irq_err_count);
39730 + atomic_inc_unchecked(&irq_err_count);
39731
39732 return -EINVAL;
39733 }
39734 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
39735 index d6cf77c..2842146 100644
39736 --- a/drivers/gpu/drm/drm_crtc.c
39737 +++ b/drivers/gpu/drm/drm_crtc.c
39738 @@ -3102,7 +3102,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
39739 goto done;
39740 }
39741
39742 - if (copy_to_user(&enum_ptr[copied].name,
39743 + if (copy_to_user(enum_ptr[copied].name,
39744 &prop_enum->name, DRM_PROP_NAME_LEN)) {
39745 ret = -EFAULT;
39746 goto done;
39747 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
39748 index 01361ab..891e821 100644
39749 --- a/drivers/gpu/drm/drm_crtc_helper.c
39750 +++ b/drivers/gpu/drm/drm_crtc_helper.c
39751 @@ -338,7 +338,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
39752 struct drm_crtc *tmp;
39753 int crtc_mask = 1;
39754
39755 - WARN(!crtc, "checking null crtc?\n");
39756 + BUG_ON(!crtc);
39757
39758 dev = crtc->dev;
39759
39760 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
39761 index d9137e4..69b73a0 100644
39762 --- a/drivers/gpu/drm/drm_drv.c
39763 +++ b/drivers/gpu/drm/drm_drv.c
39764 @@ -233,7 +233,7 @@ module_exit(drm_core_exit);
39765 /**
39766 * Copy and IOCTL return string to user space
39767 */
39768 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
39769 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
39770 {
39771 int len;
39772
39773 @@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp,
39774 struct drm_file *file_priv = filp->private_data;
39775 struct drm_device *dev;
39776 const struct drm_ioctl_desc *ioctl = NULL;
39777 - drm_ioctl_t *func;
39778 + drm_ioctl_no_const_t func;
39779 unsigned int nr = DRM_IOCTL_NR(cmd);
39780 int retcode = -EINVAL;
39781 char stack_kdata[128];
39782 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
39783 index c5b929c..8a3b8be 100644
39784 --- a/drivers/gpu/drm/drm_fops.c
39785 +++ b/drivers/gpu/drm/drm_fops.c
39786 @@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
39787 if (drm_device_is_unplugged(dev))
39788 return -ENODEV;
39789
39790 - if (!dev->open_count++)
39791 + if (local_inc_return(&dev->open_count) == 1)
39792 need_setup = 1;
39793 mutex_lock(&dev->struct_mutex);
39794 old_imapping = inode->i_mapping;
39795 @@ -127,7 +127,7 @@ err_undo:
39796 iput(container_of(dev->dev_mapping, struct inode, i_data));
39797 dev->dev_mapping = old_mapping;
39798 mutex_unlock(&dev->struct_mutex);
39799 - dev->open_count--;
39800 + local_dec(&dev->open_count);
39801 return retcode;
39802 }
39803 EXPORT_SYMBOL(drm_open);
39804 @@ -467,7 +467,7 @@ int drm_release(struct inode *inode, struct file *filp)
39805
39806 mutex_lock(&drm_global_mutex);
39807
39808 - DRM_DEBUG("open_count = %d\n", dev->open_count);
39809 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
39810
39811 if (dev->driver->preclose)
39812 dev->driver->preclose(dev, file_priv);
39813 @@ -476,10 +476,10 @@ int drm_release(struct inode *inode, struct file *filp)
39814 * Begin inline drm_release
39815 */
39816
39817 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
39818 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
39819 task_pid_nr(current),
39820 (long)old_encode_dev(file_priv->minor->device),
39821 - dev->open_count);
39822 + local_read(&dev->open_count));
39823
39824 /* Release any auth tokens that might point to this file_priv,
39825 (do that under the drm_global_mutex) */
39826 @@ -577,7 +577,7 @@ int drm_release(struct inode *inode, struct file *filp)
39827 * End inline drm_release
39828 */
39829
39830 - if (!--dev->open_count) {
39831 + if (local_dec_and_test(&dev->open_count)) {
39832 if (atomic_read(&dev->ioctl_count)) {
39833 DRM_ERROR("Device busy: %d\n",
39834 atomic_read(&dev->ioctl_count));
39835 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
39836 index 3d2e91c..d31c4c9 100644
39837 --- a/drivers/gpu/drm/drm_global.c
39838 +++ b/drivers/gpu/drm/drm_global.c
39839 @@ -36,7 +36,7 @@
39840 struct drm_global_item {
39841 struct mutex mutex;
39842 void *object;
39843 - int refcount;
39844 + atomic_t refcount;
39845 };
39846
39847 static struct drm_global_item glob[DRM_GLOBAL_NUM];
39848 @@ -49,7 +49,7 @@ void drm_global_init(void)
39849 struct drm_global_item *item = &glob[i];
39850 mutex_init(&item->mutex);
39851 item->object = NULL;
39852 - item->refcount = 0;
39853 + atomic_set(&item->refcount, 0);
39854 }
39855 }
39856
39857 @@ -59,7 +59,7 @@ void drm_global_release(void)
39858 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
39859 struct drm_global_item *item = &glob[i];
39860 BUG_ON(item->object != NULL);
39861 - BUG_ON(item->refcount != 0);
39862 + BUG_ON(atomic_read(&item->refcount) != 0);
39863 }
39864 }
39865
39866 @@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39867 struct drm_global_item *item = &glob[ref->global_type];
39868
39869 mutex_lock(&item->mutex);
39870 - if (item->refcount == 0) {
39871 + if (atomic_read(&item->refcount) == 0) {
39872 item->object = kzalloc(ref->size, GFP_KERNEL);
39873 if (unlikely(item->object == NULL)) {
39874 ret = -ENOMEM;
39875 @@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39876 goto out_err;
39877
39878 }
39879 - ++item->refcount;
39880 + atomic_inc(&item->refcount);
39881 ref->object = item->object;
39882 mutex_unlock(&item->mutex);
39883 return 0;
39884 @@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
39885 struct drm_global_item *item = &glob[ref->global_type];
39886
39887 mutex_lock(&item->mutex);
39888 - BUG_ON(item->refcount == 0);
39889 + BUG_ON(atomic_read(&item->refcount) == 0);
39890 BUG_ON(ref->object != item->object);
39891 - if (--item->refcount == 0) {
39892 + if (atomic_dec_and_test(&item->refcount)) {
39893 ref->release(ref);
39894 item->object = NULL;
39895 }
39896 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
39897 index 7d5a152..d7186da 100644
39898 --- a/drivers/gpu/drm/drm_info.c
39899 +++ b/drivers/gpu/drm/drm_info.c
39900 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
39901 struct drm_local_map *map;
39902 struct drm_map_list *r_list;
39903
39904 - /* Hardcoded from _DRM_FRAME_BUFFER,
39905 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
39906 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
39907 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
39908 + static const char * const types[] = {
39909 + [_DRM_FRAME_BUFFER] = "FB",
39910 + [_DRM_REGISTERS] = "REG",
39911 + [_DRM_SHM] = "SHM",
39912 + [_DRM_AGP] = "AGP",
39913 + [_DRM_SCATTER_GATHER] = "SG",
39914 + [_DRM_CONSISTENT] = "PCI",
39915 + [_DRM_GEM] = "GEM" };
39916 const char *type;
39917 int i;
39918
39919 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
39920 map = r_list->map;
39921 if (!map)
39922 continue;
39923 - if (map->type < 0 || map->type > 5)
39924 + if (map->type >= ARRAY_SIZE(types))
39925 type = "??";
39926 else
39927 type = types[map->type];
39928 @@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, void *data)
39929 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
39930 vma->vm_flags & VM_LOCKED ? 'l' : '-',
39931 vma->vm_flags & VM_IO ? 'i' : '-',
39932 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39933 + 0);
39934 +#else
39935 vma->vm_pgoff);
39936 +#endif
39937
39938 #if defined(__i386__)
39939 pgprot = pgprot_val(vma->vm_page_prot);
39940 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
39941 index 2f4c4343..dd12cd2 100644
39942 --- a/drivers/gpu/drm/drm_ioc32.c
39943 +++ b/drivers/gpu/drm/drm_ioc32.c
39944 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
39945 request = compat_alloc_user_space(nbytes);
39946 if (!access_ok(VERIFY_WRITE, request, nbytes))
39947 return -EFAULT;
39948 - list = (struct drm_buf_desc *) (request + 1);
39949 + list = (struct drm_buf_desc __user *) (request + 1);
39950
39951 if (__put_user(count, &request->count)
39952 || __put_user(list, &request->list))
39953 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
39954 request = compat_alloc_user_space(nbytes);
39955 if (!access_ok(VERIFY_WRITE, request, nbytes))
39956 return -EFAULT;
39957 - list = (struct drm_buf_pub *) (request + 1);
39958 + list = (struct drm_buf_pub __user *) (request + 1);
39959
39960 if (__put_user(count, &request->count)
39961 || __put_user(list, &request->list))
39962 @@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
39963 return 0;
39964 }
39965
39966 -drm_ioctl_compat_t *drm_compat_ioctls[] = {
39967 +drm_ioctl_compat_t drm_compat_ioctls[] = {
39968 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
39969 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
39970 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
39971 @@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
39972 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39973 {
39974 unsigned int nr = DRM_IOCTL_NR(cmd);
39975 - drm_ioctl_compat_t *fn;
39976 int ret;
39977
39978 /* Assume that ioctls without an explicit compat routine will just
39979 @@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39980 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
39981 return drm_ioctl(filp, cmd, arg);
39982
39983 - fn = drm_compat_ioctls[nr];
39984 -
39985 - if (fn != NULL)
39986 - ret = (*fn) (filp, cmd, arg);
39987 + if (drm_compat_ioctls[nr] != NULL)
39988 + ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
39989 else
39990 ret = drm_ioctl(filp, cmd, arg);
39991
39992 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
39993 index 66dd3a0..3bed6c4 100644
39994 --- a/drivers/gpu/drm/drm_stub.c
39995 +++ b/drivers/gpu/drm/drm_stub.c
39996 @@ -403,7 +403,7 @@ void drm_unplug_dev(struct drm_device *dev)
39997
39998 drm_device_set_unplugged(dev);
39999
40000 - if (dev->open_count == 0) {
40001 + if (local_read(&dev->open_count) == 0) {
40002 drm_put_dev(dev);
40003 }
40004 mutex_unlock(&drm_global_mutex);
40005 diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
40006 index c22c309..ae758c3 100644
40007 --- a/drivers/gpu/drm/drm_sysfs.c
40008 +++ b/drivers/gpu/drm/drm_sysfs.c
40009 @@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
40010 */
40011 int drm_sysfs_device_add(struct drm_minor *minor)
40012 {
40013 - char *minor_str;
40014 + const char *minor_str;
40015 int r;
40016
40017 if (minor->type == DRM_MINOR_CONTROL)
40018 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40019 index d4d16ed..8fb0b51 100644
40020 --- a/drivers/gpu/drm/i810/i810_drv.h
40021 +++ b/drivers/gpu/drm/i810/i810_drv.h
40022 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
40023 int page_flipping;
40024
40025 wait_queue_head_t irq_queue;
40026 - atomic_t irq_received;
40027 - atomic_t irq_emitted;
40028 + atomic_unchecked_t irq_received;
40029 + atomic_unchecked_t irq_emitted;
40030
40031 int front_offset;
40032 } drm_i810_private_t;
40033 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
40034 index 6ed45a9..eb6dc41 100644
40035 --- a/drivers/gpu/drm/i915/i915_debugfs.c
40036 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
40037 @@ -702,7 +702,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
40038 I915_READ(GTIMR));
40039 }
40040 seq_printf(m, "Interrupts received: %d\n",
40041 - atomic_read(&dev_priv->irq_received));
40042 + atomic_read_unchecked(&dev_priv->irq_received));
40043 for_each_ring(ring, dev_priv, i) {
40044 if (INTEL_INFO(dev)->gen >= 6) {
40045 seq_printf(m,
40046 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40047 index e02266a..e3411aa 100644
40048 --- a/drivers/gpu/drm/i915/i915_dma.c
40049 +++ b/drivers/gpu/drm/i915/i915_dma.c
40050 @@ -1271,7 +1271,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40051 bool can_switch;
40052
40053 spin_lock(&dev->count_lock);
40054 - can_switch = (dev->open_count == 0);
40055 + can_switch = (local_read(&dev->open_count) == 0);
40056 spin_unlock(&dev->count_lock);
40057 return can_switch;
40058 }
40059 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
40060 index 221ac62..f56acc8 100644
40061 --- a/drivers/gpu/drm/i915/i915_drv.h
40062 +++ b/drivers/gpu/drm/i915/i915_drv.h
40063 @@ -1326,7 +1326,7 @@ typedef struct drm_i915_private {
40064 drm_dma_handle_t *status_page_dmah;
40065 struct resource mch_res;
40066
40067 - atomic_t irq_received;
40068 + atomic_unchecked_t irq_received;
40069
40070 /* protects the irq masks */
40071 spinlock_t irq_lock;
40072 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40073 index a3ba9a8..ee52ddd 100644
40074 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40075 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40076 @@ -861,9 +861,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40077
40078 static int
40079 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
40080 - int count)
40081 + unsigned int count)
40082 {
40083 - int i;
40084 + unsigned int i;
40085 unsigned relocs_total = 0;
40086 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40087
40088 diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
40089 index d3c3b5b..e79720d 100644
40090 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
40091 +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
40092 @@ -828,7 +828,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
40093 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
40094 dev_priv->gtt.base.start / PAGE_SIZE,
40095 dev_priv->gtt.base.total / PAGE_SIZE,
40096 - false);
40097 + true);
40098 }
40099
40100 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
40101 diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40102 index 3c59584..500f2e9 100644
40103 --- a/drivers/gpu/drm/i915/i915_ioc32.c
40104 +++ b/drivers/gpu/drm/i915/i915_ioc32.c
40105 @@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40106 (unsigned long)request);
40107 }
40108
40109 -static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40110 +static drm_ioctl_compat_t i915_compat_ioctls[] = {
40111 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40112 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40113 [DRM_I915_GETPARAM] = compat_i915_getparam,
40114 @@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40115 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40116 {
40117 unsigned int nr = DRM_IOCTL_NR(cmd);
40118 - drm_ioctl_compat_t *fn = NULL;
40119 int ret;
40120
40121 if (nr < DRM_COMMAND_BASE)
40122 return drm_compat_ioctl(filp, cmd, arg);
40123
40124 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
40125 - fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40126 -
40127 - if (fn != NULL)
40128 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
40129 + drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40130 ret = (*fn) (filp, cmd, arg);
40131 - else
40132 + } else
40133 ret = drm_ioctl(filp, cmd, arg);
40134
40135 return ret;
40136 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
40137 index a209177..842a89a 100644
40138 --- a/drivers/gpu/drm/i915/i915_irq.c
40139 +++ b/drivers/gpu/drm/i915/i915_irq.c
40140 @@ -1419,7 +1419,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
40141 int pipe;
40142 u32 pipe_stats[I915_MAX_PIPES];
40143
40144 - atomic_inc(&dev_priv->irq_received);
40145 + atomic_inc_unchecked(&dev_priv->irq_received);
40146
40147 while (true) {
40148 iir = I915_READ(VLV_IIR);
40149 @@ -1729,7 +1729,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
40150 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
40151 irqreturn_t ret = IRQ_NONE;
40152
40153 - atomic_inc(&dev_priv->irq_received);
40154 + atomic_inc_unchecked(&dev_priv->irq_received);
40155
40156 /* We get interrupts on unclaimed registers, so check for this before we
40157 * do any I915_{READ,WRITE}. */
40158 @@ -1799,7 +1799,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
40159 uint32_t tmp = 0;
40160 enum pipe pipe;
40161
40162 - atomic_inc(&dev_priv->irq_received);
40163 + atomic_inc_unchecked(&dev_priv->irq_received);
40164
40165 master_ctl = I915_READ(GEN8_MASTER_IRQ);
40166 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
40167 @@ -2623,7 +2623,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
40168 {
40169 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40170
40171 - atomic_set(&dev_priv->irq_received, 0);
40172 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40173
40174 I915_WRITE(HWSTAM, 0xeffe);
40175
40176 @@ -2641,7 +2641,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
40177 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40178 int pipe;
40179
40180 - atomic_set(&dev_priv->irq_received, 0);
40181 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40182
40183 /* VLV magic */
40184 I915_WRITE(VLV_IMR, 0);
40185 @@ -2672,7 +2672,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
40186 struct drm_i915_private *dev_priv = dev->dev_private;
40187 int pipe;
40188
40189 - atomic_set(&dev_priv->irq_received, 0);
40190 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40191
40192 I915_WRITE(GEN8_MASTER_IRQ, 0);
40193 POSTING_READ(GEN8_MASTER_IRQ);
40194 @@ -2998,7 +2998,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
40195 if (!dev_priv)
40196 return;
40197
40198 - atomic_set(&dev_priv->irq_received, 0);
40199 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40200
40201 I915_WRITE(GEN8_MASTER_IRQ, 0);
40202
40203 @@ -3092,7 +3092,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
40204 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40205 int pipe;
40206
40207 - atomic_set(&dev_priv->irq_received, 0);
40208 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40209
40210 for_each_pipe(pipe)
40211 I915_WRITE(PIPESTAT(pipe), 0);
40212 @@ -3178,7 +3178,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
40213 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
40214 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40215
40216 - atomic_inc(&dev_priv->irq_received);
40217 + atomic_inc_unchecked(&dev_priv->irq_received);
40218
40219 iir = I915_READ16(IIR);
40220 if (iir == 0)
40221 @@ -3253,7 +3253,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
40222 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40223 int pipe;
40224
40225 - atomic_set(&dev_priv->irq_received, 0);
40226 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40227
40228 if (I915_HAS_HOTPLUG(dev)) {
40229 I915_WRITE(PORT_HOTPLUG_EN, 0);
40230 @@ -3360,7 +3360,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
40231 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40232 int pipe, ret = IRQ_NONE;
40233
40234 - atomic_inc(&dev_priv->irq_received);
40235 + atomic_inc_unchecked(&dev_priv->irq_received);
40236
40237 iir = I915_READ(IIR);
40238 do {
40239 @@ -3487,7 +3487,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
40240 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40241 int pipe;
40242
40243 - atomic_set(&dev_priv->irq_received, 0);
40244 + atomic_set_unchecked(&dev_priv->irq_received, 0);
40245
40246 I915_WRITE(PORT_HOTPLUG_EN, 0);
40247 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
40248 @@ -3603,7 +3603,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
40249 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
40250 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40251
40252 - atomic_inc(&dev_priv->irq_received);
40253 + atomic_inc_unchecked(&dev_priv->irq_received);
40254
40255 iir = I915_READ(IIR);
40256
40257 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40258 index 3c5ff7a..ae759ca 100644
40259 --- a/drivers/gpu/drm/i915/intel_display.c
40260 +++ b/drivers/gpu/drm/i915/intel_display.c
40261 @@ -10506,13 +10506,13 @@ struct intel_quirk {
40262 int subsystem_vendor;
40263 int subsystem_device;
40264 void (*hook)(struct drm_device *dev);
40265 -};
40266 +} __do_const;
40267
40268 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40269 struct intel_dmi_quirk {
40270 void (*hook)(struct drm_device *dev);
40271 const struct dmi_system_id (*dmi_id_list)[];
40272 -};
40273 +} __do_const;
40274
40275 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40276 {
40277 @@ -10520,18 +10520,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40278 return 1;
40279 }
40280
40281 -static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40282 +static const struct dmi_system_id intel_dmi_quirks_table[] = {
40283 {
40284 - .dmi_id_list = &(const struct dmi_system_id[]) {
40285 - {
40286 - .callback = intel_dmi_reverse_brightness,
40287 - .ident = "NCR Corporation",
40288 - .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40289 - DMI_MATCH(DMI_PRODUCT_NAME, ""),
40290 - },
40291 - },
40292 - { } /* terminating entry */
40293 + .callback = intel_dmi_reverse_brightness,
40294 + .ident = "NCR Corporation",
40295 + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40296 + DMI_MATCH(DMI_PRODUCT_NAME, ""),
40297 },
40298 + },
40299 + { } /* terminating entry */
40300 +};
40301 +
40302 +static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40303 + {
40304 + .dmi_id_list = &intel_dmi_quirks_table,
40305 .hook = quirk_invert_brightness,
40306 },
40307 };
40308 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40309 index ca4bc54..ee598a2 100644
40310 --- a/drivers/gpu/drm/mga/mga_drv.h
40311 +++ b/drivers/gpu/drm/mga/mga_drv.h
40312 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
40313 u32 clear_cmd;
40314 u32 maccess;
40315
40316 - atomic_t vbl_received; /**< Number of vblanks received. */
40317 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40318 wait_queue_head_t fence_queue;
40319 - atomic_t last_fence_retired;
40320 + atomic_unchecked_t last_fence_retired;
40321 u32 next_fence_to_post;
40322
40323 unsigned int fb_cpp;
40324 diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40325 index 709e90d..89a1c0d 100644
40326 --- a/drivers/gpu/drm/mga/mga_ioc32.c
40327 +++ b/drivers/gpu/drm/mga/mga_ioc32.c
40328 @@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40329 return 0;
40330 }
40331
40332 -drm_ioctl_compat_t *mga_compat_ioctls[] = {
40333 +drm_ioctl_compat_t mga_compat_ioctls[] = {
40334 [DRM_MGA_INIT] = compat_mga_init,
40335 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40336 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40337 @@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40338 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40339 {
40340 unsigned int nr = DRM_IOCTL_NR(cmd);
40341 - drm_ioctl_compat_t *fn = NULL;
40342 int ret;
40343
40344 if (nr < DRM_COMMAND_BASE)
40345 return drm_compat_ioctl(filp, cmd, arg);
40346
40347 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
40348 - fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40349 -
40350 - if (fn != NULL)
40351 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
40352 + drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40353 ret = (*fn) (filp, cmd, arg);
40354 - else
40355 + } else
40356 ret = drm_ioctl(filp, cmd, arg);
40357
40358 return ret;
40359 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40360 index 2b0ceb8..517e99e 100644
40361 --- a/drivers/gpu/drm/mga/mga_irq.c
40362 +++ b/drivers/gpu/drm/mga/mga_irq.c
40363 @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40364 if (crtc != 0)
40365 return 0;
40366
40367 - return atomic_read(&dev_priv->vbl_received);
40368 + return atomic_read_unchecked(&dev_priv->vbl_received);
40369 }
40370
40371
40372 @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
40373 /* VBLANK interrupt */
40374 if (status & MGA_VLINEPEN) {
40375 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40376 - atomic_inc(&dev_priv->vbl_received);
40377 + atomic_inc_unchecked(&dev_priv->vbl_received);
40378 drm_handle_vblank(dev, 0);
40379 handled = 1;
40380 }
40381 @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
40382 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40383 MGA_WRITE(MGA_PRIMEND, prim_end);
40384
40385 - atomic_inc(&dev_priv->last_fence_retired);
40386 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
40387 DRM_WAKEUP(&dev_priv->fence_queue);
40388 handled = 1;
40389 }
40390 @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40391 * using fences.
40392 */
40393 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
40394 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40395 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40396 - *sequence) <= (1 << 23)));
40397
40398 *sequence = cur_fence;
40399 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40400 index 4c3feaa..26391ce 100644
40401 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40402 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40403 @@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40404 struct bit_table {
40405 const char id;
40406 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40407 -};
40408 +} __no_const;
40409
40410 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40411
40412 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40413 index 4b0fb6c..67667a9 100644
40414 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40415 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40416 @@ -96,7 +96,6 @@ struct nouveau_drm {
40417 struct drm_global_reference mem_global_ref;
40418 struct ttm_bo_global_ref bo_global_ref;
40419 struct ttm_bo_device bdev;
40420 - atomic_t validate_sequence;
40421 int (*move)(struct nouveau_channel *,
40422 struct ttm_buffer_object *,
40423 struct ttm_mem_reg *, struct ttm_mem_reg *);
40424 diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40425 index c1a7e5a..38b8539 100644
40426 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40427 +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40428 @@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40429 unsigned long arg)
40430 {
40431 unsigned int nr = DRM_IOCTL_NR(cmd);
40432 - drm_ioctl_compat_t *fn = NULL;
40433 + drm_ioctl_compat_t fn = NULL;
40434 int ret;
40435
40436 if (nr < DRM_COMMAND_BASE)
40437 diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40438 index 19e3757..ad16478 100644
40439 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40440 +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40441 @@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40442 }
40443
40444 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40445 - nouveau_vram_manager_init,
40446 - nouveau_vram_manager_fini,
40447 - nouveau_vram_manager_new,
40448 - nouveau_vram_manager_del,
40449 - nouveau_vram_manager_debug
40450 + .init = nouveau_vram_manager_init,
40451 + .takedown = nouveau_vram_manager_fini,
40452 + .get_node = nouveau_vram_manager_new,
40453 + .put_node = nouveau_vram_manager_del,
40454 + .debug = nouveau_vram_manager_debug
40455 };
40456
40457 static int
40458 @@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40459 }
40460
40461 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40462 - nouveau_gart_manager_init,
40463 - nouveau_gart_manager_fini,
40464 - nouveau_gart_manager_new,
40465 - nouveau_gart_manager_del,
40466 - nouveau_gart_manager_debug
40467 + .init = nouveau_gart_manager_init,
40468 + .takedown = nouveau_gart_manager_fini,
40469 + .get_node = nouveau_gart_manager_new,
40470 + .put_node = nouveau_gart_manager_del,
40471 + .debug = nouveau_gart_manager_debug
40472 };
40473
40474 #include <core/subdev/vm/nv04.h>
40475 @@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40476 }
40477
40478 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40479 - nv04_gart_manager_init,
40480 - nv04_gart_manager_fini,
40481 - nv04_gart_manager_new,
40482 - nv04_gart_manager_del,
40483 - nv04_gart_manager_debug
40484 + .init = nv04_gart_manager_init,
40485 + .takedown = nv04_gart_manager_fini,
40486 + .get_node = nv04_gart_manager_new,
40487 + .put_node = nv04_gart_manager_del,
40488 + .debug = nv04_gart_manager_debug
40489 };
40490
40491 int
40492 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40493 index 81638d7..2e45854 100644
40494 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40495 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40496 @@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40497 bool can_switch;
40498
40499 spin_lock(&dev->count_lock);
40500 - can_switch = (dev->open_count == 0);
40501 + can_switch = (local_read(&dev->open_count) == 0);
40502 spin_unlock(&dev->count_lock);
40503 return can_switch;
40504 }
40505 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40506 index eb89653..613cf71 100644
40507 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
40508 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40509 @@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40510 int ret;
40511
40512 mutex_lock(&qdev->async_io_mutex);
40513 - irq_num = atomic_read(&qdev->irq_received_io_cmd);
40514 + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40515 if (qdev->last_sent_io_cmd > irq_num) {
40516 if (intr)
40517 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40518 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40519 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40520 else
40521 ret = wait_event_timeout(qdev->io_cmd_event,
40522 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40523 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40524 /* 0 is timeout, just bail the "hw" has gone away */
40525 if (ret <= 0)
40526 goto out;
40527 - irq_num = atomic_read(&qdev->irq_received_io_cmd);
40528 + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40529 }
40530 outb(val, addr);
40531 qdev->last_sent_io_cmd = irq_num + 1;
40532 if (intr)
40533 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40534 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40535 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40536 else
40537 ret = wait_event_timeout(qdev->io_cmd_event,
40538 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40539 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40540 out:
40541 if (ret > 0)
40542 ret = 0;
40543 diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40544 index c3c2bbd..bc3c0fb 100644
40545 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40546 +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40547 @@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40548 struct drm_info_node *node = (struct drm_info_node *) m->private;
40549 struct qxl_device *qdev = node->minor->dev->dev_private;
40550
40551 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40552 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40553 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40554 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40555 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40556 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40557 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40558 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40559 seq_printf(m, "%d\n", qdev->irq_received_error);
40560 return 0;
40561 }
40562 diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40563 index 7bda32f..dd98fc5 100644
40564 --- a/drivers/gpu/drm/qxl/qxl_drv.h
40565 +++ b/drivers/gpu/drm/qxl/qxl_drv.h
40566 @@ -290,10 +290,10 @@ struct qxl_device {
40567 unsigned int last_sent_io_cmd;
40568
40569 /* interrupt handling */
40570 - atomic_t irq_received;
40571 - atomic_t irq_received_display;
40572 - atomic_t irq_received_cursor;
40573 - atomic_t irq_received_io_cmd;
40574 + atomic_unchecked_t irq_received;
40575 + atomic_unchecked_t irq_received_display;
40576 + atomic_unchecked_t irq_received_cursor;
40577 + atomic_unchecked_t irq_received_io_cmd;
40578 unsigned irq_received_error;
40579 wait_queue_head_t display_event;
40580 wait_queue_head_t cursor_event;
40581 diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40582 index 7b95c75..9cffb4f 100644
40583 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40584 +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40585 @@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40586
40587 /* TODO copy slow path code from i915 */
40588 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40589 - unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40590 + unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40591
40592 {
40593 struct qxl_drawable *draw = fb_cmd;
40594 @@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40595 struct drm_qxl_reloc reloc;
40596
40597 if (DRM_COPY_FROM_USER(&reloc,
40598 - &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40599 + &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40600 sizeof(reloc))) {
40601 ret = -EFAULT;
40602 goto out_free_bos;
40603 @@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40604 struct drm_qxl_command *commands =
40605 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40606
40607 - if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
40608 + if (DRM_COPY_FROM_USER(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40609 sizeof(user_cmd)))
40610 return -EFAULT;
40611
40612 diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40613 index 21393dc..329f3a9 100644
40614 --- a/drivers/gpu/drm/qxl/qxl_irq.c
40615 +++ b/drivers/gpu/drm/qxl/qxl_irq.c
40616 @@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
40617
40618 pending = xchg(&qdev->ram_header->int_pending, 0);
40619
40620 - atomic_inc(&qdev->irq_received);
40621 + atomic_inc_unchecked(&qdev->irq_received);
40622
40623 if (pending & QXL_INTERRUPT_DISPLAY) {
40624 - atomic_inc(&qdev->irq_received_display);
40625 + atomic_inc_unchecked(&qdev->irq_received_display);
40626 wake_up_all(&qdev->display_event);
40627 qxl_queue_garbage_collect(qdev, false);
40628 }
40629 if (pending & QXL_INTERRUPT_CURSOR) {
40630 - atomic_inc(&qdev->irq_received_cursor);
40631 + atomic_inc_unchecked(&qdev->irq_received_cursor);
40632 wake_up_all(&qdev->cursor_event);
40633 }
40634 if (pending & QXL_INTERRUPT_IO_CMD) {
40635 - atomic_inc(&qdev->irq_received_io_cmd);
40636 + atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40637 wake_up_all(&qdev->io_cmd_event);
40638 }
40639 if (pending & QXL_INTERRUPT_ERROR) {
40640 @@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40641 init_waitqueue_head(&qdev->io_cmd_event);
40642 INIT_WORK(&qdev->client_monitors_config_work,
40643 qxl_client_monitors_config_work_func);
40644 - atomic_set(&qdev->irq_received, 0);
40645 - atomic_set(&qdev->irq_received_display, 0);
40646 - atomic_set(&qdev->irq_received_cursor, 0);
40647 - atomic_set(&qdev->irq_received_io_cmd, 0);
40648 + atomic_set_unchecked(&qdev->irq_received, 0);
40649 + atomic_set_unchecked(&qdev->irq_received_display, 0);
40650 + atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40651 + atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40652 qdev->irq_received_error = 0;
40653 ret = drm_irq_install(qdev->ddev);
40654 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40655 diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40656 index c7e7e65..7dddd4d 100644
40657 --- a/drivers/gpu/drm/qxl/qxl_ttm.c
40658 +++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40659 @@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40660 }
40661 }
40662
40663 -static struct vm_operations_struct qxl_ttm_vm_ops;
40664 +static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40665 static const struct vm_operations_struct *ttm_vm_ops;
40666
40667 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40668 @@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40669 return r;
40670 if (unlikely(ttm_vm_ops == NULL)) {
40671 ttm_vm_ops = vma->vm_ops;
40672 + pax_open_kernel();
40673 qxl_ttm_vm_ops = *ttm_vm_ops;
40674 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40675 + pax_close_kernel();
40676 }
40677 vma->vm_ops = &qxl_ttm_vm_ops;
40678 return 0;
40679 @@ -560,25 +562,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40680 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40681 {
40682 #if defined(CONFIG_DEBUG_FS)
40683 - static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40684 - static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40685 - unsigned i;
40686 + static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40687 + {
40688 + .name = "qxl_mem_mm",
40689 + .show = &qxl_mm_dump_table,
40690 + },
40691 + {
40692 + .name = "qxl_surf_mm",
40693 + .show = &qxl_mm_dump_table,
40694 + }
40695 + };
40696
40697 - for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40698 - if (i == 0)
40699 - sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40700 - else
40701 - sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40702 - qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40703 - qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40704 - qxl_mem_types_list[i].driver_features = 0;
40705 - if (i == 0)
40706 - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40707 - else
40708 - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40709 + pax_open_kernel();
40710 + *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40711 + *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40712 + pax_close_kernel();
40713
40714 - }
40715 - return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
40716 + return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
40717 #else
40718 return 0;
40719 #endif
40720 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
40721 index c451257..0ad2134 100644
40722 --- a/drivers/gpu/drm/r128/r128_cce.c
40723 +++ b/drivers/gpu/drm/r128/r128_cce.c
40724 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
40725
40726 /* GH: Simple idle check.
40727 */
40728 - atomic_set(&dev_priv->idle_count, 0);
40729 + atomic_set_unchecked(&dev_priv->idle_count, 0);
40730
40731 /* We don't support anything other than bus-mastering ring mode,
40732 * but the ring can be in either AGP or PCI space for the ring
40733 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
40734 index 56eb5e3..c4ec43d 100644
40735 --- a/drivers/gpu/drm/r128/r128_drv.h
40736 +++ b/drivers/gpu/drm/r128/r128_drv.h
40737 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
40738 int is_pci;
40739 unsigned long cce_buffers_offset;
40740
40741 - atomic_t idle_count;
40742 + atomic_unchecked_t idle_count;
40743
40744 int page_flipping;
40745 int current_page;
40746 u32 crtc_offset;
40747 u32 crtc_offset_cntl;
40748
40749 - atomic_t vbl_received;
40750 + atomic_unchecked_t vbl_received;
40751
40752 u32 color_fmt;
40753 unsigned int front_offset;
40754 diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
40755 index a954c54..9cc595c 100644
40756 --- a/drivers/gpu/drm/r128/r128_ioc32.c
40757 +++ b/drivers/gpu/drm/r128/r128_ioc32.c
40758 @@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
40759 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
40760 }
40761
40762 -drm_ioctl_compat_t *r128_compat_ioctls[] = {
40763 +drm_ioctl_compat_t r128_compat_ioctls[] = {
40764 [DRM_R128_INIT] = compat_r128_init,
40765 [DRM_R128_DEPTH] = compat_r128_depth,
40766 [DRM_R128_STIPPLE] = compat_r128_stipple,
40767 @@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
40768 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40769 {
40770 unsigned int nr = DRM_IOCTL_NR(cmd);
40771 - drm_ioctl_compat_t *fn = NULL;
40772 int ret;
40773
40774 if (nr < DRM_COMMAND_BASE)
40775 return drm_compat_ioctl(filp, cmd, arg);
40776
40777 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
40778 - fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40779 -
40780 - if (fn != NULL)
40781 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
40782 + drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40783 ret = (*fn) (filp, cmd, arg);
40784 - else
40785 + } else
40786 ret = drm_ioctl(filp, cmd, arg);
40787
40788 return ret;
40789 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
40790 index 2ea4f09..d391371 100644
40791 --- a/drivers/gpu/drm/r128/r128_irq.c
40792 +++ b/drivers/gpu/drm/r128/r128_irq.c
40793 @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
40794 if (crtc != 0)
40795 return 0;
40796
40797 - return atomic_read(&dev_priv->vbl_received);
40798 + return atomic_read_unchecked(&dev_priv->vbl_received);
40799 }
40800
40801 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40802 @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40803 /* VBLANK interrupt */
40804 if (status & R128_CRTC_VBLANK_INT) {
40805 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
40806 - atomic_inc(&dev_priv->vbl_received);
40807 + atomic_inc_unchecked(&dev_priv->vbl_received);
40808 drm_handle_vblank(dev, 0);
40809 return IRQ_HANDLED;
40810 }
40811 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
40812 index 01dd9ae..6352f04 100644
40813 --- a/drivers/gpu/drm/r128/r128_state.c
40814 +++ b/drivers/gpu/drm/r128/r128_state.c
40815 @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
40816
40817 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
40818 {
40819 - if (atomic_read(&dev_priv->idle_count) == 0)
40820 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
40821 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
40822 else
40823 - atomic_set(&dev_priv->idle_count, 0);
40824 + atomic_set_unchecked(&dev_priv->idle_count, 0);
40825 }
40826
40827 #endif
40828 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
40829 index af85299..ed9ac8d 100644
40830 --- a/drivers/gpu/drm/radeon/mkregtable.c
40831 +++ b/drivers/gpu/drm/radeon/mkregtable.c
40832 @@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
40833 regex_t mask_rex;
40834 regmatch_t match[4];
40835 char buf[1024];
40836 - size_t end;
40837 + long end;
40838 int len;
40839 int done = 0;
40840 int r;
40841 unsigned o;
40842 struct offset *offset;
40843 char last_reg_s[10];
40844 - int last_reg;
40845 + unsigned long last_reg;
40846
40847 if (regcomp
40848 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
40849 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
40850 index 39b033b..6efc056 100644
40851 --- a/drivers/gpu/drm/radeon/radeon_device.c
40852 +++ b/drivers/gpu/drm/radeon/radeon_device.c
40853 @@ -1120,7 +1120,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
40854 bool can_switch;
40855
40856 spin_lock(&dev->count_lock);
40857 - can_switch = (dev->open_count == 0);
40858 + can_switch = (local_read(&dev->open_count) == 0);
40859 spin_unlock(&dev->count_lock);
40860 return can_switch;
40861 }
40862 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
40863 index 00e0d44..08381a4 100644
40864 --- a/drivers/gpu/drm/radeon/radeon_drv.h
40865 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
40866 @@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
40867
40868 /* SW interrupt */
40869 wait_queue_head_t swi_queue;
40870 - atomic_t swi_emitted;
40871 + atomic_unchecked_t swi_emitted;
40872 int vblank_crtc;
40873 uint32_t irq_enable_reg;
40874 uint32_t r500_disp_irq_reg;
40875 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
40876 index bdb0f93..5ff558f 100644
40877 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
40878 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
40879 @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40880 request = compat_alloc_user_space(sizeof(*request));
40881 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
40882 || __put_user(req32.param, &request->param)
40883 - || __put_user((void __user *)(unsigned long)req32.value,
40884 + || __put_user((unsigned long)req32.value,
40885 &request->value))
40886 return -EFAULT;
40887
40888 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40889 #define compat_radeon_cp_setparam NULL
40890 #endif /* X86_64 || IA64 */
40891
40892 -static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40893 +static drm_ioctl_compat_t radeon_compat_ioctls[] = {
40894 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
40895 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
40896 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
40897 @@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40898 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40899 {
40900 unsigned int nr = DRM_IOCTL_NR(cmd);
40901 - drm_ioctl_compat_t *fn = NULL;
40902 int ret;
40903
40904 if (nr < DRM_COMMAND_BASE)
40905 return drm_compat_ioctl(filp, cmd, arg);
40906
40907 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
40908 - fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40909 -
40910 - if (fn != NULL)
40911 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
40912 + drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40913 ret = (*fn) (filp, cmd, arg);
40914 - else
40915 + } else
40916 ret = drm_ioctl(filp, cmd, arg);
40917
40918 return ret;
40919 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
40920 index 8d68e97..9dcfed8 100644
40921 --- a/drivers/gpu/drm/radeon/radeon_irq.c
40922 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
40923 @@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
40924 unsigned int ret;
40925 RING_LOCALS;
40926
40927 - atomic_inc(&dev_priv->swi_emitted);
40928 - ret = atomic_read(&dev_priv->swi_emitted);
40929 + atomic_inc_unchecked(&dev_priv->swi_emitted);
40930 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
40931
40932 BEGIN_RING(4);
40933 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
40934 @@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
40935 drm_radeon_private_t *dev_priv =
40936 (drm_radeon_private_t *) dev->dev_private;
40937
40938 - atomic_set(&dev_priv->swi_emitted, 0);
40939 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
40940 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
40941
40942 dev->max_vblank_count = 0x001fffff;
40943 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
40944 index 4d20910..6726b6d 100644
40945 --- a/drivers/gpu/drm/radeon/radeon_state.c
40946 +++ b/drivers/gpu/drm/radeon/radeon_state.c
40947 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
40948 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
40949 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
40950
40951 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40952 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40953 sarea_priv->nbox * sizeof(depth_boxes[0])))
40954 return -EFAULT;
40955
40956 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
40957 {
40958 drm_radeon_private_t *dev_priv = dev->dev_private;
40959 drm_radeon_getparam_t *param = data;
40960 - int value;
40961 + int value = 0;
40962
40963 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
40964
40965 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
40966 index 84323c9..cf07baf 100644
40967 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
40968 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
40969 @@ -787,7 +787,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
40970 man->size = size >> PAGE_SHIFT;
40971 }
40972
40973 -static struct vm_operations_struct radeon_ttm_vm_ops;
40974 +static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
40975 static const struct vm_operations_struct *ttm_vm_ops = NULL;
40976
40977 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40978 @@ -828,8 +828,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
40979 }
40980 if (unlikely(ttm_vm_ops == NULL)) {
40981 ttm_vm_ops = vma->vm_ops;
40982 + pax_open_kernel();
40983 radeon_ttm_vm_ops = *ttm_vm_ops;
40984 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
40985 + pax_close_kernel();
40986 }
40987 vma->vm_ops = &radeon_ttm_vm_ops;
40988 return 0;
40989 @@ -858,38 +860,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
40990 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
40991 {
40992 #if defined(CONFIG_DEBUG_FS)
40993 - static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
40994 - static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
40995 - unsigned i;
40996 + static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
40997 + {
40998 + .name = "radeon_vram_mm",
40999 + .show = &radeon_mm_dump_table,
41000 + },
41001 + {
41002 + .name = "radeon_gtt_mm",
41003 + .show = &radeon_mm_dump_table,
41004 + },
41005 + {
41006 + .name = "ttm_page_pool",
41007 + .show = &ttm_page_alloc_debugfs,
41008 + },
41009 + {
41010 + .name = "ttm_dma_page_pool",
41011 + .show = &ttm_dma_page_alloc_debugfs,
41012 + },
41013 + };
41014 + unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
41015
41016 - for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
41017 - if (i == 0)
41018 - sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
41019 - else
41020 - sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
41021 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
41022 - radeon_mem_types_list[i].show = &radeon_mm_dump_table;
41023 - radeon_mem_types_list[i].driver_features = 0;
41024 - if (i == 0)
41025 - radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
41026 - else
41027 - radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
41028 -
41029 - }
41030 - /* Add ttm page pool to debugfs */
41031 - sprintf(radeon_mem_types_names[i], "ttm_page_pool");
41032 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
41033 - radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
41034 - radeon_mem_types_list[i].driver_features = 0;
41035 - radeon_mem_types_list[i++].data = NULL;
41036 + pax_open_kernel();
41037 + *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
41038 + *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
41039 + pax_close_kernel();
41040 #ifdef CONFIG_SWIOTLB
41041 - if (swiotlb_nr_tbl()) {
41042 - sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
41043 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
41044 - radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
41045 - radeon_mem_types_list[i].driver_features = 0;
41046 - radeon_mem_types_list[i++].data = NULL;
41047 - }
41048 + if (swiotlb_nr_tbl())
41049 + i++;
41050 #endif
41051 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
41052
41053 diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41054 index ae1cb31..5b5b6b7c 100644
41055 --- a/drivers/gpu/drm/tegra/dc.c
41056 +++ b/drivers/gpu/drm/tegra/dc.c
41057 @@ -1064,7 +1064,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41058 }
41059
41060 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41061 - dc->debugfs_files[i].data = dc;
41062 + *(void **)&dc->debugfs_files[i].data = dc;
41063
41064 err = drm_debugfs_create_files(dc->debugfs_files,
41065 ARRAY_SIZE(debugfs_files),
41066 diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41067 index 0cd9bc2..9759be4 100644
41068 --- a/drivers/gpu/drm/tegra/hdmi.c
41069 +++ b/drivers/gpu/drm/tegra/hdmi.c
41070 @@ -57,7 +57,7 @@ struct tegra_hdmi {
41071 bool stereo;
41072 bool dvi;
41073
41074 - struct drm_info_list *debugfs_files;
41075 + drm_info_list_no_const *debugfs_files;
41076 struct drm_minor *minor;
41077 struct dentry *debugfs;
41078 };
41079 diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41080 index c58eba33..83c2728 100644
41081 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41082 +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41083 @@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41084 }
41085
41086 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41087 - ttm_bo_man_init,
41088 - ttm_bo_man_takedown,
41089 - ttm_bo_man_get_node,
41090 - ttm_bo_man_put_node,
41091 - ttm_bo_man_debug
41092 + .init = ttm_bo_man_init,
41093 + .takedown = ttm_bo_man_takedown,
41094 + .get_node = ttm_bo_man_get_node,
41095 + .put_node = ttm_bo_man_put_node,
41096 + .debug = ttm_bo_man_debug
41097 };
41098 EXPORT_SYMBOL(ttm_bo_manager_func);
41099 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41100 index dbc2def..0a9f710 100644
41101 --- a/drivers/gpu/drm/ttm/ttm_memory.c
41102 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
41103 @@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41104 zone->glob = glob;
41105 glob->zone_kernel = zone;
41106 ret = kobject_init_and_add(
41107 - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41108 + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41109 if (unlikely(ret != 0)) {
41110 kobject_put(&zone->kobj);
41111 return ret;
41112 @@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41113 zone->glob = glob;
41114 glob->zone_dma32 = zone;
41115 ret = kobject_init_and_add(
41116 - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41117 + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41118 if (unlikely(ret != 0)) {
41119 kobject_put(&zone->kobj);
41120 return ret;
41121 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41122 index 863bef9..cba15cf 100644
41123 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41124 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41125 @@ -391,9 +391,9 @@ out:
41126 static unsigned long
41127 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41128 {
41129 - static atomic_t start_pool = ATOMIC_INIT(0);
41130 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
41131 unsigned i;
41132 - unsigned pool_offset = atomic_add_return(1, &start_pool);
41133 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
41134 struct ttm_page_pool *pool;
41135 int shrink_pages = sc->nr_to_scan;
41136 unsigned long freed = 0;
41137 diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41138 index 97e9d61..bf23c461 100644
41139 --- a/drivers/gpu/drm/udl/udl_fb.c
41140 +++ b/drivers/gpu/drm/udl/udl_fb.c
41141 @@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41142 fb_deferred_io_cleanup(info);
41143 kfree(info->fbdefio);
41144 info->fbdefio = NULL;
41145 - info->fbops->fb_mmap = udl_fb_mmap;
41146 }
41147
41148 pr_warn("released /dev/fb%d user=%d count=%d\n",
41149 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41150 index a811ef2..ff99b05 100644
41151 --- a/drivers/gpu/drm/via/via_drv.h
41152 +++ b/drivers/gpu/drm/via/via_drv.h
41153 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
41154 typedef uint32_t maskarray_t[5];
41155
41156 typedef struct drm_via_irq {
41157 - atomic_t irq_received;
41158 + atomic_unchecked_t irq_received;
41159 uint32_t pending_mask;
41160 uint32_t enable_mask;
41161 wait_queue_head_t irq_queue;
41162 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
41163 struct timeval last_vblank;
41164 int last_vblank_valid;
41165 unsigned usec_per_vblank;
41166 - atomic_t vbl_received;
41167 + atomic_unchecked_t vbl_received;
41168 drm_via_state_t hc_state;
41169 char pci_buf[VIA_PCI_BUF_SIZE];
41170 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41171 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41172 index ac98964..5dbf512 100644
41173 --- a/drivers/gpu/drm/via/via_irq.c
41174 +++ b/drivers/gpu/drm/via/via_irq.c
41175 @@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41176 if (crtc != 0)
41177 return 0;
41178
41179 - return atomic_read(&dev_priv->vbl_received);
41180 + return atomic_read_unchecked(&dev_priv->vbl_received);
41181 }
41182
41183 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41184 @@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41185
41186 status = VIA_READ(VIA_REG_INTERRUPT);
41187 if (status & VIA_IRQ_VBLANK_PENDING) {
41188 - atomic_inc(&dev_priv->vbl_received);
41189 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41190 + atomic_inc_unchecked(&dev_priv->vbl_received);
41191 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41192 do_gettimeofday(&cur_vblank);
41193 if (dev_priv->last_vblank_valid) {
41194 dev_priv->usec_per_vblank =
41195 @@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41196 dev_priv->last_vblank = cur_vblank;
41197 dev_priv->last_vblank_valid = 1;
41198 }
41199 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41200 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41201 DRM_DEBUG("US per vblank is: %u\n",
41202 dev_priv->usec_per_vblank);
41203 }
41204 @@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41205
41206 for (i = 0; i < dev_priv->num_irqs; ++i) {
41207 if (status & cur_irq->pending_mask) {
41208 - atomic_inc(&cur_irq->irq_received);
41209 + atomic_inc_unchecked(&cur_irq->irq_received);
41210 DRM_WAKEUP(&cur_irq->irq_queue);
41211 handled = 1;
41212 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41213 @@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41214 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
41215 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41216 masks[irq][4]));
41217 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41218 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41219 } else {
41220 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
41221 (((cur_irq_sequence =
41222 - atomic_read(&cur_irq->irq_received)) -
41223 + atomic_read_unchecked(&cur_irq->irq_received)) -
41224 *sequence) <= (1 << 23)));
41225 }
41226 *sequence = cur_irq_sequence;
41227 @@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41228 }
41229
41230 for (i = 0; i < dev_priv->num_irqs; ++i) {
41231 - atomic_set(&cur_irq->irq_received, 0);
41232 + atomic_set_unchecked(&cur_irq->irq_received, 0);
41233 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41234 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41235 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
41236 @@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41237 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41238 case VIA_IRQ_RELATIVE:
41239 irqwait->request.sequence +=
41240 - atomic_read(&cur_irq->irq_received);
41241 + atomic_read_unchecked(&cur_irq->irq_received);
41242 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41243 case VIA_IRQ_ABSOLUTE:
41244 break;
41245 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41246 index c0b73b9..f6f7f34 100644
41247 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41248 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41249 @@ -341,7 +341,7 @@ struct vmw_private {
41250 * Fencing and IRQs.
41251 */
41252
41253 - atomic_t marker_seq;
41254 + atomic_unchecked_t marker_seq;
41255 wait_queue_head_t fence_queue;
41256 wait_queue_head_t fifo_queue;
41257 int fence_queue_waiters; /* Protected by hw_mutex */
41258 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41259 index 3eb1486..0a47ee9 100644
41260 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41261 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41262 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41263 (unsigned int) min,
41264 (unsigned int) fifo->capabilities);
41265
41266 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41267 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41268 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41269 vmw_marker_queue_init(&fifo->marker_queue);
41270 return vmw_fifo_send_fence(dev_priv, &dummy);
41271 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41272 if (reserveable)
41273 iowrite32(bytes, fifo_mem +
41274 SVGA_FIFO_RESERVED);
41275 - return fifo_mem + (next_cmd >> 2);
41276 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41277 } else {
41278 need_bounce = true;
41279 }
41280 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41281
41282 fm = vmw_fifo_reserve(dev_priv, bytes);
41283 if (unlikely(fm == NULL)) {
41284 - *seqno = atomic_read(&dev_priv->marker_seq);
41285 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41286 ret = -ENOMEM;
41287 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41288 false, 3*HZ);
41289 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41290 }
41291
41292 do {
41293 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41294 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41295 } while (*seqno == 0);
41296
41297 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41298 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41299 index c5c054a..46f0548 100644
41300 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41301 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41302 @@ -153,9 +153,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41303 }
41304
41305 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41306 - vmw_gmrid_man_init,
41307 - vmw_gmrid_man_takedown,
41308 - vmw_gmrid_man_get_node,
41309 - vmw_gmrid_man_put_node,
41310 - vmw_gmrid_man_debug
41311 + .init = vmw_gmrid_man_init,
41312 + .takedown = vmw_gmrid_man_takedown,
41313 + .get_node = vmw_gmrid_man_get_node,
41314 + .put_node = vmw_gmrid_man_put_node,
41315 + .debug = vmw_gmrid_man_debug
41316 };
41317 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41318 index 45d5b5a..f3f5e4e 100644
41319 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41320 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41321 @@ -141,7 +141,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41322 int ret;
41323
41324 num_clips = arg->num_clips;
41325 - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41326 + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41327
41328 if (unlikely(num_clips == 0))
41329 return 0;
41330 @@ -225,7 +225,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41331 int ret;
41332
41333 num_clips = arg->num_clips;
41334 - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41335 + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41336
41337 if (unlikely(num_clips == 0))
41338 return 0;
41339 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41340 index 4640adb..e1384ed 100644
41341 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41342 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41343 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41344 * emitted. Then the fence is stale and signaled.
41345 */
41346
41347 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41348 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41349 > VMW_FENCE_WRAP);
41350
41351 return ret;
41352 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41353
41354 if (fifo_idle)
41355 down_read(&fifo_state->rwsem);
41356 - signal_seq = atomic_read(&dev_priv->marker_seq);
41357 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41358 ret = 0;
41359
41360 for (;;) {
41361 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41362 index 8a8725c2..afed796 100644
41363 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41364 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41365 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41366 while (!vmw_lag_lt(queue, us)) {
41367 spin_lock(&queue->lock);
41368 if (list_empty(&queue->head))
41369 - seqno = atomic_read(&dev_priv->marker_seq);
41370 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41371 else {
41372 marker = list_first_entry(&queue->head,
41373 struct vmw_marker, head);
41374 diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41375 index ec0ae2d..dc0780b 100644
41376 --- a/drivers/gpu/vga/vga_switcheroo.c
41377 +++ b/drivers/gpu/vga/vga_switcheroo.c
41378 @@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41379
41380 /* this version is for the case where the power switch is separate
41381 to the device being powered down. */
41382 -int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41383 +int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41384 {
41385 /* copy over all the bus versions */
41386 if (dev->bus && dev->bus->pm) {
41387 @@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41388 return ret;
41389 }
41390
41391 -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41392 +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41393 {
41394 /* copy over all the bus versions */
41395 if (dev->bus && dev->bus->pm) {
41396 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41397 index 253fe23..0dfec5f 100644
41398 --- a/drivers/hid/hid-core.c
41399 +++ b/drivers/hid/hid-core.c
41400 @@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41401
41402 int hid_add_device(struct hid_device *hdev)
41403 {
41404 - static atomic_t id = ATOMIC_INIT(0);
41405 + static atomic_unchecked_t id = ATOMIC_INIT(0);
41406 int ret;
41407
41408 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41409 @@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hdev)
41410 /* XXX hack, any other cleaner solution after the driver core
41411 * is converted to allow more than 20 bytes as the device name? */
41412 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41413 - hdev->vendor, hdev->product, atomic_inc_return(&id));
41414 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41415
41416 hid_debug_register(hdev, dev_name(&hdev->dev));
41417 ret = device_add(&hdev->dev);
41418 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41419 index c13fb5b..55a3802 100644
41420 --- a/drivers/hid/hid-wiimote-debug.c
41421 +++ b/drivers/hid/hid-wiimote-debug.c
41422 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41423 else if (size == 0)
41424 return -EIO;
41425
41426 - if (copy_to_user(u, buf, size))
41427 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
41428 return -EFAULT;
41429
41430 *off += size;
41431 diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
41432 index cedc6da..2c3da2a 100644
41433 --- a/drivers/hid/uhid.c
41434 +++ b/drivers/hid/uhid.c
41435 @@ -47,7 +47,7 @@ struct uhid_device {
41436 struct mutex report_lock;
41437 wait_queue_head_t report_wait;
41438 atomic_t report_done;
41439 - atomic_t report_id;
41440 + atomic_unchecked_t report_id;
41441 struct uhid_event report_buf;
41442 };
41443
41444 @@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
41445
41446 spin_lock_irqsave(&uhid->qlock, flags);
41447 ev->type = UHID_FEATURE;
41448 - ev->u.feature.id = atomic_inc_return(&uhid->report_id);
41449 + ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
41450 ev->u.feature.rnum = rnum;
41451 ev->u.feature.rtype = report_type;
41452
41453 @@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
41454 spin_lock_irqsave(&uhid->qlock, flags);
41455
41456 /* id for old report; drop it silently */
41457 - if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
41458 + if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
41459 goto unlock;
41460 if (atomic_read(&uhid->report_done))
41461 goto unlock;
41462 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41463 index cea623c..73011b0 100644
41464 --- a/drivers/hv/channel.c
41465 +++ b/drivers/hv/channel.c
41466 @@ -362,8 +362,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41467 int ret = 0;
41468 int t;
41469
41470 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41471 - atomic_inc(&vmbus_connection.next_gpadl_handle);
41472 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41473 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41474
41475 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41476 if (ret)
41477 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41478 index f0c5e07..399256e 100644
41479 --- a/drivers/hv/hv.c
41480 +++ b/drivers/hv/hv.c
41481 @@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41482 u64 output_address = (output) ? virt_to_phys(output) : 0;
41483 u32 output_address_hi = output_address >> 32;
41484 u32 output_address_lo = output_address & 0xFFFFFFFF;
41485 - void *hypercall_page = hv_context.hypercall_page;
41486 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41487
41488 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41489 "=a"(hv_status_lo) : "d" (control_hi),
41490 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41491 index 7e17a54..a50a33d 100644
41492 --- a/drivers/hv/hv_balloon.c
41493 +++ b/drivers/hv/hv_balloon.c
41494 @@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41495
41496 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41497 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41498 -static atomic_t trans_id = ATOMIC_INIT(0);
41499 +static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41500
41501 static int dm_ring_size = (5 * PAGE_SIZE);
41502
41503 @@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
41504 pr_info("Memory hot add failed\n");
41505
41506 dm->state = DM_INITIALIZED;
41507 - resp.hdr.trans_id = atomic_inc_return(&trans_id);
41508 + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41509 vmbus_sendpacket(dm->dev->channel, &resp,
41510 sizeof(struct dm_hot_add_response),
41511 (unsigned long)NULL,
41512 @@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
41513 memset(&status, 0, sizeof(struct dm_status));
41514 status.hdr.type = DM_STATUS_REPORT;
41515 status.hdr.size = sizeof(struct dm_status);
41516 - status.hdr.trans_id = atomic_inc_return(&trans_id);
41517 + status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41518
41519 /*
41520 * The host expects the guest to report free memory.
41521 @@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
41522 * send the status. This can happen if we were interrupted
41523 * after we picked our transaction ID.
41524 */
41525 - if (status.hdr.trans_id != atomic_read(&trans_id))
41526 + if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41527 return;
41528
41529 vmbus_sendpacket(dm->dev->channel, &status,
41530 @@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
41531 */
41532
41533 do {
41534 - bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41535 + bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41536 ret = vmbus_sendpacket(dm_device.dev->channel,
41537 bl_resp,
41538 bl_resp->hdr.size,
41539 @@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41540
41541 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41542 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41543 - resp.hdr.trans_id = atomic_inc_return(&trans_id);
41544 + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41545 resp.hdr.size = sizeof(struct dm_unballoon_response);
41546
41547 vmbus_sendpacket(dm_device.dev->channel, &resp,
41548 @@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41549 memset(&version_req, 0, sizeof(struct dm_version_request));
41550 version_req.hdr.type = DM_VERSION_REQUEST;
41551 version_req.hdr.size = sizeof(struct dm_version_request);
41552 - version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41553 + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41554 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41555 version_req.is_last_attempt = 1;
41556
41557 @@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
41558 memset(&version_req, 0, sizeof(struct dm_version_request));
41559 version_req.hdr.type = DM_VERSION_REQUEST;
41560 version_req.hdr.size = sizeof(struct dm_version_request);
41561 - version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41562 + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41563 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41564 version_req.is_last_attempt = 0;
41565
41566 @@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
41567 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41568 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41569 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41570 - cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41571 + cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41572
41573 cap_msg.caps.cap_bits.balloon = 1;
41574 cap_msg.caps.cap_bits.hot_add = 1;
41575 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41576 index e055176..c22ff1f 100644
41577 --- a/drivers/hv/hyperv_vmbus.h
41578 +++ b/drivers/hv/hyperv_vmbus.h
41579 @@ -602,7 +602,7 @@ enum vmbus_connect_state {
41580 struct vmbus_connection {
41581 enum vmbus_connect_state conn_state;
41582
41583 - atomic_t next_gpadl_handle;
41584 + atomic_unchecked_t next_gpadl_handle;
41585
41586 /*
41587 * Represents channel interrupts. Each bit position represents a
41588 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41589 index 48aad4f..c768fb2 100644
41590 --- a/drivers/hv/vmbus_drv.c
41591 +++ b/drivers/hv/vmbus_drv.c
41592 @@ -846,10 +846,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41593 {
41594 int ret = 0;
41595
41596 - static atomic_t device_num = ATOMIC_INIT(0);
41597 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41598
41599 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41600 - atomic_inc_return(&device_num));
41601 + atomic_inc_return_unchecked(&device_num));
41602
41603 child_device_obj->device.bus = &hv_bus;
41604 child_device_obj->device.parent = &hv_acpi_dev->dev;
41605 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41606 index 6a34f7f..aa4c3a6 100644
41607 --- a/drivers/hwmon/acpi_power_meter.c
41608 +++ b/drivers/hwmon/acpi_power_meter.c
41609 @@ -117,7 +117,7 @@ struct sensor_template {
41610 struct device_attribute *devattr,
41611 const char *buf, size_t count);
41612 int index;
41613 -};
41614 +} __do_const;
41615
41616 /* Averaging interval */
41617 static int update_avg_interval(struct acpi_power_meter_resource *resource)
41618 @@ -632,7 +632,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
41619 struct sensor_template *attrs)
41620 {
41621 struct device *dev = &resource->acpi_dev->dev;
41622 - struct sensor_device_attribute *sensors =
41623 + sensor_device_attribute_no_const *sensors =
41624 &resource->sensors[resource->num_sensors];
41625 int res = 0;
41626
41627 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
41628 index 3288f13..71cfb4e 100644
41629 --- a/drivers/hwmon/applesmc.c
41630 +++ b/drivers/hwmon/applesmc.c
41631 @@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
41632 {
41633 struct applesmc_node_group *grp;
41634 struct applesmc_dev_attr *node;
41635 - struct attribute *attr;
41636 + attribute_no_const *attr;
41637 int ret, i;
41638
41639 for (grp = groups; grp->format; grp++) {
41640 diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
41641 index dafc63c..4abb96c 100644
41642 --- a/drivers/hwmon/asus_atk0110.c
41643 +++ b/drivers/hwmon/asus_atk0110.c
41644 @@ -151,10 +151,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
41645 struct atk_sensor_data {
41646 struct list_head list;
41647 struct atk_data *data;
41648 - struct device_attribute label_attr;
41649 - struct device_attribute input_attr;
41650 - struct device_attribute limit1_attr;
41651 - struct device_attribute limit2_attr;
41652 + device_attribute_no_const label_attr;
41653 + device_attribute_no_const input_attr;
41654 + device_attribute_no_const limit1_attr;
41655 + device_attribute_no_const limit2_attr;
41656 char label_attr_name[ATTR_NAME_SIZE];
41657 char input_attr_name[ATTR_NAME_SIZE];
41658 char limit1_attr_name[ATTR_NAME_SIZE];
41659 @@ -274,7 +274,7 @@ static ssize_t atk_name_show(struct device *dev,
41660 static struct device_attribute atk_name_attr =
41661 __ATTR(name, 0444, atk_name_show, NULL);
41662
41663 -static void atk_init_attribute(struct device_attribute *attr, char *name,
41664 +static void atk_init_attribute(device_attribute_no_const *attr, char *name,
41665 sysfs_show_func show)
41666 {
41667 sysfs_attr_init(&attr->attr);
41668 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
41669 index 9425098..7646cc5 100644
41670 --- a/drivers/hwmon/coretemp.c
41671 +++ b/drivers/hwmon/coretemp.c
41672 @@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
41673 return NOTIFY_OK;
41674 }
41675
41676 -static struct notifier_block coretemp_cpu_notifier __refdata = {
41677 +static struct notifier_block coretemp_cpu_notifier = {
41678 .notifier_call = coretemp_cpu_callback,
41679 };
41680
41681 diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
41682 index 632f1dc..57e6a58 100644
41683 --- a/drivers/hwmon/ibmaem.c
41684 +++ b/drivers/hwmon/ibmaem.c
41685 @@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
41686 struct aem_rw_sensor_template *rw)
41687 {
41688 struct device *dev = &data->pdev->dev;
41689 - struct sensor_device_attribute *sensors = data->sensors;
41690 + sensor_device_attribute_no_const *sensors = data->sensors;
41691 int err;
41692
41693 /* Set up read-only sensors */
41694 diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
41695 index 708081b..fe2d4ab 100644
41696 --- a/drivers/hwmon/iio_hwmon.c
41697 +++ b/drivers/hwmon/iio_hwmon.c
41698 @@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
41699 {
41700 struct device *dev = &pdev->dev;
41701 struct iio_hwmon_state *st;
41702 - struct sensor_device_attribute *a;
41703 + sensor_device_attribute_no_const *a;
41704 int ret, i;
41705 int in_i = 1, temp_i = 1, curr_i = 1;
41706 enum iio_chan_type type;
41707 diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
41708 index cf811c1..4c17110 100644
41709 --- a/drivers/hwmon/nct6775.c
41710 +++ b/drivers/hwmon/nct6775.c
41711 @@ -944,10 +944,10 @@ static struct attribute_group *
41712 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
41713 int repeat)
41714 {
41715 - struct attribute_group *group;
41716 + attribute_group_no_const *group;
41717 struct sensor_device_attr_u *su;
41718 - struct sensor_device_attribute *a;
41719 - struct sensor_device_attribute_2 *a2;
41720 + sensor_device_attribute_no_const *a;
41721 + sensor_device_attribute_2_no_const *a2;
41722 struct attribute **attrs;
41723 struct sensor_device_template **t;
41724 int i, count;
41725 diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
41726 index 3cbf66e..8c5cc2a 100644
41727 --- a/drivers/hwmon/pmbus/pmbus_core.c
41728 +++ b/drivers/hwmon/pmbus/pmbus_core.c
41729 @@ -782,7 +782,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
41730 return 0;
41731 }
41732
41733 -static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41734 +static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
41735 const char *name,
41736 umode_t mode,
41737 ssize_t (*show)(struct device *dev,
41738 @@ -799,7 +799,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41739 dev_attr->store = store;
41740 }
41741
41742 -static void pmbus_attr_init(struct sensor_device_attribute *a,
41743 +static void pmbus_attr_init(sensor_device_attribute_no_const *a,
41744 const char *name,
41745 umode_t mode,
41746 ssize_t (*show)(struct device *dev,
41747 @@ -821,7 +821,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
41748 u16 reg, u8 mask)
41749 {
41750 struct pmbus_boolean *boolean;
41751 - struct sensor_device_attribute *a;
41752 + sensor_device_attribute_no_const *a;
41753
41754 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
41755 if (!boolean)
41756 @@ -846,7 +846,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
41757 bool update, bool readonly)
41758 {
41759 struct pmbus_sensor *sensor;
41760 - struct device_attribute *a;
41761 + device_attribute_no_const *a;
41762
41763 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
41764 if (!sensor)
41765 @@ -877,7 +877,7 @@ static int pmbus_add_label(struct pmbus_data *data,
41766 const char *lstring, int index)
41767 {
41768 struct pmbus_label *label;
41769 - struct device_attribute *a;
41770 + device_attribute_no_const *a;
41771
41772 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
41773 if (!label)
41774 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
41775 index 97cd45a..ac54d8b 100644
41776 --- a/drivers/hwmon/sht15.c
41777 +++ b/drivers/hwmon/sht15.c
41778 @@ -169,7 +169,7 @@ struct sht15_data {
41779 int supply_uv;
41780 bool supply_uv_valid;
41781 struct work_struct update_supply_work;
41782 - atomic_t interrupt_handled;
41783 + atomic_unchecked_t interrupt_handled;
41784 };
41785
41786 /**
41787 @@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
41788 ret = gpio_direction_input(data->pdata->gpio_data);
41789 if (ret)
41790 return ret;
41791 - atomic_set(&data->interrupt_handled, 0);
41792 + atomic_set_unchecked(&data->interrupt_handled, 0);
41793
41794 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41795 if (gpio_get_value(data->pdata->gpio_data) == 0) {
41796 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
41797 /* Only relevant if the interrupt hasn't occurred. */
41798 - if (!atomic_read(&data->interrupt_handled))
41799 + if (!atomic_read_unchecked(&data->interrupt_handled))
41800 schedule_work(&data->read_work);
41801 }
41802 ret = wait_event_timeout(data->wait_queue,
41803 @@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
41804
41805 /* First disable the interrupt */
41806 disable_irq_nosync(irq);
41807 - atomic_inc(&data->interrupt_handled);
41808 + atomic_inc_unchecked(&data->interrupt_handled);
41809 /* Then schedule a reading work struct */
41810 if (data->state != SHT15_READING_NOTHING)
41811 schedule_work(&data->read_work);
41812 @@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
41813 * If not, then start the interrupt again - care here as could
41814 * have gone low in meantime so verify it hasn't!
41815 */
41816 - atomic_set(&data->interrupt_handled, 0);
41817 + atomic_set_unchecked(&data->interrupt_handled, 0);
41818 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41819 /* If still not occurred or another handler was scheduled */
41820 if (gpio_get_value(data->pdata->gpio_data)
41821 - || atomic_read(&data->interrupt_handled))
41822 + || atomic_read_unchecked(&data->interrupt_handled))
41823 return;
41824 }
41825
41826 diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
41827 index 38944e9..ae9e5ed 100644
41828 --- a/drivers/hwmon/via-cputemp.c
41829 +++ b/drivers/hwmon/via-cputemp.c
41830 @@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
41831 return NOTIFY_OK;
41832 }
41833
41834 -static struct notifier_block via_cputemp_cpu_notifier __refdata = {
41835 +static struct notifier_block via_cputemp_cpu_notifier = {
41836 .notifier_call = via_cputemp_cpu_callback,
41837 };
41838
41839 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
41840 index 07f01ac..d79ad3d 100644
41841 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
41842 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
41843 @@ -43,7 +43,7 @@
41844 extern struct i2c_adapter amd756_smbus;
41845
41846 static struct i2c_adapter *s4882_adapter;
41847 -static struct i2c_algorithm *s4882_algo;
41848 +static i2c_algorithm_no_const *s4882_algo;
41849
41850 /* Wrapper access functions for multiplexed SMBus */
41851 static DEFINE_MUTEX(amd756_lock);
41852 diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
41853 index 721f7eb..0fd2a09 100644
41854 --- a/drivers/i2c/busses/i2c-diolan-u2c.c
41855 +++ b/drivers/i2c/busses/i2c-diolan-u2c.c
41856 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
41857 /* usb layer */
41858
41859 /* Send command to device, and get response. */
41860 -static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41861 +static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41862 {
41863 int ret = 0;
41864 int actual;
41865 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
41866 index 2ca268d..c6acbdf 100644
41867 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
41868 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
41869 @@ -41,7 +41,7 @@
41870 extern struct i2c_adapter *nforce2_smbus;
41871
41872 static struct i2c_adapter *s4985_adapter;
41873 -static struct i2c_algorithm *s4985_algo;
41874 +static i2c_algorithm_no_const *s4985_algo;
41875
41876 /* Wrapper access functions for multiplexed SMBus */
41877 static DEFINE_MUTEX(nforce2_lock);
41878 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
41879 index 80b47e8..1a6040d9 100644
41880 --- a/drivers/i2c/i2c-dev.c
41881 +++ b/drivers/i2c/i2c-dev.c
41882 @@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
41883 break;
41884 }
41885
41886 - data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
41887 + data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
41888 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
41889 if (IS_ERR(rdwr_pa[i].buf)) {
41890 res = PTR_ERR(rdwr_pa[i].buf);
41891 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
41892 index 0b510ba..4fbb5085 100644
41893 --- a/drivers/ide/ide-cd.c
41894 +++ b/drivers/ide/ide-cd.c
41895 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
41896 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
41897 if ((unsigned long)buf & alignment
41898 || blk_rq_bytes(rq) & q->dma_pad_mask
41899 - || object_is_on_stack(buf))
41900 + || object_starts_on_stack(buf))
41901 drive->dma = 0;
41902 }
41903 }
41904 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
41905 index 18f72e3..3722327 100644
41906 --- a/drivers/iio/industrialio-core.c
41907 +++ b/drivers/iio/industrialio-core.c
41908 @@ -521,7 +521,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
41909 }
41910
41911 static
41912 -int __iio_device_attr_init(struct device_attribute *dev_attr,
41913 +int __iio_device_attr_init(device_attribute_no_const *dev_attr,
41914 const char *postfix,
41915 struct iio_chan_spec const *chan,
41916 ssize_t (*readfunc)(struct device *dev,
41917 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
41918 index f2ef7ef..743d02f 100644
41919 --- a/drivers/infiniband/core/cm.c
41920 +++ b/drivers/infiniband/core/cm.c
41921 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
41922
41923 struct cm_counter_group {
41924 struct kobject obj;
41925 - atomic_long_t counter[CM_ATTR_COUNT];
41926 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
41927 };
41928
41929 struct cm_counter_attribute {
41930 @@ -1392,7 +1392,7 @@ static void cm_dup_req_handler(struct cm_work *work,
41931 struct ib_mad_send_buf *msg = NULL;
41932 int ret;
41933
41934 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41935 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41936 counter[CM_REQ_COUNTER]);
41937
41938 /* Quick state check to discard duplicate REQs. */
41939 @@ -1776,7 +1776,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
41940 if (!cm_id_priv)
41941 return;
41942
41943 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41944 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41945 counter[CM_REP_COUNTER]);
41946 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
41947 if (ret)
41948 @@ -1943,7 +1943,7 @@ static int cm_rtu_handler(struct cm_work *work)
41949 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
41950 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
41951 spin_unlock_irq(&cm_id_priv->lock);
41952 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41953 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41954 counter[CM_RTU_COUNTER]);
41955 goto out;
41956 }
41957 @@ -2126,7 +2126,7 @@ static int cm_dreq_handler(struct cm_work *work)
41958 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
41959 dreq_msg->local_comm_id);
41960 if (!cm_id_priv) {
41961 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41962 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41963 counter[CM_DREQ_COUNTER]);
41964 cm_issue_drep(work->port, work->mad_recv_wc);
41965 return -EINVAL;
41966 @@ -2151,7 +2151,7 @@ static int cm_dreq_handler(struct cm_work *work)
41967 case IB_CM_MRA_REP_RCVD:
41968 break;
41969 case IB_CM_TIMEWAIT:
41970 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41971 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41972 counter[CM_DREQ_COUNTER]);
41973 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41974 goto unlock;
41975 @@ -2165,7 +2165,7 @@ static int cm_dreq_handler(struct cm_work *work)
41976 cm_free_msg(msg);
41977 goto deref;
41978 case IB_CM_DREQ_RCVD:
41979 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41980 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41981 counter[CM_DREQ_COUNTER]);
41982 goto unlock;
41983 default:
41984 @@ -2532,7 +2532,7 @@ static int cm_mra_handler(struct cm_work *work)
41985 ib_modify_mad(cm_id_priv->av.port->mad_agent,
41986 cm_id_priv->msg, timeout)) {
41987 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
41988 - atomic_long_inc(&work->port->
41989 + atomic_long_inc_unchecked(&work->port->
41990 counter_group[CM_RECV_DUPLICATES].
41991 counter[CM_MRA_COUNTER]);
41992 goto out;
41993 @@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
41994 break;
41995 case IB_CM_MRA_REQ_RCVD:
41996 case IB_CM_MRA_REP_RCVD:
41997 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41998 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41999 counter[CM_MRA_COUNTER]);
42000 /* fall through */
42001 default:
42002 @@ -2703,7 +2703,7 @@ static int cm_lap_handler(struct cm_work *work)
42003 case IB_CM_LAP_IDLE:
42004 break;
42005 case IB_CM_MRA_LAP_SENT:
42006 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42007 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42008 counter[CM_LAP_COUNTER]);
42009 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42010 goto unlock;
42011 @@ -2719,7 +2719,7 @@ static int cm_lap_handler(struct cm_work *work)
42012 cm_free_msg(msg);
42013 goto deref;
42014 case IB_CM_LAP_RCVD:
42015 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42016 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42017 counter[CM_LAP_COUNTER]);
42018 goto unlock;
42019 default:
42020 @@ -3003,7 +3003,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42021 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42022 if (cur_cm_id_priv) {
42023 spin_unlock_irq(&cm.lock);
42024 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42025 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42026 counter[CM_SIDR_REQ_COUNTER]);
42027 goto out; /* Duplicate message. */
42028 }
42029 @@ -3215,10 +3215,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42030 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42031 msg->retries = 1;
42032
42033 - atomic_long_add(1 + msg->retries,
42034 + atomic_long_add_unchecked(1 + msg->retries,
42035 &port->counter_group[CM_XMIT].counter[attr_index]);
42036 if (msg->retries)
42037 - atomic_long_add(msg->retries,
42038 + atomic_long_add_unchecked(msg->retries,
42039 &port->counter_group[CM_XMIT_RETRIES].
42040 counter[attr_index]);
42041
42042 @@ -3428,7 +3428,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42043 }
42044
42045 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42046 - atomic_long_inc(&port->counter_group[CM_RECV].
42047 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42048 counter[attr_id - CM_ATTR_ID_OFFSET]);
42049
42050 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42051 @@ -3633,7 +3633,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42052 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42053
42054 return sprintf(buf, "%ld\n",
42055 - atomic_long_read(&group->counter[cm_attr->index]));
42056 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42057 }
42058
42059 static const struct sysfs_ops cm_counter_ops = {
42060 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42061 index 9f5ad7c..588cd84 100644
42062 --- a/drivers/infiniband/core/fmr_pool.c
42063 +++ b/drivers/infiniband/core/fmr_pool.c
42064 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
42065
42066 struct task_struct *thread;
42067
42068 - atomic_t req_ser;
42069 - atomic_t flush_ser;
42070 + atomic_unchecked_t req_ser;
42071 + atomic_unchecked_t flush_ser;
42072
42073 wait_queue_head_t force_wait;
42074 };
42075 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42076 struct ib_fmr_pool *pool = pool_ptr;
42077
42078 do {
42079 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42080 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42081 ib_fmr_batch_release(pool);
42082
42083 - atomic_inc(&pool->flush_ser);
42084 + atomic_inc_unchecked(&pool->flush_ser);
42085 wake_up_interruptible(&pool->force_wait);
42086
42087 if (pool->flush_function)
42088 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42089 }
42090
42091 set_current_state(TASK_INTERRUPTIBLE);
42092 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42093 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42094 !kthread_should_stop())
42095 schedule();
42096 __set_current_state(TASK_RUNNING);
42097 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42098 pool->dirty_watermark = params->dirty_watermark;
42099 pool->dirty_len = 0;
42100 spin_lock_init(&pool->pool_lock);
42101 - atomic_set(&pool->req_ser, 0);
42102 - atomic_set(&pool->flush_ser, 0);
42103 + atomic_set_unchecked(&pool->req_ser, 0);
42104 + atomic_set_unchecked(&pool->flush_ser, 0);
42105 init_waitqueue_head(&pool->force_wait);
42106
42107 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42108 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42109 }
42110 spin_unlock_irq(&pool->pool_lock);
42111
42112 - serial = atomic_inc_return(&pool->req_ser);
42113 + serial = atomic_inc_return_unchecked(&pool->req_ser);
42114 wake_up_process(pool->thread);
42115
42116 if (wait_event_interruptible(pool->force_wait,
42117 - atomic_read(&pool->flush_ser) - serial >= 0))
42118 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42119 return -EINTR;
42120
42121 return 0;
42122 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42123 } else {
42124 list_add_tail(&fmr->list, &pool->dirty_list);
42125 if (++pool->dirty_len >= pool->dirty_watermark) {
42126 - atomic_inc(&pool->req_ser);
42127 + atomic_inc_unchecked(&pool->req_ser);
42128 wake_up_process(pool->thread);
42129 }
42130 }
42131 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42132 index 84e4500..2c9beeb 100644
42133 --- a/drivers/infiniband/hw/cxgb4/mem.c
42134 +++ b/drivers/infiniband/hw/cxgb4/mem.c
42135 @@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42136 int err;
42137 struct fw_ri_tpte tpt;
42138 u32 stag_idx;
42139 - static atomic_t key;
42140 + static atomic_unchecked_t key;
42141
42142 if (c4iw_fatal_error(rdev))
42143 return -EIO;
42144 @@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42145 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42146 rdev->stats.stag.max = rdev->stats.stag.cur;
42147 mutex_unlock(&rdev->stats.lock);
42148 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42149 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42150 }
42151 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42152 __func__, stag_state, type, pdid, stag_idx);
42153 diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
42154 index 644c2c7..ecf0879 100644
42155 --- a/drivers/infiniband/hw/ipath/ipath_dma.c
42156 +++ b/drivers/infiniband/hw/ipath/ipath_dma.c
42157 @@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
42158 }
42159
42160 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
42161 - ipath_mapping_error,
42162 - ipath_dma_map_single,
42163 - ipath_dma_unmap_single,
42164 - ipath_dma_map_page,
42165 - ipath_dma_unmap_page,
42166 - ipath_map_sg,
42167 - ipath_unmap_sg,
42168 - ipath_sg_dma_address,
42169 - ipath_sg_dma_len,
42170 - ipath_sync_single_for_cpu,
42171 - ipath_sync_single_for_device,
42172 - ipath_dma_alloc_coherent,
42173 - ipath_dma_free_coherent
42174 + .mapping_error = ipath_mapping_error,
42175 + .map_single = ipath_dma_map_single,
42176 + .unmap_single = ipath_dma_unmap_single,
42177 + .map_page = ipath_dma_map_page,
42178 + .unmap_page = ipath_dma_unmap_page,
42179 + .map_sg = ipath_map_sg,
42180 + .unmap_sg = ipath_unmap_sg,
42181 + .dma_address = ipath_sg_dma_address,
42182 + .dma_len = ipath_sg_dma_len,
42183 + .sync_single_for_cpu = ipath_sync_single_for_cpu,
42184 + .sync_single_for_device = ipath_sync_single_for_device,
42185 + .alloc_coherent = ipath_dma_alloc_coherent,
42186 + .free_coherent = ipath_dma_free_coherent
42187 };
42188 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42189 index 79b3dbc..96e5fcc 100644
42190 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
42191 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42192 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42193 struct ib_atomic_eth *ateth;
42194 struct ipath_ack_entry *e;
42195 u64 vaddr;
42196 - atomic64_t *maddr;
42197 + atomic64_unchecked_t *maddr;
42198 u64 sdata;
42199 u32 rkey;
42200 u8 next;
42201 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42202 IB_ACCESS_REMOTE_ATOMIC)))
42203 goto nack_acc_unlck;
42204 /* Perform atomic OP and save result. */
42205 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42206 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42207 sdata = be64_to_cpu(ateth->swap_data);
42208 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42209 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42210 - (u64) atomic64_add_return(sdata, maddr) - sdata :
42211 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42212 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42213 be64_to_cpu(ateth->compare_data),
42214 sdata);
42215 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42216 index 1f95bba..9530f87 100644
42217 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42218 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42219 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42220 unsigned long flags;
42221 struct ib_wc wc;
42222 u64 sdata;
42223 - atomic64_t *maddr;
42224 + atomic64_unchecked_t *maddr;
42225 enum ib_wc_status send_status;
42226
42227 /*
42228 @@ -382,11 +382,11 @@ again:
42229 IB_ACCESS_REMOTE_ATOMIC)))
42230 goto acc_err;
42231 /* Perform atomic OP and save result. */
42232 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42233 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42234 sdata = wqe->wr.wr.atomic.compare_add;
42235 *(u64 *) sqp->s_sge.sge.vaddr =
42236 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42237 - (u64) atomic64_add_return(sdata, maddr) - sdata :
42238 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42239 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42240 sdata, wqe->wr.wr.atomic.swap);
42241 goto send_comp;
42242 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42243 index f2a3f48..673ec79 100644
42244 --- a/drivers/infiniband/hw/mlx4/mad.c
42245 +++ b/drivers/infiniband/hw/mlx4/mad.c
42246 @@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42247
42248 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42249 {
42250 - return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42251 + return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42252 cpu_to_be64(0xff00000000000000LL);
42253 }
42254
42255 diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42256 index 25b2cdf..099ff97 100644
42257 --- a/drivers/infiniband/hw/mlx4/mcg.c
42258 +++ b/drivers/infiniband/hw/mlx4/mcg.c
42259 @@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42260 {
42261 char name[20];
42262
42263 - atomic_set(&ctx->tid, 0);
42264 + atomic_set_unchecked(&ctx->tid, 0);
42265 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42266 ctx->mcg_wq = create_singlethread_workqueue(name);
42267 if (!ctx->mcg_wq)
42268 diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42269 index 036b663..c9a8c73 100644
42270 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42271 +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42272 @@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
42273 struct list_head mcg_mgid0_list;
42274 struct workqueue_struct *mcg_wq;
42275 struct mlx4_ib_demux_pv_ctx **tun;
42276 - atomic_t tid;
42277 + atomic_unchecked_t tid;
42278 int flushing; /* flushing the work queue */
42279 };
42280
42281 diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42282 index 9d3e5c1..6f166df 100644
42283 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42284 +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42285 @@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42286 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42287 }
42288
42289 -int mthca_QUERY_FW(struct mthca_dev *dev)
42290 +int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42291 {
42292 struct mthca_mailbox *mailbox;
42293 u32 *outbox;
42294 @@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42295 CMD_TIME_CLASS_B);
42296 }
42297
42298 -int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42299 +int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42300 int num_mtt)
42301 {
42302 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42303 @@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42304 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42305 }
42306
42307 -int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42308 +int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42309 int eq_num)
42310 {
42311 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42312 @@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42313 CMD_TIME_CLASS_B);
42314 }
42315
42316 -int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42317 +int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42318 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42319 void *in_mad, void *response_mad)
42320 {
42321 diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42322 index 87897b9..7e79542 100644
42323 --- a/drivers/infiniband/hw/mthca/mthca_main.c
42324 +++ b/drivers/infiniband/hw/mthca/mthca_main.c
42325 @@ -692,7 +692,7 @@ err_close:
42326 return err;
42327 }
42328
42329 -static int mthca_setup_hca(struct mthca_dev *dev)
42330 +static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42331 {
42332 int err;
42333
42334 diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42335 index ed9a989..6aa5dc2 100644
42336 --- a/drivers/infiniband/hw/mthca/mthca_mr.c
42337 +++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42338 @@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42339 * through the bitmaps)
42340 */
42341
42342 -static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42343 +static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42344 {
42345 int o;
42346 int m;
42347 @@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42348 return key;
42349 }
42350
42351 -int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42352 +int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42353 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42354 {
42355 struct mthca_mailbox *mailbox;
42356 @@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42357 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42358 }
42359
42360 -int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42361 +int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42362 u64 *buffer_list, int buffer_size_shift,
42363 int list_len, u64 iova, u64 total_size,
42364 u32 access, struct mthca_mr *mr)
42365 diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42366 index 5b71d43..35a9e14 100644
42367 --- a/drivers/infiniband/hw/mthca/mthca_provider.c
42368 +++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42369 @@ -763,7 +763,7 @@ unlock:
42370 return 0;
42371 }
42372
42373 -static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42374 +static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42375 {
42376 struct mthca_dev *dev = to_mdev(ibcq->device);
42377 struct mthca_cq *cq = to_mcq(ibcq);
42378 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42379 index 4291410..d2ab1fb 100644
42380 --- a/drivers/infiniband/hw/nes/nes.c
42381 +++ b/drivers/infiniband/hw/nes/nes.c
42382 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42383 LIST_HEAD(nes_adapter_list);
42384 static LIST_HEAD(nes_dev_list);
42385
42386 -atomic_t qps_destroyed;
42387 +atomic_unchecked_t qps_destroyed;
42388
42389 static unsigned int ee_flsh_adapter;
42390 static unsigned int sysfs_nonidx_addr;
42391 @@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42392 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42393 struct nes_adapter *nesadapter = nesdev->nesadapter;
42394
42395 - atomic_inc(&qps_destroyed);
42396 + atomic_inc_unchecked(&qps_destroyed);
42397
42398 /* Free the control structures */
42399
42400 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42401 index 33cc589..3bd6538 100644
42402 --- a/drivers/infiniband/hw/nes/nes.h
42403 +++ b/drivers/infiniband/hw/nes/nes.h
42404 @@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
42405 extern unsigned int wqm_quanta;
42406 extern struct list_head nes_adapter_list;
42407
42408 -extern atomic_t cm_connects;
42409 -extern atomic_t cm_accepts;
42410 -extern atomic_t cm_disconnects;
42411 -extern atomic_t cm_closes;
42412 -extern atomic_t cm_connecteds;
42413 -extern atomic_t cm_connect_reqs;
42414 -extern atomic_t cm_rejects;
42415 -extern atomic_t mod_qp_timouts;
42416 -extern atomic_t qps_created;
42417 -extern atomic_t qps_destroyed;
42418 -extern atomic_t sw_qps_destroyed;
42419 +extern atomic_unchecked_t cm_connects;
42420 +extern atomic_unchecked_t cm_accepts;
42421 +extern atomic_unchecked_t cm_disconnects;
42422 +extern atomic_unchecked_t cm_closes;
42423 +extern atomic_unchecked_t cm_connecteds;
42424 +extern atomic_unchecked_t cm_connect_reqs;
42425 +extern atomic_unchecked_t cm_rejects;
42426 +extern atomic_unchecked_t mod_qp_timouts;
42427 +extern atomic_unchecked_t qps_created;
42428 +extern atomic_unchecked_t qps_destroyed;
42429 +extern atomic_unchecked_t sw_qps_destroyed;
42430 extern u32 mh_detected;
42431 extern u32 mh_pauses_sent;
42432 extern u32 cm_packets_sent;
42433 @@ -196,16 +196,16 @@ extern u32 cm_packets_created;
42434 extern u32 cm_packets_received;
42435 extern u32 cm_packets_dropped;
42436 extern u32 cm_packets_retrans;
42437 -extern atomic_t cm_listens_created;
42438 -extern atomic_t cm_listens_destroyed;
42439 +extern atomic_unchecked_t cm_listens_created;
42440 +extern atomic_unchecked_t cm_listens_destroyed;
42441 extern u32 cm_backlog_drops;
42442 -extern atomic_t cm_loopbacks;
42443 -extern atomic_t cm_nodes_created;
42444 -extern atomic_t cm_nodes_destroyed;
42445 -extern atomic_t cm_accel_dropped_pkts;
42446 -extern atomic_t cm_resets_recvd;
42447 -extern atomic_t pau_qps_created;
42448 -extern atomic_t pau_qps_destroyed;
42449 +extern atomic_unchecked_t cm_loopbacks;
42450 +extern atomic_unchecked_t cm_nodes_created;
42451 +extern atomic_unchecked_t cm_nodes_destroyed;
42452 +extern atomic_unchecked_t cm_accel_dropped_pkts;
42453 +extern atomic_unchecked_t cm_resets_recvd;
42454 +extern atomic_unchecked_t pau_qps_created;
42455 +extern atomic_unchecked_t pau_qps_destroyed;
42456
42457 extern u32 int_mod_timer_init;
42458 extern u32 int_mod_cq_depth_256;
42459 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42460 index 6b29249..461d143 100644
42461 --- a/drivers/infiniband/hw/nes/nes_cm.c
42462 +++ b/drivers/infiniband/hw/nes/nes_cm.c
42463 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
42464 u32 cm_packets_retrans;
42465 u32 cm_packets_created;
42466 u32 cm_packets_received;
42467 -atomic_t cm_listens_created;
42468 -atomic_t cm_listens_destroyed;
42469 +atomic_unchecked_t cm_listens_created;
42470 +atomic_unchecked_t cm_listens_destroyed;
42471 u32 cm_backlog_drops;
42472 -atomic_t cm_loopbacks;
42473 -atomic_t cm_nodes_created;
42474 -atomic_t cm_nodes_destroyed;
42475 -atomic_t cm_accel_dropped_pkts;
42476 -atomic_t cm_resets_recvd;
42477 +atomic_unchecked_t cm_loopbacks;
42478 +atomic_unchecked_t cm_nodes_created;
42479 +atomic_unchecked_t cm_nodes_destroyed;
42480 +atomic_unchecked_t cm_accel_dropped_pkts;
42481 +atomic_unchecked_t cm_resets_recvd;
42482
42483 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42484 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42485 @@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core);
42486 /* instance of function pointers for client API */
42487 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42488 static struct nes_cm_ops nes_cm_api = {
42489 - mini_cm_accelerated,
42490 - mini_cm_listen,
42491 - mini_cm_del_listen,
42492 - mini_cm_connect,
42493 - mini_cm_close,
42494 - mini_cm_accept,
42495 - mini_cm_reject,
42496 - mini_cm_recv_pkt,
42497 - mini_cm_dealloc_core,
42498 - mini_cm_get,
42499 - mini_cm_set
42500 + .accelerated = mini_cm_accelerated,
42501 + .listen = mini_cm_listen,
42502 + .stop_listener = mini_cm_del_listen,
42503 + .connect = mini_cm_connect,
42504 + .close = mini_cm_close,
42505 + .accept = mini_cm_accept,
42506 + .reject = mini_cm_reject,
42507 + .recv_pkt = mini_cm_recv_pkt,
42508 + .destroy_cm_core = mini_cm_dealloc_core,
42509 + .get = mini_cm_get,
42510 + .set = mini_cm_set
42511 };
42512
42513 static struct nes_cm_core *g_cm_core;
42514
42515 -atomic_t cm_connects;
42516 -atomic_t cm_accepts;
42517 -atomic_t cm_disconnects;
42518 -atomic_t cm_closes;
42519 -atomic_t cm_connecteds;
42520 -atomic_t cm_connect_reqs;
42521 -atomic_t cm_rejects;
42522 +atomic_unchecked_t cm_connects;
42523 +atomic_unchecked_t cm_accepts;
42524 +atomic_unchecked_t cm_disconnects;
42525 +atomic_unchecked_t cm_closes;
42526 +atomic_unchecked_t cm_connecteds;
42527 +atomic_unchecked_t cm_connect_reqs;
42528 +atomic_unchecked_t cm_rejects;
42529
42530 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42531 {
42532 @@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42533 kfree(listener);
42534 listener = NULL;
42535 ret = 0;
42536 - atomic_inc(&cm_listens_destroyed);
42537 + atomic_inc_unchecked(&cm_listens_destroyed);
42538 } else {
42539 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42540 }
42541 @@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42542 cm_node->rem_mac);
42543
42544 add_hte_node(cm_core, cm_node);
42545 - atomic_inc(&cm_nodes_created);
42546 + atomic_inc_unchecked(&cm_nodes_created);
42547
42548 return cm_node;
42549 }
42550 @@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42551 }
42552
42553 atomic_dec(&cm_core->node_cnt);
42554 - atomic_inc(&cm_nodes_destroyed);
42555 + atomic_inc_unchecked(&cm_nodes_destroyed);
42556 nesqp = cm_node->nesqp;
42557 if (nesqp) {
42558 nesqp->cm_node = NULL;
42559 @@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42560
42561 static void drop_packet(struct sk_buff *skb)
42562 {
42563 - atomic_inc(&cm_accel_dropped_pkts);
42564 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
42565 dev_kfree_skb_any(skb);
42566 }
42567
42568 @@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42569 {
42570
42571 int reset = 0; /* whether to send reset in case of err.. */
42572 - atomic_inc(&cm_resets_recvd);
42573 + atomic_inc_unchecked(&cm_resets_recvd);
42574 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42575 " refcnt=%d\n", cm_node, cm_node->state,
42576 atomic_read(&cm_node->ref_count));
42577 @@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42578 rem_ref_cm_node(cm_node->cm_core, cm_node);
42579 return NULL;
42580 }
42581 - atomic_inc(&cm_loopbacks);
42582 + atomic_inc_unchecked(&cm_loopbacks);
42583 loopbackremotenode->loopbackpartner = cm_node;
42584 loopbackremotenode->tcp_cntxt.rcv_wscale =
42585 NES_CM_DEFAULT_RCV_WND_SCALE;
42586 @@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42587 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42588 else {
42589 rem_ref_cm_node(cm_core, cm_node);
42590 - atomic_inc(&cm_accel_dropped_pkts);
42591 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
42592 dev_kfree_skb_any(skb);
42593 }
42594 break;
42595 @@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42596
42597 if ((cm_id) && (cm_id->event_handler)) {
42598 if (issue_disconn) {
42599 - atomic_inc(&cm_disconnects);
42600 + atomic_inc_unchecked(&cm_disconnects);
42601 cm_event.event = IW_CM_EVENT_DISCONNECT;
42602 cm_event.status = disconn_status;
42603 cm_event.local_addr = cm_id->local_addr;
42604 @@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42605 }
42606
42607 if (issue_close) {
42608 - atomic_inc(&cm_closes);
42609 + atomic_inc_unchecked(&cm_closes);
42610 nes_disconnect(nesqp, 1);
42611
42612 cm_id->provider_data = nesqp;
42613 @@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42614
42615 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
42616 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
42617 - atomic_inc(&cm_accepts);
42618 + atomic_inc_unchecked(&cm_accepts);
42619
42620 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
42621 netdev_refcnt_read(nesvnic->netdev));
42622 @@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
42623 struct nes_cm_core *cm_core;
42624 u8 *start_buff;
42625
42626 - atomic_inc(&cm_rejects);
42627 + atomic_inc_unchecked(&cm_rejects);
42628 cm_node = (struct nes_cm_node *)cm_id->provider_data;
42629 loopback = cm_node->loopbackpartner;
42630 cm_core = cm_node->cm_core;
42631 @@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42632 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
42633 ntohs(laddr->sin_port));
42634
42635 - atomic_inc(&cm_connects);
42636 + atomic_inc_unchecked(&cm_connects);
42637 nesqp->active_conn = 1;
42638
42639 /* cache the cm_id in the qp */
42640 @@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
42641 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
42642 return err;
42643 }
42644 - atomic_inc(&cm_listens_created);
42645 + atomic_inc_unchecked(&cm_listens_created);
42646 }
42647
42648 cm_id->add_ref(cm_id);
42649 @@ -3505,7 +3505,7 @@ static void cm_event_connected(struct nes_cm_event *event)
42650
42651 if (nesqp->destroyed)
42652 return;
42653 - atomic_inc(&cm_connecteds);
42654 + atomic_inc_unchecked(&cm_connecteds);
42655 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
42656 " local port 0x%04X. jiffies = %lu.\n",
42657 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
42658 @@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm_event *event)
42659
42660 cm_id->add_ref(cm_id);
42661 ret = cm_id->event_handler(cm_id, &cm_event);
42662 - atomic_inc(&cm_closes);
42663 + atomic_inc_unchecked(&cm_closes);
42664 cm_event.event = IW_CM_EVENT_CLOSE;
42665 cm_event.status = 0;
42666 cm_event.provider_data = cm_id->provider_data;
42667 @@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
42668 return;
42669 cm_id = cm_node->cm_id;
42670
42671 - atomic_inc(&cm_connect_reqs);
42672 + atomic_inc_unchecked(&cm_connect_reqs);
42673 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42674 cm_node, cm_id, jiffies);
42675
42676 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
42677 return;
42678 cm_id = cm_node->cm_id;
42679
42680 - atomic_inc(&cm_connect_reqs);
42681 + atomic_inc_unchecked(&cm_connect_reqs);
42682 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42683 cm_node, cm_id, jiffies);
42684
42685 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
42686 index 4166452..fc952c3 100644
42687 --- a/drivers/infiniband/hw/nes/nes_mgt.c
42688 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
42689 @@ -40,8 +40,8 @@
42690 #include "nes.h"
42691 #include "nes_mgt.h"
42692
42693 -atomic_t pau_qps_created;
42694 -atomic_t pau_qps_destroyed;
42695 +atomic_unchecked_t pau_qps_created;
42696 +atomic_unchecked_t pau_qps_destroyed;
42697
42698 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
42699 {
42700 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
42701 {
42702 struct sk_buff *skb;
42703 unsigned long flags;
42704 - atomic_inc(&pau_qps_destroyed);
42705 + atomic_inc_unchecked(&pau_qps_destroyed);
42706
42707 /* Free packets that have not yet been forwarded */
42708 /* Lock is acquired by skb_dequeue when removing the skb */
42709 @@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
42710 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
42711 skb_queue_head_init(&nesqp->pau_list);
42712 spin_lock_init(&nesqp->pau_lock);
42713 - atomic_inc(&pau_qps_created);
42714 + atomic_inc_unchecked(&pau_qps_created);
42715 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
42716 }
42717
42718 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
42719 index 49eb511..a774366 100644
42720 --- a/drivers/infiniband/hw/nes/nes_nic.c
42721 +++ b/drivers/infiniband/hw/nes/nes_nic.c
42722 @@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
42723 target_stat_values[++index] = mh_detected;
42724 target_stat_values[++index] = mh_pauses_sent;
42725 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
42726 - target_stat_values[++index] = atomic_read(&cm_connects);
42727 - target_stat_values[++index] = atomic_read(&cm_accepts);
42728 - target_stat_values[++index] = atomic_read(&cm_disconnects);
42729 - target_stat_values[++index] = atomic_read(&cm_connecteds);
42730 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
42731 - target_stat_values[++index] = atomic_read(&cm_rejects);
42732 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
42733 - target_stat_values[++index] = atomic_read(&qps_created);
42734 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
42735 - target_stat_values[++index] = atomic_read(&qps_destroyed);
42736 - target_stat_values[++index] = atomic_read(&cm_closes);
42737 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
42738 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
42739 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
42740 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
42741 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
42742 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
42743 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
42744 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
42745 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
42746 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
42747 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
42748 target_stat_values[++index] = cm_packets_sent;
42749 target_stat_values[++index] = cm_packets_bounced;
42750 target_stat_values[++index] = cm_packets_created;
42751 target_stat_values[++index] = cm_packets_received;
42752 target_stat_values[++index] = cm_packets_dropped;
42753 target_stat_values[++index] = cm_packets_retrans;
42754 - target_stat_values[++index] = atomic_read(&cm_listens_created);
42755 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
42756 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
42757 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
42758 target_stat_values[++index] = cm_backlog_drops;
42759 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
42760 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
42761 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
42762 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
42763 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
42764 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
42765 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
42766 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
42767 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
42768 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
42769 target_stat_values[++index] = nesadapter->free_4kpbl;
42770 target_stat_values[++index] = nesadapter->free_256pbl;
42771 target_stat_values[++index] = int_mod_timer_init;
42772 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
42773 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
42774 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
42775 - target_stat_values[++index] = atomic_read(&pau_qps_created);
42776 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
42777 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
42778 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
42779 }
42780
42781 /**
42782 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
42783 index 8308e36..ae0d3b5 100644
42784 --- a/drivers/infiniband/hw/nes/nes_verbs.c
42785 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
42786 @@ -46,9 +46,9 @@
42787
42788 #include <rdma/ib_umem.h>
42789
42790 -atomic_t mod_qp_timouts;
42791 -atomic_t qps_created;
42792 -atomic_t sw_qps_destroyed;
42793 +atomic_unchecked_t mod_qp_timouts;
42794 +atomic_unchecked_t qps_created;
42795 +atomic_unchecked_t sw_qps_destroyed;
42796
42797 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
42798
42799 @@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
42800 if (init_attr->create_flags)
42801 return ERR_PTR(-EINVAL);
42802
42803 - atomic_inc(&qps_created);
42804 + atomic_inc_unchecked(&qps_created);
42805 switch (init_attr->qp_type) {
42806 case IB_QPT_RC:
42807 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
42808 @@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
42809 struct iw_cm_event cm_event;
42810 int ret = 0;
42811
42812 - atomic_inc(&sw_qps_destroyed);
42813 + atomic_inc_unchecked(&sw_qps_destroyed);
42814 nesqp->destroyed = 1;
42815
42816 /* Blow away the connection if it exists. */
42817 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
42818 index 1946101..09766d2 100644
42819 --- a/drivers/infiniband/hw/qib/qib.h
42820 +++ b/drivers/infiniband/hw/qib/qib.h
42821 @@ -52,6 +52,7 @@
42822 #include <linux/kref.h>
42823 #include <linux/sched.h>
42824 #include <linux/kthread.h>
42825 +#include <linux/slab.h>
42826
42827 #include "qib_common.h"
42828 #include "qib_verbs.h"
42829 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
42830 index 24c41ba..102d71f 100644
42831 --- a/drivers/input/gameport/gameport.c
42832 +++ b/drivers/input/gameport/gameport.c
42833 @@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
42834 */
42835 static void gameport_init_port(struct gameport *gameport)
42836 {
42837 - static atomic_t gameport_no = ATOMIC_INIT(0);
42838 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
42839
42840 __module_get(THIS_MODULE);
42841
42842 mutex_init(&gameport->drv_mutex);
42843 device_initialize(&gameport->dev);
42844 dev_set_name(&gameport->dev, "gameport%lu",
42845 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
42846 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
42847 gameport->dev.bus = &gameport_bus;
42848 gameport->dev.release = gameport_release_port;
42849 if (gameport->parent)
42850 diff --git a/drivers/input/input.c b/drivers/input/input.c
42851 index d2965e4..f52b7d7 100644
42852 --- a/drivers/input/input.c
42853 +++ b/drivers/input/input.c
42854 @@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
42855 */
42856 struct input_dev *input_allocate_device(void)
42857 {
42858 - static atomic_t input_no = ATOMIC_INIT(0);
42859 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
42860 struct input_dev *dev;
42861
42862 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
42863 @@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(void)
42864 INIT_LIST_HEAD(&dev->node);
42865
42866 dev_set_name(&dev->dev, "input%ld",
42867 - (unsigned long) atomic_inc_return(&input_no) - 1);
42868 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
42869
42870 __module_get(THIS_MODULE);
42871 }
42872 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
42873 index 04c69af..5f92d00 100644
42874 --- a/drivers/input/joystick/sidewinder.c
42875 +++ b/drivers/input/joystick/sidewinder.c
42876 @@ -30,6 +30,7 @@
42877 #include <linux/kernel.h>
42878 #include <linux/module.h>
42879 #include <linux/slab.h>
42880 +#include <linux/sched.h>
42881 #include <linux/init.h>
42882 #include <linux/input.h>
42883 #include <linux/gameport.h>
42884 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
42885 index 75e3b10..fb390fd 100644
42886 --- a/drivers/input/joystick/xpad.c
42887 +++ b/drivers/input/joystick/xpad.c
42888 @@ -736,7 +736,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
42889
42890 static int xpad_led_probe(struct usb_xpad *xpad)
42891 {
42892 - static atomic_t led_seq = ATOMIC_INIT(0);
42893 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
42894 long led_no;
42895 struct xpad_led *led;
42896 struct led_classdev *led_cdev;
42897 @@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
42898 if (!led)
42899 return -ENOMEM;
42900
42901 - led_no = (long)atomic_inc_return(&led_seq) - 1;
42902 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
42903
42904 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
42905 led->xpad = xpad;
42906 diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
42907 index e204f26..8459f15 100644
42908 --- a/drivers/input/misc/ims-pcu.c
42909 +++ b/drivers/input/misc/ims-pcu.c
42910 @@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
42911
42912 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42913 {
42914 - static atomic_t device_no = ATOMIC_INIT(0);
42915 + static atomic_unchecked_t device_no = ATOMIC_INIT(0);
42916
42917 const struct ims_pcu_device_info *info;
42918 u8 device_id;
42919 @@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42920 }
42921
42922 /* Device appears to be operable, complete initialization */
42923 - pcu->device_no = atomic_inc_return(&device_no) - 1;
42924 + pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
42925
42926 error = ims_pcu_setup_backlight(pcu);
42927 if (error)
42928 diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
42929 index 2f0b39d..7370f13 100644
42930 --- a/drivers/input/mouse/psmouse.h
42931 +++ b/drivers/input/mouse/psmouse.h
42932 @@ -116,7 +116,7 @@ struct psmouse_attribute {
42933 ssize_t (*set)(struct psmouse *psmouse, void *data,
42934 const char *buf, size_t count);
42935 bool protect;
42936 -};
42937 +} __do_const;
42938 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
42939
42940 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
42941 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
42942 index 4c842c3..590b0bf 100644
42943 --- a/drivers/input/mousedev.c
42944 +++ b/drivers/input/mousedev.c
42945 @@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
42946
42947 spin_unlock_irq(&client->packet_lock);
42948
42949 - if (copy_to_user(buffer, data, count))
42950 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
42951 return -EFAULT;
42952
42953 return count;
42954 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
42955 index 8f4c4ab..5fc8a45 100644
42956 --- a/drivers/input/serio/serio.c
42957 +++ b/drivers/input/serio/serio.c
42958 @@ -505,7 +505,7 @@ static void serio_release_port(struct device *dev)
42959 */
42960 static void serio_init_port(struct serio *serio)
42961 {
42962 - static atomic_t serio_no = ATOMIC_INIT(0);
42963 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
42964
42965 __module_get(THIS_MODULE);
42966
42967 @@ -516,7 +516,7 @@ static void serio_init_port(struct serio *serio)
42968 mutex_init(&serio->drv_mutex);
42969 device_initialize(&serio->dev);
42970 dev_set_name(&serio->dev, "serio%ld",
42971 - (long)atomic_inc_return(&serio_no) - 1);
42972 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
42973 serio->dev.bus = &serio_bus;
42974 serio->dev.release = serio_release_port;
42975 serio->dev.groups = serio_device_attr_groups;
42976 diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
42977 index 59df2e7..8f1cafb 100644
42978 --- a/drivers/input/serio/serio_raw.c
42979 +++ b/drivers/input/serio/serio_raw.c
42980 @@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
42981
42982 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42983 {
42984 - static atomic_t serio_raw_no = ATOMIC_INIT(0);
42985 + static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
42986 struct serio_raw *serio_raw;
42987 int err;
42988
42989 @@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42990 }
42991
42992 snprintf(serio_raw->name, sizeof(serio_raw->name),
42993 - "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
42994 + "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
42995 kref_init(&serio_raw->kref);
42996 INIT_LIST_HEAD(&serio_raw->client_list);
42997 init_waitqueue_head(&serio_raw->wait);
42998 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
42999 index e5555fc..937986d 100644
43000 --- a/drivers/iommu/iommu.c
43001 +++ b/drivers/iommu/iommu.c
43002 @@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
43003 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
43004 {
43005 bus_register_notifier(bus, &iommu_bus_nb);
43006 - bus_for_each_dev(bus, NULL, ops, add_iommu_group);
43007 + bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
43008 }
43009
43010 /**
43011 diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43012 index 39f81ae..2660096 100644
43013 --- a/drivers/iommu/irq_remapping.c
43014 +++ b/drivers/iommu/irq_remapping.c
43015 @@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43016 void panic_if_irq_remap(const char *msg)
43017 {
43018 if (irq_remapping_enabled)
43019 - panic(msg);
43020 + panic("%s", msg);
43021 }
43022
43023 static void ir_ack_apic_edge(struct irq_data *data)
43024 @@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43025
43026 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43027 {
43028 - chip->irq_print_chip = ir_print_prefix;
43029 - chip->irq_ack = ir_ack_apic_edge;
43030 - chip->irq_eoi = ir_ack_apic_level;
43031 - chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43032 + pax_open_kernel();
43033 + *(void **)&chip->irq_print_chip = ir_print_prefix;
43034 + *(void **)&chip->irq_ack = ir_ack_apic_edge;
43035 + *(void **)&chip->irq_eoi = ir_ack_apic_level;
43036 + *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43037 + pax_close_kernel();
43038 }
43039
43040 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
43041 diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43042 index 341c601..e5f407e 100644
43043 --- a/drivers/irqchip/irq-gic.c
43044 +++ b/drivers/irqchip/irq-gic.c
43045 @@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43046 * Supported arch specific GIC irq extension.
43047 * Default make them NULL.
43048 */
43049 -struct irq_chip gic_arch_extn = {
43050 +irq_chip_no_const gic_arch_extn = {
43051 .irq_eoi = NULL,
43052 .irq_mask = NULL,
43053 .irq_unmask = NULL,
43054 @@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43055 chained_irq_exit(chip, desc);
43056 }
43057
43058 -static struct irq_chip gic_chip = {
43059 +static irq_chip_no_const gic_chip __read_only = {
43060 .name = "GIC",
43061 .irq_mask = gic_mask_irq,
43062 .irq_unmask = gic_unmask_irq,
43063 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43064 index ac6f72b..81150f2 100644
43065 --- a/drivers/isdn/capi/capi.c
43066 +++ b/drivers/isdn/capi/capi.c
43067 @@ -81,8 +81,8 @@ struct capiminor {
43068
43069 struct capi20_appl *ap;
43070 u32 ncci;
43071 - atomic_t datahandle;
43072 - atomic_t msgid;
43073 + atomic_unchecked_t datahandle;
43074 + atomic_unchecked_t msgid;
43075
43076 struct tty_port port;
43077 int ttyinstop;
43078 @@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43079 capimsg_setu16(s, 2, mp->ap->applid);
43080 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43081 capimsg_setu8 (s, 5, CAPI_RESP);
43082 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43083 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43084 capimsg_setu32(s, 8, mp->ncci);
43085 capimsg_setu16(s, 12, datahandle);
43086 }
43087 @@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43088 mp->outbytes -= len;
43089 spin_unlock_bh(&mp->outlock);
43090
43091 - datahandle = atomic_inc_return(&mp->datahandle);
43092 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43093 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43094 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43095 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43096 capimsg_setu16(skb->data, 2, mp->ap->applid);
43097 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43098 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43099 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43100 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43101 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43102 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43103 capimsg_setu16(skb->data, 16, len); /* Data length */
43104 diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43105 index c44950d..10ac276 100644
43106 --- a/drivers/isdn/gigaset/bas-gigaset.c
43107 +++ b/drivers/isdn/gigaset/bas-gigaset.c
43108 @@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43109
43110
43111 static const struct gigaset_ops gigops = {
43112 - gigaset_write_cmd,
43113 - gigaset_write_room,
43114 - gigaset_chars_in_buffer,
43115 - gigaset_brkchars,
43116 - gigaset_init_bchannel,
43117 - gigaset_close_bchannel,
43118 - gigaset_initbcshw,
43119 - gigaset_freebcshw,
43120 - gigaset_reinitbcshw,
43121 - gigaset_initcshw,
43122 - gigaset_freecshw,
43123 - gigaset_set_modem_ctrl,
43124 - gigaset_baud_rate,
43125 - gigaset_set_line_ctrl,
43126 - gigaset_isoc_send_skb,
43127 - gigaset_isoc_input,
43128 + .write_cmd = gigaset_write_cmd,
43129 + .write_room = gigaset_write_room,
43130 + .chars_in_buffer = gigaset_chars_in_buffer,
43131 + .brkchars = gigaset_brkchars,
43132 + .init_bchannel = gigaset_init_bchannel,
43133 + .close_bchannel = gigaset_close_bchannel,
43134 + .initbcshw = gigaset_initbcshw,
43135 + .freebcshw = gigaset_freebcshw,
43136 + .reinitbcshw = gigaset_reinitbcshw,
43137 + .initcshw = gigaset_initcshw,
43138 + .freecshw = gigaset_freecshw,
43139 + .set_modem_ctrl = gigaset_set_modem_ctrl,
43140 + .baud_rate = gigaset_baud_rate,
43141 + .set_line_ctrl = gigaset_set_line_ctrl,
43142 + .send_skb = gigaset_isoc_send_skb,
43143 + .handle_input = gigaset_isoc_input,
43144 };
43145
43146 /* bas_gigaset_init
43147 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43148 index 600c79b..3752bab 100644
43149 --- a/drivers/isdn/gigaset/interface.c
43150 +++ b/drivers/isdn/gigaset/interface.c
43151 @@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43152 }
43153 tty->driver_data = cs;
43154
43155 - ++cs->port.count;
43156 + atomic_inc(&cs->port.count);
43157
43158 - if (cs->port.count == 1) {
43159 + if (atomic_read(&cs->port.count) == 1) {
43160 tty_port_tty_set(&cs->port, tty);
43161 cs->port.low_latency = 1;
43162 }
43163 @@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43164
43165 if (!cs->connected)
43166 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43167 - else if (!cs->port.count)
43168 + else if (!atomic_read(&cs->port.count))
43169 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43170 - else if (!--cs->port.count)
43171 + else if (!atomic_dec_return(&cs->port.count))
43172 tty_port_tty_set(&cs->port, NULL);
43173
43174 mutex_unlock(&cs->mutex);
43175 diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43176 index 8c91fd5..14f13ce 100644
43177 --- a/drivers/isdn/gigaset/ser-gigaset.c
43178 +++ b/drivers/isdn/gigaset/ser-gigaset.c
43179 @@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43180 }
43181
43182 static const struct gigaset_ops ops = {
43183 - gigaset_write_cmd,
43184 - gigaset_write_room,
43185 - gigaset_chars_in_buffer,
43186 - gigaset_brkchars,
43187 - gigaset_init_bchannel,
43188 - gigaset_close_bchannel,
43189 - gigaset_initbcshw,
43190 - gigaset_freebcshw,
43191 - gigaset_reinitbcshw,
43192 - gigaset_initcshw,
43193 - gigaset_freecshw,
43194 - gigaset_set_modem_ctrl,
43195 - gigaset_baud_rate,
43196 - gigaset_set_line_ctrl,
43197 - gigaset_m10x_send_skb, /* asyncdata.c */
43198 - gigaset_m10x_input, /* asyncdata.c */
43199 + .write_cmd = gigaset_write_cmd,
43200 + .write_room = gigaset_write_room,
43201 + .chars_in_buffer = gigaset_chars_in_buffer,
43202 + .brkchars = gigaset_brkchars,
43203 + .init_bchannel = gigaset_init_bchannel,
43204 + .close_bchannel = gigaset_close_bchannel,
43205 + .initbcshw = gigaset_initbcshw,
43206 + .freebcshw = gigaset_freebcshw,
43207 + .reinitbcshw = gigaset_reinitbcshw,
43208 + .initcshw = gigaset_initcshw,
43209 + .freecshw = gigaset_freecshw,
43210 + .set_modem_ctrl = gigaset_set_modem_ctrl,
43211 + .baud_rate = gigaset_baud_rate,
43212 + .set_line_ctrl = gigaset_set_line_ctrl,
43213 + .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43214 + .handle_input = gigaset_m10x_input, /* asyncdata.c */
43215 };
43216
43217
43218 diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43219 index d0a41cb..b953e50 100644
43220 --- a/drivers/isdn/gigaset/usb-gigaset.c
43221 +++ b/drivers/isdn/gigaset/usb-gigaset.c
43222 @@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43223 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43224 memcpy(cs->hw.usb->bchars, buf, 6);
43225 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43226 - 0, 0, &buf, 6, 2000);
43227 + 0, 0, buf, 6, 2000);
43228 }
43229
43230 static void gigaset_freebcshw(struct bc_state *bcs)
43231 @@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43232 }
43233
43234 static const struct gigaset_ops ops = {
43235 - gigaset_write_cmd,
43236 - gigaset_write_room,
43237 - gigaset_chars_in_buffer,
43238 - gigaset_brkchars,
43239 - gigaset_init_bchannel,
43240 - gigaset_close_bchannel,
43241 - gigaset_initbcshw,
43242 - gigaset_freebcshw,
43243 - gigaset_reinitbcshw,
43244 - gigaset_initcshw,
43245 - gigaset_freecshw,
43246 - gigaset_set_modem_ctrl,
43247 - gigaset_baud_rate,
43248 - gigaset_set_line_ctrl,
43249 - gigaset_m10x_send_skb,
43250 - gigaset_m10x_input,
43251 + .write_cmd = gigaset_write_cmd,
43252 + .write_room = gigaset_write_room,
43253 + .chars_in_buffer = gigaset_chars_in_buffer,
43254 + .brkchars = gigaset_brkchars,
43255 + .init_bchannel = gigaset_init_bchannel,
43256 + .close_bchannel = gigaset_close_bchannel,
43257 + .initbcshw = gigaset_initbcshw,
43258 + .freebcshw = gigaset_freebcshw,
43259 + .reinitbcshw = gigaset_reinitbcshw,
43260 + .initcshw = gigaset_initcshw,
43261 + .freecshw = gigaset_freecshw,
43262 + .set_modem_ctrl = gigaset_set_modem_ctrl,
43263 + .baud_rate = gigaset_baud_rate,
43264 + .set_line_ctrl = gigaset_set_line_ctrl,
43265 + .send_skb = gigaset_m10x_send_skb,
43266 + .handle_input = gigaset_m10x_input,
43267 };
43268
43269 /*
43270 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43271 index 4d9b195..455075c 100644
43272 --- a/drivers/isdn/hardware/avm/b1.c
43273 +++ b/drivers/isdn/hardware/avm/b1.c
43274 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43275 }
43276 if (left) {
43277 if (t4file->user) {
43278 - if (copy_from_user(buf, dp, left))
43279 + if (left > sizeof buf || copy_from_user(buf, dp, left))
43280 return -EFAULT;
43281 } else {
43282 memcpy(buf, dp, left);
43283 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43284 }
43285 if (left) {
43286 if (config->user) {
43287 - if (copy_from_user(buf, dp, left))
43288 + if (left > sizeof buf || copy_from_user(buf, dp, left))
43289 return -EFAULT;
43290 } else {
43291 memcpy(buf, dp, left);
43292 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43293 index 9bb12ba..d4262f7 100644
43294 --- a/drivers/isdn/i4l/isdn_common.c
43295 +++ b/drivers/isdn/i4l/isdn_common.c
43296 @@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43297 } else
43298 return -EINVAL;
43299 case IIOCDBGVAR:
43300 + if (!capable(CAP_SYS_RAWIO))
43301 + return -EPERM;
43302 if (arg) {
43303 if (copy_to_user(argp, &dev, sizeof(ulong)))
43304 return -EFAULT;
43305 diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43306 index 91d5730..336523e 100644
43307 --- a/drivers/isdn/i4l/isdn_concap.c
43308 +++ b/drivers/isdn/i4l/isdn_concap.c
43309 @@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43310 }
43311
43312 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43313 - &isdn_concap_dl_data_req,
43314 - &isdn_concap_dl_connect_req,
43315 - &isdn_concap_dl_disconn_req
43316 + .data_req = &isdn_concap_dl_data_req,
43317 + .connect_req = &isdn_concap_dl_connect_req,
43318 + .disconn_req = &isdn_concap_dl_disconn_req
43319 };
43320
43321 /* The following should better go into a dedicated source file such that
43322 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43323 index 3c5f249..5fac4d0 100644
43324 --- a/drivers/isdn/i4l/isdn_tty.c
43325 +++ b/drivers/isdn/i4l/isdn_tty.c
43326 @@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43327
43328 #ifdef ISDN_DEBUG_MODEM_OPEN
43329 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43330 - port->count);
43331 + atomic_read(&port->count));
43332 #endif
43333 - port->count++;
43334 + atomic_inc(&port->count);
43335 port->tty = tty;
43336 /*
43337 * Start up serial port
43338 @@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43339 #endif
43340 return;
43341 }
43342 - if ((tty->count == 1) && (port->count != 1)) {
43343 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43344 /*
43345 * Uh, oh. tty->count is 1, which means that the tty
43346 * structure will be freed. Info->count should always
43347 @@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43348 * serial port won't be shutdown.
43349 */
43350 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43351 - "info->count is %d\n", port->count);
43352 - port->count = 1;
43353 + "info->count is %d\n", atomic_read(&port->count));
43354 + atomic_set(&port->count, 1);
43355 }
43356 - if (--port->count < 0) {
43357 + if (atomic_dec_return(&port->count) < 0) {
43358 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43359 - info->line, port->count);
43360 - port->count = 0;
43361 + info->line, atomic_read(&port->count));
43362 + atomic_set(&port->count, 0);
43363 }
43364 - if (port->count) {
43365 + if (atomic_read(&port->count)) {
43366 #ifdef ISDN_DEBUG_MODEM_OPEN
43367 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43368 #endif
43369 @@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43370 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43371 return;
43372 isdn_tty_shutdown(info);
43373 - port->count = 0;
43374 + atomic_set(&port->count, 0);
43375 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43376 port->tty = NULL;
43377 wake_up_interruptible(&port->open_wait);
43378 @@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43379 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43380 modem_info *info = &dev->mdm.info[i];
43381
43382 - if (info->port.count == 0)
43383 + if (atomic_read(&info->port.count) == 0)
43384 continue;
43385 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43386 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43387 diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43388 index e2d4e58..40cd045 100644
43389 --- a/drivers/isdn/i4l/isdn_x25iface.c
43390 +++ b/drivers/isdn/i4l/isdn_x25iface.c
43391 @@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43392
43393
43394 static struct concap_proto_ops ix25_pops = {
43395 - &isdn_x25iface_proto_new,
43396 - &isdn_x25iface_proto_del,
43397 - &isdn_x25iface_proto_restart,
43398 - &isdn_x25iface_proto_close,
43399 - &isdn_x25iface_xmit,
43400 - &isdn_x25iface_receive,
43401 - &isdn_x25iface_connect_ind,
43402 - &isdn_x25iface_disconn_ind
43403 + .proto_new = &isdn_x25iface_proto_new,
43404 + .proto_del = &isdn_x25iface_proto_del,
43405 + .restart = &isdn_x25iface_proto_restart,
43406 + .close = &isdn_x25iface_proto_close,
43407 + .encap_and_xmit = &isdn_x25iface_xmit,
43408 + .data_ind = &isdn_x25iface_receive,
43409 + .connect_ind = &isdn_x25iface_connect_ind,
43410 + .disconn_ind = &isdn_x25iface_disconn_ind
43411 };
43412
43413 /* error message helper function */
43414 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43415 index 53d487f..f020f41 100644
43416 --- a/drivers/isdn/icn/icn.c
43417 +++ b/drivers/isdn/icn/icn.c
43418 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43419 if (count > len)
43420 count = len;
43421 if (user) {
43422 - if (copy_from_user(msg, buf, count))
43423 + if (count > sizeof msg || copy_from_user(msg, buf, count))
43424 return -EFAULT;
43425 } else
43426 memcpy(msg, buf, count);
43427 diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43428 index a4f05c5..1433bc5 100644
43429 --- a/drivers/isdn/mISDN/dsp_cmx.c
43430 +++ b/drivers/isdn/mISDN/dsp_cmx.c
43431 @@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43432 static u16 dsp_count; /* last sample count */
43433 static int dsp_count_valid; /* if we have last sample count */
43434
43435 -void
43436 +void __intentional_overflow(-1)
43437 dsp_cmx_send(void *arg)
43438 {
43439 struct dsp_conf *conf;
43440 diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43441 index d93e245..e7ece6b 100644
43442 --- a/drivers/leds/leds-clevo-mail.c
43443 +++ b/drivers/leds/leds-clevo-mail.c
43444 @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43445 * detected as working, but in reality it is not) as low as
43446 * possible.
43447 */
43448 -static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43449 +static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43450 {
43451 .callback = clevo_mail_led_dmi_callback,
43452 .ident = "Clevo D410J",
43453 diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43454 index 5b8f938..b73d657 100644
43455 --- a/drivers/leds/leds-ss4200.c
43456 +++ b/drivers/leds/leds-ss4200.c
43457 @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43458 * detected as working, but in reality it is not) as low as
43459 * possible.
43460 */
43461 -static struct dmi_system_id nas_led_whitelist[] __initdata = {
43462 +static struct dmi_system_id nas_led_whitelist[] __initconst = {
43463 {
43464 .callback = ss4200_led_dmi_callback,
43465 .ident = "Intel SS4200-E",
43466 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43467 index 0bf1e4e..b4bf44e 100644
43468 --- a/drivers/lguest/core.c
43469 +++ b/drivers/lguest/core.c
43470 @@ -97,9 +97,17 @@ static __init int map_switcher(void)
43471 * The end address needs +1 because __get_vm_area allocates an
43472 * extra guard page, so we need space for that.
43473 */
43474 +
43475 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43476 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43477 + VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43478 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43479 +#else
43480 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43481 VM_ALLOC, switcher_addr, switcher_addr
43482 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43483 +#endif
43484 +
43485 if (!switcher_vma) {
43486 err = -ENOMEM;
43487 printk("lguest: could not map switcher pages high\n");
43488 @@ -124,7 +132,7 @@ static __init int map_switcher(void)
43489 * Now the Switcher is mapped at the right address, we can't fail!
43490 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43491 */
43492 - memcpy(switcher_vma->addr, start_switcher_text,
43493 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43494 end_switcher_text - start_switcher_text);
43495
43496 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43497 diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43498 index bfb39bb..08a603b 100644
43499 --- a/drivers/lguest/page_tables.c
43500 +++ b/drivers/lguest/page_tables.c
43501 @@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43502 /*:*/
43503
43504 #ifdef CONFIG_X86_PAE
43505 -static void release_pmd(pmd_t *spmd)
43506 +static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43507 {
43508 /* If the entry's not present, there's nothing to release. */
43509 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43510 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43511 index 922a1ac..9dd0c2a 100644
43512 --- a/drivers/lguest/x86/core.c
43513 +++ b/drivers/lguest/x86/core.c
43514 @@ -59,7 +59,7 @@ static struct {
43515 /* Offset from where switcher.S was compiled to where we've copied it */
43516 static unsigned long switcher_offset(void)
43517 {
43518 - return switcher_addr - (unsigned long)start_switcher_text;
43519 + return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
43520 }
43521
43522 /* This cpu's struct lguest_pages (after the Switcher text page) */
43523 @@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
43524 * These copies are pretty cheap, so we do them unconditionally: */
43525 /* Save the current Host top-level page directory.
43526 */
43527 +
43528 +#ifdef CONFIG_PAX_PER_CPU_PGD
43529 + pages->state.host_cr3 = read_cr3();
43530 +#else
43531 pages->state.host_cr3 = __pa(current->mm->pgd);
43532 +#endif
43533 +
43534 /*
43535 * Set up the Guest's page tables to see this CPU's pages (and no
43536 * other CPU's pages).
43537 @@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
43538 * compiled-in switcher code and the high-mapped copy we just made.
43539 */
43540 for (i = 0; i < IDT_ENTRIES; i++)
43541 - default_idt_entries[i] += switcher_offset();
43542 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
43543
43544 /*
43545 * Set up the Switcher's per-cpu areas.
43546 @@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
43547 * it will be undisturbed when we switch. To change %cs and jump we
43548 * need this structure to feed to Intel's "lcall" instruction.
43549 */
43550 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
43551 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
43552 lguest_entry.segment = LGUEST_CS;
43553
43554 /*
43555 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
43556 index 40634b0..4f5855e 100644
43557 --- a/drivers/lguest/x86/switcher_32.S
43558 +++ b/drivers/lguest/x86/switcher_32.S
43559 @@ -87,6 +87,7 @@
43560 #include <asm/page.h>
43561 #include <asm/segment.h>
43562 #include <asm/lguest.h>
43563 +#include <asm/processor-flags.h>
43564
43565 // We mark the start of the code to copy
43566 // It's placed in .text tho it's never run here
43567 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
43568 // Changes type when we load it: damn Intel!
43569 // For after we switch over our page tables
43570 // That entry will be read-only: we'd crash.
43571 +
43572 +#ifdef CONFIG_PAX_KERNEXEC
43573 + mov %cr0, %edx
43574 + xor $X86_CR0_WP, %edx
43575 + mov %edx, %cr0
43576 +#endif
43577 +
43578 movl $(GDT_ENTRY_TSS*8), %edx
43579 ltr %dx
43580
43581 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
43582 // Let's clear it again for our return.
43583 // The GDT descriptor of the Host
43584 // Points to the table after two "size" bytes
43585 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
43586 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
43587 // Clear "used" from type field (byte 5, bit 2)
43588 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
43589 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
43590 +
43591 +#ifdef CONFIG_PAX_KERNEXEC
43592 + mov %cr0, %eax
43593 + xor $X86_CR0_WP, %eax
43594 + mov %eax, %cr0
43595 +#endif
43596
43597 // Once our page table's switched, the Guest is live!
43598 // The Host fades as we run this final step.
43599 @@ -295,13 +309,12 @@ deliver_to_host:
43600 // I consulted gcc, and it gave
43601 // These instructions, which I gladly credit:
43602 leal (%edx,%ebx,8), %eax
43603 - movzwl (%eax),%edx
43604 - movl 4(%eax), %eax
43605 - xorw %ax, %ax
43606 - orl %eax, %edx
43607 + movl 4(%eax), %edx
43608 + movw (%eax), %dx
43609 // Now the address of the handler's in %edx
43610 // We call it now: its "iret" drops us home.
43611 - jmp *%edx
43612 + ljmp $__KERNEL_CS, $1f
43613 +1: jmp *%edx
43614
43615 // Every interrupt can come to us here
43616 // But we must truly tell each apart.
43617 diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
43618 index 9762f1b..3e79734 100644
43619 --- a/drivers/md/bcache/closure.h
43620 +++ b/drivers/md/bcache/closure.h
43621 @@ -483,7 +483,7 @@ static inline void closure_queue(struct closure *cl)
43622 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
43623 struct workqueue_struct *wq)
43624 {
43625 - BUG_ON(object_is_on_stack(cl));
43626 + BUG_ON(object_starts_on_stack(cl));
43627 closure_set_ip(cl);
43628 cl->fn = fn;
43629 cl->wq = wq;
43630 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
43631 index 12dc29b..1596277 100644
43632 --- a/drivers/md/bitmap.c
43633 +++ b/drivers/md/bitmap.c
43634 @@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
43635 chunk_kb ? "KB" : "B");
43636 if (bitmap->storage.file) {
43637 seq_printf(seq, ", file: ");
43638 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
43639 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
43640 }
43641
43642 seq_printf(seq, "\n");
43643 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
43644 index 5152142..623d141 100644
43645 --- a/drivers/md/dm-ioctl.c
43646 +++ b/drivers/md/dm-ioctl.c
43647 @@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
43648 cmd == DM_LIST_VERSIONS_CMD)
43649 return 0;
43650
43651 - if ((cmd == DM_DEV_CREATE_CMD)) {
43652 + if (cmd == DM_DEV_CREATE_CMD) {
43653 if (!*param->name) {
43654 DMWARN("name not supplied when creating device");
43655 return -EINVAL;
43656 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
43657 index 9584443..9fc9ac9 100644
43658 --- a/drivers/md/dm-raid1.c
43659 +++ b/drivers/md/dm-raid1.c
43660 @@ -40,7 +40,7 @@ enum dm_raid1_error {
43661
43662 struct mirror {
43663 struct mirror_set *ms;
43664 - atomic_t error_count;
43665 + atomic_unchecked_t error_count;
43666 unsigned long error_type;
43667 struct dm_dev *dev;
43668 sector_t offset;
43669 @@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
43670 struct mirror *m;
43671
43672 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
43673 - if (!atomic_read(&m->error_count))
43674 + if (!atomic_read_unchecked(&m->error_count))
43675 return m;
43676
43677 return NULL;
43678 @@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
43679 * simple way to tell if a device has encountered
43680 * errors.
43681 */
43682 - atomic_inc(&m->error_count);
43683 + atomic_inc_unchecked(&m->error_count);
43684
43685 if (test_and_set_bit(error_type, &m->error_type))
43686 return;
43687 @@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
43688 struct mirror *m = get_default_mirror(ms);
43689
43690 do {
43691 - if (likely(!atomic_read(&m->error_count)))
43692 + if (likely(!atomic_read_unchecked(&m->error_count)))
43693 return m;
43694
43695 if (m-- == ms->mirror)
43696 @@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
43697 {
43698 struct mirror *default_mirror = get_default_mirror(m->ms);
43699
43700 - return !atomic_read(&default_mirror->error_count);
43701 + return !atomic_read_unchecked(&default_mirror->error_count);
43702 }
43703
43704 static int mirror_available(struct mirror_set *ms, struct bio *bio)
43705 @@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
43706 */
43707 if (likely(region_in_sync(ms, region, 1)))
43708 m = choose_mirror(ms, bio->bi_sector);
43709 - else if (m && atomic_read(&m->error_count))
43710 + else if (m && atomic_read_unchecked(&m->error_count))
43711 m = NULL;
43712
43713 if (likely(m))
43714 @@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
43715 }
43716
43717 ms->mirror[mirror].ms = ms;
43718 - atomic_set(&(ms->mirror[mirror].error_count), 0);
43719 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
43720 ms->mirror[mirror].error_type = 0;
43721 ms->mirror[mirror].offset = offset;
43722
43723 @@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_target *ti)
43724 */
43725 static char device_status_char(struct mirror *m)
43726 {
43727 - if (!atomic_read(&(m->error_count)))
43728 + if (!atomic_read_unchecked(&(m->error_count)))
43729 return 'A';
43730
43731 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
43732 diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
43733 index 28a9012..9c0f6a5 100644
43734 --- a/drivers/md/dm-stats.c
43735 +++ b/drivers/md/dm-stats.c
43736 @@ -382,7 +382,7 @@ do_sync_free:
43737 synchronize_rcu_expedited();
43738 dm_stat_free(&s->rcu_head);
43739 } else {
43740 - ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
43741 + ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
43742 call_rcu(&s->rcu_head, dm_stat_free);
43743 }
43744 return 0;
43745 @@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
43746 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
43747 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
43748 ));
43749 - ACCESS_ONCE(last->last_sector) = end_sector;
43750 - ACCESS_ONCE(last->last_rw) = bi_rw;
43751 + ACCESS_ONCE_RW(last->last_sector) = end_sector;
43752 + ACCESS_ONCE_RW(last->last_rw) = bi_rw;
43753 }
43754
43755 rcu_read_lock();
43756 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
43757 index 73c1712..7347292 100644
43758 --- a/drivers/md/dm-stripe.c
43759 +++ b/drivers/md/dm-stripe.c
43760 @@ -21,7 +21,7 @@ struct stripe {
43761 struct dm_dev *dev;
43762 sector_t physical_start;
43763
43764 - atomic_t error_count;
43765 + atomic_unchecked_t error_count;
43766 };
43767
43768 struct stripe_c {
43769 @@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
43770 kfree(sc);
43771 return r;
43772 }
43773 - atomic_set(&(sc->stripe[i].error_count), 0);
43774 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
43775 }
43776
43777 ti->private = sc;
43778 @@ -327,7 +327,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
43779 DMEMIT("%d ", sc->stripes);
43780 for (i = 0; i < sc->stripes; i++) {
43781 DMEMIT("%s ", sc->stripe[i].dev->name);
43782 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
43783 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
43784 'D' : 'A';
43785 }
43786 buffer[i] = '\0';
43787 @@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
43788 */
43789 for (i = 0; i < sc->stripes; i++)
43790 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
43791 - atomic_inc(&(sc->stripe[i].error_count));
43792 - if (atomic_read(&(sc->stripe[i].error_count)) <
43793 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
43794 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
43795 DM_IO_ERROR_THRESHOLD)
43796 schedule_work(&sc->trigger_event);
43797 }
43798 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
43799 index 3ba6a38..b0fa9b0 100644
43800 --- a/drivers/md/dm-table.c
43801 +++ b/drivers/md/dm-table.c
43802 @@ -291,7 +291,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
43803 static int open_dev(struct dm_dev_internal *d, dev_t dev,
43804 struct mapped_device *md)
43805 {
43806 - static char *_claim_ptr = "I belong to device-mapper";
43807 + static char _claim_ptr[] = "I belong to device-mapper";
43808 struct block_device *bdev;
43809
43810 int r;
43811 @@ -359,7 +359,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
43812 if (!dev_size)
43813 return 0;
43814
43815 - if ((start >= dev_size) || (start + len > dev_size)) {
43816 + if ((start >= dev_size) || (len > dev_size - start)) {
43817 DMWARN("%s: %s too small for target: "
43818 "start=%llu, len=%llu, dev_size=%llu",
43819 dm_device_name(ti->table->md), bdevname(bdev, b),
43820 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
43821 index 3bb4506..56e20cc 100644
43822 --- a/drivers/md/dm-thin-metadata.c
43823 +++ b/drivers/md/dm-thin-metadata.c
43824 @@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43825 {
43826 pmd->info.tm = pmd->tm;
43827 pmd->info.levels = 2;
43828 - pmd->info.value_type.context = pmd->data_sm;
43829 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43830 pmd->info.value_type.size = sizeof(__le64);
43831 pmd->info.value_type.inc = data_block_inc;
43832 pmd->info.value_type.dec = data_block_dec;
43833 @@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43834
43835 pmd->bl_info.tm = pmd->tm;
43836 pmd->bl_info.levels = 1;
43837 - pmd->bl_info.value_type.context = pmd->data_sm;
43838 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43839 pmd->bl_info.value_type.size = sizeof(__le64);
43840 pmd->bl_info.value_type.inc = data_block_inc;
43841 pmd->bl_info.value_type.dec = data_block_dec;
43842 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
43843 index b49c762..c9503cf 100644
43844 --- a/drivers/md/dm.c
43845 +++ b/drivers/md/dm.c
43846 @@ -185,9 +185,9 @@ struct mapped_device {
43847 /*
43848 * Event handling.
43849 */
43850 - atomic_t event_nr;
43851 + atomic_unchecked_t event_nr;
43852 wait_queue_head_t eventq;
43853 - atomic_t uevent_seq;
43854 + atomic_unchecked_t uevent_seq;
43855 struct list_head uevent_list;
43856 spinlock_t uevent_lock; /* Protect access to uevent_list */
43857
43858 @@ -2021,8 +2021,8 @@ static struct mapped_device *alloc_dev(int minor)
43859 spin_lock_init(&md->deferred_lock);
43860 atomic_set(&md->holders, 1);
43861 atomic_set(&md->open_count, 0);
43862 - atomic_set(&md->event_nr, 0);
43863 - atomic_set(&md->uevent_seq, 0);
43864 + atomic_set_unchecked(&md->event_nr, 0);
43865 + atomic_set_unchecked(&md->uevent_seq, 0);
43866 INIT_LIST_HEAD(&md->uevent_list);
43867 spin_lock_init(&md->uevent_lock);
43868
43869 @@ -2176,7 +2176,7 @@ static void event_callback(void *context)
43870
43871 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
43872
43873 - atomic_inc(&md->event_nr);
43874 + atomic_inc_unchecked(&md->event_nr);
43875 wake_up(&md->eventq);
43876 }
43877
43878 @@ -2869,18 +2869,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
43879
43880 uint32_t dm_next_uevent_seq(struct mapped_device *md)
43881 {
43882 - return atomic_add_return(1, &md->uevent_seq);
43883 + return atomic_add_return_unchecked(1, &md->uevent_seq);
43884 }
43885
43886 uint32_t dm_get_event_nr(struct mapped_device *md)
43887 {
43888 - return atomic_read(&md->event_nr);
43889 + return atomic_read_unchecked(&md->event_nr);
43890 }
43891
43892 int dm_wait_event(struct mapped_device *md, int event_nr)
43893 {
43894 return wait_event_interruptible(md->eventq,
43895 - (event_nr != atomic_read(&md->event_nr)));
43896 + (event_nr != atomic_read_unchecked(&md->event_nr)));
43897 }
43898
43899 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
43900 diff --git a/drivers/md/md.c b/drivers/md/md.c
43901 index 369d919..ba7049c 100644
43902 --- a/drivers/md/md.c
43903 +++ b/drivers/md/md.c
43904 @@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
43905 * start build, activate spare
43906 */
43907 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
43908 -static atomic_t md_event_count;
43909 +static atomic_unchecked_t md_event_count;
43910 void md_new_event(struct mddev *mddev)
43911 {
43912 - atomic_inc(&md_event_count);
43913 + atomic_inc_unchecked(&md_event_count);
43914 wake_up(&md_event_waiters);
43915 }
43916 EXPORT_SYMBOL_GPL(md_new_event);
43917 @@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
43918 */
43919 static void md_new_event_inintr(struct mddev *mddev)
43920 {
43921 - atomic_inc(&md_event_count);
43922 + atomic_inc_unchecked(&md_event_count);
43923 wake_up(&md_event_waiters);
43924 }
43925
43926 @@ -1463,7 +1463,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
43927 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
43928 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
43929 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
43930 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43931 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43932
43933 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
43934 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
43935 @@ -1710,7 +1710,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
43936 else
43937 sb->resync_offset = cpu_to_le64(0);
43938
43939 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
43940 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
43941
43942 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
43943 sb->size = cpu_to_le64(mddev->dev_sectors);
43944 @@ -2715,7 +2715,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
43945 static ssize_t
43946 errors_show(struct md_rdev *rdev, char *page)
43947 {
43948 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
43949 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
43950 }
43951
43952 static ssize_t
43953 @@ -2724,7 +2724,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
43954 char *e;
43955 unsigned long n = simple_strtoul(buf, &e, 10);
43956 if (*buf && (*e == 0 || *e == '\n')) {
43957 - atomic_set(&rdev->corrected_errors, n);
43958 + atomic_set_unchecked(&rdev->corrected_errors, n);
43959 return len;
43960 }
43961 return -EINVAL;
43962 @@ -3173,8 +3173,8 @@ int md_rdev_init(struct md_rdev *rdev)
43963 rdev->sb_loaded = 0;
43964 rdev->bb_page = NULL;
43965 atomic_set(&rdev->nr_pending, 0);
43966 - atomic_set(&rdev->read_errors, 0);
43967 - atomic_set(&rdev->corrected_errors, 0);
43968 + atomic_set_unchecked(&rdev->read_errors, 0);
43969 + atomic_set_unchecked(&rdev->corrected_errors, 0);
43970
43971 INIT_LIST_HEAD(&rdev->same_set);
43972 init_waitqueue_head(&rdev->blocked_wait);
43973 @@ -7038,7 +7038,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
43974
43975 spin_unlock(&pers_lock);
43976 seq_printf(seq, "\n");
43977 - seq->poll_event = atomic_read(&md_event_count);
43978 + seq->poll_event = atomic_read_unchecked(&md_event_count);
43979 return 0;
43980 }
43981 if (v == (void*)2) {
43982 @@ -7141,7 +7141,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
43983 return error;
43984
43985 seq = file->private_data;
43986 - seq->poll_event = atomic_read(&md_event_count);
43987 + seq->poll_event = atomic_read_unchecked(&md_event_count);
43988 return error;
43989 }
43990
43991 @@ -7155,7 +7155,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
43992 /* always allow read */
43993 mask = POLLIN | POLLRDNORM;
43994
43995 - if (seq->poll_event != atomic_read(&md_event_count))
43996 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
43997 mask |= POLLERR | POLLPRI;
43998 return mask;
43999 }
44000 @@ -7199,7 +7199,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44001 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44002 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44003 (int)part_stat_read(&disk->part0, sectors[1]) -
44004 - atomic_read(&disk->sync_io);
44005 + atomic_read_unchecked(&disk->sync_io);
44006 /* sync IO will cause sync_io to increase before the disk_stats
44007 * as sync_io is counted when a request starts, and
44008 * disk_stats is counted when it completes.
44009 diff --git a/drivers/md/md.h b/drivers/md/md.h
44010 index 0095ec8..c89277a 100644
44011 --- a/drivers/md/md.h
44012 +++ b/drivers/md/md.h
44013 @@ -94,13 +94,13 @@ struct md_rdev {
44014 * only maintained for arrays that
44015 * support hot removal
44016 */
44017 - atomic_t read_errors; /* number of consecutive read errors that
44018 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
44019 * we have tried to ignore.
44020 */
44021 struct timespec last_read_error; /* monotonic time since our
44022 * last read error
44023 */
44024 - atomic_t corrected_errors; /* number of corrected read errors,
44025 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44026 * for reporting to userspace and storing
44027 * in superblock.
44028 */
44029 @@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44030
44031 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44032 {
44033 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44034 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44035 }
44036
44037 struct md_personality
44038 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44039 index 3e6d115..ffecdeb 100644
44040 --- a/drivers/md/persistent-data/dm-space-map.h
44041 +++ b/drivers/md/persistent-data/dm-space-map.h
44042 @@ -71,6 +71,7 @@ struct dm_space_map {
44043 dm_sm_threshold_fn fn,
44044 void *context);
44045 };
44046 +typedef struct dm_space_map __no_const dm_space_map_no_const;
44047
44048 /*----------------------------------------------------------------*/
44049
44050 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44051 index 63b2e8d..225f16b 100644
44052 --- a/drivers/md/raid1.c
44053 +++ b/drivers/md/raid1.c
44054 @@ -1921,7 +1921,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44055 if (r1_sync_page_io(rdev, sect, s,
44056 bio->bi_io_vec[idx].bv_page,
44057 READ) != 0)
44058 - atomic_add(s, &rdev->corrected_errors);
44059 + atomic_add_unchecked(s, &rdev->corrected_errors);
44060 }
44061 sectors -= s;
44062 sect += s;
44063 @@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44064 test_bit(In_sync, &rdev->flags)) {
44065 if (r1_sync_page_io(rdev, sect, s,
44066 conf->tmppage, READ)) {
44067 - atomic_add(s, &rdev->corrected_errors);
44068 + atomic_add_unchecked(s, &rdev->corrected_errors);
44069 printk(KERN_INFO
44070 "md/raid1:%s: read error corrected "
44071 "(%d sectors at %llu on %s)\n",
44072 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44073 index 06eeb99..770613e 100644
44074 --- a/drivers/md/raid10.c
44075 +++ b/drivers/md/raid10.c
44076 @@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bio, int error)
44077 /* The write handler will notice the lack of
44078 * R10BIO_Uptodate and record any errors etc
44079 */
44080 - atomic_add(r10_bio->sectors,
44081 + atomic_add_unchecked(r10_bio->sectors,
44082 &conf->mirrors[d].rdev->corrected_errors);
44083
44084 /* for reconstruct, we always reschedule after a read.
44085 @@ -2321,7 +2321,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44086 {
44087 struct timespec cur_time_mon;
44088 unsigned long hours_since_last;
44089 - unsigned int read_errors = atomic_read(&rdev->read_errors);
44090 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44091
44092 ktime_get_ts(&cur_time_mon);
44093
44094 @@ -2343,9 +2343,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44095 * overflowing the shift of read_errors by hours_since_last.
44096 */
44097 if (hours_since_last >= 8 * sizeof(read_errors))
44098 - atomic_set(&rdev->read_errors, 0);
44099 + atomic_set_unchecked(&rdev->read_errors, 0);
44100 else
44101 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44102 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44103 }
44104
44105 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44106 @@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44107 return;
44108
44109 check_decay_read_errors(mddev, rdev);
44110 - atomic_inc(&rdev->read_errors);
44111 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
44112 + atomic_inc_unchecked(&rdev->read_errors);
44113 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44114 char b[BDEVNAME_SIZE];
44115 bdevname(rdev->bdev, b);
44116
44117 @@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44118 "md/raid10:%s: %s: Raid device exceeded "
44119 "read_error threshold [cur %d:max %d]\n",
44120 mdname(mddev), b,
44121 - atomic_read(&rdev->read_errors), max_read_errors);
44122 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44123 printk(KERN_NOTICE
44124 "md/raid10:%s: %s: Failing raid device\n",
44125 mdname(mddev), b);
44126 @@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44127 sect +
44128 choose_data_offset(r10_bio, rdev)),
44129 bdevname(rdev->bdev, b));
44130 - atomic_add(s, &rdev->corrected_errors);
44131 + atomic_add_unchecked(s, &rdev->corrected_errors);
44132 }
44133
44134 rdev_dec_pending(rdev, mddev);
44135 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44136 index 48cdec8..c7726b1 100644
44137 --- a/drivers/md/raid5.c
44138 +++ b/drivers/md/raid5.c
44139 @@ -1991,21 +1991,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44140 mdname(conf->mddev), STRIPE_SECTORS,
44141 (unsigned long long)s,
44142 bdevname(rdev->bdev, b));
44143 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44144 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44145 clear_bit(R5_ReadError, &sh->dev[i].flags);
44146 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44147 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44148 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44149
44150 - if (atomic_read(&rdev->read_errors))
44151 - atomic_set(&rdev->read_errors, 0);
44152 + if (atomic_read_unchecked(&rdev->read_errors))
44153 + atomic_set_unchecked(&rdev->read_errors, 0);
44154 } else {
44155 const char *bdn = bdevname(rdev->bdev, b);
44156 int retry = 0;
44157 int set_bad = 0;
44158
44159 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44160 - atomic_inc(&rdev->read_errors);
44161 + atomic_inc_unchecked(&rdev->read_errors);
44162 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44163 printk_ratelimited(
44164 KERN_WARNING
44165 @@ -2033,7 +2033,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44166 mdname(conf->mddev),
44167 (unsigned long long)s,
44168 bdn);
44169 - } else if (atomic_read(&rdev->read_errors)
44170 + } else if (atomic_read_unchecked(&rdev->read_errors)
44171 > conf->max_nr_stripes)
44172 printk(KERN_WARNING
44173 "md/raid:%s: Too many read errors, failing device %s.\n",
44174 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44175 index 983db75..ef9248c 100644
44176 --- a/drivers/media/dvb-core/dvbdev.c
44177 +++ b/drivers/media/dvb-core/dvbdev.c
44178 @@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44179 const struct dvb_device *template, void *priv, int type)
44180 {
44181 struct dvb_device *dvbdev;
44182 - struct file_operations *dvbdevfops;
44183 + file_operations_no_const *dvbdevfops;
44184 struct device *clsdev;
44185 int minor;
44186 int id;
44187 diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44188 index 9b6c3bb..baeb5c7 100644
44189 --- a/drivers/media/dvb-frontends/dib3000.h
44190 +++ b/drivers/media/dvb-frontends/dib3000.h
44191 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44192 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44193 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44194 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44195 -};
44196 +} __no_const;
44197
44198 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44199 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44200 diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44201 index ed8cb90..5ef7f79 100644
44202 --- a/drivers/media/pci/cx88/cx88-video.c
44203 +++ b/drivers/media/pci/cx88/cx88-video.c
44204 @@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44205
44206 /* ------------------------------------------------------------------ */
44207
44208 -static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44209 -static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44210 -static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44211 +static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44212 +static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44213 +static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44214
44215 module_param_array(video_nr, int, NULL, 0444);
44216 module_param_array(vbi_nr, int, NULL, 0444);
44217 diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44218 index 802642d..5534900 100644
44219 --- a/drivers/media/pci/ivtv/ivtv-driver.c
44220 +++ b/drivers/media/pci/ivtv/ivtv-driver.c
44221 @@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44222 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44223
44224 /* ivtv instance counter */
44225 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
44226 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44227
44228 /* Parameter declarations */
44229 static int cardtype[IVTV_MAX_CARDS];
44230 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44231 index dfd0a21..6bbb465 100644
44232 --- a/drivers/media/platform/omap/omap_vout.c
44233 +++ b/drivers/media/platform/omap/omap_vout.c
44234 @@ -63,7 +63,6 @@ enum omap_vout_channels {
44235 OMAP_VIDEO2,
44236 };
44237
44238 -static struct videobuf_queue_ops video_vbq_ops;
44239 /* Variables configurable through module params*/
44240 static u32 video1_numbuffers = 3;
44241 static u32 video2_numbuffers = 3;
44242 @@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
44243 {
44244 struct videobuf_queue *q;
44245 struct omap_vout_device *vout = NULL;
44246 + static struct videobuf_queue_ops video_vbq_ops = {
44247 + .buf_setup = omap_vout_buffer_setup,
44248 + .buf_prepare = omap_vout_buffer_prepare,
44249 + .buf_release = omap_vout_buffer_release,
44250 + .buf_queue = omap_vout_buffer_queue,
44251 + };
44252
44253 vout = video_drvdata(file);
44254 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44255 @@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
44256 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44257
44258 q = &vout->vbq;
44259 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44260 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44261 - video_vbq_ops.buf_release = omap_vout_buffer_release;
44262 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44263 spin_lock_init(&vout->vbq_lock);
44264
44265 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
44266 diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44267 index fb2acc5..a2fcbdc4 100644
44268 --- a/drivers/media/platform/s5p-tv/mixer.h
44269 +++ b/drivers/media/platform/s5p-tv/mixer.h
44270 @@ -156,7 +156,7 @@ struct mxr_layer {
44271 /** layer index (unique identifier) */
44272 int idx;
44273 /** callbacks for layer methods */
44274 - struct mxr_layer_ops ops;
44275 + struct mxr_layer_ops *ops;
44276 /** format array */
44277 const struct mxr_format **fmt_array;
44278 /** size of format array */
44279 diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44280 index 74344c7..a39e70e 100644
44281 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44282 +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44283 @@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44284 {
44285 struct mxr_layer *layer;
44286 int ret;
44287 - struct mxr_layer_ops ops = {
44288 + static struct mxr_layer_ops ops = {
44289 .release = mxr_graph_layer_release,
44290 .buffer_set = mxr_graph_buffer_set,
44291 .stream_set = mxr_graph_stream_set,
44292 diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44293 index b713403..53cb5ad 100644
44294 --- a/drivers/media/platform/s5p-tv/mixer_reg.c
44295 +++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44296 @@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44297 layer->update_buf = next;
44298 }
44299
44300 - layer->ops.buffer_set(layer, layer->update_buf);
44301 + layer->ops->buffer_set(layer, layer->update_buf);
44302
44303 if (done && done != layer->shadow_buf)
44304 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44305 diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44306 index 81b97db..b089ccd 100644
44307 --- a/drivers/media/platform/s5p-tv/mixer_video.c
44308 +++ b/drivers/media/platform/s5p-tv/mixer_video.c
44309 @@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44310 layer->geo.src.height = layer->geo.src.full_height;
44311
44312 mxr_geometry_dump(mdev, &layer->geo);
44313 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44314 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44315 mxr_geometry_dump(mdev, &layer->geo);
44316 }
44317
44318 @@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44319 layer->geo.dst.full_width = mbus_fmt.width;
44320 layer->geo.dst.full_height = mbus_fmt.height;
44321 layer->geo.dst.field = mbus_fmt.field;
44322 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44323 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44324
44325 mxr_geometry_dump(mdev, &layer->geo);
44326 }
44327 @@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44328 /* set source size to highest accepted value */
44329 geo->src.full_width = max(geo->dst.full_width, pix->width);
44330 geo->src.full_height = max(geo->dst.full_height, pix->height);
44331 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44332 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44333 mxr_geometry_dump(mdev, &layer->geo);
44334 /* set cropping to total visible screen */
44335 geo->src.width = pix->width;
44336 @@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44337 geo->src.x_offset = 0;
44338 geo->src.y_offset = 0;
44339 /* assure consistency of geometry */
44340 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44341 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44342 mxr_geometry_dump(mdev, &layer->geo);
44343 /* set full size to lowest possible value */
44344 geo->src.full_width = 0;
44345 geo->src.full_height = 0;
44346 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44347 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44348 mxr_geometry_dump(mdev, &layer->geo);
44349
44350 /* returning results */
44351 @@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44352 target->width = s->r.width;
44353 target->height = s->r.height;
44354
44355 - layer->ops.fix_geometry(layer, stage, s->flags);
44356 + layer->ops->fix_geometry(layer, stage, s->flags);
44357
44358 /* retrieve update selection rectangle */
44359 res.left = target->x_offset;
44360 @@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44361 mxr_output_get(mdev);
44362
44363 mxr_layer_update_output(layer);
44364 - layer->ops.format_set(layer);
44365 + layer->ops->format_set(layer);
44366 /* enabling layer in hardware */
44367 spin_lock_irqsave(&layer->enq_slock, flags);
44368 layer->state = MXR_LAYER_STREAMING;
44369 spin_unlock_irqrestore(&layer->enq_slock, flags);
44370
44371 - layer->ops.stream_set(layer, MXR_ENABLE);
44372 + layer->ops->stream_set(layer, MXR_ENABLE);
44373 mxr_streamer_get(mdev);
44374
44375 return 0;
44376 @@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
44377 spin_unlock_irqrestore(&layer->enq_slock, flags);
44378
44379 /* disabling layer in hardware */
44380 - layer->ops.stream_set(layer, MXR_DISABLE);
44381 + layer->ops->stream_set(layer, MXR_DISABLE);
44382 /* remove one streamer */
44383 mxr_streamer_put(mdev);
44384 /* allow changes in output configuration */
44385 @@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
44386
44387 void mxr_layer_release(struct mxr_layer *layer)
44388 {
44389 - if (layer->ops.release)
44390 - layer->ops.release(layer);
44391 + if (layer->ops->release)
44392 + layer->ops->release(layer);
44393 }
44394
44395 void mxr_base_layer_release(struct mxr_layer *layer)
44396 @@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
44397
44398 layer->mdev = mdev;
44399 layer->idx = idx;
44400 - layer->ops = *ops;
44401 + layer->ops = ops;
44402
44403 spin_lock_init(&layer->enq_slock);
44404 INIT_LIST_HEAD(&layer->enq_list);
44405 diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44406 index c9388c4..ce71ece 100644
44407 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44408 +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44409 @@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
44410 {
44411 struct mxr_layer *layer;
44412 int ret;
44413 - struct mxr_layer_ops ops = {
44414 + static struct mxr_layer_ops ops = {
44415 .release = mxr_vp_layer_release,
44416 .buffer_set = mxr_vp_buffer_set,
44417 .stream_set = mxr_vp_stream_set,
44418 diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
44419 index 2d4e73b..8b4d5b6 100644
44420 --- a/drivers/media/platform/vivi.c
44421 +++ b/drivers/media/platform/vivi.c
44422 @@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
44423 MODULE_LICENSE("Dual BSD/GPL");
44424 MODULE_VERSION(VIVI_VERSION);
44425
44426 -static unsigned video_nr = -1;
44427 -module_param(video_nr, uint, 0644);
44428 +static int video_nr = -1;
44429 +module_param(video_nr, int, 0644);
44430 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
44431
44432 static unsigned n_devs = 1;
44433 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
44434 index 545c04c..a14bded 100644
44435 --- a/drivers/media/radio/radio-cadet.c
44436 +++ b/drivers/media/radio/radio-cadet.c
44437 @@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
44438 unsigned char readbuf[RDS_BUFFER];
44439 int i = 0;
44440
44441 + if (count > RDS_BUFFER)
44442 + return -EFAULT;
44443 mutex_lock(&dev->lock);
44444 if (dev->rdsstat == 0)
44445 cadet_start_rds(dev);
44446 @@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
44447 while (i < count && dev->rdsin != dev->rdsout)
44448 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
44449
44450 - if (i && copy_to_user(data, readbuf, i))
44451 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
44452 i = -EFAULT;
44453 unlock:
44454 mutex_unlock(&dev->lock);
44455 diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
44456 index 5236035..c622c74 100644
44457 --- a/drivers/media/radio/radio-maxiradio.c
44458 +++ b/drivers/media/radio/radio-maxiradio.c
44459 @@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
44460 /* TEA5757 pin mappings */
44461 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
44462
44463 -static atomic_t maxiradio_instance = ATOMIC_INIT(0);
44464 +static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
44465
44466 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
44467 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
44468 diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
44469 index 050b3bb..79f62b9 100644
44470 --- a/drivers/media/radio/radio-shark.c
44471 +++ b/drivers/media/radio/radio-shark.c
44472 @@ -79,7 +79,7 @@ struct shark_device {
44473 u32 last_val;
44474 };
44475
44476 -static atomic_t shark_instance = ATOMIC_INIT(0);
44477 +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
44478
44479 static void shark_write_val(struct snd_tea575x *tea, u32 val)
44480 {
44481 diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
44482 index 8654e0d..0608a64 100644
44483 --- a/drivers/media/radio/radio-shark2.c
44484 +++ b/drivers/media/radio/radio-shark2.c
44485 @@ -74,7 +74,7 @@ struct shark_device {
44486 u8 *transfer_buffer;
44487 };
44488
44489 -static atomic_t shark_instance = ATOMIC_INIT(0);
44490 +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
44491
44492 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
44493 {
44494 diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
44495 index 2fd9009..278cc1e 100644
44496 --- a/drivers/media/radio/radio-si476x.c
44497 +++ b/drivers/media/radio/radio-si476x.c
44498 @@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
44499 struct si476x_radio *radio;
44500 struct v4l2_ctrl *ctrl;
44501
44502 - static atomic_t instance = ATOMIC_INIT(0);
44503 + static atomic_unchecked_t instance = ATOMIC_INIT(0);
44504
44505 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
44506 if (!radio)
44507 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
44508 index 46da365..3ba4206 100644
44509 --- a/drivers/media/rc/rc-main.c
44510 +++ b/drivers/media/rc/rc-main.c
44511 @@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
44512 int rc_register_device(struct rc_dev *dev)
44513 {
44514 static bool raw_init = false; /* raw decoders loaded? */
44515 - static atomic_t devno = ATOMIC_INIT(0);
44516 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
44517 struct rc_map *rc_map;
44518 const char *path;
44519 int rc;
44520 @@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *dev)
44521 */
44522 mutex_lock(&dev->lock);
44523
44524 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
44525 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
44526 dev_set_name(&dev->dev, "rc%ld", dev->devno);
44527 dev_set_drvdata(&dev->dev, dev);
44528 rc = device_add(&dev->dev);
44529 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
44530 index 20e345d..da56fe4 100644
44531 --- a/drivers/media/usb/dvb-usb/cxusb.c
44532 +++ b/drivers/media/usb/dvb-usb/cxusb.c
44533 @@ -1101,7 +1101,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
44534
44535 struct dib0700_adapter_state {
44536 int (*set_param_save) (struct dvb_frontend *);
44537 -};
44538 +} __no_const;
44539
44540 static int dib7070_set_param_override(struct dvb_frontend *fe)
44541 {
44542 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
44543 index c1a63b2..dbcbfb6 100644
44544 --- a/drivers/media/usb/dvb-usb/dw2102.c
44545 +++ b/drivers/media/usb/dvb-usb/dw2102.c
44546 @@ -121,7 +121,7 @@ struct su3000_state {
44547
44548 struct s6x0_state {
44549 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
44550 -};
44551 +} __no_const;
44552
44553 /* debug */
44554 static int dvb_usb_dw2102_debug;
44555 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
44556 index 8f7a6a4..eb0e1d4 100644
44557 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
44558 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
44559 @@ -326,7 +326,7 @@ struct v4l2_buffer32 {
44560 __u32 reserved;
44561 };
44562
44563 -static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
44564 +static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
44565 enum v4l2_memory memory)
44566 {
44567 void __user *up_pln;
44568 @@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
44569 return 0;
44570 }
44571
44572 -static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
44573 +static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
44574 enum v4l2_memory memory)
44575 {
44576 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
44577 @@ -425,7 +425,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
44578 * by passing a very big num_planes value */
44579 uplane = compat_alloc_user_space(num_planes *
44580 sizeof(struct v4l2_plane));
44581 - kp->m.planes = uplane;
44582 + kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
44583
44584 while (--num_planes >= 0) {
44585 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
44586 @@ -496,7 +496,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
44587 if (num_planes == 0)
44588 return 0;
44589
44590 - uplane = kp->m.planes;
44591 + uplane = (struct v4l2_plane __force_user *)kp->m.planes;
44592 if (get_user(p, &up->m.planes))
44593 return -EFAULT;
44594 uplane32 = compat_ptr(p);
44595 @@ -550,7 +550,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
44596 get_user(kp->capability, &up->capability) ||
44597 get_user(kp->flags, &up->flags))
44598 return -EFAULT;
44599 - kp->base = compat_ptr(tmp);
44600 + kp->base = (void __force_kernel *)compat_ptr(tmp);
44601 get_v4l2_pix_format(&kp->fmt, &up->fmt);
44602 return 0;
44603 }
44604 @@ -656,7 +656,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
44605 n * sizeof(struct v4l2_ext_control32)))
44606 return -EFAULT;
44607 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
44608 - kp->controls = kcontrols;
44609 + kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
44610 while (--n >= 0) {
44611 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
44612 return -EFAULT;
44613 @@ -678,7 +678,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
44614 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
44615 {
44616 struct v4l2_ext_control32 __user *ucontrols;
44617 - struct v4l2_ext_control __user *kcontrols = kp->controls;
44618 + struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
44619 int n = kp->count;
44620 compat_caddr_t p;
44621
44622 @@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
44623 put_user(kp->start_block, &up->start_block) ||
44624 put_user(kp->blocks, &up->blocks) ||
44625 put_user(tmp, &up->edid) ||
44626 - copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
44627 + copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
44628 return -EFAULT;
44629 return 0;
44630 }
44631 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
44632 index fb46790..ae1f8fa 100644
44633 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
44634 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
44635 @@ -1396,8 +1396,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
44636 return 0;
44637
44638 case V4L2_CTRL_TYPE_STRING:
44639 - len = strlen(c->string);
44640 - if (len < ctrl->minimum)
44641 + len = strlen_user(c->string);
44642 + if (!len || len < ctrl->minimum)
44643 return -ERANGE;
44644 if ((len - ctrl->minimum) % ctrl->step)
44645 return -ERANGE;
44646 diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
44647 index 02d1b63..5fd6b16 100644
44648 --- a/drivers/media/v4l2-core/v4l2-device.c
44649 +++ b/drivers/media/v4l2-core/v4l2-device.c
44650 @@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
44651 EXPORT_SYMBOL_GPL(v4l2_device_put);
44652
44653 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
44654 - atomic_t *instance)
44655 + atomic_unchecked_t *instance)
44656 {
44657 - int num = atomic_inc_return(instance) - 1;
44658 + int num = atomic_inc_return_unchecked(instance) - 1;
44659 int len = strlen(basename);
44660
44661 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
44662 diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
44663 index 68e6b5e..8eb2aec 100644
44664 --- a/drivers/media/v4l2-core/v4l2-ioctl.c
44665 +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
44666 @@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
44667 struct file *file, void *fh, void *p);
44668 } u;
44669 void (*debug)(const void *arg, bool write_only);
44670 -};
44671 +} __do_const;
44672 +typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
44673
44674 /* This control needs a priority check */
44675 #define INFO_FL_PRIO (1 << 0)
44676 @@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file *file,
44677 struct video_device *vfd = video_devdata(file);
44678 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
44679 bool write_only = false;
44680 - struct v4l2_ioctl_info default_info;
44681 + v4l2_ioctl_info_no_const default_info;
44682 const struct v4l2_ioctl_info *info;
44683 void *fh = file->private_data;
44684 struct v4l2_fh *vfh = NULL;
44685 @@ -2194,7 +2195,7 @@ done:
44686 }
44687
44688 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44689 - void * __user *user_ptr, void ***kernel_ptr)
44690 + void __user **user_ptr, void ***kernel_ptr)
44691 {
44692 int ret = 0;
44693
44694 @@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44695 ret = -EINVAL;
44696 break;
44697 }
44698 - *user_ptr = (void __user *)buf->m.planes;
44699 + *user_ptr = (void __force_user *)buf->m.planes;
44700 *kernel_ptr = (void *)&buf->m.planes;
44701 *array_size = sizeof(struct v4l2_plane) * buf->length;
44702 ret = 1;
44703 @@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44704 ret = -EINVAL;
44705 break;
44706 }
44707 - *user_ptr = (void __user *)ctrls->controls;
44708 + *user_ptr = (void __force_user *)ctrls->controls;
44709 *kernel_ptr = (void *)&ctrls->controls;
44710 *array_size = sizeof(struct v4l2_ext_control)
44711 * ctrls->count;
44712 @@ -2340,7 +2341,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
44713 err = -ENOTTY;
44714
44715 if (has_array_args) {
44716 - *kernel_ptr = user_ptr;
44717 + *kernel_ptr = (void __force_kernel *)user_ptr;
44718 if (copy_to_user(user_ptr, mbuf, array_size))
44719 err = -EFAULT;
44720 goto out_array_args;
44721 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
44722 index 767ff4d..c69d259 100644
44723 --- a/drivers/message/fusion/mptbase.c
44724 +++ b/drivers/message/fusion/mptbase.c
44725 @@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44726 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
44727 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
44728
44729 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44730 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
44731 +#else
44732 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
44733 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
44734 +#endif
44735 +
44736 /*
44737 * Rounding UP to nearest 4-kB boundary here...
44738 */
44739 @@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44740 ioc->facts.GlobalCredits);
44741
44742 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
44743 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44744 + NULL, NULL);
44745 +#else
44746 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
44747 +#endif
44748 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
44749 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
44750 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
44751 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
44752 index dd239bd..689c4f7 100644
44753 --- a/drivers/message/fusion/mptsas.c
44754 +++ b/drivers/message/fusion/mptsas.c
44755 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
44756 return 0;
44757 }
44758
44759 +static inline void
44760 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44761 +{
44762 + if (phy_info->port_details) {
44763 + phy_info->port_details->rphy = rphy;
44764 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44765 + ioc->name, rphy));
44766 + }
44767 +
44768 + if (rphy) {
44769 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44770 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44771 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44772 + ioc->name, rphy, rphy->dev.release));
44773 + }
44774 +}
44775 +
44776 /* no mutex */
44777 static void
44778 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
44779 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
44780 return NULL;
44781 }
44782
44783 -static inline void
44784 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44785 -{
44786 - if (phy_info->port_details) {
44787 - phy_info->port_details->rphy = rphy;
44788 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44789 - ioc->name, rphy));
44790 - }
44791 -
44792 - if (rphy) {
44793 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44794 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44795 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44796 - ioc->name, rphy, rphy->dev.release));
44797 - }
44798 -}
44799 -
44800 static inline struct sas_port *
44801 mptsas_get_port(struct mptsas_phyinfo *phy_info)
44802 {
44803 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
44804 index 727819c..ad74694 100644
44805 --- a/drivers/message/fusion/mptscsih.c
44806 +++ b/drivers/message/fusion/mptscsih.c
44807 @@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
44808
44809 h = shost_priv(SChost);
44810
44811 - if (h) {
44812 - if (h->info_kbuf == NULL)
44813 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44814 - return h->info_kbuf;
44815 - h->info_kbuf[0] = '\0';
44816 + if (!h)
44817 + return NULL;
44818
44819 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44820 - h->info_kbuf[size-1] = '\0';
44821 - }
44822 + if (h->info_kbuf == NULL)
44823 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44824 + return h->info_kbuf;
44825 + h->info_kbuf[0] = '\0';
44826 +
44827 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44828 + h->info_kbuf[size-1] = '\0';
44829
44830 return h->info_kbuf;
44831 }
44832 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
44833 index b7d87cd..3fb36da 100644
44834 --- a/drivers/message/i2o/i2o_proc.c
44835 +++ b/drivers/message/i2o/i2o_proc.c
44836 @@ -255,12 +255,6 @@ static char *scsi_devices[] = {
44837 "Array Controller Device"
44838 };
44839
44840 -static char *chtostr(char *tmp, u8 *chars, int n)
44841 -{
44842 - tmp[0] = 0;
44843 - return strncat(tmp, (char *)chars, n);
44844 -}
44845 -
44846 static int i2o_report_query_status(struct seq_file *seq, int block_status,
44847 char *group)
44848 {
44849 @@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
44850 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
44851 {
44852 struct i2o_controller *c = (struct i2o_controller *)seq->private;
44853 - static u32 work32[5];
44854 - static u8 *work8 = (u8 *) work32;
44855 - static u16 *work16 = (u16 *) work32;
44856 + u32 work32[5];
44857 + u8 *work8 = (u8 *) work32;
44858 + u16 *work16 = (u16 *) work32;
44859 int token;
44860 u32 hwcap;
44861
44862 @@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44863 } *result;
44864
44865 i2o_exec_execute_ddm_table ddm_table;
44866 - char tmp[28 + 1];
44867
44868 result = kmalloc(sizeof(*result), GFP_KERNEL);
44869 if (!result)
44870 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44871
44872 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
44873 seq_printf(seq, "%-#8x", ddm_table.module_id);
44874 - seq_printf(seq, "%-29s",
44875 - chtostr(tmp, ddm_table.module_name_version, 28));
44876 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
44877 seq_printf(seq, "%9d ", ddm_table.data_size);
44878 seq_printf(seq, "%8d", ddm_table.code_size);
44879
44880 @@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44881
44882 i2o_driver_result_table *result;
44883 i2o_driver_store_table *dst;
44884 - char tmp[28 + 1];
44885
44886 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
44887 if (result == NULL)
44888 @@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44889
44890 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
44891 seq_printf(seq, "%-#8x", dst->module_id);
44892 - seq_printf(seq, "%-29s",
44893 - chtostr(tmp, dst->module_name_version, 28));
44894 - seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
44895 + seq_printf(seq, "%-.28s", dst->module_name_version);
44896 + seq_printf(seq, "%-.8s", dst->date);
44897 seq_printf(seq, "%8d ", dst->module_size);
44898 seq_printf(seq, "%8d ", dst->mpb_size);
44899 seq_printf(seq, "0x%04x", dst->module_flags);
44900 @@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
44901 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44902 {
44903 struct i2o_device *d = (struct i2o_device *)seq->private;
44904 - static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44905 + u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44906 // == (allow) 512d bytes (max)
44907 - static u16 *work16 = (u16 *) work32;
44908 + u16 *work16 = (u16 *) work32;
44909 int token;
44910 - char tmp[16 + 1];
44911
44912 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
44913
44914 @@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44915 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
44916 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
44917 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
44918 - seq_printf(seq, "Vendor info : %s\n",
44919 - chtostr(tmp, (u8 *) (work32 + 2), 16));
44920 - seq_printf(seq, "Product info : %s\n",
44921 - chtostr(tmp, (u8 *) (work32 + 6), 16));
44922 - seq_printf(seq, "Description : %s\n",
44923 - chtostr(tmp, (u8 *) (work32 + 10), 16));
44924 - seq_printf(seq, "Product rev. : %s\n",
44925 - chtostr(tmp, (u8 *) (work32 + 14), 8));
44926 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
44927 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
44928 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
44929 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
44930
44931 seq_printf(seq, "Serial number : ");
44932 print_serial_number(seq, (u8 *) (work32 + 16),
44933 @@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44934 u8 pad[256]; // allow up to 256 byte (max) serial number
44935 } result;
44936
44937 - char tmp[24 + 1];
44938 -
44939 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
44940
44941 if (token < 0) {
44942 @@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44943 }
44944
44945 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
44946 - seq_printf(seq, "Module name : %s\n",
44947 - chtostr(tmp, result.module_name, 24));
44948 - seq_printf(seq, "Module revision : %s\n",
44949 - chtostr(tmp, result.module_rev, 8));
44950 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
44951 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
44952
44953 seq_printf(seq, "Serial number : ");
44954 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
44955 @@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44956 u8 instance_number[4];
44957 } result;
44958
44959 - char tmp[64 + 1];
44960 -
44961 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
44962
44963 if (token < 0) {
44964 @@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44965 return 0;
44966 }
44967
44968 - seq_printf(seq, "Device name : %s\n",
44969 - chtostr(tmp, result.device_name, 64));
44970 - seq_printf(seq, "Service name : %s\n",
44971 - chtostr(tmp, result.service_name, 64));
44972 - seq_printf(seq, "Physical name : %s\n",
44973 - chtostr(tmp, result.physical_location, 64));
44974 - seq_printf(seq, "Instance number : %s\n",
44975 - chtostr(tmp, result.instance_number, 4));
44976 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
44977 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
44978 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
44979 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
44980
44981 return 0;
44982 }
44983 @@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44984 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
44985 {
44986 struct i2o_device *d = (struct i2o_device *)seq->private;
44987 - static u32 work32[12];
44988 - static u16 *work16 = (u16 *) work32;
44989 - static u8 *work8 = (u8 *) work32;
44990 + u32 work32[12];
44991 + u16 *work16 = (u16 *) work32;
44992 + u8 *work8 = (u8 *) work32;
44993 int token;
44994
44995 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
44996 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
44997 index a8c08f3..155fe3d 100644
44998 --- a/drivers/message/i2o/iop.c
44999 +++ b/drivers/message/i2o/iop.c
45000 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
45001
45002 spin_lock_irqsave(&c->context_list_lock, flags);
45003
45004 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
45005 - atomic_inc(&c->context_list_counter);
45006 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
45007 + atomic_inc_unchecked(&c->context_list_counter);
45008
45009 - entry->context = atomic_read(&c->context_list_counter);
45010 + entry->context = atomic_read_unchecked(&c->context_list_counter);
45011
45012 list_add(&entry->list, &c->context_list);
45013
45014 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
45015
45016 #if BITS_PER_LONG == 64
45017 spin_lock_init(&c->context_list_lock);
45018 - atomic_set(&c->context_list_counter, 0);
45019 + atomic_set_unchecked(&c->context_list_counter, 0);
45020 INIT_LIST_HEAD(&c->context_list);
45021 #endif
45022
45023 diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
45024 index e33e385..28dfd23 100644
45025 --- a/drivers/mfd/ab8500-debugfs.c
45026 +++ b/drivers/mfd/ab8500-debugfs.c
45027 @@ -100,7 +100,7 @@ static int irq_last;
45028 static u32 *irq_count;
45029 static int num_irqs;
45030
45031 -static struct device_attribute **dev_attr;
45032 +static device_attribute_no_const **dev_attr;
45033 static char **event_name;
45034
45035 static u8 avg_sample = SAMPLE_16;
45036 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
45037 index fcbb2e9..2635e11 100644
45038 --- a/drivers/mfd/janz-cmodio.c
45039 +++ b/drivers/mfd/janz-cmodio.c
45040 @@ -13,6 +13,7 @@
45041
45042 #include <linux/kernel.h>
45043 #include <linux/module.h>
45044 +#include <linux/slab.h>
45045 #include <linux/init.h>
45046 #include <linux/pci.h>
45047 #include <linux/interrupt.h>
45048 diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
45049 index 176aa26..27811b2 100644
45050 --- a/drivers/mfd/max8925-i2c.c
45051 +++ b/drivers/mfd/max8925-i2c.c
45052 @@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
45053 const struct i2c_device_id *id)
45054 {
45055 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
45056 - static struct max8925_chip *chip;
45057 + struct max8925_chip *chip;
45058 struct device_node *node = client->dev.of_node;
45059
45060 if (node && !pdata) {
45061 diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
45062 index c0f608e..286f8ec 100644
45063 --- a/drivers/mfd/tps65910.c
45064 +++ b/drivers/mfd/tps65910.c
45065 @@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
45066 struct tps65910_platform_data *pdata)
45067 {
45068 int ret = 0;
45069 - static struct regmap_irq_chip *tps6591x_irqs_chip;
45070 + struct regmap_irq_chip *tps6591x_irqs_chip;
45071
45072 if (!irq) {
45073 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
45074 diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
45075 index 9aa6d1e..1631bfc 100644
45076 --- a/drivers/mfd/twl4030-irq.c
45077 +++ b/drivers/mfd/twl4030-irq.c
45078 @@ -35,6 +35,7 @@
45079 #include <linux/of.h>
45080 #include <linux/irqdomain.h>
45081 #include <linux/i2c/twl.h>
45082 +#include <asm/pgtable.h>
45083
45084 #include "twl-core.h"
45085
45086 @@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
45087 * Install an irq handler for each of the SIH modules;
45088 * clone dummy irq_chip since PIH can't *do* anything
45089 */
45090 - twl4030_irq_chip = dummy_irq_chip;
45091 - twl4030_irq_chip.name = "twl4030";
45092 + pax_open_kernel();
45093 + memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
45094 + *(const char **)&twl4030_irq_chip.name = "twl4030";
45095
45096 - twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
45097 + *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
45098 + pax_close_kernel();
45099
45100 for (i = irq_base; i < irq_end; i++) {
45101 irq_set_chip_and_handler(i, &twl4030_irq_chip,
45102 diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
45103 index 464419b..64bae8d 100644
45104 --- a/drivers/misc/c2port/core.c
45105 +++ b/drivers/misc/c2port/core.c
45106 @@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
45107 goto error_idr_alloc;
45108 c2dev->id = ret;
45109
45110 - bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
45111 + pax_open_kernel();
45112 + *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
45113 + pax_close_kernel();
45114
45115 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
45116 "c2port%d", c2dev->id);
45117 diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
45118 index 9c34e57..b981cda 100644
45119 --- a/drivers/misc/eeprom/sunxi_sid.c
45120 +++ b/drivers/misc/eeprom/sunxi_sid.c
45121 @@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
45122
45123 platform_set_drvdata(pdev, sid_data);
45124
45125 - sid_bin_attr.size = sid_data->keysize;
45126 + pax_open_kernel();
45127 + *(size_t *)&sid_bin_attr.size = sid_data->keysize;
45128 + pax_close_kernel();
45129 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
45130 return -ENODEV;
45131
45132 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
45133 index 36f5d52..32311c3 100644
45134 --- a/drivers/misc/kgdbts.c
45135 +++ b/drivers/misc/kgdbts.c
45136 @@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
45137 char before[BREAK_INSTR_SIZE];
45138 char after[BREAK_INSTR_SIZE];
45139
45140 - probe_kernel_read(before, (char *)kgdbts_break_test,
45141 + probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
45142 BREAK_INSTR_SIZE);
45143 init_simple_test();
45144 ts.tst = plant_and_detach_test;
45145 @@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
45146 /* Activate test with initial breakpoint */
45147 if (!is_early)
45148 kgdb_breakpoint();
45149 - probe_kernel_read(after, (char *)kgdbts_break_test,
45150 + probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
45151 BREAK_INSTR_SIZE);
45152 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
45153 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
45154 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
45155 index 036effe..b3a6336 100644
45156 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
45157 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
45158 @@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
45159 * the lid is closed. This leads to interrupts as soon as a little move
45160 * is done.
45161 */
45162 - atomic_inc(&lis3->count);
45163 + atomic_inc_unchecked(&lis3->count);
45164
45165 wake_up_interruptible(&lis3->misc_wait);
45166 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
45167 @@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
45168 if (lis3->pm_dev)
45169 pm_runtime_get_sync(lis3->pm_dev);
45170
45171 - atomic_set(&lis3->count, 0);
45172 + atomic_set_unchecked(&lis3->count, 0);
45173 return 0;
45174 }
45175
45176 @@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
45177 add_wait_queue(&lis3->misc_wait, &wait);
45178 while (true) {
45179 set_current_state(TASK_INTERRUPTIBLE);
45180 - data = atomic_xchg(&lis3->count, 0);
45181 + data = atomic_xchg_unchecked(&lis3->count, 0);
45182 if (data)
45183 break;
45184
45185 @@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
45186 struct lis3lv02d, miscdev);
45187
45188 poll_wait(file, &lis3->misc_wait, wait);
45189 - if (atomic_read(&lis3->count))
45190 + if (atomic_read_unchecked(&lis3->count))
45191 return POLLIN | POLLRDNORM;
45192 return 0;
45193 }
45194 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
45195 index c439c82..1f20f57 100644
45196 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
45197 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
45198 @@ -297,7 +297,7 @@ struct lis3lv02d {
45199 struct input_polled_dev *idev; /* input device */
45200 struct platform_device *pdev; /* platform device */
45201 struct regulator_bulk_data regulators[2];
45202 - atomic_t count; /* interrupt count after last read */
45203 + atomic_unchecked_t count; /* interrupt count after last read */
45204 union axis_conversion ac; /* hw -> logical axis */
45205 int mapped_btns[3];
45206
45207 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
45208 index 2f30bad..c4c13d0 100644
45209 --- a/drivers/misc/sgi-gru/gruhandles.c
45210 +++ b/drivers/misc/sgi-gru/gruhandles.c
45211 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
45212 unsigned long nsec;
45213
45214 nsec = CLKS2NSEC(clks);
45215 - atomic_long_inc(&mcs_op_statistics[op].count);
45216 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
45217 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
45218 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
45219 if (mcs_op_statistics[op].max < nsec)
45220 mcs_op_statistics[op].max = nsec;
45221 }
45222 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
45223 index 4f76359..cdfcb2e 100644
45224 --- a/drivers/misc/sgi-gru/gruprocfs.c
45225 +++ b/drivers/misc/sgi-gru/gruprocfs.c
45226 @@ -32,9 +32,9 @@
45227
45228 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
45229
45230 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
45231 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
45232 {
45233 - unsigned long val = atomic_long_read(v);
45234 + unsigned long val = atomic_long_read_unchecked(v);
45235
45236 seq_printf(s, "%16lu %s\n", val, id);
45237 }
45238 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
45239
45240 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
45241 for (op = 0; op < mcsop_last; op++) {
45242 - count = atomic_long_read(&mcs_op_statistics[op].count);
45243 - total = atomic_long_read(&mcs_op_statistics[op].total);
45244 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
45245 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
45246 max = mcs_op_statistics[op].max;
45247 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
45248 count ? total / count : 0, max);
45249 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
45250 index 5c3ce24..4915ccb 100644
45251 --- a/drivers/misc/sgi-gru/grutables.h
45252 +++ b/drivers/misc/sgi-gru/grutables.h
45253 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
45254 * GRU statistics.
45255 */
45256 struct gru_stats_s {
45257 - atomic_long_t vdata_alloc;
45258 - atomic_long_t vdata_free;
45259 - atomic_long_t gts_alloc;
45260 - atomic_long_t gts_free;
45261 - atomic_long_t gms_alloc;
45262 - atomic_long_t gms_free;
45263 - atomic_long_t gts_double_allocate;
45264 - atomic_long_t assign_context;
45265 - atomic_long_t assign_context_failed;
45266 - atomic_long_t free_context;
45267 - atomic_long_t load_user_context;
45268 - atomic_long_t load_kernel_context;
45269 - atomic_long_t lock_kernel_context;
45270 - atomic_long_t unlock_kernel_context;
45271 - atomic_long_t steal_user_context;
45272 - atomic_long_t steal_kernel_context;
45273 - atomic_long_t steal_context_failed;
45274 - atomic_long_t nopfn;
45275 - atomic_long_t asid_new;
45276 - atomic_long_t asid_next;
45277 - atomic_long_t asid_wrap;
45278 - atomic_long_t asid_reuse;
45279 - atomic_long_t intr;
45280 - atomic_long_t intr_cbr;
45281 - atomic_long_t intr_tfh;
45282 - atomic_long_t intr_spurious;
45283 - atomic_long_t intr_mm_lock_failed;
45284 - atomic_long_t call_os;
45285 - atomic_long_t call_os_wait_queue;
45286 - atomic_long_t user_flush_tlb;
45287 - atomic_long_t user_unload_context;
45288 - atomic_long_t user_exception;
45289 - atomic_long_t set_context_option;
45290 - atomic_long_t check_context_retarget_intr;
45291 - atomic_long_t check_context_unload;
45292 - atomic_long_t tlb_dropin;
45293 - atomic_long_t tlb_preload_page;
45294 - atomic_long_t tlb_dropin_fail_no_asid;
45295 - atomic_long_t tlb_dropin_fail_upm;
45296 - atomic_long_t tlb_dropin_fail_invalid;
45297 - atomic_long_t tlb_dropin_fail_range_active;
45298 - atomic_long_t tlb_dropin_fail_idle;
45299 - atomic_long_t tlb_dropin_fail_fmm;
45300 - atomic_long_t tlb_dropin_fail_no_exception;
45301 - atomic_long_t tfh_stale_on_fault;
45302 - atomic_long_t mmu_invalidate_range;
45303 - atomic_long_t mmu_invalidate_page;
45304 - atomic_long_t flush_tlb;
45305 - atomic_long_t flush_tlb_gru;
45306 - atomic_long_t flush_tlb_gru_tgh;
45307 - atomic_long_t flush_tlb_gru_zero_asid;
45308 + atomic_long_unchecked_t vdata_alloc;
45309 + atomic_long_unchecked_t vdata_free;
45310 + atomic_long_unchecked_t gts_alloc;
45311 + atomic_long_unchecked_t gts_free;
45312 + atomic_long_unchecked_t gms_alloc;
45313 + atomic_long_unchecked_t gms_free;
45314 + atomic_long_unchecked_t gts_double_allocate;
45315 + atomic_long_unchecked_t assign_context;
45316 + atomic_long_unchecked_t assign_context_failed;
45317 + atomic_long_unchecked_t free_context;
45318 + atomic_long_unchecked_t load_user_context;
45319 + atomic_long_unchecked_t load_kernel_context;
45320 + atomic_long_unchecked_t lock_kernel_context;
45321 + atomic_long_unchecked_t unlock_kernel_context;
45322 + atomic_long_unchecked_t steal_user_context;
45323 + atomic_long_unchecked_t steal_kernel_context;
45324 + atomic_long_unchecked_t steal_context_failed;
45325 + atomic_long_unchecked_t nopfn;
45326 + atomic_long_unchecked_t asid_new;
45327 + atomic_long_unchecked_t asid_next;
45328 + atomic_long_unchecked_t asid_wrap;
45329 + atomic_long_unchecked_t asid_reuse;
45330 + atomic_long_unchecked_t intr;
45331 + atomic_long_unchecked_t intr_cbr;
45332 + atomic_long_unchecked_t intr_tfh;
45333 + atomic_long_unchecked_t intr_spurious;
45334 + atomic_long_unchecked_t intr_mm_lock_failed;
45335 + atomic_long_unchecked_t call_os;
45336 + atomic_long_unchecked_t call_os_wait_queue;
45337 + atomic_long_unchecked_t user_flush_tlb;
45338 + atomic_long_unchecked_t user_unload_context;
45339 + atomic_long_unchecked_t user_exception;
45340 + atomic_long_unchecked_t set_context_option;
45341 + atomic_long_unchecked_t check_context_retarget_intr;
45342 + atomic_long_unchecked_t check_context_unload;
45343 + atomic_long_unchecked_t tlb_dropin;
45344 + atomic_long_unchecked_t tlb_preload_page;
45345 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
45346 + atomic_long_unchecked_t tlb_dropin_fail_upm;
45347 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
45348 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
45349 + atomic_long_unchecked_t tlb_dropin_fail_idle;
45350 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
45351 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
45352 + atomic_long_unchecked_t tfh_stale_on_fault;
45353 + atomic_long_unchecked_t mmu_invalidate_range;
45354 + atomic_long_unchecked_t mmu_invalidate_page;
45355 + atomic_long_unchecked_t flush_tlb;
45356 + atomic_long_unchecked_t flush_tlb_gru;
45357 + atomic_long_unchecked_t flush_tlb_gru_tgh;
45358 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
45359
45360 - atomic_long_t copy_gpa;
45361 - atomic_long_t read_gpa;
45362 + atomic_long_unchecked_t copy_gpa;
45363 + atomic_long_unchecked_t read_gpa;
45364
45365 - atomic_long_t mesq_receive;
45366 - atomic_long_t mesq_receive_none;
45367 - atomic_long_t mesq_send;
45368 - atomic_long_t mesq_send_failed;
45369 - atomic_long_t mesq_noop;
45370 - atomic_long_t mesq_send_unexpected_error;
45371 - atomic_long_t mesq_send_lb_overflow;
45372 - atomic_long_t mesq_send_qlimit_reached;
45373 - atomic_long_t mesq_send_amo_nacked;
45374 - atomic_long_t mesq_send_put_nacked;
45375 - atomic_long_t mesq_page_overflow;
45376 - atomic_long_t mesq_qf_locked;
45377 - atomic_long_t mesq_qf_noop_not_full;
45378 - atomic_long_t mesq_qf_switch_head_failed;
45379 - atomic_long_t mesq_qf_unexpected_error;
45380 - atomic_long_t mesq_noop_unexpected_error;
45381 - atomic_long_t mesq_noop_lb_overflow;
45382 - atomic_long_t mesq_noop_qlimit_reached;
45383 - atomic_long_t mesq_noop_amo_nacked;
45384 - atomic_long_t mesq_noop_put_nacked;
45385 - atomic_long_t mesq_noop_page_overflow;
45386 + atomic_long_unchecked_t mesq_receive;
45387 + atomic_long_unchecked_t mesq_receive_none;
45388 + atomic_long_unchecked_t mesq_send;
45389 + atomic_long_unchecked_t mesq_send_failed;
45390 + atomic_long_unchecked_t mesq_noop;
45391 + atomic_long_unchecked_t mesq_send_unexpected_error;
45392 + atomic_long_unchecked_t mesq_send_lb_overflow;
45393 + atomic_long_unchecked_t mesq_send_qlimit_reached;
45394 + atomic_long_unchecked_t mesq_send_amo_nacked;
45395 + atomic_long_unchecked_t mesq_send_put_nacked;
45396 + atomic_long_unchecked_t mesq_page_overflow;
45397 + atomic_long_unchecked_t mesq_qf_locked;
45398 + atomic_long_unchecked_t mesq_qf_noop_not_full;
45399 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
45400 + atomic_long_unchecked_t mesq_qf_unexpected_error;
45401 + atomic_long_unchecked_t mesq_noop_unexpected_error;
45402 + atomic_long_unchecked_t mesq_noop_lb_overflow;
45403 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
45404 + atomic_long_unchecked_t mesq_noop_amo_nacked;
45405 + atomic_long_unchecked_t mesq_noop_put_nacked;
45406 + atomic_long_unchecked_t mesq_noop_page_overflow;
45407
45408 };
45409
45410 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
45411 tghop_invalidate, mcsop_last};
45412
45413 struct mcs_op_statistic {
45414 - atomic_long_t count;
45415 - atomic_long_t total;
45416 + atomic_long_unchecked_t count;
45417 + atomic_long_unchecked_t total;
45418 unsigned long max;
45419 };
45420
45421 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
45422
45423 #define STAT(id) do { \
45424 if (gru_options & OPT_STATS) \
45425 - atomic_long_inc(&gru_stats.id); \
45426 + atomic_long_inc_unchecked(&gru_stats.id); \
45427 } while (0)
45428
45429 #ifdef CONFIG_SGI_GRU_DEBUG
45430 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
45431 index c862cd4..0d176fe 100644
45432 --- a/drivers/misc/sgi-xp/xp.h
45433 +++ b/drivers/misc/sgi-xp/xp.h
45434 @@ -288,7 +288,7 @@ struct xpc_interface {
45435 xpc_notify_func, void *);
45436 void (*received) (short, int, void *);
45437 enum xp_retval (*partid_to_nasids) (short, void *);
45438 -};
45439 +} __no_const;
45440
45441 extern struct xpc_interface xpc_interface;
45442
45443 diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
45444 index 01be66d..e3a0c7e 100644
45445 --- a/drivers/misc/sgi-xp/xp_main.c
45446 +++ b/drivers/misc/sgi-xp/xp_main.c
45447 @@ -78,13 +78,13 @@ xpc_notloaded(void)
45448 }
45449
45450 struct xpc_interface xpc_interface = {
45451 - (void (*)(int))xpc_notloaded,
45452 - (void (*)(int))xpc_notloaded,
45453 - (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
45454 - (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
45455 + .connect = (void (*)(int))xpc_notloaded,
45456 + .disconnect = (void (*)(int))xpc_notloaded,
45457 + .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
45458 + .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
45459 void *))xpc_notloaded,
45460 - (void (*)(short, int, void *))xpc_notloaded,
45461 - (enum xp_retval(*)(short, void *))xpc_notloaded
45462 + .received = (void (*)(short, int, void *))xpc_notloaded,
45463 + .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
45464 };
45465 EXPORT_SYMBOL_GPL(xpc_interface);
45466
45467 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
45468 index b94d5f7..7f494c5 100644
45469 --- a/drivers/misc/sgi-xp/xpc.h
45470 +++ b/drivers/misc/sgi-xp/xpc.h
45471 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
45472 void (*received_payload) (struct xpc_channel *, void *);
45473 void (*notify_senders_of_disconnect) (struct xpc_channel *);
45474 };
45475 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
45476
45477 /* struct xpc_partition act_state values (for XPC HB) */
45478
45479 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
45480 /* found in xpc_main.c */
45481 extern struct device *xpc_part;
45482 extern struct device *xpc_chan;
45483 -extern struct xpc_arch_operations xpc_arch_ops;
45484 +extern xpc_arch_operations_no_const xpc_arch_ops;
45485 extern int xpc_disengage_timelimit;
45486 extern int xpc_disengage_timedout;
45487 extern int xpc_activate_IRQ_rcvd;
45488 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
45489 index 82dc574..8539ab2 100644
45490 --- a/drivers/misc/sgi-xp/xpc_main.c
45491 +++ b/drivers/misc/sgi-xp/xpc_main.c
45492 @@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
45493 .notifier_call = xpc_system_die,
45494 };
45495
45496 -struct xpc_arch_operations xpc_arch_ops;
45497 +xpc_arch_operations_no_const xpc_arch_ops;
45498
45499 /*
45500 * Timer function to enforce the timelimit on the partition disengage.
45501 @@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
45502
45503 if (((die_args->trapnr == X86_TRAP_MF) ||
45504 (die_args->trapnr == X86_TRAP_XF)) &&
45505 - !user_mode_vm(die_args->regs))
45506 + !user_mode(die_args->regs))
45507 xpc_die_deactivate();
45508
45509 break;
45510 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
45511 index 7b5424f..ed1d6ac 100644
45512 --- a/drivers/mmc/card/block.c
45513 +++ b/drivers/mmc/card/block.c
45514 @@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
45515 if (idata->ic.postsleep_min_us)
45516 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
45517
45518 - if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
45519 + if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
45520 err = -EFAULT;
45521 goto cmd_rel_host;
45522 }
45523 diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
45524 index 357bbc5..3e049c1 100644
45525 --- a/drivers/mmc/card/queue.c
45526 +++ b/drivers/mmc/card/queue.c
45527 @@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
45528 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
45529
45530 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
45531 - limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
45532 + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
45533
45534 mq->card = card;
45535 mq->queue = blk_init_queue(mmc_request_fn, lock);
45536 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
45537 index e5b5eeb..7bf2212 100644
45538 --- a/drivers/mmc/core/mmc_ops.c
45539 +++ b/drivers/mmc/core/mmc_ops.c
45540 @@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
45541 void *data_buf;
45542 int is_on_stack;
45543
45544 - is_on_stack = object_is_on_stack(buf);
45545 + is_on_stack = object_starts_on_stack(buf);
45546 if (is_on_stack) {
45547 /*
45548 * dma onto stack is unsafe/nonportable, but callers to this
45549 diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
45550 index 6bf24ab..13d0293b 100644
45551 --- a/drivers/mmc/host/dw_mmc.h
45552 +++ b/drivers/mmc/host/dw_mmc.h
45553 @@ -258,5 +258,5 @@ struct dw_mci_drv_data {
45554 int (*parse_dt)(struct dw_mci *host);
45555 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
45556 struct dw_mci_tuning_data *tuning_data);
45557 -};
45558 +} __do_const;
45559 #endif /* _DW_MMC_H_ */
45560 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
45561 index f320579..7b7ebac 100644
45562 --- a/drivers/mmc/host/mmci.c
45563 +++ b/drivers/mmc/host/mmci.c
45564 @@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device *dev,
45565 }
45566
45567 if (variant->busy_detect) {
45568 - mmci_ops.card_busy = mmci_card_busy;
45569 + pax_open_kernel();
45570 + *(void **)&mmci_ops.card_busy = mmci_card_busy;
45571 + pax_close_kernel();
45572 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
45573 }
45574
45575 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
45576 index 1dcaf8a..025af25 100644
45577 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
45578 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
45579 @@ -1009,9 +1009,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
45580 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
45581 }
45582
45583 - if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
45584 - sdhci_esdhc_ops.platform_execute_tuning =
45585 + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
45586 + pax_open_kernel();
45587 + *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
45588 esdhc_executing_tuning;
45589 + pax_close_kernel();
45590 + }
45591 boarddata = &imx_data->boarddata;
45592 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
45593 if (!host->mmc->parent->platform_data) {
45594 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
45595 index 6debda9..2ba7427 100644
45596 --- a/drivers/mmc/host/sdhci-s3c.c
45597 +++ b/drivers/mmc/host/sdhci-s3c.c
45598 @@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
45599 * we can use overriding functions instead of default.
45600 */
45601 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
45602 - sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
45603 - sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
45604 - sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
45605 + pax_open_kernel();
45606 + *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
45607 + *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
45608 + *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
45609 + pax_close_kernel();
45610 }
45611
45612 /* It supports additional host capabilities if needed */
45613 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
45614 index 096993f..f02c23b 100644
45615 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
45616 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
45617 @@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
45618 size_t totlen = 0, thislen;
45619 int ret = 0;
45620 size_t buflen = 0;
45621 - static char *buffer;
45622 + char *buffer;
45623
45624 if (!ECCBUF_SIZE) {
45625 /* We should fall back to a general writev implementation.
45626 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
45627 index 370b9dd..1a1176b 100644
45628 --- a/drivers/mtd/nand/denali.c
45629 +++ b/drivers/mtd/nand/denali.c
45630 @@ -24,6 +24,7 @@
45631 #include <linux/slab.h>
45632 #include <linux/mtd/mtd.h>
45633 #include <linux/module.h>
45634 +#include <linux/slab.h>
45635
45636 #include "denali.h"
45637
45638 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
45639 index 51b9d6a..52af9a7 100644
45640 --- a/drivers/mtd/nftlmount.c
45641 +++ b/drivers/mtd/nftlmount.c
45642 @@ -24,6 +24,7 @@
45643 #include <asm/errno.h>
45644 #include <linux/delay.h>
45645 #include <linux/slab.h>
45646 +#include <linux/sched.h>
45647 #include <linux/mtd/mtd.h>
45648 #include <linux/mtd/nand.h>
45649 #include <linux/mtd/nftl.h>
45650 diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
45651 index 4b8e895..6b3c498 100644
45652 --- a/drivers/mtd/sm_ftl.c
45653 +++ b/drivers/mtd/sm_ftl.c
45654 @@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
45655 #define SM_CIS_VENDOR_OFFSET 0x59
45656 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
45657 {
45658 - struct attribute_group *attr_group;
45659 + attribute_group_no_const *attr_group;
45660 struct attribute **attributes;
45661 struct sm_sysfs_attribute *vendor_attribute;
45662
45663 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
45664 index 4b8c58b..a200546 100644
45665 --- a/drivers/net/bonding/bond_main.c
45666 +++ b/drivers/net/bonding/bond_main.c
45667 @@ -4527,6 +4527,7 @@ static void __exit bonding_exit(void)
45668
45669 bond_netlink_fini();
45670 unregister_pernet_subsys(&bond_net_ops);
45671 + rtnl_link_unregister(&bond_link_ops);
45672
45673 #ifdef CONFIG_NET_POLL_CONTROLLER
45674 /*
45675 diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
45676 index 40e7b1c..6a70fff 100644
45677 --- a/drivers/net/bonding/bond_netlink.c
45678 +++ b/drivers/net/bonding/bond_netlink.c
45679 @@ -102,7 +102,7 @@ nla_put_failure:
45680 return -EMSGSIZE;
45681 }
45682
45683 -struct rtnl_link_ops bond_link_ops __read_mostly = {
45684 +struct rtnl_link_ops bond_link_ops = {
45685 .kind = "bond",
45686 .priv_size = sizeof(struct bonding),
45687 .setup = bond_setup,
45688 diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
45689 index 3c06947..fd0e5de 100644
45690 --- a/drivers/net/can/Kconfig
45691 +++ b/drivers/net/can/Kconfig
45692 @@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
45693
45694 config CAN_FLEXCAN
45695 tristate "Support for Freescale FLEXCAN based chips"
45696 - depends on ARM || PPC
45697 + depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
45698 ---help---
45699 Say Y here if you want to support for Freescale FlexCAN.
45700
45701 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
45702 index 36fa577..a158806 100644
45703 --- a/drivers/net/ethernet/8390/ax88796.c
45704 +++ b/drivers/net/ethernet/8390/ax88796.c
45705 @@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
45706 if (ax->plat->reg_offsets)
45707 ei_local->reg_offset = ax->plat->reg_offsets;
45708 else {
45709 + resource_size_t _mem_size = mem_size;
45710 + do_div(_mem_size, 0x18);
45711 ei_local->reg_offset = ax->reg_offsets;
45712 for (ret = 0; ret < 0x18; ret++)
45713 - ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
45714 + ax->reg_offsets[ret] = _mem_size * ret;
45715 }
45716
45717 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
45718 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45719 index 41f3ca5a..1ee5364 100644
45720 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45721 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45722 @@ -1139,7 +1139,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
45723 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
45724 {
45725 /* RX_MODE controlling object */
45726 - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
45727 + bnx2x_init_rx_mode_obj(bp);
45728
45729 /* multicast configuration controlling object */
45730 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
45731 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45732 index 18438a5..c923b8e 100644
45733 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45734 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45735 @@ -2591,15 +2591,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
45736 return rc;
45737 }
45738
45739 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45740 - struct bnx2x_rx_mode_obj *o)
45741 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
45742 {
45743 if (CHIP_IS_E1x(bp)) {
45744 - o->wait_comp = bnx2x_empty_rx_mode_wait;
45745 - o->config_rx_mode = bnx2x_set_rx_mode_e1x;
45746 + bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
45747 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
45748 } else {
45749 - o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
45750 - o->config_rx_mode = bnx2x_set_rx_mode_e2;
45751 + bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
45752 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
45753 }
45754 }
45755
45756 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45757 index 6a53c15..6e7d1e7 100644
45758 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45759 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45760 @@ -1332,8 +1332,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
45761
45762 /********************* RX MODE ****************/
45763
45764 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45765 - struct bnx2x_rx_mode_obj *o);
45766 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
45767
45768 /**
45769 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
45770 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
45771 index cf9917b..c658558 100644
45772 --- a/drivers/net/ethernet/broadcom/tg3.h
45773 +++ b/drivers/net/ethernet/broadcom/tg3.h
45774 @@ -150,6 +150,7 @@
45775 #define CHIPREV_ID_5750_A0 0x4000
45776 #define CHIPREV_ID_5750_A1 0x4001
45777 #define CHIPREV_ID_5750_A3 0x4003
45778 +#define CHIPREV_ID_5750_C1 0x4201
45779 #define CHIPREV_ID_5750_C2 0x4202
45780 #define CHIPREV_ID_5752_A0_HW 0x5000
45781 #define CHIPREV_ID_5752_A0 0x6000
45782 diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
45783 index 3ca77fa..fcc015f 100644
45784 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c
45785 +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
45786 @@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
45787 }
45788
45789 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
45790 - bna_cb_ioceth_enable,
45791 - bna_cb_ioceth_disable,
45792 - bna_cb_ioceth_hbfail,
45793 - bna_cb_ioceth_reset
45794 + .enable_cbfn = bna_cb_ioceth_enable,
45795 + .disable_cbfn = bna_cb_ioceth_disable,
45796 + .hbfail_cbfn = bna_cb_ioceth_hbfail,
45797 + .reset_cbfn = bna_cb_ioceth_reset
45798 };
45799
45800 static void bna_attr_init(struct bna_ioceth *ioceth)
45801 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45802 index 8cffcdf..aadf043 100644
45803 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45804 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45805 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
45806 */
45807 struct l2t_skb_cb {
45808 arp_failure_handler_func arp_failure_handler;
45809 -};
45810 +} __no_const;
45811
45812 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
45813
45814 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45815 index fff02ed..d421412 100644
45816 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45817 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45818 @@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
45819
45820 int i;
45821 struct adapter *ap = netdev2adap(dev);
45822 - static const unsigned int *reg_ranges;
45823 + const unsigned int *reg_ranges;
45824 int arr_size = 0, buf_size = 0;
45825
45826 if (is_t4(ap->params.chip)) {
45827 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
45828 index c05b66d..ed69872 100644
45829 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
45830 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
45831 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45832 for (i=0; i<ETH_ALEN; i++) {
45833 tmp.addr[i] = dev->dev_addr[i];
45834 }
45835 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45836 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45837 break;
45838
45839 case DE4X5_SET_HWADDR: /* Set the hardware address */
45840 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45841 spin_lock_irqsave(&lp->lock, flags);
45842 memcpy(&statbuf, &lp->pktStats, ioc->len);
45843 spin_unlock_irqrestore(&lp->lock, flags);
45844 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
45845 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
45846 return -EFAULT;
45847 break;
45848 }
45849 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
45850 index a37039d..a51d7e8 100644
45851 --- a/drivers/net/ethernet/emulex/benet/be_main.c
45852 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
45853 @@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
45854
45855 if (wrapped)
45856 newacc += 65536;
45857 - ACCESS_ONCE(*acc) = newacc;
45858 + ACCESS_ONCE_RW(*acc) = newacc;
45859 }
45860
45861 static void populate_erx_stats(struct be_adapter *adapter,
45862 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
45863 index 212f44b..fb69959 100644
45864 --- a/drivers/net/ethernet/faraday/ftgmac100.c
45865 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
45866 @@ -31,6 +31,8 @@
45867 #include <linux/netdevice.h>
45868 #include <linux/phy.h>
45869 #include <linux/platform_device.h>
45870 +#include <linux/interrupt.h>
45871 +#include <linux/irqreturn.h>
45872 #include <net/ip.h>
45873
45874 #include "ftgmac100.h"
45875 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
45876 index 8be5b40..081bc1b 100644
45877 --- a/drivers/net/ethernet/faraday/ftmac100.c
45878 +++ b/drivers/net/ethernet/faraday/ftmac100.c
45879 @@ -31,6 +31,8 @@
45880 #include <linux/module.h>
45881 #include <linux/netdevice.h>
45882 #include <linux/platform_device.h>
45883 +#include <linux/interrupt.h>
45884 +#include <linux/irqreturn.h>
45885
45886 #include "ftmac100.h"
45887
45888 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45889 index 5184e2a..acb28c3 100644
45890 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45891 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45892 @@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
45893 }
45894
45895 /* update the base incval used to calculate frequency adjustment */
45896 - ACCESS_ONCE(adapter->base_incval) = incval;
45897 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
45898 smp_mb();
45899
45900 /* need lock to prevent incorrect read while modifying cyclecounter */
45901 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45902 index fbe5363..266b4e3 100644
45903 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
45904 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45905 @@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45906 struct __vxge_hw_fifo *fifo;
45907 struct vxge_hw_fifo_config *config;
45908 u32 txdl_size, txdl_per_memblock;
45909 - struct vxge_hw_mempool_cbs fifo_mp_callback;
45910 + static struct vxge_hw_mempool_cbs fifo_mp_callback = {
45911 + .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
45912 + };
45913 +
45914 struct __vxge_hw_virtualpath *vpath;
45915
45916 if ((vp == NULL) || (attr == NULL)) {
45917 @@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45918 goto exit;
45919 }
45920
45921 - fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
45922 -
45923 fifo->mempool =
45924 __vxge_hw_mempool_create(vpath->hldev,
45925 fifo->config->memblock_size,
45926 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45927 index 918e18d..4ca3650 100644
45928 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45929 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45930 @@ -2086,7 +2086,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
45931 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
45932 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
45933 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
45934 - adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45935 + pax_open_kernel();
45936 + *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45937 + pax_close_kernel();
45938 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45939 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
45940 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
45941 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45942 index 734d286..b017bf5 100644
45943 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45944 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45945 @@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
45946 case QLCNIC_NON_PRIV_FUNC:
45947 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
45948 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45949 - nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45950 + pax_open_kernel();
45951 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45952 + pax_close_kernel();
45953 break;
45954 case QLCNIC_PRIV_FUNC:
45955 ahw->op_mode = QLCNIC_PRIV_FUNC;
45956 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
45957 - nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45958 + pax_open_kernel();
45959 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45960 + pax_close_kernel();
45961 break;
45962 case QLCNIC_MGMT_FUNC:
45963 ahw->op_mode = QLCNIC_MGMT_FUNC;
45964 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45965 - nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45966 + pax_open_kernel();
45967 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45968 + pax_close_kernel();
45969 break;
45970 default:
45971 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
45972 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45973 index 7763962..c3499a7 100644
45974 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45975 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45976 @@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
45977 struct qlcnic_dump_entry *entry;
45978 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
45979 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
45980 - static const struct qlcnic_dump_operations *fw_dump_ops;
45981 + const struct qlcnic_dump_operations *fw_dump_ops;
45982 struct device *dev = &adapter->pdev->dev;
45983 struct qlcnic_hardware_context *ahw;
45984 void *temp_buffer;
45985 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
45986 index c737f0e..32b8682 100644
45987 --- a/drivers/net/ethernet/realtek/r8169.c
45988 +++ b/drivers/net/ethernet/realtek/r8169.c
45989 @@ -759,22 +759,22 @@ struct rtl8169_private {
45990 struct mdio_ops {
45991 void (*write)(struct rtl8169_private *, int, int);
45992 int (*read)(struct rtl8169_private *, int);
45993 - } mdio_ops;
45994 + } __no_const mdio_ops;
45995
45996 struct pll_power_ops {
45997 void (*down)(struct rtl8169_private *);
45998 void (*up)(struct rtl8169_private *);
45999 - } pll_power_ops;
46000 + } __no_const pll_power_ops;
46001
46002 struct jumbo_ops {
46003 void (*enable)(struct rtl8169_private *);
46004 void (*disable)(struct rtl8169_private *);
46005 - } jumbo_ops;
46006 + } __no_const jumbo_ops;
46007
46008 struct csi_ops {
46009 void (*write)(struct rtl8169_private *, int, int);
46010 u32 (*read)(struct rtl8169_private *, int);
46011 - } csi_ops;
46012 + } __no_const csi_ops;
46013
46014 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
46015 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
46016 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
46017 index a124103..59c74f8 100644
46018 --- a/drivers/net/ethernet/sfc/ptp.c
46019 +++ b/drivers/net/ethernet/sfc/ptp.c
46020 @@ -541,7 +541,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
46021 ptp->start.dma_addr);
46022
46023 /* Clear flag that signals MC ready */
46024 - ACCESS_ONCE(*start) = 0;
46025 + ACCESS_ONCE_RW(*start) = 0;
46026 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
46027 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
46028 EFX_BUG_ON_PARANOID(rc);
46029 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
46030 index 50617c5..b13724c 100644
46031 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
46032 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
46033 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
46034
46035 writel(value, ioaddr + MMC_CNTRL);
46036
46037 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
46038 - MMC_CNTRL, value);
46039 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
46040 +// MMC_CNTRL, value);
46041 }
46042
46043 /* To mask all all interrupts.*/
46044 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
46045 index e6fe0d8..2b7d752 100644
46046 --- a/drivers/net/hyperv/hyperv_net.h
46047 +++ b/drivers/net/hyperv/hyperv_net.h
46048 @@ -101,7 +101,7 @@ struct rndis_device {
46049
46050 enum rndis_device_state state;
46051 bool link_state;
46052 - atomic_t new_req_id;
46053 + atomic_unchecked_t new_req_id;
46054
46055 spinlock_t request_lock;
46056 struct list_head req_list;
46057 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
46058 index 0775f0a..d4fb316 100644
46059 --- a/drivers/net/hyperv/rndis_filter.c
46060 +++ b/drivers/net/hyperv/rndis_filter.c
46061 @@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
46062 * template
46063 */
46064 set = &rndis_msg->msg.set_req;
46065 - set->req_id = atomic_inc_return(&dev->new_req_id);
46066 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
46067
46068 /* Add to the request list */
46069 spin_lock_irqsave(&dev->request_lock, flags);
46070 @@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
46071
46072 /* Setup the rndis set */
46073 halt = &request->request_msg.msg.halt_req;
46074 - halt->req_id = atomic_inc_return(&dev->new_req_id);
46075 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
46076
46077 /* Ignore return since this msg is optional. */
46078 rndis_filter_send_request(dev, request);
46079 diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
46080 index bf0d55e..82bcfbd1 100644
46081 --- a/drivers/net/ieee802154/fakehard.c
46082 +++ b/drivers/net/ieee802154/fakehard.c
46083 @@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
46084 phy->transmit_power = 0xbf;
46085
46086 dev->netdev_ops = &fake_ops;
46087 - dev->ml_priv = &fake_mlme;
46088 + dev->ml_priv = (void *)&fake_mlme;
46089
46090 priv = netdev_priv(dev);
46091 priv->phy = phy;
46092 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
46093 index d7e2907..1f8bfee 100644
46094 --- a/drivers/net/macvlan.c
46095 +++ b/drivers/net/macvlan.c
46096 @@ -993,13 +993,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
46097 int macvlan_link_register(struct rtnl_link_ops *ops)
46098 {
46099 /* common fields */
46100 - ops->priv_size = sizeof(struct macvlan_dev);
46101 - ops->validate = macvlan_validate;
46102 - ops->maxtype = IFLA_MACVLAN_MAX;
46103 - ops->policy = macvlan_policy;
46104 - ops->changelink = macvlan_changelink;
46105 - ops->get_size = macvlan_get_size;
46106 - ops->fill_info = macvlan_fill_info;
46107 + pax_open_kernel();
46108 + *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
46109 + *(void **)&ops->validate = macvlan_validate;
46110 + *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
46111 + *(const void **)&ops->policy = macvlan_policy;
46112 + *(void **)&ops->changelink = macvlan_changelink;
46113 + *(void **)&ops->get_size = macvlan_get_size;
46114 + *(void **)&ops->fill_info = macvlan_fill_info;
46115 + pax_close_kernel();
46116
46117 return rtnl_link_register(ops);
46118 };
46119 @@ -1054,7 +1056,7 @@ static int macvlan_device_event(struct notifier_block *unused,
46120 return NOTIFY_DONE;
46121 }
46122
46123 -static struct notifier_block macvlan_notifier_block __read_mostly = {
46124 +static struct notifier_block macvlan_notifier_block = {
46125 .notifier_call = macvlan_device_event,
46126 };
46127
46128 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
46129 index 2a89da0..c17fe1d 100644
46130 --- a/drivers/net/macvtap.c
46131 +++ b/drivers/net/macvtap.c
46132 @@ -1012,7 +1012,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
46133 }
46134
46135 ret = 0;
46136 - if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
46137 + if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
46138 put_user(q->flags, &ifr->ifr_flags))
46139 ret = -EFAULT;
46140 macvtap_put_vlan(vlan);
46141 @@ -1182,7 +1182,7 @@ static int macvtap_device_event(struct notifier_block *unused,
46142 return NOTIFY_DONE;
46143 }
46144
46145 -static struct notifier_block macvtap_notifier_block __read_mostly = {
46146 +static struct notifier_block macvtap_notifier_block = {
46147 .notifier_call = macvtap_device_event,
46148 };
46149
46150 diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
46151 index daec9b0..6428fcb 100644
46152 --- a/drivers/net/phy/mdio-bitbang.c
46153 +++ b/drivers/net/phy/mdio-bitbang.c
46154 @@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
46155 struct mdiobb_ctrl *ctrl = bus->priv;
46156
46157 module_put(ctrl->ops->owner);
46158 + mdiobus_unregister(bus);
46159 mdiobus_free(bus);
46160 }
46161 EXPORT_SYMBOL(free_mdio_bitbang);
46162 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
46163 index 72ff14b..11d442d 100644
46164 --- a/drivers/net/ppp/ppp_generic.c
46165 +++ b/drivers/net/ppp/ppp_generic.c
46166 @@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46167 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
46168 struct ppp_stats stats;
46169 struct ppp_comp_stats cstats;
46170 - char *vers;
46171
46172 switch (cmd) {
46173 case SIOCGPPPSTATS:
46174 @@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46175 break;
46176
46177 case SIOCGPPPVER:
46178 - vers = PPP_VERSION;
46179 - if (copy_to_user(addr, vers, strlen(vers) + 1))
46180 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
46181 break;
46182 err = 0;
46183 break;
46184 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
46185 index 1252d9c..80e660b 100644
46186 --- a/drivers/net/slip/slhc.c
46187 +++ b/drivers/net/slip/slhc.c
46188 @@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
46189 register struct tcphdr *thp;
46190 register struct iphdr *ip;
46191 register struct cstate *cs;
46192 - int len, hdrlen;
46193 + long len, hdrlen;
46194 unsigned char *cp = icp;
46195
46196 /* We've got a compressed packet; read the change byte */
46197 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
46198 index b75ae5b..953c157 100644
46199 --- a/drivers/net/team/team.c
46200 +++ b/drivers/net/team/team.c
46201 @@ -2865,7 +2865,7 @@ static int team_device_event(struct notifier_block *unused,
46202 return NOTIFY_DONE;
46203 }
46204
46205 -static struct notifier_block team_notifier_block __read_mostly = {
46206 +static struct notifier_block team_notifier_block = {
46207 .notifier_call = team_device_event,
46208 };
46209
46210 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
46211 index 55c9238..ebb6ee5 100644
46212 --- a/drivers/net/tun.c
46213 +++ b/drivers/net/tun.c
46214 @@ -1841,7 +1841,7 @@ unlock:
46215 }
46216
46217 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
46218 - unsigned long arg, int ifreq_len)
46219 + unsigned long arg, size_t ifreq_len)
46220 {
46221 struct tun_file *tfile = file->private_data;
46222 struct tun_struct *tun;
46223 @@ -1854,6 +1854,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
46224 unsigned int ifindex;
46225 int ret;
46226
46227 + if (ifreq_len > sizeof ifr)
46228 + return -EFAULT;
46229 +
46230 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
46231 if (copy_from_user(&ifr, argp, ifreq_len))
46232 return -EFAULT;
46233 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
46234 index 1a48234..a555339 100644
46235 --- a/drivers/net/usb/hso.c
46236 +++ b/drivers/net/usb/hso.c
46237 @@ -71,7 +71,7 @@
46238 #include <asm/byteorder.h>
46239 #include <linux/serial_core.h>
46240 #include <linux/serial.h>
46241 -
46242 +#include <asm/local.h>
46243
46244 #define MOD_AUTHOR "Option Wireless"
46245 #define MOD_DESCRIPTION "USB High Speed Option driver"
46246 @@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
46247 struct urb *urb;
46248
46249 urb = serial->rx_urb[0];
46250 - if (serial->port.count > 0) {
46251 + if (atomic_read(&serial->port.count) > 0) {
46252 count = put_rxbuf_data(urb, serial);
46253 if (count == -1)
46254 return;
46255 @@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
46256 DUMP1(urb->transfer_buffer, urb->actual_length);
46257
46258 /* Anyone listening? */
46259 - if (serial->port.count == 0)
46260 + if (atomic_read(&serial->port.count) == 0)
46261 return;
46262
46263 if (status == 0) {
46264 @@ -1297,8 +1297,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
46265 tty_port_tty_set(&serial->port, tty);
46266
46267 /* check for port already opened, if not set the termios */
46268 - serial->port.count++;
46269 - if (serial->port.count == 1) {
46270 + if (atomic_inc_return(&serial->port.count) == 1) {
46271 serial->rx_state = RX_IDLE;
46272 /* Force default termio settings */
46273 _hso_serial_set_termios(tty, NULL);
46274 @@ -1310,7 +1309,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
46275 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
46276 if (result) {
46277 hso_stop_serial_device(serial->parent);
46278 - serial->port.count--;
46279 + atomic_dec(&serial->port.count);
46280 kref_put(&serial->parent->ref, hso_serial_ref_free);
46281 }
46282 } else {
46283 @@ -1347,10 +1346,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
46284
46285 /* reset the rts and dtr */
46286 /* do the actual close */
46287 - serial->port.count--;
46288 + atomic_dec(&serial->port.count);
46289
46290 - if (serial->port.count <= 0) {
46291 - serial->port.count = 0;
46292 + if (atomic_read(&serial->port.count) <= 0) {
46293 + atomic_set(&serial->port.count, 0);
46294 tty_port_tty_set(&serial->port, NULL);
46295 if (!usb_gone)
46296 hso_stop_serial_device(serial->parent);
46297 @@ -1426,7 +1425,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
46298
46299 /* the actual setup */
46300 spin_lock_irqsave(&serial->serial_lock, flags);
46301 - if (serial->port.count)
46302 + if (atomic_read(&serial->port.count))
46303 _hso_serial_set_termios(tty, old);
46304 else
46305 tty->termios = *old;
46306 @@ -1895,7 +1894,7 @@ static void intr_callback(struct urb *urb)
46307 D1("Pending read interrupt on port %d\n", i);
46308 spin_lock(&serial->serial_lock);
46309 if (serial->rx_state == RX_IDLE &&
46310 - serial->port.count > 0) {
46311 + atomic_read(&serial->port.count) > 0) {
46312 /* Setup and send a ctrl req read on
46313 * port i */
46314 if (!serial->rx_urb_filled[0]) {
46315 @@ -3071,7 +3070,7 @@ static int hso_resume(struct usb_interface *iface)
46316 /* Start all serial ports */
46317 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
46318 if (serial_table[i] && (serial_table[i]->interface == iface)) {
46319 - if (dev2ser(serial_table[i])->port.count) {
46320 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
46321 result =
46322 hso_start_serial_device(serial_table[i], GFP_NOIO);
46323 hso_kick_transmit(dev2ser(serial_table[i]));
46324 diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
46325 index a79e9d3..78cd4fa 100644
46326 --- a/drivers/net/usb/sierra_net.c
46327 +++ b/drivers/net/usb/sierra_net.c
46328 @@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
46329 /* atomic counter partially included in MAC address to make sure 2 devices
46330 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
46331 */
46332 -static atomic_t iface_counter = ATOMIC_INIT(0);
46333 +static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
46334
46335 /*
46336 * SYNC Timer Delay definition used to set the expiry time
46337 @@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
46338 dev->net->netdev_ops = &sierra_net_device_ops;
46339
46340 /* change MAC addr to include, ifacenum, and to be unique */
46341 - dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
46342 + dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
46343 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
46344
46345 /* we will have to manufacture ethernet headers, prepare template */
46346 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
46347 index 0247973..088193a 100644
46348 --- a/drivers/net/vxlan.c
46349 +++ b/drivers/net/vxlan.c
46350 @@ -2615,7 +2615,7 @@ nla_put_failure:
46351 return -EMSGSIZE;
46352 }
46353
46354 -static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
46355 +static struct rtnl_link_ops vxlan_link_ops = {
46356 .kind = "vxlan",
46357 .maxtype = IFLA_VXLAN_MAX,
46358 .policy = vxlan_policy,
46359 diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
46360 index 5920c99..ff2e4a5 100644
46361 --- a/drivers/net/wan/lmc/lmc_media.c
46362 +++ b/drivers/net/wan/lmc/lmc_media.c
46363 @@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
46364 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
46365
46366 lmc_media_t lmc_ds3_media = {
46367 - lmc_ds3_init, /* special media init stuff */
46368 - lmc_ds3_default, /* reset to default state */
46369 - lmc_ds3_set_status, /* reset status to state provided */
46370 - lmc_dummy_set_1, /* set clock source */
46371 - lmc_dummy_set2_1, /* set line speed */
46372 - lmc_ds3_set_100ft, /* set cable length */
46373 - lmc_ds3_set_scram, /* set scrambler */
46374 - lmc_ds3_get_link_status, /* get link status */
46375 - lmc_dummy_set_1, /* set link status */
46376 - lmc_ds3_set_crc_length, /* set CRC length */
46377 - lmc_dummy_set_1, /* set T1 or E1 circuit type */
46378 - lmc_ds3_watchdog
46379 + .init = lmc_ds3_init, /* special media init stuff */
46380 + .defaults = lmc_ds3_default, /* reset to default state */
46381 + .set_status = lmc_ds3_set_status, /* reset status to state provided */
46382 + .set_clock_source = lmc_dummy_set_1, /* set clock source */
46383 + .set_speed = lmc_dummy_set2_1, /* set line speed */
46384 + .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
46385 + .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
46386 + .get_link_status = lmc_ds3_get_link_status, /* get link status */
46387 + .set_link_status = lmc_dummy_set_1, /* set link status */
46388 + .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
46389 + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
46390 + .watchdog = lmc_ds3_watchdog
46391 };
46392
46393 lmc_media_t lmc_hssi_media = {
46394 - lmc_hssi_init, /* special media init stuff */
46395 - lmc_hssi_default, /* reset to default state */
46396 - lmc_hssi_set_status, /* reset status to state provided */
46397 - lmc_hssi_set_clock, /* set clock source */
46398 - lmc_dummy_set2_1, /* set line speed */
46399 - lmc_dummy_set_1, /* set cable length */
46400 - lmc_dummy_set_1, /* set scrambler */
46401 - lmc_hssi_get_link_status, /* get link status */
46402 - lmc_hssi_set_link_status, /* set link status */
46403 - lmc_hssi_set_crc_length, /* set CRC length */
46404 - lmc_dummy_set_1, /* set T1 or E1 circuit type */
46405 - lmc_hssi_watchdog
46406 + .init = lmc_hssi_init, /* special media init stuff */
46407 + .defaults = lmc_hssi_default, /* reset to default state */
46408 + .set_status = lmc_hssi_set_status, /* reset status to state provided */
46409 + .set_clock_source = lmc_hssi_set_clock, /* set clock source */
46410 + .set_speed = lmc_dummy_set2_1, /* set line speed */
46411 + .set_cable_length = lmc_dummy_set_1, /* set cable length */
46412 + .set_scrambler = lmc_dummy_set_1, /* set scrambler */
46413 + .get_link_status = lmc_hssi_get_link_status, /* get link status */
46414 + .set_link_status = lmc_hssi_set_link_status, /* set link status */
46415 + .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
46416 + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
46417 + .watchdog = lmc_hssi_watchdog
46418 };
46419
46420 -lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
46421 - lmc_ssi_default, /* reset to default state */
46422 - lmc_ssi_set_status, /* reset status to state provided */
46423 - lmc_ssi_set_clock, /* set clock source */
46424 - lmc_ssi_set_speed, /* set line speed */
46425 - lmc_dummy_set_1, /* set cable length */
46426 - lmc_dummy_set_1, /* set scrambler */
46427 - lmc_ssi_get_link_status, /* get link status */
46428 - lmc_ssi_set_link_status, /* set link status */
46429 - lmc_ssi_set_crc_length, /* set CRC length */
46430 - lmc_dummy_set_1, /* set T1 or E1 circuit type */
46431 - lmc_ssi_watchdog
46432 +lmc_media_t lmc_ssi_media = {
46433 + .init = lmc_ssi_init, /* special media init stuff */
46434 + .defaults = lmc_ssi_default, /* reset to default state */
46435 + .set_status = lmc_ssi_set_status, /* reset status to state provided */
46436 + .set_clock_source = lmc_ssi_set_clock, /* set clock source */
46437 + .set_speed = lmc_ssi_set_speed, /* set line speed */
46438 + .set_cable_length = lmc_dummy_set_1, /* set cable length */
46439 + .set_scrambler = lmc_dummy_set_1, /* set scrambler */
46440 + .get_link_status = lmc_ssi_get_link_status, /* get link status */
46441 + .set_link_status = lmc_ssi_set_link_status, /* set link status */
46442 + .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
46443 + .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
46444 + .watchdog = lmc_ssi_watchdog
46445 };
46446
46447 lmc_media_t lmc_t1_media = {
46448 - lmc_t1_init, /* special media init stuff */
46449 - lmc_t1_default, /* reset to default state */
46450 - lmc_t1_set_status, /* reset status to state provided */
46451 - lmc_t1_set_clock, /* set clock source */
46452 - lmc_dummy_set2_1, /* set line speed */
46453 - lmc_dummy_set_1, /* set cable length */
46454 - lmc_dummy_set_1, /* set scrambler */
46455 - lmc_t1_get_link_status, /* get link status */
46456 - lmc_dummy_set_1, /* set link status */
46457 - lmc_t1_set_crc_length, /* set CRC length */
46458 - lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
46459 - lmc_t1_watchdog
46460 + .init = lmc_t1_init, /* special media init stuff */
46461 + .defaults = lmc_t1_default, /* reset to default state */
46462 + .set_status = lmc_t1_set_status, /* reset status to state provided */
46463 + .set_clock_source = lmc_t1_set_clock, /* set clock source */
46464 + .set_speed = lmc_dummy_set2_1, /* set line speed */
46465 + .set_cable_length = lmc_dummy_set_1, /* set cable length */
46466 + .set_scrambler = lmc_dummy_set_1, /* set scrambler */
46467 + .get_link_status = lmc_t1_get_link_status, /* get link status */
46468 + .set_link_status = lmc_dummy_set_1, /* set link status */
46469 + .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
46470 + .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
46471 + .watchdog = lmc_t1_watchdog
46472 };
46473
46474 static void
46475 diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
46476 index feacc3b..5bac0de 100644
46477 --- a/drivers/net/wan/z85230.c
46478 +++ b/drivers/net/wan/z85230.c
46479 @@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
46480
46481 struct z8530_irqhandler z8530_sync =
46482 {
46483 - z8530_rx,
46484 - z8530_tx,
46485 - z8530_status
46486 + .rx = z8530_rx,
46487 + .tx = z8530_tx,
46488 + .status = z8530_status
46489 };
46490
46491 EXPORT_SYMBOL(z8530_sync);
46492 @@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
46493 }
46494
46495 static struct z8530_irqhandler z8530_dma_sync = {
46496 - z8530_dma_rx,
46497 - z8530_dma_tx,
46498 - z8530_dma_status
46499 + .rx = z8530_dma_rx,
46500 + .tx = z8530_dma_tx,
46501 + .status = z8530_dma_status
46502 };
46503
46504 static struct z8530_irqhandler z8530_txdma_sync = {
46505 - z8530_rx,
46506 - z8530_dma_tx,
46507 - z8530_dma_status
46508 + .rx = z8530_rx,
46509 + .tx = z8530_dma_tx,
46510 + .status = z8530_dma_status
46511 };
46512
46513 /**
46514 @@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
46515
46516 struct z8530_irqhandler z8530_nop=
46517 {
46518 - z8530_rx_clear,
46519 - z8530_tx_clear,
46520 - z8530_status_clear
46521 + .rx = z8530_rx_clear,
46522 + .tx = z8530_tx_clear,
46523 + .status = z8530_status_clear
46524 };
46525
46526
46527 diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
46528 index 0b60295..b8bfa5b 100644
46529 --- a/drivers/net/wimax/i2400m/rx.c
46530 +++ b/drivers/net/wimax/i2400m/rx.c
46531 @@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
46532 if (i2400m->rx_roq == NULL)
46533 goto error_roq_alloc;
46534
46535 - rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
46536 + rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
46537 GFP_KERNEL);
46538 if (rd == NULL) {
46539 result = -ENOMEM;
46540 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
46541 index edf4b57..68b51c0 100644
46542 --- a/drivers/net/wireless/airo.c
46543 +++ b/drivers/net/wireless/airo.c
46544 @@ -7843,7 +7843,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
46545 struct airo_info *ai = dev->ml_priv;
46546 int ridcode;
46547 int enabled;
46548 - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
46549 + int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
46550 unsigned char *iobuf;
46551
46552 /* Only super-user can write RIDs */
46553 diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
46554 index 34c8a33..3261fdc 100644
46555 --- a/drivers/net/wireless/at76c50x-usb.c
46556 +++ b/drivers/net/wireless/at76c50x-usb.c
46557 @@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
46558 }
46559
46560 /* Convert timeout from the DFU status to jiffies */
46561 -static inline unsigned long at76_get_timeout(struct dfu_status *s)
46562 +static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
46563 {
46564 return msecs_to_jiffies((s->poll_timeout[2] << 16)
46565 | (s->poll_timeout[1] << 8)
46566 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
46567 index edae50b..b24278c 100644
46568 --- a/drivers/net/wireless/ath/ath10k/htc.c
46569 +++ b/drivers/net/wireless/ath/ath10k/htc.c
46570 @@ -842,7 +842,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
46571 /* registered target arrival callback from the HIF layer */
46572 int ath10k_htc_init(struct ath10k *ar)
46573 {
46574 - struct ath10k_hif_cb htc_callbacks;
46575 + static struct ath10k_hif_cb htc_callbacks = {
46576 + .rx_completion = ath10k_htc_rx_completion_handler,
46577 + .tx_completion = ath10k_htc_tx_completion_handler,
46578 + };
46579 struct ath10k_htc_ep *ep = NULL;
46580 struct ath10k_htc *htc = &ar->htc;
46581
46582 @@ -852,8 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
46583 ath10k_htc_reset_endpoint_states(htc);
46584
46585 /* setup HIF layer callbacks */
46586 - htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
46587 - htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
46588 htc->ar = ar;
46589
46590 /* Get HIF default pipe for HTC message exchange */
46591 diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
46592 index 4716d33..a688310 100644
46593 --- a/drivers/net/wireless/ath/ath10k/htc.h
46594 +++ b/drivers/net/wireless/ath/ath10k/htc.h
46595 @@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
46596
46597 struct ath10k_htc_ops {
46598 void (*target_send_suspend_complete)(struct ath10k *ar);
46599 -};
46600 +} __no_const;
46601
46602 struct ath10k_htc_ep_ops {
46603 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
46604 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
46605 void (*ep_tx_credits)(struct ath10k *);
46606 -};
46607 +} __no_const;
46608
46609 /* service connection information */
46610 struct ath10k_htc_svc_conn_req {
46611 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
46612 index a366d6b..b6f28f8 100644
46613 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
46614 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
46615 @@ -218,8 +218,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46616 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
46617 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
46618
46619 - ACCESS_ONCE(ads->ds_link) = i->link;
46620 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
46621 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
46622 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
46623
46624 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
46625 ctl6 = SM(i->keytype, AR_EncrType);
46626 @@ -233,26 +233,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46627
46628 if ((i->is_first || i->is_last) &&
46629 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
46630 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
46631 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
46632 | set11nTries(i->rates, 1)
46633 | set11nTries(i->rates, 2)
46634 | set11nTries(i->rates, 3)
46635 | (i->dur_update ? AR_DurUpdateEna : 0)
46636 | SM(0, AR_BurstDur);
46637
46638 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
46639 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
46640 | set11nRate(i->rates, 1)
46641 | set11nRate(i->rates, 2)
46642 | set11nRate(i->rates, 3);
46643 } else {
46644 - ACCESS_ONCE(ads->ds_ctl2) = 0;
46645 - ACCESS_ONCE(ads->ds_ctl3) = 0;
46646 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
46647 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
46648 }
46649
46650 if (!i->is_first) {
46651 - ACCESS_ONCE(ads->ds_ctl0) = 0;
46652 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
46653 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
46654 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
46655 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
46656 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
46657 return;
46658 }
46659
46660 @@ -277,7 +277,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46661 break;
46662 }
46663
46664 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
46665 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
46666 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46667 | SM(i->txpower, AR_XmitPower)
46668 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46669 @@ -287,19 +287,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46670 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
46671 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
46672
46673 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
46674 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
46675 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
46676 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
46677
46678 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
46679 return;
46680
46681 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46682 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46683 | set11nPktDurRTSCTS(i->rates, 1);
46684
46685 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46686 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46687 | set11nPktDurRTSCTS(i->rates, 3);
46688
46689 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46690 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46691 | set11nRateFlags(i->rates, 1)
46692 | set11nRateFlags(i->rates, 2)
46693 | set11nRateFlags(i->rates, 3)
46694 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46695 index f6c5c1b..6058354 100644
46696 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46697 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46698 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46699 (i->qcu << AR_TxQcuNum_S) | desc_len;
46700
46701 checksum += val;
46702 - ACCESS_ONCE(ads->info) = val;
46703 + ACCESS_ONCE_RW(ads->info) = val;
46704
46705 checksum += i->link;
46706 - ACCESS_ONCE(ads->link) = i->link;
46707 + ACCESS_ONCE_RW(ads->link) = i->link;
46708
46709 checksum += i->buf_addr[0];
46710 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
46711 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
46712 checksum += i->buf_addr[1];
46713 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
46714 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
46715 checksum += i->buf_addr[2];
46716 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
46717 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
46718 checksum += i->buf_addr[3];
46719 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
46720 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
46721
46722 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
46723 - ACCESS_ONCE(ads->ctl3) = val;
46724 + ACCESS_ONCE_RW(ads->ctl3) = val;
46725 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
46726 - ACCESS_ONCE(ads->ctl5) = val;
46727 + ACCESS_ONCE_RW(ads->ctl5) = val;
46728 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
46729 - ACCESS_ONCE(ads->ctl7) = val;
46730 + ACCESS_ONCE_RW(ads->ctl7) = val;
46731 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
46732 - ACCESS_ONCE(ads->ctl9) = val;
46733 + ACCESS_ONCE_RW(ads->ctl9) = val;
46734
46735 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
46736 - ACCESS_ONCE(ads->ctl10) = checksum;
46737 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
46738
46739 if (i->is_first || i->is_last) {
46740 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
46741 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
46742 | set11nTries(i->rates, 1)
46743 | set11nTries(i->rates, 2)
46744 | set11nTries(i->rates, 3)
46745 | (i->dur_update ? AR_DurUpdateEna : 0)
46746 | SM(0, AR_BurstDur);
46747
46748 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
46749 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
46750 | set11nRate(i->rates, 1)
46751 | set11nRate(i->rates, 2)
46752 | set11nRate(i->rates, 3);
46753 } else {
46754 - ACCESS_ONCE(ads->ctl13) = 0;
46755 - ACCESS_ONCE(ads->ctl14) = 0;
46756 + ACCESS_ONCE_RW(ads->ctl13) = 0;
46757 + ACCESS_ONCE_RW(ads->ctl14) = 0;
46758 }
46759
46760 ads->ctl20 = 0;
46761 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46762
46763 ctl17 = SM(i->keytype, AR_EncrType);
46764 if (!i->is_first) {
46765 - ACCESS_ONCE(ads->ctl11) = 0;
46766 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46767 - ACCESS_ONCE(ads->ctl15) = 0;
46768 - ACCESS_ONCE(ads->ctl16) = 0;
46769 - ACCESS_ONCE(ads->ctl17) = ctl17;
46770 - ACCESS_ONCE(ads->ctl18) = 0;
46771 - ACCESS_ONCE(ads->ctl19) = 0;
46772 + ACCESS_ONCE_RW(ads->ctl11) = 0;
46773 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46774 + ACCESS_ONCE_RW(ads->ctl15) = 0;
46775 + ACCESS_ONCE_RW(ads->ctl16) = 0;
46776 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46777 + ACCESS_ONCE_RW(ads->ctl18) = 0;
46778 + ACCESS_ONCE_RW(ads->ctl19) = 0;
46779 return;
46780 }
46781
46782 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46783 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46784 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46785 | SM(i->txpower, AR_XmitPower)
46786 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46787 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46788 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
46789 ctl12 |= SM(val, AR_PAPRDChainMask);
46790
46791 - ACCESS_ONCE(ads->ctl12) = ctl12;
46792 - ACCESS_ONCE(ads->ctl17) = ctl17;
46793 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
46794 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46795
46796 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46797 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46798 | set11nPktDurRTSCTS(i->rates, 1);
46799
46800 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46801 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46802 | set11nPktDurRTSCTS(i->rates, 3);
46803
46804 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
46805 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
46806 | set11nRateFlags(i->rates, 1)
46807 | set11nRateFlags(i->rates, 2)
46808 | set11nRateFlags(i->rates, 3)
46809 | SM(i->rtscts_rate, AR_RTSCTSRate);
46810
46811 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
46812 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
46813 }
46814
46815 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
46816 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
46817 index a2c9a5d..b52273e 100644
46818 --- a/drivers/net/wireless/ath/ath9k/hw.h
46819 +++ b/drivers/net/wireless/ath/ath9k/hw.h
46820 @@ -635,7 +635,7 @@ struct ath_hw_private_ops {
46821
46822 /* ANI */
46823 void (*ani_cache_ini_regs)(struct ath_hw *ah);
46824 -};
46825 +} __no_const;
46826
46827 /**
46828 * struct ath_spec_scan - parameters for Atheros spectral scan
46829 @@ -711,7 +711,7 @@ struct ath_hw_ops {
46830 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
46831 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
46832 #endif
46833 -};
46834 +} __no_const;
46835
46836 struct ath_nf_limits {
46837 s16 max;
46838 diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
46839 index 92190da..f3a4c4c 100644
46840 --- a/drivers/net/wireless/b43/phy_lp.c
46841 +++ b/drivers/net/wireless/b43/phy_lp.c
46842 @@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
46843 {
46844 struct ssb_bus *bus = dev->dev->sdev->bus;
46845
46846 - static const struct b206x_channel *chandata = NULL;
46847 + const struct b206x_channel *chandata = NULL;
46848 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
46849 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
46850 u16 old_comm15, scale;
46851 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
46852 index dea3b50..543db99 100644
46853 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
46854 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
46855 @@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46856 */
46857 if (il3945_mod_params.disable_hw_scan) {
46858 D_INFO("Disabling hw_scan\n");
46859 - il3945_mac_ops.hw_scan = NULL;
46860 + pax_open_kernel();
46861 + *(void **)&il3945_mac_ops.hw_scan = NULL;
46862 + pax_close_kernel();
46863 }
46864
46865 D_INFO("*** LOAD DRIVER ***\n");
46866 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46867 index d94f8ab..5b568c8 100644
46868 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46869 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46870 @@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
46871 {
46872 struct iwl_priv *priv = file->private_data;
46873 char buf[64];
46874 - int buf_size;
46875 + size_t buf_size;
46876 u32 offset, len;
46877
46878 memset(buf, 0, sizeof(buf));
46879 @@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
46880 struct iwl_priv *priv = file->private_data;
46881
46882 char buf[8];
46883 - int buf_size;
46884 + size_t buf_size;
46885 u32 reset_flag;
46886
46887 memset(buf, 0, sizeof(buf));
46888 @@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
46889 {
46890 struct iwl_priv *priv = file->private_data;
46891 char buf[8];
46892 - int buf_size;
46893 + size_t buf_size;
46894 int ht40;
46895
46896 memset(buf, 0, sizeof(buf));
46897 @@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
46898 {
46899 struct iwl_priv *priv = file->private_data;
46900 char buf[8];
46901 - int buf_size;
46902 + size_t buf_size;
46903 int value;
46904
46905 memset(buf, 0, sizeof(buf));
46906 @@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
46907 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
46908 DEBUGFS_READ_FILE_OPS(current_sleep_command);
46909
46910 -static const char *fmt_value = " %-30s %10u\n";
46911 -static const char *fmt_hex = " %-30s 0x%02X\n";
46912 -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
46913 -static const char *fmt_header =
46914 +static const char fmt_value[] = " %-30s %10u\n";
46915 +static const char fmt_hex[] = " %-30s 0x%02X\n";
46916 +static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
46917 +static const char fmt_header[] =
46918 "%-32s current cumulative delta max\n";
46919
46920 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
46921 @@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
46922 {
46923 struct iwl_priv *priv = file->private_data;
46924 char buf[8];
46925 - int buf_size;
46926 + size_t buf_size;
46927 int clear;
46928
46929 memset(buf, 0, sizeof(buf));
46930 @@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
46931 {
46932 struct iwl_priv *priv = file->private_data;
46933 char buf[8];
46934 - int buf_size;
46935 + size_t buf_size;
46936 int trace;
46937
46938 memset(buf, 0, sizeof(buf));
46939 @@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
46940 {
46941 struct iwl_priv *priv = file->private_data;
46942 char buf[8];
46943 - int buf_size;
46944 + size_t buf_size;
46945 int missed;
46946
46947 memset(buf, 0, sizeof(buf));
46948 @@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
46949
46950 struct iwl_priv *priv = file->private_data;
46951 char buf[8];
46952 - int buf_size;
46953 + size_t buf_size;
46954 int plcp;
46955
46956 memset(buf, 0, sizeof(buf));
46957 @@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
46958
46959 struct iwl_priv *priv = file->private_data;
46960 char buf[8];
46961 - int buf_size;
46962 + size_t buf_size;
46963 int flush;
46964
46965 memset(buf, 0, sizeof(buf));
46966 @@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
46967
46968 struct iwl_priv *priv = file->private_data;
46969 char buf[8];
46970 - int buf_size;
46971 + size_t buf_size;
46972 int rts;
46973
46974 if (!priv->cfg->ht_params)
46975 @@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
46976 {
46977 struct iwl_priv *priv = file->private_data;
46978 char buf[8];
46979 - int buf_size;
46980 + size_t buf_size;
46981
46982 memset(buf, 0, sizeof(buf));
46983 buf_size = min(count, sizeof(buf) - 1);
46984 @@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
46985 struct iwl_priv *priv = file->private_data;
46986 u32 event_log_flag;
46987 char buf[8];
46988 - int buf_size;
46989 + size_t buf_size;
46990
46991 /* check that the interface is up */
46992 if (!iwl_is_ready(priv))
46993 @@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
46994 struct iwl_priv *priv = file->private_data;
46995 char buf[8];
46996 u32 calib_disabled;
46997 - int buf_size;
46998 + size_t buf_size;
46999
47000 memset(buf, 0, sizeof(buf));
47001 buf_size = min(count, sizeof(buf) - 1);
47002 diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
47003 index 7aad766..06addb4 100644
47004 --- a/drivers/net/wireless/iwlwifi/dvm/main.c
47005 +++ b/drivers/net/wireless/iwlwifi/dvm/main.c
47006 @@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
47007 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
47008 {
47009 struct iwl_nvm_data *data = priv->nvm_data;
47010 - char *debug_msg;
47011 + static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
47012
47013 if (data->sku_cap_11n_enable &&
47014 !priv->cfg->ht_params) {
47015 @@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
47016 return -EINVAL;
47017 }
47018
47019 - debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
47020 IWL_DEBUG_INFO(priv, debug_msg,
47021 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
47022 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
47023 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
47024 index f53ef83..5e34bcb 100644
47025 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
47026 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
47027 @@ -1390,7 +1390,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
47028 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
47029
47030 char buf[8];
47031 - int buf_size;
47032 + size_t buf_size;
47033 u32 reset_flag;
47034
47035 memset(buf, 0, sizeof(buf));
47036 @@ -1411,7 +1411,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
47037 {
47038 struct iwl_trans *trans = file->private_data;
47039 char buf[8];
47040 - int buf_size;
47041 + size_t buf_size;
47042 int csr;
47043
47044 memset(buf, 0, sizeof(buf));
47045 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
47046 index a1b32ee..94b3c3d 100644
47047 --- a/drivers/net/wireless/mac80211_hwsim.c
47048 +++ b/drivers/net/wireless/mac80211_hwsim.c
47049 @@ -2224,25 +2224,19 @@ static int __init init_mac80211_hwsim(void)
47050
47051 if (channels > 1) {
47052 hwsim_if_comb.num_different_channels = channels;
47053 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
47054 - mac80211_hwsim_ops.cancel_hw_scan =
47055 - mac80211_hwsim_cancel_hw_scan;
47056 - mac80211_hwsim_ops.sw_scan_start = NULL;
47057 - mac80211_hwsim_ops.sw_scan_complete = NULL;
47058 - mac80211_hwsim_ops.remain_on_channel =
47059 - mac80211_hwsim_roc;
47060 - mac80211_hwsim_ops.cancel_remain_on_channel =
47061 - mac80211_hwsim_croc;
47062 - mac80211_hwsim_ops.add_chanctx =
47063 - mac80211_hwsim_add_chanctx;
47064 - mac80211_hwsim_ops.remove_chanctx =
47065 - mac80211_hwsim_remove_chanctx;
47066 - mac80211_hwsim_ops.change_chanctx =
47067 - mac80211_hwsim_change_chanctx;
47068 - mac80211_hwsim_ops.assign_vif_chanctx =
47069 - mac80211_hwsim_assign_vif_chanctx;
47070 - mac80211_hwsim_ops.unassign_vif_chanctx =
47071 - mac80211_hwsim_unassign_vif_chanctx;
47072 + pax_open_kernel();
47073 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
47074 + *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
47075 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
47076 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
47077 + *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
47078 + *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
47079 + *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
47080 + *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
47081 + *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
47082 + *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
47083 + *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
47084 + pax_close_kernel();
47085 }
47086
47087 spin_lock_init(&hwsim_radio_lock);
47088 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
47089 index 8169a85..7fa3b47 100644
47090 --- a/drivers/net/wireless/rndis_wlan.c
47091 +++ b/drivers/net/wireless/rndis_wlan.c
47092 @@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
47093
47094 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
47095
47096 - if (rts_threshold < 0 || rts_threshold > 2347)
47097 + if (rts_threshold > 2347)
47098 rts_threshold = 2347;
47099
47100 tmp = cpu_to_le32(rts_threshold);
47101 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
47102 index e4ba2ce..63d7417 100644
47103 --- a/drivers/net/wireless/rt2x00/rt2x00.h
47104 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
47105 @@ -377,7 +377,7 @@ struct rt2x00_intf {
47106 * for hardware which doesn't support hardware
47107 * sequence counting.
47108 */
47109 - atomic_t seqno;
47110 + atomic_unchecked_t seqno;
47111 };
47112
47113 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
47114 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
47115 index a5d38e8..d3c24ea 100644
47116 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
47117 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
47118 @@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
47119 * sequence counter given by mac80211.
47120 */
47121 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
47122 - seqno = atomic_add_return(0x10, &intf->seqno);
47123 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
47124 else
47125 - seqno = atomic_read(&intf->seqno);
47126 + seqno = atomic_read_unchecked(&intf->seqno);
47127
47128 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
47129 hdr->seq_ctrl |= cpu_to_le16(seqno);
47130 diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
47131 index e2b3d9c..67a5184 100644
47132 --- a/drivers/net/wireless/ti/wl1251/sdio.c
47133 +++ b/drivers/net/wireless/ti/wl1251/sdio.c
47134 @@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
47135
47136 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
47137
47138 - wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
47139 - wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
47140 + pax_open_kernel();
47141 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
47142 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
47143 + pax_close_kernel();
47144
47145 wl1251_info("using dedicated interrupt line");
47146 } else {
47147 - wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
47148 - wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
47149 + pax_open_kernel();
47150 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
47151 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
47152 + pax_close_kernel();
47153
47154 wl1251_info("using SDIO interrupt");
47155 }
47156 diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
47157 index be7129b..4161356 100644
47158 --- a/drivers/net/wireless/ti/wl12xx/main.c
47159 +++ b/drivers/net/wireless/ti/wl12xx/main.c
47160 @@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
47161 sizeof(wl->conf.mem));
47162
47163 /* read data preparation is only needed by wl127x */
47164 - wl->ops->prepare_read = wl127x_prepare_read;
47165 + pax_open_kernel();
47166 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
47167 + pax_close_kernel();
47168
47169 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
47170 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
47171 @@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
47172 sizeof(wl->conf.mem));
47173
47174 /* read data preparation is only needed by wl127x */
47175 - wl->ops->prepare_read = wl127x_prepare_read;
47176 + pax_open_kernel();
47177 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
47178 + pax_close_kernel();
47179
47180 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
47181 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
47182 diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
47183 index ec37b16..7e34d66 100644
47184 --- a/drivers/net/wireless/ti/wl18xx/main.c
47185 +++ b/drivers/net/wireless/ti/wl18xx/main.c
47186 @@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *wl)
47187 }
47188
47189 if (!checksum_param) {
47190 - wl18xx_ops.set_rx_csum = NULL;
47191 - wl18xx_ops.init_vif = NULL;
47192 + pax_open_kernel();
47193 + *(void **)&wl18xx_ops.set_rx_csum = NULL;
47194 + *(void **)&wl18xx_ops.init_vif = NULL;
47195 + pax_close_kernel();
47196 }
47197
47198 /* Enable 11a Band only if we have 5G antennas */
47199 diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
47200 index 84d94f5..bd6c61c 100644
47201 --- a/drivers/net/wireless/zd1211rw/zd_usb.c
47202 +++ b/drivers/net/wireless/zd1211rw/zd_usb.c
47203 @@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
47204 {
47205 struct zd_usb *usb = urb->context;
47206 struct zd_usb_interrupt *intr = &usb->intr;
47207 - int len;
47208 + unsigned int len;
47209 u16 int_num;
47210
47211 ZD_ASSERT(in_interrupt());
47212 diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
47213 index 7130864..00e64de 100644
47214 --- a/drivers/nfc/nfcwilink.c
47215 +++ b/drivers/nfc/nfcwilink.c
47216 @@ -498,7 +498,7 @@ static struct nci_ops nfcwilink_ops = {
47217
47218 static int nfcwilink_probe(struct platform_device *pdev)
47219 {
47220 - static struct nfcwilink *drv;
47221 + struct nfcwilink *drv;
47222 int rc;
47223 __u32 protocols;
47224
47225 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
47226 index d93b2b6..ae50401 100644
47227 --- a/drivers/oprofile/buffer_sync.c
47228 +++ b/drivers/oprofile/buffer_sync.c
47229 @@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
47230 if (cookie == NO_COOKIE)
47231 offset = pc;
47232 if (cookie == INVALID_COOKIE) {
47233 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
47234 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
47235 offset = pc;
47236 }
47237 if (cookie != last_cookie) {
47238 @@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
47239 /* add userspace sample */
47240
47241 if (!mm) {
47242 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
47243 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
47244 return 0;
47245 }
47246
47247 cookie = lookup_dcookie(mm, s->eip, &offset);
47248
47249 if (cookie == INVALID_COOKIE) {
47250 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
47251 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
47252 return 0;
47253 }
47254
47255 @@ -552,7 +552,7 @@ void sync_buffer(int cpu)
47256 /* ignore backtraces if failed to add a sample */
47257 if (state == sb_bt_start) {
47258 state = sb_bt_ignore;
47259 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
47260 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
47261 }
47262 }
47263 release_mm(mm);
47264 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
47265 index c0cc4e7..44d4e54 100644
47266 --- a/drivers/oprofile/event_buffer.c
47267 +++ b/drivers/oprofile/event_buffer.c
47268 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
47269 }
47270
47271 if (buffer_pos == buffer_size) {
47272 - atomic_inc(&oprofile_stats.event_lost_overflow);
47273 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
47274 return;
47275 }
47276
47277 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
47278 index ed2c3ec..deda85a 100644
47279 --- a/drivers/oprofile/oprof.c
47280 +++ b/drivers/oprofile/oprof.c
47281 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
47282 if (oprofile_ops.switch_events())
47283 return;
47284
47285 - atomic_inc(&oprofile_stats.multiplex_counter);
47286 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
47287 start_switch_worker();
47288 }
47289
47290 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
47291 index ee2cfce..7f8f699 100644
47292 --- a/drivers/oprofile/oprofile_files.c
47293 +++ b/drivers/oprofile/oprofile_files.c
47294 @@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
47295
47296 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
47297
47298 -static ssize_t timeout_read(struct file *file, char __user *buf,
47299 +static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
47300 size_t count, loff_t *offset)
47301 {
47302 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
47303 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
47304 index 59659ce..6c860a0 100644
47305 --- a/drivers/oprofile/oprofile_stats.c
47306 +++ b/drivers/oprofile/oprofile_stats.c
47307 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
47308 cpu_buf->sample_invalid_eip = 0;
47309 }
47310
47311 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
47312 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
47313 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
47314 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
47315 - atomic_set(&oprofile_stats.multiplex_counter, 0);
47316 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
47317 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
47318 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
47319 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
47320 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
47321 }
47322
47323
47324 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
47325 index 1fc622b..8c48fc3 100644
47326 --- a/drivers/oprofile/oprofile_stats.h
47327 +++ b/drivers/oprofile/oprofile_stats.h
47328 @@ -13,11 +13,11 @@
47329 #include <linux/atomic.h>
47330
47331 struct oprofile_stat_struct {
47332 - atomic_t sample_lost_no_mm;
47333 - atomic_t sample_lost_no_mapping;
47334 - atomic_t bt_lost_no_mapping;
47335 - atomic_t event_lost_overflow;
47336 - atomic_t multiplex_counter;
47337 + atomic_unchecked_t sample_lost_no_mm;
47338 + atomic_unchecked_t sample_lost_no_mapping;
47339 + atomic_unchecked_t bt_lost_no_mapping;
47340 + atomic_unchecked_t event_lost_overflow;
47341 + atomic_unchecked_t multiplex_counter;
47342 };
47343
47344 extern struct oprofile_stat_struct oprofile_stats;
47345 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
47346 index 3f49345..c750d0b 100644
47347 --- a/drivers/oprofile/oprofilefs.c
47348 +++ b/drivers/oprofile/oprofilefs.c
47349 @@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
47350
47351 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
47352 {
47353 - atomic_t *val = file->private_data;
47354 - return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
47355 + atomic_unchecked_t *val = file->private_data;
47356 + return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
47357 }
47358
47359
47360 @@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
47361
47362
47363 int oprofilefs_create_ro_atomic(struct dentry *root,
47364 - char const *name, atomic_t *val)
47365 + char const *name, atomic_unchecked_t *val)
47366 {
47367 return __oprofilefs_create_file(root, name,
47368 &atomic_ro_fops, 0444, val);
47369 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
47370 index 61be1d9..dec05d7 100644
47371 --- a/drivers/oprofile/timer_int.c
47372 +++ b/drivers/oprofile/timer_int.c
47373 @@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
47374 return NOTIFY_OK;
47375 }
47376
47377 -static struct notifier_block __refdata oprofile_cpu_notifier = {
47378 +static struct notifier_block oprofile_cpu_notifier = {
47379 .notifier_call = oprofile_cpu_notify,
47380 };
47381
47382 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
47383 index 92ed045..62d39bd7 100644
47384 --- a/drivers/parport/procfs.c
47385 +++ b/drivers/parport/procfs.c
47386 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
47387
47388 *ppos += len;
47389
47390 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
47391 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
47392 }
47393
47394 #ifdef CONFIG_PARPORT_1284
47395 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
47396
47397 *ppos += len;
47398
47399 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
47400 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
47401 }
47402 #endif /* IEEE1284.3 support. */
47403
47404 diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
47405 index ecfac7e..41be7028 100644
47406 --- a/drivers/pci/hotplug/acpiphp_ibm.c
47407 +++ b/drivers/pci/hotplug/acpiphp_ibm.c
47408 @@ -453,7 +453,9 @@ static int __init ibm_acpiphp_init(void)
47409 goto init_cleanup;
47410 }
47411
47412 - ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
47413 + pax_open_kernel();
47414 + *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
47415 + pax_close_kernel();
47416 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
47417
47418 return retval;
47419 diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
47420 index 7536eef..52dc8fa 100644
47421 --- a/drivers/pci/hotplug/cpcihp_generic.c
47422 +++ b/drivers/pci/hotplug/cpcihp_generic.c
47423 @@ -73,7 +73,6 @@ static u16 port;
47424 static unsigned int enum_bit;
47425 static u8 enum_mask;
47426
47427 -static struct cpci_hp_controller_ops generic_hpc_ops;
47428 static struct cpci_hp_controller generic_hpc;
47429
47430 static int __init validate_parameters(void)
47431 @@ -139,6 +138,10 @@ static int query_enum(void)
47432 return ((value & enum_mask) == enum_mask);
47433 }
47434
47435 +static struct cpci_hp_controller_ops generic_hpc_ops = {
47436 + .query_enum = query_enum,
47437 +};
47438 +
47439 static int __init cpcihp_generic_init(void)
47440 {
47441 int status;
47442 @@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
47443 pci_dev_put(dev);
47444
47445 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
47446 - generic_hpc_ops.query_enum = query_enum;
47447 generic_hpc.ops = &generic_hpc_ops;
47448
47449 status = cpci_hp_register_controller(&generic_hpc);
47450 diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
47451 index e8c4a7c..7046f5c 100644
47452 --- a/drivers/pci/hotplug/cpcihp_zt5550.c
47453 +++ b/drivers/pci/hotplug/cpcihp_zt5550.c
47454 @@ -59,7 +59,6 @@
47455 /* local variables */
47456 static bool debug;
47457 static bool poll;
47458 -static struct cpci_hp_controller_ops zt5550_hpc_ops;
47459 static struct cpci_hp_controller zt5550_hpc;
47460
47461 /* Primary cPCI bus bridge device */
47462 @@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
47463 return 0;
47464 }
47465
47466 +static struct cpci_hp_controller_ops zt5550_hpc_ops = {
47467 + .query_enum = zt5550_hc_query_enum,
47468 +};
47469 +
47470 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
47471 {
47472 int status;
47473 @@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
47474 dbg("returned from zt5550_hc_config");
47475
47476 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
47477 - zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
47478 zt5550_hpc.ops = &zt5550_hpc_ops;
47479 if(!poll) {
47480 zt5550_hpc.irq = hc_dev->irq;
47481 zt5550_hpc.irq_flags = IRQF_SHARED;
47482 zt5550_hpc.dev_id = hc_dev;
47483
47484 - zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
47485 - zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
47486 - zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
47487 + pax_open_kernel();
47488 + *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
47489 + *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
47490 + *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
47491 + pax_close_kernel();
47492 } else {
47493 info("using ENUM# polling mode");
47494 }
47495 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
47496 index 76ba8a1..20ca857 100644
47497 --- a/drivers/pci/hotplug/cpqphp_nvram.c
47498 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
47499 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
47500
47501 void compaq_nvram_init (void __iomem *rom_start)
47502 {
47503 +
47504 +#ifndef CONFIG_PAX_KERNEXEC
47505 if (rom_start) {
47506 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
47507 }
47508 +#endif
47509 +
47510 dbg("int15 entry = %p\n", compaq_int15_entry_point);
47511
47512 /* initialize our int15 lock */
47513 diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
47514 index cfa92a9..29539c5 100644
47515 --- a/drivers/pci/hotplug/pci_hotplug_core.c
47516 +++ b/drivers/pci/hotplug/pci_hotplug_core.c
47517 @@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
47518 return -EINVAL;
47519 }
47520
47521 - slot->ops->owner = owner;
47522 - slot->ops->mod_name = mod_name;
47523 + pax_open_kernel();
47524 + *(struct module **)&slot->ops->owner = owner;
47525 + *(const char **)&slot->ops->mod_name = mod_name;
47526 + pax_close_kernel();
47527
47528 mutex_lock(&pci_hp_mutex);
47529 /*
47530 diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
47531 index bbd48bb..6907ef4 100644
47532 --- a/drivers/pci/hotplug/pciehp_core.c
47533 +++ b/drivers/pci/hotplug/pciehp_core.c
47534 @@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
47535 struct slot *slot = ctrl->slot;
47536 struct hotplug_slot *hotplug = NULL;
47537 struct hotplug_slot_info *info = NULL;
47538 - struct hotplug_slot_ops *ops = NULL;
47539 + hotplug_slot_ops_no_const *ops = NULL;
47540 char name[SLOT_NAME_SIZE];
47541 int retval = -ENOMEM;
47542
47543 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
47544 index c91e6c1..5c723ef 100644
47545 --- a/drivers/pci/pci-sysfs.c
47546 +++ b/drivers/pci/pci-sysfs.c
47547 @@ -1117,7 +1117,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
47548 {
47549 /* allocate attribute structure, piggyback attribute name */
47550 int name_len = write_combine ? 13 : 10;
47551 - struct bin_attribute *res_attr;
47552 + bin_attribute_no_const *res_attr;
47553 int retval;
47554
47555 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
47556 @@ -1302,7 +1302,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
47557 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
47558 {
47559 int retval;
47560 - struct bin_attribute *attr;
47561 + bin_attribute_no_const *attr;
47562
47563 /* If the device has VPD, try to expose it in sysfs. */
47564 if (dev->vpd) {
47565 @@ -1349,7 +1349,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
47566 {
47567 int retval;
47568 int rom_size = 0;
47569 - struct bin_attribute *attr;
47570 + bin_attribute_no_const *attr;
47571
47572 if (!sysfs_initialized)
47573 return -EACCES;
47574 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
47575 index 9c91ecc..bda4796 100644
47576 --- a/drivers/pci/pci.h
47577 +++ b/drivers/pci/pci.h
47578 @@ -95,7 +95,7 @@ struct pci_vpd_ops {
47579 struct pci_vpd {
47580 unsigned int len;
47581 const struct pci_vpd_ops *ops;
47582 - struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
47583 + bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
47584 };
47585
47586 int pci_vpd_pci22_init(struct pci_dev *dev);
47587 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
47588 index f1272dc..e92a1ac 100644
47589 --- a/drivers/pci/pcie/aspm.c
47590 +++ b/drivers/pci/pcie/aspm.c
47591 @@ -27,9 +27,9 @@
47592 #define MODULE_PARAM_PREFIX "pcie_aspm."
47593
47594 /* Note: those are not register definitions */
47595 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
47596 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
47597 -#define ASPM_STATE_L1 (4) /* L1 state */
47598 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
47599 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
47600 +#define ASPM_STATE_L1 (4U) /* L1 state */
47601 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
47602 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
47603
47604 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
47605 index 38e403d..a2ce55a 100644
47606 --- a/drivers/pci/probe.c
47607 +++ b/drivers/pci/probe.c
47608 @@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
47609 struct pci_bus_region region, inverted_region;
47610 bool bar_too_big = false, bar_disabled = false;
47611
47612 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
47613 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
47614
47615 /* No printks while decoding is disabled! */
47616 if (!dev->mmio_always_on) {
47617 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
47618 index 46d1378..30e452b 100644
47619 --- a/drivers/pci/proc.c
47620 +++ b/drivers/pci/proc.c
47621 @@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
47622 static int __init pci_proc_init(void)
47623 {
47624 struct pci_dev *dev = NULL;
47625 +
47626 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47627 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47628 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
47629 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47630 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47631 +#endif
47632 +#else
47633 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
47634 +#endif
47635 proc_create("devices", 0, proc_bus_pci_dir,
47636 &proc_bus_pci_dev_operations);
47637 proc_initialized = 1;
47638 diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
47639 index 3e5b4497..dcdfb70 100644
47640 --- a/drivers/platform/chrome/chromeos_laptop.c
47641 +++ b/drivers/platform/chrome/chromeos_laptop.c
47642 @@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
47643 return 0;
47644 }
47645
47646 -static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
47647 +static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
47648 {
47649 .ident = "Samsung Series 5 550 - Touchpad",
47650 .matches = {
47651 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
47652 index 19c313b..ed28b38 100644
47653 --- a/drivers/platform/x86/asus-wmi.c
47654 +++ b/drivers/platform/x86/asus-wmi.c
47655 @@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m, void *data)
47656 int err;
47657 u32 retval = -1;
47658
47659 +#ifdef CONFIG_GRKERNSEC_KMEM
47660 + return -EPERM;
47661 +#endif
47662 +
47663 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
47664
47665 if (err < 0)
47666 @@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m, void *data)
47667 int err;
47668 u32 retval = -1;
47669
47670 +#ifdef CONFIG_GRKERNSEC_KMEM
47671 + return -EPERM;
47672 +#endif
47673 +
47674 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
47675 &retval);
47676
47677 @@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m, void *data)
47678 union acpi_object *obj;
47679 acpi_status status;
47680
47681 +#ifdef CONFIG_GRKERNSEC_KMEM
47682 + return -EPERM;
47683 +#endif
47684 +
47685 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
47686 1, asus->debug.method_id,
47687 &input, &output);
47688 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
47689 index 62f8030..c7f2a45 100644
47690 --- a/drivers/platform/x86/msi-laptop.c
47691 +++ b/drivers/platform/x86/msi-laptop.c
47692 @@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
47693
47694 if (!quirks->ec_read_only) {
47695 /* allow userland write sysfs file */
47696 - dev_attr_bluetooth.store = store_bluetooth;
47697 - dev_attr_wlan.store = store_wlan;
47698 - dev_attr_threeg.store = store_threeg;
47699 - dev_attr_bluetooth.attr.mode |= S_IWUSR;
47700 - dev_attr_wlan.attr.mode |= S_IWUSR;
47701 - dev_attr_threeg.attr.mode |= S_IWUSR;
47702 + pax_open_kernel();
47703 + *(void **)&dev_attr_bluetooth.store = store_bluetooth;
47704 + *(void **)&dev_attr_wlan.store = store_wlan;
47705 + *(void **)&dev_attr_threeg.store = store_threeg;
47706 + *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
47707 + *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
47708 + *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
47709 + pax_close_kernel();
47710 }
47711
47712 /* disable hardware control by fn key */
47713 diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
47714 index 70222f2..8c8ce66 100644
47715 --- a/drivers/platform/x86/msi-wmi.c
47716 +++ b/drivers/platform/x86/msi-wmi.c
47717 @@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
47718 static void msi_wmi_notify(u32 value, void *context)
47719 {
47720 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
47721 - static struct key_entry *key;
47722 + struct key_entry *key;
47723 union acpi_object *obj;
47724 acpi_status status;
47725
47726 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
47727 index fb233ae..23a325c 100644
47728 --- a/drivers/platform/x86/sony-laptop.c
47729 +++ b/drivers/platform/x86/sony-laptop.c
47730 @@ -2453,7 +2453,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
47731 }
47732
47733 /* High speed charging function */
47734 -static struct device_attribute *hsc_handle;
47735 +static device_attribute_no_const *hsc_handle;
47736
47737 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
47738 struct device_attribute *attr,
47739 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
47740 index 58b0274..6704626 100644
47741 --- a/drivers/platform/x86/thinkpad_acpi.c
47742 +++ b/drivers/platform/x86/thinkpad_acpi.c
47743 @@ -2100,7 +2100,7 @@ static int hotkey_mask_get(void)
47744 return 0;
47745 }
47746
47747 -void static hotkey_mask_warn_incomplete_mask(void)
47748 +static void hotkey_mask_warn_incomplete_mask(void)
47749 {
47750 /* log only what the user can fix... */
47751 const u32 wantedmask = hotkey_driver_mask &
47752 @@ -2327,11 +2327,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
47753 }
47754 }
47755
47756 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47757 - struct tp_nvram_state *newn,
47758 - const u32 event_mask)
47759 -{
47760 -
47761 #define TPACPI_COMPARE_KEY(__scancode, __member) \
47762 do { \
47763 if ((event_mask & (1 << __scancode)) && \
47764 @@ -2345,36 +2340,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47765 tpacpi_hotkey_send_key(__scancode); \
47766 } while (0)
47767
47768 - void issue_volchange(const unsigned int oldvol,
47769 - const unsigned int newvol)
47770 - {
47771 - unsigned int i = oldvol;
47772 +static void issue_volchange(const unsigned int oldvol,
47773 + const unsigned int newvol,
47774 + const u32 event_mask)
47775 +{
47776 + unsigned int i = oldvol;
47777
47778 - while (i > newvol) {
47779 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47780 - i--;
47781 - }
47782 - while (i < newvol) {
47783 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47784 - i++;
47785 - }
47786 + while (i > newvol) {
47787 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47788 + i--;
47789 }
47790 + while (i < newvol) {
47791 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47792 + i++;
47793 + }
47794 +}
47795
47796 - void issue_brightnesschange(const unsigned int oldbrt,
47797 - const unsigned int newbrt)
47798 - {
47799 - unsigned int i = oldbrt;
47800 +static void issue_brightnesschange(const unsigned int oldbrt,
47801 + const unsigned int newbrt,
47802 + const u32 event_mask)
47803 +{
47804 + unsigned int i = oldbrt;
47805
47806 - while (i > newbrt) {
47807 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47808 - i--;
47809 - }
47810 - while (i < newbrt) {
47811 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47812 - i++;
47813 - }
47814 + while (i > newbrt) {
47815 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47816 + i--;
47817 + }
47818 + while (i < newbrt) {
47819 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47820 + i++;
47821 }
47822 +}
47823
47824 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47825 + struct tp_nvram_state *newn,
47826 + const u32 event_mask)
47827 +{
47828 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
47829 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
47830 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
47831 @@ -2408,7 +2409,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47832 oldn->volume_level != newn->volume_level) {
47833 /* recently muted, or repeated mute keypress, or
47834 * multiple presses ending in mute */
47835 - issue_volchange(oldn->volume_level, newn->volume_level);
47836 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47837 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
47838 }
47839 } else {
47840 @@ -2418,7 +2419,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47841 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47842 }
47843 if (oldn->volume_level != newn->volume_level) {
47844 - issue_volchange(oldn->volume_level, newn->volume_level);
47845 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47846 } else if (oldn->volume_toggle != newn->volume_toggle) {
47847 /* repeated vol up/down keypress at end of scale ? */
47848 if (newn->volume_level == 0)
47849 @@ -2431,7 +2432,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47850 /* handle brightness */
47851 if (oldn->brightness_level != newn->brightness_level) {
47852 issue_brightnesschange(oldn->brightness_level,
47853 - newn->brightness_level);
47854 + newn->brightness_level,
47855 + event_mask);
47856 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
47857 /* repeated key presses that didn't change state */
47858 if (newn->brightness_level == 0)
47859 @@ -2440,10 +2442,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47860 && !tp_features.bright_unkfw)
47861 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47862 }
47863 +}
47864
47865 #undef TPACPI_COMPARE_KEY
47866 #undef TPACPI_MAY_SEND_KEY
47867 -}
47868
47869 /*
47870 * Polling driver
47871 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
47872 index 769d265..a3a05ca 100644
47873 --- a/drivers/pnp/pnpbios/bioscalls.c
47874 +++ b/drivers/pnp/pnpbios/bioscalls.c
47875 @@ -58,7 +58,7 @@ do { \
47876 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
47877 } while(0)
47878
47879 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
47880 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
47881 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
47882
47883 /*
47884 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47885
47886 cpu = get_cpu();
47887 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
47888 +
47889 + pax_open_kernel();
47890 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
47891 + pax_close_kernel();
47892
47893 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
47894 spin_lock_irqsave(&pnp_bios_lock, flags);
47895 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47896 :"memory");
47897 spin_unlock_irqrestore(&pnp_bios_lock, flags);
47898
47899 + pax_open_kernel();
47900 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
47901 + pax_close_kernel();
47902 +
47903 put_cpu();
47904
47905 /* If we get here and this is set then the PnP BIOS faulted on us. */
47906 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
47907 return status;
47908 }
47909
47910 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
47911 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
47912 {
47913 int i;
47914
47915 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47916 pnp_bios_callpoint.offset = header->fields.pm16offset;
47917 pnp_bios_callpoint.segment = PNP_CS16;
47918
47919 + pax_open_kernel();
47920 +
47921 for_each_possible_cpu(i) {
47922 struct desc_struct *gdt = get_cpu_gdt_table(i);
47923 if (!gdt)
47924 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47925 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
47926 (unsigned long)__va(header->fields.pm16dseg));
47927 }
47928 +
47929 + pax_close_kernel();
47930 }
47931 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
47932 index d95e101..67f0c3f 100644
47933 --- a/drivers/pnp/resource.c
47934 +++ b/drivers/pnp/resource.c
47935 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
47936 return 1;
47937
47938 /* check if the resource is valid */
47939 - if (*irq < 0 || *irq > 15)
47940 + if (*irq > 15)
47941 return 0;
47942
47943 /* check if the resource is reserved */
47944 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
47945 return 1;
47946
47947 /* check if the resource is valid */
47948 - if (*dma < 0 || *dma == 4 || *dma > 7)
47949 + if (*dma == 4 || *dma > 7)
47950 return 0;
47951
47952 /* check if the resource is reserved */
47953 diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
47954 index 0c52e2a..3421ab7 100644
47955 --- a/drivers/power/pda_power.c
47956 +++ b/drivers/power/pda_power.c
47957 @@ -37,7 +37,11 @@ static int polling;
47958
47959 #if IS_ENABLED(CONFIG_USB_PHY)
47960 static struct usb_phy *transceiver;
47961 -static struct notifier_block otg_nb;
47962 +static int otg_handle_notification(struct notifier_block *nb,
47963 + unsigned long event, void *unused);
47964 +static struct notifier_block otg_nb = {
47965 + .notifier_call = otg_handle_notification
47966 +};
47967 #endif
47968
47969 static struct regulator *ac_draw;
47970 @@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
47971
47972 #if IS_ENABLED(CONFIG_USB_PHY)
47973 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
47974 - otg_nb.notifier_call = otg_handle_notification;
47975 ret = usb_register_notifier(transceiver, &otg_nb);
47976 if (ret) {
47977 dev_err(dev, "failure to register otg notifier\n");
47978 diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
47979 index cc439fd..8fa30df 100644
47980 --- a/drivers/power/power_supply.h
47981 +++ b/drivers/power/power_supply.h
47982 @@ -16,12 +16,12 @@ struct power_supply;
47983
47984 #ifdef CONFIG_SYSFS
47985
47986 -extern void power_supply_init_attrs(struct device_type *dev_type);
47987 +extern void power_supply_init_attrs(void);
47988 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
47989
47990 #else
47991
47992 -static inline void power_supply_init_attrs(struct device_type *dev_type) {}
47993 +static inline void power_supply_init_attrs(void) {}
47994 #define power_supply_uevent NULL
47995
47996 #endif /* CONFIG_SYSFS */
47997 diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
47998 index 557af94..84dc1fe 100644
47999 --- a/drivers/power/power_supply_core.c
48000 +++ b/drivers/power/power_supply_core.c
48001 @@ -24,7 +24,10 @@
48002 struct class *power_supply_class;
48003 EXPORT_SYMBOL_GPL(power_supply_class);
48004
48005 -static struct device_type power_supply_dev_type;
48006 +extern const struct attribute_group *power_supply_attr_groups[];
48007 +static struct device_type power_supply_dev_type = {
48008 + .groups = power_supply_attr_groups,
48009 +};
48010
48011 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
48012 struct power_supply *supply)
48013 @@ -584,7 +587,7 @@ static int __init power_supply_class_init(void)
48014 return PTR_ERR(power_supply_class);
48015
48016 power_supply_class->dev_uevent = power_supply_uevent;
48017 - power_supply_init_attrs(&power_supply_dev_type);
48018 + power_supply_init_attrs();
48019
48020 return 0;
48021 }
48022 diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
48023 index 44420d1..967126e 100644
48024 --- a/drivers/power/power_supply_sysfs.c
48025 +++ b/drivers/power/power_supply_sysfs.c
48026 @@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
48027 .is_visible = power_supply_attr_is_visible,
48028 };
48029
48030 -static const struct attribute_group *power_supply_attr_groups[] = {
48031 +const struct attribute_group *power_supply_attr_groups[] = {
48032 &power_supply_attr_group,
48033 NULL,
48034 };
48035
48036 -void power_supply_init_attrs(struct device_type *dev_type)
48037 +void power_supply_init_attrs(void)
48038 {
48039 int i;
48040
48041 - dev_type->groups = power_supply_attr_groups;
48042 -
48043 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
48044 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
48045 }
48046 diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
48047 index 84419af..268ede8 100644
48048 --- a/drivers/powercap/powercap_sys.c
48049 +++ b/drivers/powercap/powercap_sys.c
48050 @@ -154,8 +154,77 @@ struct powercap_constraint_attr {
48051 struct device_attribute name_attr;
48052 };
48053
48054 +static ssize_t show_constraint_name(struct device *dev,
48055 + struct device_attribute *dev_attr,
48056 + char *buf);
48057 +
48058 static struct powercap_constraint_attr
48059 - constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
48060 + constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
48061 + [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
48062 + .power_limit_attr = {
48063 + .attr = {
48064 + .name = NULL,
48065 + .mode = S_IWUSR | S_IRUGO
48066 + },
48067 + .show = show_constraint_power_limit_uw,
48068 + .store = store_constraint_power_limit_uw
48069 + },
48070 +
48071 + .time_window_attr = {
48072 + .attr = {
48073 + .name = NULL,
48074 + .mode = S_IWUSR | S_IRUGO
48075 + },
48076 + .show = show_constraint_time_window_us,
48077 + .store = store_constraint_time_window_us
48078 + },
48079 +
48080 + .max_power_attr = {
48081 + .attr = {
48082 + .name = NULL,
48083 + .mode = S_IRUGO
48084 + },
48085 + .show = show_constraint_max_power_uw,
48086 + .store = NULL
48087 + },
48088 +
48089 + .min_power_attr = {
48090 + .attr = {
48091 + .name = NULL,
48092 + .mode = S_IRUGO
48093 + },
48094 + .show = show_constraint_min_power_uw,
48095 + .store = NULL
48096 + },
48097 +
48098 + .max_time_window_attr = {
48099 + .attr = {
48100 + .name = NULL,
48101 + .mode = S_IRUGO
48102 + },
48103 + .show = show_constraint_max_time_window_us,
48104 + .store = NULL
48105 + },
48106 +
48107 + .min_time_window_attr = {
48108 + .attr = {
48109 + .name = NULL,
48110 + .mode = S_IRUGO
48111 + },
48112 + .show = show_constraint_min_time_window_us,
48113 + .store = NULL
48114 + },
48115 +
48116 + .name_attr = {
48117 + .attr = {
48118 + .name = NULL,
48119 + .mode = S_IRUGO
48120 + },
48121 + .show = show_constraint_name,
48122 + .store = NULL
48123 + }
48124 + }
48125 +};
48126
48127 /* A list of powercap control_types */
48128 static LIST_HEAD(powercap_cntrl_list);
48129 @@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
48130 }
48131
48132 static int create_constraint_attribute(int id, const char *name,
48133 - int mode,
48134 - struct device_attribute *dev_attr,
48135 - ssize_t (*show)(struct device *,
48136 - struct device_attribute *, char *),
48137 - ssize_t (*store)(struct device *,
48138 - struct device_attribute *,
48139 - const char *, size_t)
48140 - )
48141 + struct device_attribute *dev_attr)
48142 {
48143 + name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
48144
48145 - dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
48146 - id, name);
48147 - if (!dev_attr->attr.name)
48148 + if (!name)
48149 return -ENOMEM;
48150 - dev_attr->attr.mode = mode;
48151 - dev_attr->show = show;
48152 - dev_attr->store = store;
48153 +
48154 + pax_open_kernel();
48155 + *(const char **)&dev_attr->attr.name = name;
48156 + pax_close_kernel();
48157
48158 return 0;
48159 }
48160 @@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
48161
48162 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
48163 ret = create_constraint_attribute(i, "power_limit_uw",
48164 - S_IWUSR | S_IRUGO,
48165 - &constraint_attrs[i].power_limit_attr,
48166 - show_constraint_power_limit_uw,
48167 - store_constraint_power_limit_uw);
48168 + &constraint_attrs[i].power_limit_attr);
48169 if (ret)
48170 goto err_alloc;
48171 ret = create_constraint_attribute(i, "time_window_us",
48172 - S_IWUSR | S_IRUGO,
48173 - &constraint_attrs[i].time_window_attr,
48174 - show_constraint_time_window_us,
48175 - store_constraint_time_window_us);
48176 + &constraint_attrs[i].time_window_attr);
48177 if (ret)
48178 goto err_alloc;
48179 - ret = create_constraint_attribute(i, "name", S_IRUGO,
48180 - &constraint_attrs[i].name_attr,
48181 - show_constraint_name,
48182 - NULL);
48183 + ret = create_constraint_attribute(i, "name",
48184 + &constraint_attrs[i].name_attr);
48185 if (ret)
48186 goto err_alloc;
48187 - ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
48188 - &constraint_attrs[i].max_power_attr,
48189 - show_constraint_max_power_uw,
48190 - NULL);
48191 + ret = create_constraint_attribute(i, "max_power_uw",
48192 + &constraint_attrs[i].max_power_attr);
48193 if (ret)
48194 goto err_alloc;
48195 - ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
48196 - &constraint_attrs[i].min_power_attr,
48197 - show_constraint_min_power_uw,
48198 - NULL);
48199 + ret = create_constraint_attribute(i, "min_power_uw",
48200 + &constraint_attrs[i].min_power_attr);
48201 if (ret)
48202 goto err_alloc;
48203 ret = create_constraint_attribute(i, "max_time_window_us",
48204 - S_IRUGO,
48205 - &constraint_attrs[i].max_time_window_attr,
48206 - show_constraint_max_time_window_us,
48207 - NULL);
48208 + &constraint_attrs[i].max_time_window_attr);
48209 if (ret)
48210 goto err_alloc;
48211 ret = create_constraint_attribute(i, "min_time_window_us",
48212 - S_IRUGO,
48213 - &constraint_attrs[i].min_time_window_attr,
48214 - show_constraint_min_time_window_us,
48215 - NULL);
48216 + &constraint_attrs[i].min_time_window_attr);
48217 if (ret)
48218 goto err_alloc;
48219
48220 @@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
48221 power_zone->zone_dev_attrs[count++] =
48222 &dev_attr_max_energy_range_uj.attr;
48223 if (power_zone->ops->get_energy_uj) {
48224 + pax_open_kernel();
48225 if (power_zone->ops->reset_energy_uj)
48226 - dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
48227 + *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
48228 else
48229 - dev_attr_energy_uj.attr.mode = S_IRUGO;
48230 + *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
48231 + pax_close_kernel();
48232 power_zone->zone_dev_attrs[count++] =
48233 &dev_attr_energy_uj.attr;
48234 }
48235 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
48236 index 75dffb79..df850cd 100644
48237 --- a/drivers/regulator/core.c
48238 +++ b/drivers/regulator/core.c
48239 @@ -3370,7 +3370,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
48240 {
48241 const struct regulation_constraints *constraints = NULL;
48242 const struct regulator_init_data *init_data;
48243 - static atomic_t regulator_no = ATOMIC_INIT(0);
48244 + static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
48245 struct regulator_dev *rdev;
48246 struct device *dev;
48247 int ret, i;
48248 @@ -3440,7 +3440,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
48249 rdev->dev.of_node = config->of_node;
48250 rdev->dev.parent = dev;
48251 dev_set_name(&rdev->dev, "regulator.%d",
48252 - atomic_inc_return(&regulator_no) - 1);
48253 + atomic_inc_return_unchecked(&regulator_no) - 1);
48254 ret = device_register(&rdev->dev);
48255 if (ret != 0) {
48256 put_device(&rdev->dev);
48257 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
48258 index 8d94d3d..653b623 100644
48259 --- a/drivers/regulator/max8660.c
48260 +++ b/drivers/regulator/max8660.c
48261 @@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
48262 max8660->shadow_regs[MAX8660_OVER1] = 5;
48263 } else {
48264 /* Otherwise devices can be toggled via software */
48265 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
48266 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
48267 + pax_open_kernel();
48268 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
48269 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
48270 + pax_close_kernel();
48271 }
48272
48273 /*
48274 diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
48275 index 892aa1e..ebd1b9c 100644
48276 --- a/drivers/regulator/max8973-regulator.c
48277 +++ b/drivers/regulator/max8973-regulator.c
48278 @@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
48279 if (!pdata || !pdata->enable_ext_control) {
48280 max->desc.enable_reg = MAX8973_VOUT;
48281 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
48282 - max->ops.enable = regulator_enable_regmap;
48283 - max->ops.disable = regulator_disable_regmap;
48284 - max->ops.is_enabled = regulator_is_enabled_regmap;
48285 + pax_open_kernel();
48286 + *(void **)&max->ops.enable = regulator_enable_regmap;
48287 + *(void **)&max->ops.disable = regulator_disable_regmap;
48288 + *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
48289 + pax_close_kernel();
48290 }
48291
48292 if (pdata) {
48293 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
48294 index 96c9f80..90974ca 100644
48295 --- a/drivers/regulator/mc13892-regulator.c
48296 +++ b/drivers/regulator/mc13892-regulator.c
48297 @@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
48298 }
48299 mc13xxx_unlock(mc13892);
48300
48301 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
48302 + pax_open_kernel();
48303 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
48304 = mc13892_vcam_set_mode;
48305 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
48306 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
48307 = mc13892_vcam_get_mode;
48308 + pax_close_kernel();
48309
48310 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
48311 ARRAY_SIZE(mc13892_regulators));
48312 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
48313 index a2325bc..04c549f 100644
48314 --- a/drivers/rtc/rtc-cmos.c
48315 +++ b/drivers/rtc/rtc-cmos.c
48316 @@ -779,7 +779,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
48317 hpet_rtc_timer_init();
48318
48319 /* export at least the first block of NVRAM */
48320 - nvram.size = address_space - NVRAM_OFFSET;
48321 + pax_open_kernel();
48322 + *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
48323 + pax_close_kernel();
48324 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
48325 if (retval < 0) {
48326 dev_dbg(dev, "can't create nvram file? %d\n", retval);
48327 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
48328 index d049393..bb20be0 100644
48329 --- a/drivers/rtc/rtc-dev.c
48330 +++ b/drivers/rtc/rtc-dev.c
48331 @@ -16,6 +16,7 @@
48332 #include <linux/module.h>
48333 #include <linux/rtc.h>
48334 #include <linux/sched.h>
48335 +#include <linux/grsecurity.h>
48336 #include "rtc-core.h"
48337
48338 static dev_t rtc_devt;
48339 @@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
48340 if (copy_from_user(&tm, uarg, sizeof(tm)))
48341 return -EFAULT;
48342
48343 + gr_log_timechange();
48344 +
48345 return rtc_set_time(rtc, &tm);
48346
48347 case RTC_PIE_ON:
48348 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
48349 index 4e75345..09f8663 100644
48350 --- a/drivers/rtc/rtc-ds1307.c
48351 +++ b/drivers/rtc/rtc-ds1307.c
48352 @@ -107,7 +107,7 @@ struct ds1307 {
48353 u8 offset; /* register's offset */
48354 u8 regs[11];
48355 u16 nvram_offset;
48356 - struct bin_attribute *nvram;
48357 + bin_attribute_no_const *nvram;
48358 enum ds_type type;
48359 unsigned long flags;
48360 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
48361 diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
48362 index 11880c1..b823aa4 100644
48363 --- a/drivers/rtc/rtc-m48t59.c
48364 +++ b/drivers/rtc/rtc-m48t59.c
48365 @@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
48366 if (IS_ERR(m48t59->rtc))
48367 return PTR_ERR(m48t59->rtc);
48368
48369 - m48t59_nvram_attr.size = pdata->offset;
48370 + pax_open_kernel();
48371 + *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
48372 + pax_close_kernel();
48373
48374 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
48375 if (ret)
48376 diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
48377 index 14b5f8d..cc9bd26 100644
48378 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c
48379 +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
48380 @@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
48381 for (bit = 0; bit < 8; bit++) {
48382
48383 if ((pci_status[i] & (0x1 << bit)) != 0) {
48384 - static const char *s;
48385 + const char *s;
48386
48387 s = pci_status_strings[bit];
48388 if (i == 7/*TARG*/ && bit == 3)
48389 @@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
48390
48391 for (bit = 0; bit < 8; bit++) {
48392
48393 - if ((split_status[i] & (0x1 << bit)) != 0) {
48394 - static const char *s;
48395 -
48396 - s = split_status_strings[bit];
48397 - printk(s, ahd_name(ahd),
48398 + if ((split_status[i] & (0x1 << bit)) != 0)
48399 + printk(split_status_strings[bit], ahd_name(ahd),
48400 split_status_source[i]);
48401 - }
48402
48403 if (i > 1)
48404 continue;
48405
48406 - if ((sg_split_status[i] & (0x1 << bit)) != 0) {
48407 - static const char *s;
48408 -
48409 - s = split_status_strings[bit];
48410 - printk(s, ahd_name(ahd), "SG");
48411 - }
48412 + if ((sg_split_status[i] & (0x1 << bit)) != 0)
48413 + printk(split_status_strings[bit], ahd_name(ahd), "SG");
48414 }
48415 }
48416 /*
48417 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
48418 index e693af6..2e525b6 100644
48419 --- a/drivers/scsi/bfa/bfa_fcpim.h
48420 +++ b/drivers/scsi/bfa/bfa_fcpim.h
48421 @@ -36,7 +36,7 @@ struct bfa_iotag_s {
48422
48423 struct bfa_itn_s {
48424 bfa_isr_func_t isr;
48425 -};
48426 +} __no_const;
48427
48428 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
48429 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
48430 diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
48431 index a3ab5cc..8143622 100644
48432 --- a/drivers/scsi/bfa/bfa_fcs.c
48433 +++ b/drivers/scsi/bfa/bfa_fcs.c
48434 @@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
48435 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
48436
48437 static struct bfa_fcs_mod_s fcs_modules[] = {
48438 - { bfa_fcs_port_attach, NULL, NULL },
48439 - { bfa_fcs_uf_attach, NULL, NULL },
48440 - { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
48441 - bfa_fcs_fabric_modexit },
48442 + {
48443 + .attach = bfa_fcs_port_attach,
48444 + .modinit = NULL,
48445 + .modexit = NULL
48446 + },
48447 + {
48448 + .attach = bfa_fcs_uf_attach,
48449 + .modinit = NULL,
48450 + .modexit = NULL
48451 + },
48452 + {
48453 + .attach = bfa_fcs_fabric_attach,
48454 + .modinit = bfa_fcs_fabric_modinit,
48455 + .modexit = bfa_fcs_fabric_modexit
48456 + },
48457 };
48458
48459 /*
48460 diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
48461 index f5e4e61..a0acaf6 100644
48462 --- a/drivers/scsi/bfa/bfa_fcs_lport.c
48463 +++ b/drivers/scsi/bfa/bfa_fcs_lport.c
48464 @@ -89,15 +89,26 @@ static struct {
48465 void (*offline) (struct bfa_fcs_lport_s *port);
48466 } __port_action[] = {
48467 {
48468 - bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
48469 - bfa_fcs_lport_unknown_offline}, {
48470 - bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
48471 - bfa_fcs_lport_fab_offline}, {
48472 - bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
48473 - bfa_fcs_lport_n2n_offline}, {
48474 - bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
48475 - bfa_fcs_lport_loop_offline},
48476 - };
48477 + .init = bfa_fcs_lport_unknown_init,
48478 + .online = bfa_fcs_lport_unknown_online,
48479 + .offline = bfa_fcs_lport_unknown_offline
48480 + },
48481 + {
48482 + .init = bfa_fcs_lport_fab_init,
48483 + .online = bfa_fcs_lport_fab_online,
48484 + .offline = bfa_fcs_lport_fab_offline
48485 + },
48486 + {
48487 + .init = bfa_fcs_lport_n2n_init,
48488 + .online = bfa_fcs_lport_n2n_online,
48489 + .offline = bfa_fcs_lport_n2n_offline
48490 + },
48491 + {
48492 + .init = bfa_fcs_lport_loop_init,
48493 + .online = bfa_fcs_lport_loop_online,
48494 + .offline = bfa_fcs_lport_loop_offline
48495 + },
48496 +};
48497
48498 /*
48499 * fcs_port_sm FCS logical port state machine
48500 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
48501 index 90814fe..4384138 100644
48502 --- a/drivers/scsi/bfa/bfa_ioc.h
48503 +++ b/drivers/scsi/bfa/bfa_ioc.h
48504 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
48505 bfa_ioc_disable_cbfn_t disable_cbfn;
48506 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
48507 bfa_ioc_reset_cbfn_t reset_cbfn;
48508 -};
48509 +} __no_const;
48510
48511 /*
48512 * IOC event notification mechanism.
48513 @@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
48514 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
48515 enum bfi_ioc_state fwstate);
48516 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
48517 -};
48518 +} __no_const;
48519
48520 /*
48521 * Queue element to wait for room in request queue. FIFO order is
48522 diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
48523 index a14c784..6de6790 100644
48524 --- a/drivers/scsi/bfa/bfa_modules.h
48525 +++ b/drivers/scsi/bfa/bfa_modules.h
48526 @@ -78,12 +78,12 @@ enum {
48527 \
48528 extern struct bfa_module_s hal_mod_ ## __mod; \
48529 struct bfa_module_s hal_mod_ ## __mod = { \
48530 - bfa_ ## __mod ## _meminfo, \
48531 - bfa_ ## __mod ## _attach, \
48532 - bfa_ ## __mod ## _detach, \
48533 - bfa_ ## __mod ## _start, \
48534 - bfa_ ## __mod ## _stop, \
48535 - bfa_ ## __mod ## _iocdisable, \
48536 + .meminfo = bfa_ ## __mod ## _meminfo, \
48537 + .attach = bfa_ ## __mod ## _attach, \
48538 + .detach = bfa_ ## __mod ## _detach, \
48539 + .start = bfa_ ## __mod ## _start, \
48540 + .stop = bfa_ ## __mod ## _stop, \
48541 + .iocdisable = bfa_ ## __mod ## _iocdisable, \
48542 }
48543
48544 #define BFA_CACHELINE_SZ (256)
48545 diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
48546 index 045c4e1..13de803 100644
48547 --- a/drivers/scsi/fcoe/fcoe_sysfs.c
48548 +++ b/drivers/scsi/fcoe/fcoe_sysfs.c
48549 @@ -33,8 +33,8 @@
48550 */
48551 #include "libfcoe.h"
48552
48553 -static atomic_t ctlr_num;
48554 -static atomic_t fcf_num;
48555 +static atomic_unchecked_t ctlr_num;
48556 +static atomic_unchecked_t fcf_num;
48557
48558 /*
48559 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
48560 @@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
48561 if (!ctlr)
48562 goto out;
48563
48564 - ctlr->id = atomic_inc_return(&ctlr_num) - 1;
48565 + ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
48566 ctlr->f = f;
48567 ctlr->mode = FIP_CONN_TYPE_FABRIC;
48568 INIT_LIST_HEAD(&ctlr->fcfs);
48569 @@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
48570 fcf->dev.parent = &ctlr->dev;
48571 fcf->dev.bus = &fcoe_bus_type;
48572 fcf->dev.type = &fcoe_fcf_device_type;
48573 - fcf->id = atomic_inc_return(&fcf_num) - 1;
48574 + fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
48575 fcf->state = FCOE_FCF_STATE_UNKNOWN;
48576
48577 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
48578 @@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
48579 {
48580 int error;
48581
48582 - atomic_set(&ctlr_num, 0);
48583 - atomic_set(&fcf_num, 0);
48584 + atomic_set_unchecked(&ctlr_num, 0);
48585 + atomic_set_unchecked(&fcf_num, 0);
48586
48587 error = bus_register(&fcoe_bus_type);
48588 if (error)
48589 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
48590 index f2c5005..db36c02 100644
48591 --- a/drivers/scsi/hosts.c
48592 +++ b/drivers/scsi/hosts.c
48593 @@ -42,7 +42,7 @@
48594 #include "scsi_logging.h"
48595
48596
48597 -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
48598 +static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
48599
48600
48601 static void scsi_host_cls_release(struct device *dev)
48602 @@ -367,7 +367,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
48603 * subtract one because we increment first then return, but we need to
48604 * know what the next host number was before increment
48605 */
48606 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
48607 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
48608 shost->dma_channel = 0xff;
48609
48610 /* These three are default values which can be overridden */
48611 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
48612 index 20a5e6e..8b23cea 100644
48613 --- a/drivers/scsi/hpsa.c
48614 +++ b/drivers/scsi/hpsa.c
48615 @@ -578,7 +578,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
48616 unsigned long flags;
48617
48618 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
48619 - return h->access.command_completed(h, q);
48620 + return h->access->command_completed(h, q);
48621
48622 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
48623 a = rq->head[rq->current_entry];
48624 @@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
48625 while (!list_empty(&h->reqQ)) {
48626 c = list_entry(h->reqQ.next, struct CommandList, list);
48627 /* can't do anything if fifo is full */
48628 - if ((h->access.fifo_full(h))) {
48629 + if ((h->access->fifo_full(h))) {
48630 dev_warn(&h->pdev->dev, "fifo full\n");
48631 break;
48632 }
48633 @@ -3466,7 +3466,7 @@ static void start_io(struct ctlr_info *h)
48634
48635 /* Tell the controller execute command */
48636 spin_unlock_irqrestore(&h->lock, flags);
48637 - h->access.submit_command(h, c);
48638 + h->access->submit_command(h, c);
48639 spin_lock_irqsave(&h->lock, flags);
48640 }
48641 spin_unlock_irqrestore(&h->lock, flags);
48642 @@ -3474,17 +3474,17 @@ static void start_io(struct ctlr_info *h)
48643
48644 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
48645 {
48646 - return h->access.command_completed(h, q);
48647 + return h->access->command_completed(h, q);
48648 }
48649
48650 static inline bool interrupt_pending(struct ctlr_info *h)
48651 {
48652 - return h->access.intr_pending(h);
48653 + return h->access->intr_pending(h);
48654 }
48655
48656 static inline long interrupt_not_for_us(struct ctlr_info *h)
48657 {
48658 - return (h->access.intr_pending(h) == 0) ||
48659 + return (h->access->intr_pending(h) == 0) ||
48660 (h->interrupts_enabled == 0);
48661 }
48662
48663 @@ -4386,7 +4386,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
48664 if (prod_index < 0)
48665 return -ENODEV;
48666 h->product_name = products[prod_index].product_name;
48667 - h->access = *(products[prod_index].access);
48668 + h->access = products[prod_index].access;
48669
48670 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
48671 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
48672 @@ -4668,7 +4668,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
48673
48674 assert_spin_locked(&lockup_detector_lock);
48675 remove_ctlr_from_lockup_detector_list(h);
48676 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
48677 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
48678 spin_lock_irqsave(&h->lock, flags);
48679 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
48680 spin_unlock_irqrestore(&h->lock, flags);
48681 @@ -4845,7 +4845,7 @@ reinit_after_soft_reset:
48682 }
48683
48684 /* make sure the board interrupts are off */
48685 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
48686 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
48687
48688 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
48689 goto clean2;
48690 @@ -4879,7 +4879,7 @@ reinit_after_soft_reset:
48691 * fake ones to scoop up any residual completions.
48692 */
48693 spin_lock_irqsave(&h->lock, flags);
48694 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
48695 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
48696 spin_unlock_irqrestore(&h->lock, flags);
48697 free_irqs(h);
48698 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
48699 @@ -4898,9 +4898,9 @@ reinit_after_soft_reset:
48700 dev_info(&h->pdev->dev, "Board READY.\n");
48701 dev_info(&h->pdev->dev,
48702 "Waiting for stale completions to drain.\n");
48703 - h->access.set_intr_mask(h, HPSA_INTR_ON);
48704 + h->access->set_intr_mask(h, HPSA_INTR_ON);
48705 msleep(10000);
48706 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
48707 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
48708
48709 rc = controller_reset_failed(h->cfgtable);
48710 if (rc)
48711 @@ -4921,7 +4921,7 @@ reinit_after_soft_reset:
48712 }
48713
48714 /* Turn the interrupts on so we can service requests */
48715 - h->access.set_intr_mask(h, HPSA_INTR_ON);
48716 + h->access->set_intr_mask(h, HPSA_INTR_ON);
48717
48718 hpsa_hba_inquiry(h);
48719 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
48720 @@ -4976,7 +4976,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
48721 * To write all data in the battery backed cache to disks
48722 */
48723 hpsa_flush_cache(h);
48724 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
48725 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
48726 hpsa_free_irqs_and_disable_msix(h);
48727 }
48728
48729 @@ -5143,7 +5143,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
48730 return;
48731 }
48732 /* Change the access methods to the performant access methods */
48733 - h->access = SA5_performant_access;
48734 + h->access = &SA5_performant_access;
48735 h->transMethod = CFGTBL_Trans_Performant;
48736 }
48737
48738 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
48739 index bc85e72..d463049 100644
48740 --- a/drivers/scsi/hpsa.h
48741 +++ b/drivers/scsi/hpsa.h
48742 @@ -79,7 +79,7 @@ struct ctlr_info {
48743 unsigned int msix_vector;
48744 unsigned int msi_vector;
48745 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
48746 - struct access_method access;
48747 + struct access_method *access;
48748
48749 /* queue and queue Info */
48750 struct list_head reqQ;
48751 @@ -381,19 +381,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
48752 }
48753
48754 static struct access_method SA5_access = {
48755 - SA5_submit_command,
48756 - SA5_intr_mask,
48757 - SA5_fifo_full,
48758 - SA5_intr_pending,
48759 - SA5_completed,
48760 + .submit_command = SA5_submit_command,
48761 + .set_intr_mask = SA5_intr_mask,
48762 + .fifo_full = SA5_fifo_full,
48763 + .intr_pending = SA5_intr_pending,
48764 + .command_completed = SA5_completed,
48765 };
48766
48767 static struct access_method SA5_performant_access = {
48768 - SA5_submit_command,
48769 - SA5_performant_intr_mask,
48770 - SA5_fifo_full,
48771 - SA5_performant_intr_pending,
48772 - SA5_performant_completed,
48773 + .submit_command = SA5_submit_command,
48774 + .set_intr_mask = SA5_performant_intr_mask,
48775 + .fifo_full = SA5_fifo_full,
48776 + .intr_pending = SA5_performant_intr_pending,
48777 + .command_completed = SA5_performant_completed,
48778 };
48779
48780 struct board_type {
48781 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
48782 index 1b3a094..068e683 100644
48783 --- a/drivers/scsi/libfc/fc_exch.c
48784 +++ b/drivers/scsi/libfc/fc_exch.c
48785 @@ -101,12 +101,12 @@ struct fc_exch_mgr {
48786 u16 pool_max_index;
48787
48788 struct {
48789 - atomic_t no_free_exch;
48790 - atomic_t no_free_exch_xid;
48791 - atomic_t xid_not_found;
48792 - atomic_t xid_busy;
48793 - atomic_t seq_not_found;
48794 - atomic_t non_bls_resp;
48795 + atomic_unchecked_t no_free_exch;
48796 + atomic_unchecked_t no_free_exch_xid;
48797 + atomic_unchecked_t xid_not_found;
48798 + atomic_unchecked_t xid_busy;
48799 + atomic_unchecked_t seq_not_found;
48800 + atomic_unchecked_t non_bls_resp;
48801 } stats;
48802 };
48803
48804 @@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
48805 /* allocate memory for exchange */
48806 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
48807 if (!ep) {
48808 - atomic_inc(&mp->stats.no_free_exch);
48809 + atomic_inc_unchecked(&mp->stats.no_free_exch);
48810 goto out;
48811 }
48812 memset(ep, 0, sizeof(*ep));
48813 @@ -874,7 +874,7 @@ out:
48814 return ep;
48815 err:
48816 spin_unlock_bh(&pool->lock);
48817 - atomic_inc(&mp->stats.no_free_exch_xid);
48818 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
48819 mempool_free(ep, mp->ep_pool);
48820 return NULL;
48821 }
48822 @@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48823 xid = ntohs(fh->fh_ox_id); /* we originated exch */
48824 ep = fc_exch_find(mp, xid);
48825 if (!ep) {
48826 - atomic_inc(&mp->stats.xid_not_found);
48827 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48828 reject = FC_RJT_OX_ID;
48829 goto out;
48830 }
48831 @@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48832 ep = fc_exch_find(mp, xid);
48833 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
48834 if (ep) {
48835 - atomic_inc(&mp->stats.xid_busy);
48836 + atomic_inc_unchecked(&mp->stats.xid_busy);
48837 reject = FC_RJT_RX_ID;
48838 goto rel;
48839 }
48840 @@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48841 }
48842 xid = ep->xid; /* get our XID */
48843 } else if (!ep) {
48844 - atomic_inc(&mp->stats.xid_not_found);
48845 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48846 reject = FC_RJT_RX_ID; /* XID not found */
48847 goto out;
48848 }
48849 @@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48850 } else {
48851 sp = &ep->seq;
48852 if (sp->id != fh->fh_seq_id) {
48853 - atomic_inc(&mp->stats.seq_not_found);
48854 + atomic_inc_unchecked(&mp->stats.seq_not_found);
48855 if (f_ctl & FC_FC_END_SEQ) {
48856 /*
48857 * Update sequence_id based on incoming last
48858 @@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48859
48860 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
48861 if (!ep) {
48862 - atomic_inc(&mp->stats.xid_not_found);
48863 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48864 goto out;
48865 }
48866 if (ep->esb_stat & ESB_ST_COMPLETE) {
48867 - atomic_inc(&mp->stats.xid_not_found);
48868 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48869 goto rel;
48870 }
48871 if (ep->rxid == FC_XID_UNKNOWN)
48872 ep->rxid = ntohs(fh->fh_rx_id);
48873 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
48874 - atomic_inc(&mp->stats.xid_not_found);
48875 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48876 goto rel;
48877 }
48878 if (ep->did != ntoh24(fh->fh_s_id) &&
48879 ep->did != FC_FID_FLOGI) {
48880 - atomic_inc(&mp->stats.xid_not_found);
48881 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48882 goto rel;
48883 }
48884 sof = fr_sof(fp);
48885 @@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48886 sp->ssb_stat |= SSB_ST_RESP;
48887 sp->id = fh->fh_seq_id;
48888 } else if (sp->id != fh->fh_seq_id) {
48889 - atomic_inc(&mp->stats.seq_not_found);
48890 + atomic_inc_unchecked(&mp->stats.seq_not_found);
48891 goto rel;
48892 }
48893
48894 @@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48895 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
48896
48897 if (!sp)
48898 - atomic_inc(&mp->stats.xid_not_found);
48899 + atomic_inc_unchecked(&mp->stats.xid_not_found);
48900 else
48901 - atomic_inc(&mp->stats.non_bls_resp);
48902 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
48903
48904 fc_frame_free(fp);
48905 }
48906 @@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
48907
48908 list_for_each_entry(ema, &lport->ema_list, ema_list) {
48909 mp = ema->mp;
48910 - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
48911 + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
48912 st->fc_no_free_exch_xid +=
48913 - atomic_read(&mp->stats.no_free_exch_xid);
48914 - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
48915 - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
48916 - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
48917 - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
48918 + atomic_read_unchecked(&mp->stats.no_free_exch_xid);
48919 + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
48920 + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
48921 + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
48922 + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
48923 }
48924 }
48925 EXPORT_SYMBOL(fc_exch_update_stats);
48926 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
48927 index d289583..b745eec 100644
48928 --- a/drivers/scsi/libsas/sas_ata.c
48929 +++ b/drivers/scsi/libsas/sas_ata.c
48930 @@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
48931 .postreset = ata_std_postreset,
48932 .error_handler = ata_std_error_handler,
48933 .post_internal_cmd = sas_ata_post_internal,
48934 - .qc_defer = ata_std_qc_defer,
48935 + .qc_defer = ata_std_qc_defer,
48936 .qc_prep = ata_noop_qc_prep,
48937 .qc_issue = sas_ata_qc_issue,
48938 .qc_fill_rtf = sas_ata_qc_fill_rtf,
48939 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
48940 index 4e1b75c..0bbdfa9 100644
48941 --- a/drivers/scsi/lpfc/lpfc.h
48942 +++ b/drivers/scsi/lpfc/lpfc.h
48943 @@ -432,7 +432,7 @@ struct lpfc_vport {
48944 struct dentry *debug_nodelist;
48945 struct dentry *vport_debugfs_root;
48946 struct lpfc_debugfs_trc *disc_trc;
48947 - atomic_t disc_trc_cnt;
48948 + atomic_unchecked_t disc_trc_cnt;
48949 #endif
48950 uint8_t stat_data_enabled;
48951 uint8_t stat_data_blocked;
48952 @@ -865,8 +865,8 @@ struct lpfc_hba {
48953 struct timer_list fabric_block_timer;
48954 unsigned long bit_flags;
48955 #define FABRIC_COMANDS_BLOCKED 0
48956 - atomic_t num_rsrc_err;
48957 - atomic_t num_cmd_success;
48958 + atomic_unchecked_t num_rsrc_err;
48959 + atomic_unchecked_t num_cmd_success;
48960 unsigned long last_rsrc_error_time;
48961 unsigned long last_ramp_down_time;
48962 unsigned long last_ramp_up_time;
48963 @@ -902,7 +902,7 @@ struct lpfc_hba {
48964
48965 struct dentry *debug_slow_ring_trc;
48966 struct lpfc_debugfs_trc *slow_ring_trc;
48967 - atomic_t slow_ring_trc_cnt;
48968 + atomic_unchecked_t slow_ring_trc_cnt;
48969 /* iDiag debugfs sub-directory */
48970 struct dentry *idiag_root;
48971 struct dentry *idiag_pci_cfg;
48972 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
48973 index 60084e6..0e2e700 100644
48974 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
48975 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
48976 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
48977
48978 #include <linux/debugfs.h>
48979
48980 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48981 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48982 static unsigned long lpfc_debugfs_start_time = 0L;
48983
48984 /* iDiag */
48985 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
48986 lpfc_debugfs_enable = 0;
48987
48988 len = 0;
48989 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
48990 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
48991 (lpfc_debugfs_max_disc_trc - 1);
48992 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
48993 dtp = vport->disc_trc + i;
48994 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
48995 lpfc_debugfs_enable = 0;
48996
48997 len = 0;
48998 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
48999 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
49000 (lpfc_debugfs_max_slow_ring_trc - 1);
49001 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
49002 dtp = phba->slow_ring_trc + i;
49003 @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
49004 !vport || !vport->disc_trc)
49005 return;
49006
49007 - index = atomic_inc_return(&vport->disc_trc_cnt) &
49008 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
49009 (lpfc_debugfs_max_disc_trc - 1);
49010 dtp = vport->disc_trc + index;
49011 dtp->fmt = fmt;
49012 dtp->data1 = data1;
49013 dtp->data2 = data2;
49014 dtp->data3 = data3;
49015 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
49016 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
49017 dtp->jif = jiffies;
49018 #endif
49019 return;
49020 @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
49021 !phba || !phba->slow_ring_trc)
49022 return;
49023
49024 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
49025 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
49026 (lpfc_debugfs_max_slow_ring_trc - 1);
49027 dtp = phba->slow_ring_trc + index;
49028 dtp->fmt = fmt;
49029 dtp->data1 = data1;
49030 dtp->data2 = data2;
49031 dtp->data3 = data3;
49032 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
49033 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
49034 dtp->jif = jiffies;
49035 #endif
49036 return;
49037 @@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
49038 "slow_ring buffer\n");
49039 goto debug_failed;
49040 }
49041 - atomic_set(&phba->slow_ring_trc_cnt, 0);
49042 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
49043 memset(phba->slow_ring_trc, 0,
49044 (sizeof(struct lpfc_debugfs_trc) *
49045 lpfc_debugfs_max_slow_ring_trc));
49046 @@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
49047 "buffer\n");
49048 goto debug_failed;
49049 }
49050 - atomic_set(&vport->disc_trc_cnt, 0);
49051 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
49052
49053 snprintf(name, sizeof(name), "discovery_trace");
49054 vport->debug_disc_trc =
49055 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
49056 index 68c94cc..8c27be5 100644
49057 --- a/drivers/scsi/lpfc/lpfc_init.c
49058 +++ b/drivers/scsi/lpfc/lpfc_init.c
49059 @@ -10949,8 +10949,10 @@ lpfc_init(void)
49060 "misc_register returned with status %d", error);
49061
49062 if (lpfc_enable_npiv) {
49063 - lpfc_transport_functions.vport_create = lpfc_vport_create;
49064 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
49065 + pax_open_kernel();
49066 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
49067 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
49068 + pax_close_kernel();
49069 }
49070 lpfc_transport_template =
49071 fc_attach_transport(&lpfc_transport_functions);
49072 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
49073 index b2ede05..aaf482ca 100644
49074 --- a/drivers/scsi/lpfc/lpfc_scsi.c
49075 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
49076 @@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
49077 uint32_t evt_posted;
49078
49079 spin_lock_irqsave(&phba->hbalock, flags);
49080 - atomic_inc(&phba->num_rsrc_err);
49081 + atomic_inc_unchecked(&phba->num_rsrc_err);
49082 phba->last_rsrc_error_time = jiffies;
49083
49084 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
49085 @@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
49086 unsigned long flags;
49087 struct lpfc_hba *phba = vport->phba;
49088 uint32_t evt_posted;
49089 - atomic_inc(&phba->num_cmd_success);
49090 + atomic_inc_unchecked(&phba->num_cmd_success);
49091
49092 if (vport->cfg_lun_queue_depth <= queue_depth)
49093 return;
49094 @@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
49095 unsigned long num_rsrc_err, num_cmd_success;
49096 int i;
49097
49098 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
49099 - num_cmd_success = atomic_read(&phba->num_cmd_success);
49100 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
49101 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
49102
49103 /*
49104 * The error and success command counters are global per
49105 @@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
49106 }
49107 }
49108 lpfc_destroy_vport_work_array(phba, vports);
49109 - atomic_set(&phba->num_rsrc_err, 0);
49110 - atomic_set(&phba->num_cmd_success, 0);
49111 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
49112 + atomic_set_unchecked(&phba->num_cmd_success, 0);
49113 }
49114
49115 /**
49116 @@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
49117 }
49118 }
49119 lpfc_destroy_vport_work_array(phba, vports);
49120 - atomic_set(&phba->num_rsrc_err, 0);
49121 - atomic_set(&phba->num_cmd_success, 0);
49122 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
49123 + atomic_set_unchecked(&phba->num_cmd_success, 0);
49124 }
49125
49126 /**
49127 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49128 index 7f0af4f..193ac3e 100644
49129 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49130 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49131 @@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
49132 {
49133 struct scsi_device *sdev = to_scsi_device(dev);
49134 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
49135 - static struct _raid_device *raid_device;
49136 + struct _raid_device *raid_device;
49137 unsigned long flags;
49138 Mpi2RaidVolPage0_t vol_pg0;
49139 Mpi2ConfigReply_t mpi_reply;
49140 @@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
49141 {
49142 struct scsi_device *sdev = to_scsi_device(dev);
49143 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
49144 - static struct _raid_device *raid_device;
49145 + struct _raid_device *raid_device;
49146 unsigned long flags;
49147 Mpi2RaidVolPage0_t vol_pg0;
49148 Mpi2ConfigReply_t mpi_reply;
49149 @@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
49150 struct fw_event_work *fw_event)
49151 {
49152 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
49153 - static struct _raid_device *raid_device;
49154 + struct _raid_device *raid_device;
49155 unsigned long flags;
49156 u16 handle;
49157
49158 @@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
49159 u64 sas_address;
49160 struct _sas_device *sas_device;
49161 struct _sas_node *expander_device;
49162 - static struct _raid_device *raid_device;
49163 + struct _raid_device *raid_device;
49164 u8 retry_count;
49165 unsigned long flags;
49166
49167 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
49168 index be8ce54..94ed33a 100644
49169 --- a/drivers/scsi/pmcraid.c
49170 +++ b/drivers/scsi/pmcraid.c
49171 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
49172 res->scsi_dev = scsi_dev;
49173 scsi_dev->hostdata = res;
49174 res->change_detected = 0;
49175 - atomic_set(&res->read_failures, 0);
49176 - atomic_set(&res->write_failures, 0);
49177 + atomic_set_unchecked(&res->read_failures, 0);
49178 + atomic_set_unchecked(&res->write_failures, 0);
49179 rc = 0;
49180 }
49181 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
49182 @@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
49183
49184 /* If this was a SCSI read/write command keep count of errors */
49185 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
49186 - atomic_inc(&res->read_failures);
49187 + atomic_inc_unchecked(&res->read_failures);
49188 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
49189 - atomic_inc(&res->write_failures);
49190 + atomic_inc_unchecked(&res->write_failures);
49191
49192 if (!RES_IS_GSCSI(res->cfg_entry) &&
49193 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
49194 @@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
49195 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
49196 * hrrq_id assigned here in queuecommand
49197 */
49198 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
49199 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
49200 pinstance->num_hrrq;
49201 cmd->cmd_done = pmcraid_io_done;
49202
49203 @@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
49204 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
49205 * hrrq_id assigned here in queuecommand
49206 */
49207 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
49208 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
49209 pinstance->num_hrrq;
49210
49211 if (request_size) {
49212 @@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
49213
49214 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
49215 /* add resources only after host is added into system */
49216 - if (!atomic_read(&pinstance->expose_resources))
49217 + if (!atomic_read_unchecked(&pinstance->expose_resources))
49218 return;
49219
49220 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
49221 @@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
49222 init_waitqueue_head(&pinstance->reset_wait_q);
49223
49224 atomic_set(&pinstance->outstanding_cmds, 0);
49225 - atomic_set(&pinstance->last_message_id, 0);
49226 - atomic_set(&pinstance->expose_resources, 0);
49227 + atomic_set_unchecked(&pinstance->last_message_id, 0);
49228 + atomic_set_unchecked(&pinstance->expose_resources, 0);
49229
49230 INIT_LIST_HEAD(&pinstance->free_res_q);
49231 INIT_LIST_HEAD(&pinstance->used_res_q);
49232 @@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
49233 /* Schedule worker thread to handle CCN and take care of adding and
49234 * removing devices to OS
49235 */
49236 - atomic_set(&pinstance->expose_resources, 1);
49237 + atomic_set_unchecked(&pinstance->expose_resources, 1);
49238 schedule_work(&pinstance->worker_q);
49239 return rc;
49240
49241 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
49242 index e1d150f..6c6df44 100644
49243 --- a/drivers/scsi/pmcraid.h
49244 +++ b/drivers/scsi/pmcraid.h
49245 @@ -748,7 +748,7 @@ struct pmcraid_instance {
49246 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
49247
49248 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
49249 - atomic_t last_message_id;
49250 + atomic_unchecked_t last_message_id;
49251
49252 /* configuration table */
49253 struct pmcraid_config_table *cfg_table;
49254 @@ -777,7 +777,7 @@ struct pmcraid_instance {
49255 atomic_t outstanding_cmds;
49256
49257 /* should add/delete resources to mid-layer now ?*/
49258 - atomic_t expose_resources;
49259 + atomic_unchecked_t expose_resources;
49260
49261
49262
49263 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
49264 struct pmcraid_config_table_entry_ext cfg_entry_ext;
49265 };
49266 struct scsi_device *scsi_dev; /* Link scsi_device structure */
49267 - atomic_t read_failures; /* count of failed READ commands */
49268 - atomic_t write_failures; /* count of failed WRITE commands */
49269 + atomic_unchecked_t read_failures; /* count of failed READ commands */
49270 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
49271
49272 /* To indicate add/delete/modify during CCN */
49273 u8 change_detected;
49274 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
49275 index 5f174b8..98d32b0 100644
49276 --- a/drivers/scsi/qla2xxx/qla_attr.c
49277 +++ b/drivers/scsi/qla2xxx/qla_attr.c
49278 @@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
49279 return 0;
49280 }
49281
49282 -struct fc_function_template qla2xxx_transport_functions = {
49283 +fc_function_template_no_const qla2xxx_transport_functions = {
49284
49285 .show_host_node_name = 1,
49286 .show_host_port_name = 1,
49287 @@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_transport_functions = {
49288 .bsg_timeout = qla24xx_bsg_timeout,
49289 };
49290
49291 -struct fc_function_template qla2xxx_transport_vport_functions = {
49292 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
49293
49294 .show_host_node_name = 1,
49295 .show_host_port_name = 1,
49296 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
49297 index 4446bf5..9a3574d 100644
49298 --- a/drivers/scsi/qla2xxx/qla_gbl.h
49299 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
49300 @@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
49301 struct device_attribute;
49302 extern struct device_attribute *qla2x00_host_attrs[];
49303 struct fc_function_template;
49304 -extern struct fc_function_template qla2xxx_transport_functions;
49305 -extern struct fc_function_template qla2xxx_transport_vport_functions;
49306 +extern fc_function_template_no_const qla2xxx_transport_functions;
49307 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
49308 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
49309 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
49310 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
49311 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
49312 index 52be35e..b933907 100644
49313 --- a/drivers/scsi/qla2xxx/qla_os.c
49314 +++ b/drivers/scsi/qla2xxx/qla_os.c
49315 @@ -1568,8 +1568,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
49316 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
49317 /* Ok, a 64bit DMA mask is applicable. */
49318 ha->flags.enable_64bit_addressing = 1;
49319 - ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
49320 - ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
49321 + pax_open_kernel();
49322 + *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
49323 + *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
49324 + pax_close_kernel();
49325 return;
49326 }
49327 }
49328 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
49329 index 084d1fd..9f939eb 100644
49330 --- a/drivers/scsi/qla4xxx/ql4_def.h
49331 +++ b/drivers/scsi/qla4xxx/ql4_def.h
49332 @@ -296,7 +296,7 @@ struct ddb_entry {
49333 * (4000 only) */
49334 atomic_t relogin_timer; /* Max Time to wait for
49335 * relogin to complete */
49336 - atomic_t relogin_retry_count; /* Num of times relogin has been
49337 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
49338 * retried */
49339 uint32_t default_time2wait; /* Default Min time between
49340 * relogins (+aens) */
49341 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
49342 index cf174a4..128a420 100644
49343 --- a/drivers/scsi/qla4xxx/ql4_os.c
49344 +++ b/drivers/scsi/qla4xxx/ql4_os.c
49345 @@ -3311,12 +3311,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
49346 */
49347 if (!iscsi_is_session_online(cls_sess)) {
49348 /* Reset retry relogin timer */
49349 - atomic_inc(&ddb_entry->relogin_retry_count);
49350 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
49351 DEBUG2(ql4_printk(KERN_INFO, ha,
49352 "%s: index[%d] relogin timed out-retrying"
49353 " relogin (%d), retry (%d)\n", __func__,
49354 ddb_entry->fw_ddb_index,
49355 - atomic_read(&ddb_entry->relogin_retry_count),
49356 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
49357 ddb_entry->default_time2wait + 4));
49358 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
49359 atomic_set(&ddb_entry->retry_relogin_timer,
49360 @@ -5458,7 +5458,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
49361
49362 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
49363 atomic_set(&ddb_entry->relogin_timer, 0);
49364 - atomic_set(&ddb_entry->relogin_retry_count, 0);
49365 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
49366 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
49367 ddb_entry->default_relogin_timeout =
49368 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
49369 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
49370 index fe0bcb1..c9255be 100644
49371 --- a/drivers/scsi/scsi.c
49372 +++ b/drivers/scsi/scsi.c
49373 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
49374 struct Scsi_Host *host = cmd->device->host;
49375 int rtn = 0;
49376
49377 - atomic_inc(&cmd->device->iorequest_cnt);
49378 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
49379
49380 /* check if the device is still usable */
49381 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
49382 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
49383 index 7bd7f0d..93159d8 100644
49384 --- a/drivers/scsi/scsi_lib.c
49385 +++ b/drivers/scsi/scsi_lib.c
49386 @@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
49387 shost = sdev->host;
49388 scsi_init_cmd_errh(cmd);
49389 cmd->result = DID_NO_CONNECT << 16;
49390 - atomic_inc(&cmd->device->iorequest_cnt);
49391 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
49392
49393 /*
49394 * SCSI request completion path will do scsi_device_unbusy(),
49395 @@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
49396
49397 INIT_LIST_HEAD(&cmd->eh_entry);
49398
49399 - atomic_inc(&cmd->device->iodone_cnt);
49400 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
49401 if (cmd->result)
49402 - atomic_inc(&cmd->device->ioerr_cnt);
49403 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
49404
49405 disposition = scsi_decide_disposition(cmd);
49406 if (disposition != SUCCESS &&
49407 @@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
49408
49409 host_dev = scsi_get_device(shost);
49410 if (host_dev && host_dev->dma_mask)
49411 - bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
49412 + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
49413
49414 return bounce_limit;
49415 }
49416 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
49417 index 8ff62c2..693b6f7 100644
49418 --- a/drivers/scsi/scsi_sysfs.c
49419 +++ b/drivers/scsi/scsi_sysfs.c
49420 @@ -725,7 +725,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
49421 char *buf) \
49422 { \
49423 struct scsi_device *sdev = to_scsi_device(dev); \
49424 - unsigned long long count = atomic_read(&sdev->field); \
49425 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
49426 return snprintf(buf, 20, "0x%llx\n", count); \
49427 } \
49428 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
49429 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
49430 index 84a1fdf..693b0d6 100644
49431 --- a/drivers/scsi/scsi_tgt_lib.c
49432 +++ b/drivers/scsi/scsi_tgt_lib.c
49433 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
49434 int err;
49435
49436 dprintk("%lx %u\n", uaddr, len);
49437 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
49438 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
49439 if (err) {
49440 /*
49441 * TODO: need to fixup sg_tablesize, max_segment_size,
49442 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
49443 index 4628fd5..a94a1c2 100644
49444 --- a/drivers/scsi/scsi_transport_fc.c
49445 +++ b/drivers/scsi/scsi_transport_fc.c
49446 @@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
49447 * Netlink Infrastructure
49448 */
49449
49450 -static atomic_t fc_event_seq;
49451 +static atomic_unchecked_t fc_event_seq;
49452
49453 /**
49454 * fc_get_event_number - Obtain the next sequential FC event number
49455 @@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
49456 u32
49457 fc_get_event_number(void)
49458 {
49459 - return atomic_add_return(1, &fc_event_seq);
49460 + return atomic_add_return_unchecked(1, &fc_event_seq);
49461 }
49462 EXPORT_SYMBOL(fc_get_event_number);
49463
49464 @@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
49465 {
49466 int error;
49467
49468 - atomic_set(&fc_event_seq, 0);
49469 + atomic_set_unchecked(&fc_event_seq, 0);
49470
49471 error = transport_class_register(&fc_host_class);
49472 if (error)
49473 @@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
49474 char *cp;
49475
49476 *val = simple_strtoul(buf, &cp, 0);
49477 - if ((*cp && (*cp != '\n')) || (*val < 0))
49478 + if (*cp && (*cp != '\n'))
49479 return -EINVAL;
49480 /*
49481 * Check for overflow; dev_loss_tmo is u32
49482 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
49483 index 63a6ca4..5d5cadd 100644
49484 --- a/drivers/scsi/scsi_transport_iscsi.c
49485 +++ b/drivers/scsi/scsi_transport_iscsi.c
49486 @@ -79,7 +79,7 @@ struct iscsi_internal {
49487 struct transport_container session_cont;
49488 };
49489
49490 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
49491 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
49492 static struct workqueue_struct *iscsi_eh_timer_workq;
49493
49494 static DEFINE_IDA(iscsi_sess_ida);
49495 @@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
49496 int err;
49497
49498 ihost = shost->shost_data;
49499 - session->sid = atomic_add_return(1, &iscsi_session_nr);
49500 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
49501
49502 if (target_id == ISCSI_MAX_TARGET) {
49503 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
49504 @@ -4103,7 +4103,7 @@ static __init int iscsi_transport_init(void)
49505 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
49506 ISCSI_TRANSPORT_VERSION);
49507
49508 - atomic_set(&iscsi_session_nr, 0);
49509 + atomic_set_unchecked(&iscsi_session_nr, 0);
49510
49511 err = class_register(&iscsi_transport_class);
49512 if (err)
49513 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
49514 index 2700a5a..752ec38 100644
49515 --- a/drivers/scsi/scsi_transport_srp.c
49516 +++ b/drivers/scsi/scsi_transport_srp.c
49517 @@ -36,7 +36,7 @@
49518 #include "scsi_transport_srp_internal.h"
49519
49520 struct srp_host_attrs {
49521 - atomic_t next_port_id;
49522 + atomic_unchecked_t next_port_id;
49523 };
49524 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
49525
49526 @@ -94,7 +94,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
49527 struct Scsi_Host *shost = dev_to_shost(dev);
49528 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
49529
49530 - atomic_set(&srp_host->next_port_id, 0);
49531 + atomic_set_unchecked(&srp_host->next_port_id, 0);
49532 return 0;
49533 }
49534
49535 @@ -730,7 +730,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
49536 rport_fast_io_fail_timedout);
49537 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
49538
49539 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
49540 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
49541 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
49542
49543 transport_setup_device(&rport->dev);
49544 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
49545 index 69725f7..03aaee1 100644
49546 --- a/drivers/scsi/sd.c
49547 +++ b/drivers/scsi/sd.c
49548 @@ -2964,7 +2964,7 @@ static int sd_probe(struct device *dev)
49549 sdkp->disk = gd;
49550 sdkp->index = index;
49551 atomic_set(&sdkp->openers, 0);
49552 - atomic_set(&sdkp->device->ioerr_cnt, 0);
49553 + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
49554
49555 if (!sdp->request_queue->rq_timeout) {
49556 if (sdp->type != TYPE_MOD)
49557 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
49558 index df5e961..df6b97f 100644
49559 --- a/drivers/scsi/sg.c
49560 +++ b/drivers/scsi/sg.c
49561 @@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
49562 sdp->disk->disk_name,
49563 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
49564 NULL,
49565 - (char *)arg);
49566 + (char __user *)arg);
49567 case BLKTRACESTART:
49568 return blk_trace_startstop(sdp->device->request_queue, 1);
49569 case BLKTRACESTOP:
49570 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
49571 index 349ebba..ff2a249 100644
49572 --- a/drivers/spi/spi.c
49573 +++ b/drivers/spi/spi.c
49574 @@ -1945,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *master)
49575 EXPORT_SYMBOL_GPL(spi_bus_unlock);
49576
49577 /* portable code must never pass more than 32 bytes */
49578 -#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
49579 +#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
49580
49581 static u8 *buf;
49582
49583 diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
49584 index 2c61783..4d49e4e 100644
49585 --- a/drivers/staging/android/timed_output.c
49586 +++ b/drivers/staging/android/timed_output.c
49587 @@ -25,7 +25,7 @@
49588 #include "timed_output.h"
49589
49590 static struct class *timed_output_class;
49591 -static atomic_t device_count;
49592 +static atomic_unchecked_t device_count;
49593
49594 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
49595 char *buf)
49596 @@ -63,7 +63,7 @@ static int create_timed_output_class(void)
49597 timed_output_class = class_create(THIS_MODULE, "timed_output");
49598 if (IS_ERR(timed_output_class))
49599 return PTR_ERR(timed_output_class);
49600 - atomic_set(&device_count, 0);
49601 + atomic_set_unchecked(&device_count, 0);
49602 timed_output_class->dev_groups = timed_output_groups;
49603 }
49604
49605 @@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
49606 if (ret < 0)
49607 return ret;
49608
49609 - tdev->index = atomic_inc_return(&device_count);
49610 + tdev->index = atomic_inc_return_unchecked(&device_count);
49611 tdev->dev = device_create(timed_output_class, NULL,
49612 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
49613 if (IS_ERR(tdev->dev))
49614 diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
49615 index c0f7cd7..5424212 100644
49616 --- a/drivers/staging/gdm724x/gdm_tty.c
49617 +++ b/drivers/staging/gdm724x/gdm_tty.c
49618 @@ -45,7 +45,7 @@
49619 #define gdm_tty_send_control(n, r, v, d, l) (\
49620 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
49621
49622 -#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
49623 +#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
49624
49625 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
49626 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
49627 diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
49628 index 96e4eee..6d7c37e 100644
49629 --- a/drivers/staging/imx-drm/imx-drm-core.c
49630 +++ b/drivers/staging/imx-drm/imx-drm-core.c
49631 @@ -510,7 +510,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
49632 goto err_busy;
49633 }
49634
49635 - if (imxdrm->drm->open_count) {
49636 + if (local_read(&imxdrm->drm->open_count)) {
49637 ret = -EBUSY;
49638 goto err_busy;
49639 }
49640 @@ -590,7 +590,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
49641
49642 mutex_lock(&imxdrm->mutex);
49643
49644 - if (imxdrm->drm->open_count) {
49645 + if (local_read(&imxdrm->drm->open_count)) {
49646 ret = -EBUSY;
49647 goto err_busy;
49648 }
49649 @@ -729,7 +729,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
49650
49651 mutex_lock(&imxdrm->mutex);
49652
49653 - if (imxdrm->drm->open_count) {
49654 + if (local_read(&imxdrm->drm->open_count)) {
49655 ret = -EBUSY;
49656 goto err_busy;
49657 }
49658 diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
49659 index b7613c8..c302392 100644
49660 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c
49661 +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
49662 @@ -487,13 +487,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
49663 return 0;
49664 }
49665
49666 -sfw_test_client_ops_t brw_test_client;
49667 -void brw_init_test_client(void)
49668 -{
49669 - brw_test_client.tso_init = brw_client_init;
49670 - brw_test_client.tso_fini = brw_client_fini;
49671 - brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
49672 - brw_test_client.tso_done_rpc = brw_client_done_rpc;
49673 +sfw_test_client_ops_t brw_test_client = {
49674 + .tso_init = brw_client_init,
49675 + .tso_fini = brw_client_fini,
49676 + .tso_prep_rpc = brw_client_prep_rpc,
49677 + .tso_done_rpc = brw_client_done_rpc,
49678 };
49679
49680 srpc_service_t brw_test_service;
49681 diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
49682 index 483c785..e1a2a7b 100644
49683 --- a/drivers/staging/lustre/lnet/selftest/framework.c
49684 +++ b/drivers/staging/lustre/lnet/selftest/framework.c
49685 @@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
49686
49687 extern sfw_test_client_ops_t ping_test_client;
49688 extern srpc_service_t ping_test_service;
49689 -extern void ping_init_test_client(void);
49690 extern void ping_init_test_service(void);
49691
49692 extern sfw_test_client_ops_t brw_test_client;
49693 extern srpc_service_t brw_test_service;
49694 -extern void brw_init_test_client(void);
49695 extern void brw_init_test_service(void);
49696
49697
49698 @@ -1684,12 +1682,10 @@ sfw_startup (void)
49699 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
49700 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
49701
49702 - brw_init_test_client();
49703 brw_init_test_service();
49704 rc = sfw_register_test(&brw_test_service, &brw_test_client);
49705 LASSERT (rc == 0);
49706
49707 - ping_init_test_client();
49708 ping_init_test_service();
49709 rc = sfw_register_test(&ping_test_service, &ping_test_client);
49710 LASSERT (rc == 0);
49711 diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
49712 index f0f9194..b589047 100644
49713 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c
49714 +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
49715 @@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
49716 return 0;
49717 }
49718
49719 -sfw_test_client_ops_t ping_test_client;
49720 -void ping_init_test_client(void)
49721 -{
49722 - ping_test_client.tso_init = ping_client_init;
49723 - ping_test_client.tso_fini = ping_client_fini;
49724 - ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
49725 - ping_test_client.tso_done_rpc = ping_client_done_rpc;
49726 -}
49727 +sfw_test_client_ops_t ping_test_client = {
49728 + .tso_init = ping_client_init,
49729 + .tso_fini = ping_client_fini,
49730 + .tso_prep_rpc = ping_client_prep_rpc,
49731 + .tso_done_rpc = ping_client_done_rpc,
49732 +};
49733
49734 srpc_service_t ping_test_service;
49735 void ping_init_test_service(void)
49736 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49737 index bc2b82f..67fd598 100644
49738 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
49739 +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49740 @@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
49741 ldlm_completion_callback lcs_completion;
49742 ldlm_blocking_callback lcs_blocking;
49743 ldlm_glimpse_callback lcs_glimpse;
49744 -};
49745 +} __no_const;
49746
49747 /* ldlm_lockd.c */
49748 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
49749 diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
49750 index d0aea15..7af68e1 100644
49751 --- a/drivers/staging/lustre/lustre/include/obd.h
49752 +++ b/drivers/staging/lustre/lustre/include/obd.h
49753 @@ -1417,7 +1417,7 @@ struct md_ops {
49754 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
49755 * wrapper function in include/linux/obd_class.h.
49756 */
49757 -};
49758 +} __no_const;
49759
49760 struct lsm_operations {
49761 void (*lsm_free)(struct lov_stripe_md *);
49762 diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49763 index 39fcdac..222780f 100644
49764 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49765 +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49766 @@ -249,7 +249,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
49767 int added = (mode == LCK_NL);
49768 int overlaps = 0;
49769 int splitted = 0;
49770 - const struct ldlm_callback_suite null_cbs = { NULL };
49771 + const struct ldlm_callback_suite null_cbs = { };
49772 int rc;
49773
49774 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
49775 diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49776 index fc6c977..df1f956 100644
49777 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49778 +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49779 @@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
49780 int LL_PROC_PROTO(proc_console_max_delay_cs)
49781 {
49782 int rc, max_delay_cs;
49783 - ctl_table_t dummy = *table;
49784 + ctl_table_no_const dummy = *table;
49785 cfs_duration_t d;
49786
49787 dummy.data = &max_delay_cs;
49788 @@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
49789 int LL_PROC_PROTO(proc_console_min_delay_cs)
49790 {
49791 int rc, min_delay_cs;
49792 - ctl_table_t dummy = *table;
49793 + ctl_table_no_const dummy = *table;
49794 cfs_duration_t d;
49795
49796 dummy.data = &min_delay_cs;
49797 @@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
49798 int LL_PROC_PROTO(proc_console_backoff)
49799 {
49800 int rc, backoff;
49801 - ctl_table_t dummy = *table;
49802 + ctl_table_no_const dummy = *table;
49803
49804 dummy.data = &backoff;
49805 dummy.proc_handler = &proc_dointvec;
49806 diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
49807 index f3108c7..cd4f9da 100644
49808 --- a/drivers/staging/lustre/lustre/libcfs/module.c
49809 +++ b/drivers/staging/lustre/lustre/libcfs/module.c
49810 @@ -348,11 +348,11 @@ out:
49811
49812
49813 struct cfs_psdev_ops libcfs_psdev_ops = {
49814 - libcfs_psdev_open,
49815 - libcfs_psdev_release,
49816 - NULL,
49817 - NULL,
49818 - libcfs_ioctl
49819 + .p_open = libcfs_psdev_open,
49820 + .p_close = libcfs_psdev_release,
49821 + .p_read = NULL,
49822 + .p_write = NULL,
49823 + .p_ioctl = libcfs_ioctl
49824 };
49825
49826 extern int insert_proc(void);
49827 diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
49828 index a4e0472..05d854c 100644
49829 --- a/drivers/staging/lustre/lustre/llite/dir.c
49830 +++ b/drivers/staging/lustre/lustre/llite/dir.c
49831 @@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
49832 int mode;
49833 int err;
49834
49835 - mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
49836 + mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
49837 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
49838 strlen(filename), mode, LUSTRE_OPC_MKDIR,
49839 lump);
49840 diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
49841 index 3675020..e80d92c 100644
49842 --- a/drivers/staging/media/solo6x10/solo6x10-core.c
49843 +++ b/drivers/staging/media/solo6x10/solo6x10-core.c
49844 @@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
49845
49846 static int solo_sysfs_init(struct solo_dev *solo_dev)
49847 {
49848 - struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
49849 + bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
49850 struct device *dev = &solo_dev->dev;
49851 const char *driver;
49852 int i;
49853 diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
49854 index 1db18c7..35e6afc 100644
49855 --- a/drivers/staging/media/solo6x10/solo6x10-g723.c
49856 +++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
49857 @@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
49858
49859 int solo_g723_init(struct solo_dev *solo_dev)
49860 {
49861 - static struct snd_device_ops ops = { NULL };
49862 + static struct snd_device_ops ops = { };
49863 struct snd_card *card;
49864 struct snd_kcontrol_new kctl;
49865 char name[32];
49866 diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49867 index 7f2f247..d999137 100644
49868 --- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
49869 +++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49870 @@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
49871
49872 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
49873 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
49874 - p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
49875 + p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
49876 if (p2m_id < 0)
49877 p2m_id = -p2m_id;
49878 }
49879 diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
49880 index f1bbb8c..a73eaba 100644
49881 --- a/drivers/staging/media/solo6x10/solo6x10.h
49882 +++ b/drivers/staging/media/solo6x10/solo6x10.h
49883 @@ -237,7 +237,7 @@ struct solo_dev {
49884
49885 /* P2M DMA Engine */
49886 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
49887 - atomic_t p2m_count;
49888 + atomic_unchecked_t p2m_count;
49889 int p2m_jiffies;
49890 unsigned int p2m_timeouts;
49891
49892 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
49893 index 0315f60..ce93f406 100644
49894 --- a/drivers/staging/octeon/ethernet-rx.c
49895 +++ b/drivers/staging/octeon/ethernet-rx.c
49896 @@ -418,11 +418,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49897 /* Increment RX stats for virtual ports */
49898 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
49899 #ifdef CONFIG_64BIT
49900 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
49901 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
49902 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
49903 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
49904 #else
49905 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
49906 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
49907 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
49908 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
49909 #endif
49910 }
49911 netif_receive_skb(skb);
49912 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49913 dev->name);
49914 */
49915 #ifdef CONFIG_64BIT
49916 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
49917 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49918 #else
49919 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
49920 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
49921 #endif
49922 dev_kfree_skb_irq(skb);
49923 }
49924 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
49925 index bd6ca71..8f0961e 100644
49926 --- a/drivers/staging/octeon/ethernet.c
49927 +++ b/drivers/staging/octeon/ethernet.c
49928 @@ -254,11 +254,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
49929 * since the RX tasklet also increments it.
49930 */
49931 #ifdef CONFIG_64BIT
49932 - atomic64_add(rx_status.dropped_packets,
49933 - (atomic64_t *)&priv->stats.rx_dropped);
49934 + atomic64_add_unchecked(rx_status.dropped_packets,
49935 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49936 #else
49937 - atomic_add(rx_status.dropped_packets,
49938 - (atomic_t *)&priv->stats.rx_dropped);
49939 + atomic_add_unchecked(rx_status.dropped_packets,
49940 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
49941 #endif
49942 }
49943
49944 diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
49945 index 439c3c9..2d74293 100644
49946 --- a/drivers/staging/rtl8188eu/include/hal_intf.h
49947 +++ b/drivers/staging/rtl8188eu/include/hal_intf.h
49948 @@ -271,7 +271,7 @@ struct hal_ops {
49949 s32 (*c2h_handler)(struct adapter *padapter,
49950 struct c2h_evt_hdr *c2h_evt);
49951 c2h_id_filter c2h_id_filter_ccx;
49952 -};
49953 +} __no_const;
49954
49955 enum rt_eeprom_type {
49956 EEPROM_93C46,
49957 diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
49958 index eb6f0e5..e6a0958 100644
49959 --- a/drivers/staging/rtl8188eu/include/rtw_io.h
49960 +++ b/drivers/staging/rtl8188eu/include/rtw_io.h
49961 @@ -126,7 +126,7 @@ struct _io_ops {
49962 u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
49963 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
49964 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
49965 -};
49966 +} __no_const;
49967
49968 struct io_req {
49969 struct list_head list;
49970 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
49971 index dc23395..cf7e9b1 100644
49972 --- a/drivers/staging/rtl8712/rtl871x_io.h
49973 +++ b/drivers/staging/rtl8712/rtl871x_io.h
49974 @@ -108,7 +108,7 @@ struct _io_ops {
49975 u8 *pmem);
49976 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
49977 u8 *pmem);
49978 -};
49979 +} __no_const;
49980
49981 struct io_req {
49982 struct list_head list;
49983 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
49984 index 1f5088b..0e59820 100644
49985 --- a/drivers/staging/sbe-2t3e3/netdev.c
49986 +++ b/drivers/staging/sbe-2t3e3/netdev.c
49987 @@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49988 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
49989
49990 if (rlen)
49991 - if (copy_to_user(data, &resp, rlen))
49992 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
49993 return -EFAULT;
49994
49995 return 0;
49996 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
49997 index a863a98..d272795 100644
49998 --- a/drivers/staging/usbip/vhci.h
49999 +++ b/drivers/staging/usbip/vhci.h
50000 @@ -83,7 +83,7 @@ struct vhci_hcd {
50001 unsigned resuming:1;
50002 unsigned long re_timeout;
50003
50004 - atomic_t seqnum;
50005 + atomic_unchecked_t seqnum;
50006
50007 /*
50008 * NOTE:
50009 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
50010 index e810ad5..931336f 100644
50011 --- a/drivers/staging/usbip/vhci_hcd.c
50012 +++ b/drivers/staging/usbip/vhci_hcd.c
50013 @@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
50014
50015 spin_lock(&vdev->priv_lock);
50016
50017 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
50018 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
50019 if (priv->seqnum == 0xffff)
50020 dev_info(&urb->dev->dev, "seqnum max\n");
50021
50022 @@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
50023 return -ENOMEM;
50024 }
50025
50026 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
50027 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
50028 if (unlink->seqnum == 0xffff)
50029 pr_info("seqnum max\n");
50030
50031 @@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
50032 vdev->rhport = rhport;
50033 }
50034
50035 - atomic_set(&vhci->seqnum, 0);
50036 + atomic_set_unchecked(&vhci->seqnum, 0);
50037 spin_lock_init(&vhci->lock);
50038
50039 hcd->power_budget = 0; /* no limit */
50040 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
50041 index d07fcb5..358e1e1 100644
50042 --- a/drivers/staging/usbip/vhci_rx.c
50043 +++ b/drivers/staging/usbip/vhci_rx.c
50044 @@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
50045 if (!urb) {
50046 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
50047 pr_info("max seqnum %d\n",
50048 - atomic_read(&the_controller->seqnum));
50049 + atomic_read_unchecked(&the_controller->seqnum));
50050 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
50051 return;
50052 }
50053 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
50054 index ab8b2ba..99184aa 100644
50055 --- a/drivers/staging/vt6655/hostap.c
50056 +++ b/drivers/staging/vt6655/hostap.c
50057 @@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
50058 *
50059 */
50060
50061 +static net_device_ops_no_const apdev_netdev_ops;
50062 +
50063 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
50064 {
50065 PSDevice apdev_priv;
50066 struct net_device *dev = pDevice->dev;
50067 int ret;
50068 - const struct net_device_ops apdev_netdev_ops = {
50069 - .ndo_start_xmit = pDevice->tx_80211,
50070 - };
50071
50072 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
50073
50074 @@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
50075 *apdev_priv = *pDevice;
50076 eth_hw_addr_inherit(pDevice->apdev, dev);
50077
50078 + /* only half broken now */
50079 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
50080 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
50081
50082 pDevice->apdev->type = ARPHRD_IEEE80211;
50083 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
50084 index 67ba48b..24e602f 100644
50085 --- a/drivers/staging/vt6656/hostap.c
50086 +++ b/drivers/staging/vt6656/hostap.c
50087 @@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
50088 *
50089 */
50090
50091 +static net_device_ops_no_const apdev_netdev_ops;
50092 +
50093 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
50094 {
50095 struct vnt_private *apdev_priv;
50096 struct net_device *dev = pDevice->dev;
50097 int ret;
50098 - const struct net_device_ops apdev_netdev_ops = {
50099 - .ndo_start_xmit = pDevice->tx_80211,
50100 - };
50101
50102 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
50103
50104 @@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
50105 *apdev_priv = *pDevice;
50106 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
50107
50108 + /* only half broken now */
50109 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
50110 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
50111
50112 pDevice->apdev->type = ARPHRD_IEEE80211;
50113 diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
50114 index 24884ca..26c8220 100644
50115 --- a/drivers/target/sbp/sbp_target.c
50116 +++ b/drivers/target/sbp/sbp_target.c
50117 @@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
50118
50119 #define SESSION_MAINTENANCE_INTERVAL HZ
50120
50121 -static atomic_t login_id = ATOMIC_INIT(0);
50122 +static atomic_unchecked_t login_id = ATOMIC_INIT(0);
50123
50124 static void session_maintenance_work(struct work_struct *);
50125 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
50126 @@ -444,7 +444,7 @@ static void sbp_management_request_login(
50127 login->lun = se_lun;
50128 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
50129 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
50130 - login->login_id = atomic_inc_return(&login_id);
50131 + login->login_id = atomic_inc_return_unchecked(&login_id);
50132
50133 login->tgt_agt = sbp_target_agent_register(login);
50134 if (IS_ERR(login->tgt_agt)) {
50135 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
50136 index d06de84..fd38c9b 100644
50137 --- a/drivers/target/target_core_device.c
50138 +++ b/drivers/target/target_core_device.c
50139 @@ -1435,7 +1435,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
50140 spin_lock_init(&dev->se_tmr_lock);
50141 spin_lock_init(&dev->qf_cmd_lock);
50142 sema_init(&dev->caw_sem, 1);
50143 - atomic_set(&dev->dev_ordered_id, 0);
50144 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
50145 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
50146 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
50147 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
50148 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
50149 index dee2be1..f5fd8ca 100644
50150 --- a/drivers/target/target_core_transport.c
50151 +++ b/drivers/target/target_core_transport.c
50152 @@ -1113,7 +1113,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
50153 * Used to determine when ORDERED commands should go from
50154 * Dormant to Active status.
50155 */
50156 - cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
50157 + cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
50158 smp_mb__after_atomic_inc();
50159 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
50160 cmd->se_ordered_id, cmd->sam_task_attr,
50161 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
50162 index 33f83fe..d80f8e1 100644
50163 --- a/drivers/tty/cyclades.c
50164 +++ b/drivers/tty/cyclades.c
50165 @@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
50166 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
50167 info->port.count);
50168 #endif
50169 - info->port.count++;
50170 + atomic_inc(&info->port.count);
50171 #ifdef CY_DEBUG_COUNT
50172 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
50173 - current->pid, info->port.count);
50174 + current->pid, atomic_read(&info->port.count));
50175 #endif
50176
50177 /*
50178 @@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
50179 for (j = 0; j < cy_card[i].nports; j++) {
50180 info = &cy_card[i].ports[j];
50181
50182 - if (info->port.count) {
50183 + if (atomic_read(&info->port.count)) {
50184 /* XXX is the ldisc num worth this? */
50185 struct tty_struct *tty;
50186 struct tty_ldisc *ld;
50187 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
50188 index 9eba119..5070303 100644
50189 --- a/drivers/tty/hvc/hvc_console.c
50190 +++ b/drivers/tty/hvc/hvc_console.c
50191 @@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
50192
50193 spin_lock_irqsave(&hp->port.lock, flags);
50194 /* Check and then increment for fast path open. */
50195 - if (hp->port.count++ > 0) {
50196 + if (atomic_inc_return(&hp->port.count) > 1) {
50197 spin_unlock_irqrestore(&hp->port.lock, flags);
50198 hvc_kick();
50199 return 0;
50200 @@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
50201
50202 spin_lock_irqsave(&hp->port.lock, flags);
50203
50204 - if (--hp->port.count == 0) {
50205 + if (atomic_dec_return(&hp->port.count) == 0) {
50206 spin_unlock_irqrestore(&hp->port.lock, flags);
50207 /* We are done with the tty pointer now. */
50208 tty_port_tty_set(&hp->port, NULL);
50209 @@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
50210 */
50211 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
50212 } else {
50213 - if (hp->port.count < 0)
50214 + if (atomic_read(&hp->port.count) < 0)
50215 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
50216 - hp->vtermno, hp->port.count);
50217 + hp->vtermno, atomic_read(&hp->port.count));
50218 spin_unlock_irqrestore(&hp->port.lock, flags);
50219 }
50220 }
50221 @@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty)
50222 * open->hangup case this can be called after the final close so prevent
50223 * that from happening for now.
50224 */
50225 - if (hp->port.count <= 0) {
50226 + if (atomic_read(&hp->port.count) <= 0) {
50227 spin_unlock_irqrestore(&hp->port.lock, flags);
50228 return;
50229 }
50230
50231 - hp->port.count = 0;
50232 + atomic_set(&hp->port.count, 0);
50233 spin_unlock_irqrestore(&hp->port.lock, flags);
50234 tty_port_tty_set(&hp->port, NULL);
50235
50236 @@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
50237 return -EPIPE;
50238
50239 /* FIXME what's this (unprotected) check for? */
50240 - if (hp->port.count <= 0)
50241 + if (atomic_read(&hp->port.count) <= 0)
50242 return -EIO;
50243
50244 spin_lock_irqsave(&hp->lock, flags);
50245 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
50246 index 81e939e..95ead10 100644
50247 --- a/drivers/tty/hvc/hvcs.c
50248 +++ b/drivers/tty/hvc/hvcs.c
50249 @@ -83,6 +83,7 @@
50250 #include <asm/hvcserver.h>
50251 #include <asm/uaccess.h>
50252 #include <asm/vio.h>
50253 +#include <asm/local.h>
50254
50255 /*
50256 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
50257 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
50258
50259 spin_lock_irqsave(&hvcsd->lock, flags);
50260
50261 - if (hvcsd->port.count > 0) {
50262 + if (atomic_read(&hvcsd->port.count) > 0) {
50263 spin_unlock_irqrestore(&hvcsd->lock, flags);
50264 printk(KERN_INFO "HVCS: vterm state unchanged. "
50265 "The hvcs device node is still in use.\n");
50266 @@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
50267 }
50268 }
50269
50270 - hvcsd->port.count = 0;
50271 + atomic_set(&hvcsd->port.count, 0);
50272 hvcsd->port.tty = tty;
50273 tty->driver_data = hvcsd;
50274
50275 @@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
50276 unsigned long flags;
50277
50278 spin_lock_irqsave(&hvcsd->lock, flags);
50279 - hvcsd->port.count++;
50280 + atomic_inc(&hvcsd->port.count);
50281 hvcsd->todo_mask |= HVCS_SCHED_READ;
50282 spin_unlock_irqrestore(&hvcsd->lock, flags);
50283
50284 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
50285 hvcsd = tty->driver_data;
50286
50287 spin_lock_irqsave(&hvcsd->lock, flags);
50288 - if (--hvcsd->port.count == 0) {
50289 + if (atomic_dec_and_test(&hvcsd->port.count)) {
50290
50291 vio_disable_interrupts(hvcsd->vdev);
50292
50293 @@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
50294
50295 free_irq(irq, hvcsd);
50296 return;
50297 - } else if (hvcsd->port.count < 0) {
50298 + } else if (atomic_read(&hvcsd->port.count) < 0) {
50299 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
50300 " is missmanaged.\n",
50301 - hvcsd->vdev->unit_address, hvcsd->port.count);
50302 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
50303 }
50304
50305 spin_unlock_irqrestore(&hvcsd->lock, flags);
50306 @@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
50307
50308 spin_lock_irqsave(&hvcsd->lock, flags);
50309 /* Preserve this so that we know how many kref refs to put */
50310 - temp_open_count = hvcsd->port.count;
50311 + temp_open_count = atomic_read(&hvcsd->port.count);
50312
50313 /*
50314 * Don't kref put inside the spinlock because the destruction
50315 @@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
50316 tty->driver_data = NULL;
50317 hvcsd->port.tty = NULL;
50318
50319 - hvcsd->port.count = 0;
50320 + atomic_set(&hvcsd->port.count, 0);
50321
50322 /* This will drop any buffered data on the floor which is OK in a hangup
50323 * scenario. */
50324 @@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
50325 * the middle of a write operation? This is a crummy place to do this
50326 * but we want to keep it all in the spinlock.
50327 */
50328 - if (hvcsd->port.count <= 0) {
50329 + if (atomic_read(&hvcsd->port.count) <= 0) {
50330 spin_unlock_irqrestore(&hvcsd->lock, flags);
50331 return -ENODEV;
50332 }
50333 @@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
50334 {
50335 struct hvcs_struct *hvcsd = tty->driver_data;
50336
50337 - if (!hvcsd || hvcsd->port.count <= 0)
50338 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
50339 return 0;
50340
50341 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
50342 diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
50343 index 4190199..06d5bfa 100644
50344 --- a/drivers/tty/hvc/hvsi.c
50345 +++ b/drivers/tty/hvc/hvsi.c
50346 @@ -85,7 +85,7 @@ struct hvsi_struct {
50347 int n_outbuf;
50348 uint32_t vtermno;
50349 uint32_t virq;
50350 - atomic_t seqno; /* HVSI packet sequence number */
50351 + atomic_unchecked_t seqno; /* HVSI packet sequence number */
50352 uint16_t mctrl;
50353 uint8_t state; /* HVSI protocol state */
50354 uint8_t flags;
50355 @@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
50356
50357 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
50358 packet.hdr.len = sizeof(struct hvsi_query_response);
50359 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50360 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50361 packet.verb = VSV_SEND_VERSION_NUMBER;
50362 packet.u.version = HVSI_VERSION;
50363 packet.query_seqno = query_seqno+1;
50364 @@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
50365
50366 packet.hdr.type = VS_QUERY_PACKET_HEADER;
50367 packet.hdr.len = sizeof(struct hvsi_query);
50368 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50369 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50370 packet.verb = verb;
50371
50372 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
50373 @@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
50374 int wrote;
50375
50376 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
50377 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50378 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50379 packet.hdr.len = sizeof(struct hvsi_control);
50380 packet.verb = VSV_SET_MODEM_CTL;
50381 packet.mask = HVSI_TSDTR;
50382 @@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
50383 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
50384
50385 packet.hdr.type = VS_DATA_PACKET_HEADER;
50386 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50387 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50388 packet.hdr.len = count + sizeof(struct hvsi_header);
50389 memcpy(&packet.data, buf, count);
50390
50391 @@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
50392 struct hvsi_control packet __ALIGNED__;
50393
50394 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
50395 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50396 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50397 packet.hdr.len = 6;
50398 packet.verb = VSV_CLOSE_PROTOCOL;
50399
50400 @@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
50401
50402 tty_port_tty_set(&hp->port, tty);
50403 spin_lock_irqsave(&hp->lock, flags);
50404 - hp->port.count++;
50405 + atomic_inc(&hp->port.count);
50406 atomic_set(&hp->seqno, 0);
50407 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
50408 spin_unlock_irqrestore(&hp->lock, flags);
50409 @@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
50410
50411 spin_lock_irqsave(&hp->lock, flags);
50412
50413 - if (--hp->port.count == 0) {
50414 + if (atomic_dec_return(&hp->port.count) == 0) {
50415 tty_port_tty_set(&hp->port, NULL);
50416 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
50417
50418 @@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
50419
50420 spin_lock_irqsave(&hp->lock, flags);
50421 }
50422 - } else if (hp->port.count < 0)
50423 + } else if (atomic_read(&hp->port.count) < 0)
50424 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
50425 - hp - hvsi_ports, hp->port.count);
50426 + hp - hvsi_ports, atomic_read(&hp->port.count));
50427
50428 spin_unlock_irqrestore(&hp->lock, flags);
50429 }
50430 @@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
50431 tty_port_tty_set(&hp->port, NULL);
50432
50433 spin_lock_irqsave(&hp->lock, flags);
50434 - hp->port.count = 0;
50435 + atomic_set(&hp->port.count, 0);
50436 hp->n_outbuf = 0;
50437 spin_unlock_irqrestore(&hp->lock, flags);
50438 }
50439 diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
50440 index 347050e..14f8fbf 100644
50441 --- a/drivers/tty/hvc/hvsi_lib.c
50442 +++ b/drivers/tty/hvc/hvsi_lib.c
50443 @@ -9,7 +9,7 @@
50444
50445 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
50446 {
50447 - packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
50448 + packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
50449
50450 /* Assumes that always succeeds, works in practice */
50451 return pv->put_chars(pv->termno, (char *)packet, packet->len);
50452 @@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
50453
50454 /* Reset state */
50455 pv->established = 0;
50456 - atomic_set(&pv->seqno, 0);
50457 + atomic_set_unchecked(&pv->seqno, 0);
50458
50459 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
50460
50461 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
50462 index 8fd72ff..34a0bed 100644
50463 --- a/drivers/tty/ipwireless/tty.c
50464 +++ b/drivers/tty/ipwireless/tty.c
50465 @@ -29,6 +29,7 @@
50466 #include <linux/tty_driver.h>
50467 #include <linux/tty_flip.h>
50468 #include <linux/uaccess.h>
50469 +#include <asm/local.h>
50470
50471 #include "tty.h"
50472 #include "network.h"
50473 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
50474 mutex_unlock(&tty->ipw_tty_mutex);
50475 return -ENODEV;
50476 }
50477 - if (tty->port.count == 0)
50478 + if (atomic_read(&tty->port.count) == 0)
50479 tty->tx_bytes_queued = 0;
50480
50481 - tty->port.count++;
50482 + atomic_inc(&tty->port.count);
50483
50484 tty->port.tty = linux_tty;
50485 linux_tty->driver_data = tty;
50486 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
50487
50488 static void do_ipw_close(struct ipw_tty *tty)
50489 {
50490 - tty->port.count--;
50491 -
50492 - if (tty->port.count == 0) {
50493 + if (atomic_dec_return(&tty->port.count) == 0) {
50494 struct tty_struct *linux_tty = tty->port.tty;
50495
50496 if (linux_tty != NULL) {
50497 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
50498 return;
50499
50500 mutex_lock(&tty->ipw_tty_mutex);
50501 - if (tty->port.count == 0) {
50502 + if (atomic_read(&tty->port.count) == 0) {
50503 mutex_unlock(&tty->ipw_tty_mutex);
50504 return;
50505 }
50506 @@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
50507
50508 mutex_lock(&tty->ipw_tty_mutex);
50509
50510 - if (!tty->port.count) {
50511 + if (!atomic_read(&tty->port.count)) {
50512 mutex_unlock(&tty->ipw_tty_mutex);
50513 return;
50514 }
50515 @@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
50516 return -ENODEV;
50517
50518 mutex_lock(&tty->ipw_tty_mutex);
50519 - if (!tty->port.count) {
50520 + if (!atomic_read(&tty->port.count)) {
50521 mutex_unlock(&tty->ipw_tty_mutex);
50522 return -EINVAL;
50523 }
50524 @@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
50525 if (!tty)
50526 return -ENODEV;
50527
50528 - if (!tty->port.count)
50529 + if (!atomic_read(&tty->port.count))
50530 return -EINVAL;
50531
50532 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
50533 @@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
50534 if (!tty)
50535 return 0;
50536
50537 - if (!tty->port.count)
50538 + if (!atomic_read(&tty->port.count))
50539 return 0;
50540
50541 return tty->tx_bytes_queued;
50542 @@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
50543 if (!tty)
50544 return -ENODEV;
50545
50546 - if (!tty->port.count)
50547 + if (!atomic_read(&tty->port.count))
50548 return -EINVAL;
50549
50550 return get_control_lines(tty);
50551 @@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
50552 if (!tty)
50553 return -ENODEV;
50554
50555 - if (!tty->port.count)
50556 + if (!atomic_read(&tty->port.count))
50557 return -EINVAL;
50558
50559 return set_control_lines(tty, set, clear);
50560 @@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
50561 if (!tty)
50562 return -ENODEV;
50563
50564 - if (!tty->port.count)
50565 + if (!atomic_read(&tty->port.count))
50566 return -EINVAL;
50567
50568 /* FIXME: Exactly how is the tty object locked here .. */
50569 @@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
50570 * are gone */
50571 mutex_lock(&ttyj->ipw_tty_mutex);
50572 }
50573 - while (ttyj->port.count)
50574 + while (atomic_read(&ttyj->port.count))
50575 do_ipw_close(ttyj);
50576 ipwireless_disassociate_network_ttys(network,
50577 ttyj->channel_idx);
50578 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
50579 index 1deaca4..c8582d4 100644
50580 --- a/drivers/tty/moxa.c
50581 +++ b/drivers/tty/moxa.c
50582 @@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
50583 }
50584
50585 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
50586 - ch->port.count++;
50587 + atomic_inc(&ch->port.count);
50588 tty->driver_data = ch;
50589 tty_port_tty_set(&ch->port, tty);
50590 mutex_lock(&ch->port.mutex);
50591 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
50592 index 5056090..c80ca04 100644
50593 --- a/drivers/tty/n_gsm.c
50594 +++ b/drivers/tty/n_gsm.c
50595 @@ -1643,7 +1643,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
50596 spin_lock_init(&dlci->lock);
50597 mutex_init(&dlci->mutex);
50598 dlci->fifo = &dlci->_fifo;
50599 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
50600 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
50601 kfree(dlci);
50602 return NULL;
50603 }
50604 @@ -2946,7 +2946,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
50605 struct gsm_dlci *dlci = tty->driver_data;
50606 struct tty_port *port = &dlci->port;
50607
50608 - port->count++;
50609 + atomic_inc(&port->count);
50610 dlci_get(dlci);
50611 dlci_get(dlci->gsm->dlci[0]);
50612 mux_get(dlci->gsm);
50613 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
50614 index 4c10837..a40ec45 100644
50615 --- a/drivers/tty/n_tty.c
50616 +++ b/drivers/tty/n_tty.c
50617 @@ -114,7 +114,7 @@ struct n_tty_data {
50618 int minimum_to_wake;
50619
50620 /* consumer-published */
50621 - size_t read_tail;
50622 + size_t read_tail __intentional_overflow(-1);
50623 size_t line_start;
50624
50625 /* protected by output lock */
50626 @@ -2504,6 +2504,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
50627 {
50628 *ops = tty_ldisc_N_TTY;
50629 ops->owner = NULL;
50630 - ops->refcount = ops->flags = 0;
50631 + atomic_set(&ops->refcount, 0);
50632 + ops->flags = 0;
50633 }
50634 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
50635 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
50636 index 25c9bc7..24077b7 100644
50637 --- a/drivers/tty/pty.c
50638 +++ b/drivers/tty/pty.c
50639 @@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
50640 panic("Couldn't register Unix98 pts driver");
50641
50642 /* Now create the /dev/ptmx special device */
50643 + pax_open_kernel();
50644 tty_default_fops(&ptmx_fops);
50645 - ptmx_fops.open = ptmx_open;
50646 + *(void **)&ptmx_fops.open = ptmx_open;
50647 + pax_close_kernel();
50648
50649 cdev_init(&ptmx_cdev, &ptmx_fops);
50650 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
50651 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
50652 index 354564e..fe50d9a 100644
50653 --- a/drivers/tty/rocket.c
50654 +++ b/drivers/tty/rocket.c
50655 @@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
50656 tty->driver_data = info;
50657 tty_port_tty_set(port, tty);
50658
50659 - if (port->count++ == 0) {
50660 + if (atomic_inc_return(&port->count) == 1) {
50661 atomic_inc(&rp_num_ports_open);
50662
50663 #ifdef ROCKET_DEBUG_OPEN
50664 @@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
50665 #endif
50666 }
50667 #ifdef ROCKET_DEBUG_OPEN
50668 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
50669 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
50670 #endif
50671
50672 /*
50673 @@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
50674 spin_unlock_irqrestore(&info->port.lock, flags);
50675 return;
50676 }
50677 - if (info->port.count)
50678 + if (atomic_read(&info->port.count))
50679 atomic_dec(&rp_num_ports_open);
50680 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
50681 spin_unlock_irqrestore(&info->port.lock, flags);
50682 diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
50683 index 1274499..f541382 100644
50684 --- a/drivers/tty/serial/ioc4_serial.c
50685 +++ b/drivers/tty/serial/ioc4_serial.c
50686 @@ -437,7 +437,7 @@ struct ioc4_soft {
50687 } is_intr_info[MAX_IOC4_INTR_ENTS];
50688
50689 /* Number of entries active in the above array */
50690 - atomic_t is_num_intrs;
50691 + atomic_unchecked_t is_num_intrs;
50692 } is_intr_type[IOC4_NUM_INTR_TYPES];
50693
50694 /* is_ir_lock must be held while
50695 @@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
50696 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
50697 || (type == IOC4_OTHER_INTR_TYPE)));
50698
50699 - i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
50700 + i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
50701 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
50702
50703 /* Save off the lower level interrupt handler */
50704 @@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
50705
50706 soft = arg;
50707 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
50708 - num_intrs = (int)atomic_read(
50709 + num_intrs = (int)atomic_read_unchecked(
50710 &soft->is_intr_type[intr_type].is_num_intrs);
50711
50712 this_mir = this_ir = pending_intrs(soft, intr_type);
50713 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
50714 index a260cde..6b2b5ce 100644
50715 --- a/drivers/tty/serial/kgdboc.c
50716 +++ b/drivers/tty/serial/kgdboc.c
50717 @@ -24,8 +24,9 @@
50718 #define MAX_CONFIG_LEN 40
50719
50720 static struct kgdb_io kgdboc_io_ops;
50721 +static struct kgdb_io kgdboc_io_ops_console;
50722
50723 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
50724 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
50725 static int configured = -1;
50726
50727 static char config[MAX_CONFIG_LEN];
50728 @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
50729 kgdboc_unregister_kbd();
50730 if (configured == 1)
50731 kgdb_unregister_io_module(&kgdboc_io_ops);
50732 + else if (configured == 2)
50733 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
50734 }
50735
50736 static int configure_kgdboc(void)
50737 @@ -160,13 +163,13 @@ static int configure_kgdboc(void)
50738 int err;
50739 char *cptr = config;
50740 struct console *cons;
50741 + int is_console = 0;
50742
50743 err = kgdboc_option_setup(config);
50744 if (err || !strlen(config) || isspace(config[0]))
50745 goto noconfig;
50746
50747 err = -ENODEV;
50748 - kgdboc_io_ops.is_console = 0;
50749 kgdb_tty_driver = NULL;
50750
50751 kgdboc_use_kms = 0;
50752 @@ -187,7 +190,7 @@ static int configure_kgdboc(void)
50753 int idx;
50754 if (cons->device && cons->device(cons, &idx) == p &&
50755 idx == tty_line) {
50756 - kgdboc_io_ops.is_console = 1;
50757 + is_console = 1;
50758 break;
50759 }
50760 cons = cons->next;
50761 @@ -197,7 +200,13 @@ static int configure_kgdboc(void)
50762 kgdb_tty_line = tty_line;
50763
50764 do_register:
50765 - err = kgdb_register_io_module(&kgdboc_io_ops);
50766 + if (is_console) {
50767 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
50768 + configured = 2;
50769 + } else {
50770 + err = kgdb_register_io_module(&kgdboc_io_ops);
50771 + configured = 1;
50772 + }
50773 if (err)
50774 goto noconfig;
50775
50776 @@ -205,8 +214,6 @@ do_register:
50777 if (err)
50778 goto nmi_con_failed;
50779
50780 - configured = 1;
50781 -
50782 return 0;
50783
50784 nmi_con_failed:
50785 @@ -223,7 +230,7 @@ noconfig:
50786 static int __init init_kgdboc(void)
50787 {
50788 /* Already configured? */
50789 - if (configured == 1)
50790 + if (configured >= 1)
50791 return 0;
50792
50793 return configure_kgdboc();
50794 @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
50795 if (config[len - 1] == '\n')
50796 config[len - 1] = '\0';
50797
50798 - if (configured == 1)
50799 + if (configured >= 1)
50800 cleanup_kgdboc();
50801
50802 /* Go and configure with the new params. */
50803 @@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
50804 .post_exception = kgdboc_post_exp_handler,
50805 };
50806
50807 +static struct kgdb_io kgdboc_io_ops_console = {
50808 + .name = "kgdboc",
50809 + .read_char = kgdboc_get_char,
50810 + .write_char = kgdboc_put_char,
50811 + .pre_exception = kgdboc_pre_exp_handler,
50812 + .post_exception = kgdboc_post_exp_handler,
50813 + .is_console = 1
50814 +};
50815 +
50816 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
50817 /* This is only available if kgdboc is a built in for early debugging */
50818 static int __init kgdboc_early_init(char *opt)
50819 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
50820 index b5d779c..3622cfe 100644
50821 --- a/drivers/tty/serial/msm_serial.c
50822 +++ b/drivers/tty/serial/msm_serial.c
50823 @@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
50824 .cons = MSM_CONSOLE,
50825 };
50826
50827 -static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
50828 +static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
50829
50830 static const struct of_device_id msm_uartdm_table[] = {
50831 { .compatible = "qcom,msm-uartdm" },
50832 @@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
50833 int irq;
50834
50835 if (pdev->id == -1)
50836 - pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
50837 + pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
50838
50839 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
50840 return -ENXIO;
50841 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
50842 index c1af04d..0815c8a 100644
50843 --- a/drivers/tty/serial/samsung.c
50844 +++ b/drivers/tty/serial/samsung.c
50845 @@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
50846 }
50847 }
50848
50849 +static int s3c64xx_serial_startup(struct uart_port *port);
50850 static int s3c24xx_serial_startup(struct uart_port *port)
50851 {
50852 struct s3c24xx_uart_port *ourport = to_ourport(port);
50853 int ret;
50854
50855 + /* Startup sequence is different for s3c64xx and higher SoC's */
50856 + if (s3c24xx_serial_has_interrupt_mask(port))
50857 + return s3c64xx_serial_startup(port);
50858 +
50859 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
50860 port->mapbase, port->membase);
50861
50862 @@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
50863 /* setup info for port */
50864 port->dev = &platdev->dev;
50865
50866 - /* Startup sequence is different for s3c64xx and higher SoC's */
50867 - if (s3c24xx_serial_has_interrupt_mask(port))
50868 - s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
50869 -
50870 port->uartclk = 1;
50871
50872 if (cfg->uart_flags & UPF_CONS_FLOW) {
50873 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
50874 index 0f02351..07c59c5 100644
50875 --- a/drivers/tty/serial/serial_core.c
50876 +++ b/drivers/tty/serial/serial_core.c
50877 @@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
50878 uart_flush_buffer(tty);
50879 uart_shutdown(tty, state);
50880 spin_lock_irqsave(&port->lock, flags);
50881 - port->count = 0;
50882 + atomic_set(&port->count, 0);
50883 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
50884 spin_unlock_irqrestore(&port->lock, flags);
50885 tty_port_tty_set(port, NULL);
50886 @@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50887 goto end;
50888 }
50889
50890 - port->count++;
50891 + atomic_inc(&port->count);
50892 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
50893 retval = -ENXIO;
50894 goto err_dec_count;
50895 @@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50896 /*
50897 * Make sure the device is in D0 state.
50898 */
50899 - if (port->count == 1)
50900 + if (atomic_read(&port->count) == 1)
50901 uart_change_pm(state, UART_PM_STATE_ON);
50902
50903 /*
50904 @@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50905 end:
50906 return retval;
50907 err_dec_count:
50908 - port->count--;
50909 + atomic_inc(&port->count);
50910 mutex_unlock(&port->mutex);
50911 goto end;
50912 }
50913 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
50914 index e1ce141..6d4ed80 100644
50915 --- a/drivers/tty/synclink.c
50916 +++ b/drivers/tty/synclink.c
50917 @@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50918
50919 if (debug_level >= DEBUG_LEVEL_INFO)
50920 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
50921 - __FILE__,__LINE__, info->device_name, info->port.count);
50922 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
50923
50924 if (tty_port_close_start(&info->port, tty, filp) == 0)
50925 goto cleanup;
50926 @@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50927 cleanup:
50928 if (debug_level >= DEBUG_LEVEL_INFO)
50929 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
50930 - tty->driver->name, info->port.count);
50931 + tty->driver->name, atomic_read(&info->port.count));
50932
50933 } /* end of mgsl_close() */
50934
50935 @@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
50936
50937 mgsl_flush_buffer(tty);
50938 shutdown(info);
50939 -
50940 - info->port.count = 0;
50941 +
50942 + atomic_set(&info->port.count, 0);
50943 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50944 info->port.tty = NULL;
50945
50946 @@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50947
50948 if (debug_level >= DEBUG_LEVEL_INFO)
50949 printk("%s(%d):block_til_ready before block on %s count=%d\n",
50950 - __FILE__,__LINE__, tty->driver->name, port->count );
50951 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50952
50953 spin_lock_irqsave(&info->irq_spinlock, flags);
50954 if (!tty_hung_up_p(filp)) {
50955 extra_count = true;
50956 - port->count--;
50957 + atomic_dec(&port->count);
50958 }
50959 spin_unlock_irqrestore(&info->irq_spinlock, flags);
50960 port->blocked_open++;
50961 @@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50962
50963 if (debug_level >= DEBUG_LEVEL_INFO)
50964 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
50965 - __FILE__,__LINE__, tty->driver->name, port->count );
50966 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50967
50968 tty_unlock(tty);
50969 schedule();
50970 @@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50971
50972 /* FIXME: Racy on hangup during close wait */
50973 if (extra_count)
50974 - port->count++;
50975 + atomic_inc(&port->count);
50976 port->blocked_open--;
50977
50978 if (debug_level >= DEBUG_LEVEL_INFO)
50979 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
50980 - __FILE__,__LINE__, tty->driver->name, port->count );
50981 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50982
50983 if (!retval)
50984 port->flags |= ASYNC_NORMAL_ACTIVE;
50985 @@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50986
50987 if (debug_level >= DEBUG_LEVEL_INFO)
50988 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
50989 - __FILE__,__LINE__,tty->driver->name, info->port.count);
50990 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
50991
50992 /* If port is closing, signal caller to try again */
50993 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50994 @@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50995 spin_unlock_irqrestore(&info->netlock, flags);
50996 goto cleanup;
50997 }
50998 - info->port.count++;
50999 + atomic_inc(&info->port.count);
51000 spin_unlock_irqrestore(&info->netlock, flags);
51001
51002 - if (info->port.count == 1) {
51003 + if (atomic_read(&info->port.count) == 1) {
51004 /* 1st open on this device, init hardware */
51005 retval = startup(info);
51006 if (retval < 0)
51007 @@ -3446,8 +3446,8 @@ cleanup:
51008 if (retval) {
51009 if (tty->count == 1)
51010 info->port.tty = NULL; /* tty layer will release tty struct */
51011 - if(info->port.count)
51012 - info->port.count--;
51013 + if (atomic_read(&info->port.count))
51014 + atomic_dec(&info->port.count);
51015 }
51016
51017 return retval;
51018 @@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51019 unsigned short new_crctype;
51020
51021 /* return error if TTY interface open */
51022 - if (info->port.count)
51023 + if (atomic_read(&info->port.count))
51024 return -EBUSY;
51025
51026 switch (encoding)
51027 @@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
51028
51029 /* arbitrate between network and tty opens */
51030 spin_lock_irqsave(&info->netlock, flags);
51031 - if (info->port.count != 0 || info->netcount != 0) {
51032 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51033 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
51034 spin_unlock_irqrestore(&info->netlock, flags);
51035 return -EBUSY;
51036 @@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51037 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
51038
51039 /* return error if TTY interface open */
51040 - if (info->port.count)
51041 + if (atomic_read(&info->port.count))
51042 return -EBUSY;
51043
51044 if (cmd != SIOCWANDEV)
51045 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
51046 index 1abf946..1ee34fc 100644
51047 --- a/drivers/tty/synclink_gt.c
51048 +++ b/drivers/tty/synclink_gt.c
51049 @@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
51050 tty->driver_data = info;
51051 info->port.tty = tty;
51052
51053 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
51054 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
51055
51056 /* If port is closing, signal caller to try again */
51057 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51058 @@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
51059 mutex_unlock(&info->port.mutex);
51060 goto cleanup;
51061 }
51062 - info->port.count++;
51063 + atomic_inc(&info->port.count);
51064 spin_unlock_irqrestore(&info->netlock, flags);
51065
51066 - if (info->port.count == 1) {
51067 + if (atomic_read(&info->port.count) == 1) {
51068 /* 1st open on this device, init hardware */
51069 retval = startup(info);
51070 if (retval < 0) {
51071 @@ -715,8 +715,8 @@ cleanup:
51072 if (retval) {
51073 if (tty->count == 1)
51074 info->port.tty = NULL; /* tty layer will release tty struct */
51075 - if(info->port.count)
51076 - info->port.count--;
51077 + if(atomic_read(&info->port.count))
51078 + atomic_dec(&info->port.count);
51079 }
51080
51081 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
51082 @@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51083
51084 if (sanity_check(info, tty->name, "close"))
51085 return;
51086 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
51087 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
51088
51089 if (tty_port_close_start(&info->port, tty, filp) == 0)
51090 goto cleanup;
51091 @@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51092 tty_port_close_end(&info->port, tty);
51093 info->port.tty = NULL;
51094 cleanup:
51095 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
51096 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
51097 }
51098
51099 static void hangup(struct tty_struct *tty)
51100 @@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
51101 shutdown(info);
51102
51103 spin_lock_irqsave(&info->port.lock, flags);
51104 - info->port.count = 0;
51105 + atomic_set(&info->port.count, 0);
51106 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51107 info->port.tty = NULL;
51108 spin_unlock_irqrestore(&info->port.lock, flags);
51109 @@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51110 unsigned short new_crctype;
51111
51112 /* return error if TTY interface open */
51113 - if (info->port.count)
51114 + if (atomic_read(&info->port.count))
51115 return -EBUSY;
51116
51117 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
51118 @@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
51119
51120 /* arbitrate between network and tty opens */
51121 spin_lock_irqsave(&info->netlock, flags);
51122 - if (info->port.count != 0 || info->netcount != 0) {
51123 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51124 DBGINFO(("%s hdlc_open busy\n", dev->name));
51125 spin_unlock_irqrestore(&info->netlock, flags);
51126 return -EBUSY;
51127 @@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51128 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
51129
51130 /* return error if TTY interface open */
51131 - if (info->port.count)
51132 + if (atomic_read(&info->port.count))
51133 return -EBUSY;
51134
51135 if (cmd != SIOCWANDEV)
51136 @@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
51137 if (port == NULL)
51138 continue;
51139 spin_lock(&port->lock);
51140 - if ((port->port.count || port->netcount) &&
51141 + if ((atomic_read(&port->port.count) || port->netcount) &&
51142 port->pending_bh && !port->bh_running &&
51143 !port->bh_requested) {
51144 DBGISR(("%s bh queued\n", port->device_name));
51145 @@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51146 spin_lock_irqsave(&info->lock, flags);
51147 if (!tty_hung_up_p(filp)) {
51148 extra_count = true;
51149 - port->count--;
51150 + atomic_dec(&port->count);
51151 }
51152 spin_unlock_irqrestore(&info->lock, flags);
51153 port->blocked_open++;
51154 @@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51155 remove_wait_queue(&port->open_wait, &wait);
51156
51157 if (extra_count)
51158 - port->count++;
51159 + atomic_inc(&port->count);
51160 port->blocked_open--;
51161
51162 if (!retval)
51163 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
51164 index dc6e969..5dc8786 100644
51165 --- a/drivers/tty/synclinkmp.c
51166 +++ b/drivers/tty/synclinkmp.c
51167 @@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
51168
51169 if (debug_level >= DEBUG_LEVEL_INFO)
51170 printk("%s(%d):%s open(), old ref count = %d\n",
51171 - __FILE__,__LINE__,tty->driver->name, info->port.count);
51172 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
51173
51174 /* If port is closing, signal caller to try again */
51175 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51176 @@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
51177 spin_unlock_irqrestore(&info->netlock, flags);
51178 goto cleanup;
51179 }
51180 - info->port.count++;
51181 + atomic_inc(&info->port.count);
51182 spin_unlock_irqrestore(&info->netlock, flags);
51183
51184 - if (info->port.count == 1) {
51185 + if (atomic_read(&info->port.count) == 1) {
51186 /* 1st open on this device, init hardware */
51187 retval = startup(info);
51188 if (retval < 0)
51189 @@ -796,8 +796,8 @@ cleanup:
51190 if (retval) {
51191 if (tty->count == 1)
51192 info->port.tty = NULL; /* tty layer will release tty struct */
51193 - if(info->port.count)
51194 - info->port.count--;
51195 + if(atomic_read(&info->port.count))
51196 + atomic_dec(&info->port.count);
51197 }
51198
51199 return retval;
51200 @@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51201
51202 if (debug_level >= DEBUG_LEVEL_INFO)
51203 printk("%s(%d):%s close() entry, count=%d\n",
51204 - __FILE__,__LINE__, info->device_name, info->port.count);
51205 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
51206
51207 if (tty_port_close_start(&info->port, tty, filp) == 0)
51208 goto cleanup;
51209 @@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51210 cleanup:
51211 if (debug_level >= DEBUG_LEVEL_INFO)
51212 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
51213 - tty->driver->name, info->port.count);
51214 + tty->driver->name, atomic_read(&info->port.count));
51215 }
51216
51217 /* Called by tty_hangup() when a hangup is signaled.
51218 @@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
51219 shutdown(info);
51220
51221 spin_lock_irqsave(&info->port.lock, flags);
51222 - info->port.count = 0;
51223 + atomic_set(&info->port.count, 0);
51224 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51225 info->port.tty = NULL;
51226 spin_unlock_irqrestore(&info->port.lock, flags);
51227 @@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51228 unsigned short new_crctype;
51229
51230 /* return error if TTY interface open */
51231 - if (info->port.count)
51232 + if (atomic_read(&info->port.count))
51233 return -EBUSY;
51234
51235 switch (encoding)
51236 @@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
51237
51238 /* arbitrate between network and tty opens */
51239 spin_lock_irqsave(&info->netlock, flags);
51240 - if (info->port.count != 0 || info->netcount != 0) {
51241 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51242 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
51243 spin_unlock_irqrestore(&info->netlock, flags);
51244 return -EBUSY;
51245 @@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51246 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
51247
51248 /* return error if TTY interface open */
51249 - if (info->port.count)
51250 + if (atomic_read(&info->port.count))
51251 return -EBUSY;
51252
51253 if (cmd != SIOCWANDEV)
51254 @@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
51255 * do not request bottom half processing if the
51256 * device is not open in a normal mode.
51257 */
51258 - if ( port && (port->port.count || port->netcount) &&
51259 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
51260 port->pending_bh && !port->bh_running &&
51261 !port->bh_requested ) {
51262 if ( debug_level >= DEBUG_LEVEL_ISR )
51263 @@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51264
51265 if (debug_level >= DEBUG_LEVEL_INFO)
51266 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
51267 - __FILE__,__LINE__, tty->driver->name, port->count );
51268 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51269
51270 spin_lock_irqsave(&info->lock, flags);
51271 if (!tty_hung_up_p(filp)) {
51272 extra_count = true;
51273 - port->count--;
51274 + atomic_dec(&port->count);
51275 }
51276 spin_unlock_irqrestore(&info->lock, flags);
51277 port->blocked_open++;
51278 @@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51279
51280 if (debug_level >= DEBUG_LEVEL_INFO)
51281 printk("%s(%d):%s block_til_ready() count=%d\n",
51282 - __FILE__,__LINE__, tty->driver->name, port->count );
51283 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51284
51285 tty_unlock(tty);
51286 schedule();
51287 @@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51288 remove_wait_queue(&port->open_wait, &wait);
51289
51290 if (extra_count)
51291 - port->count++;
51292 + atomic_inc(&port->count);
51293 port->blocked_open--;
51294
51295 if (debug_level >= DEBUG_LEVEL_INFO)
51296 printk("%s(%d):%s block_til_ready() after, count=%d\n",
51297 - __FILE__,__LINE__, tty->driver->name, port->count );
51298 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51299
51300 if (!retval)
51301 port->flags |= ASYNC_NORMAL_ACTIVE;
51302 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
51303 index ce396ec..04a37be 100644
51304 --- a/drivers/tty/sysrq.c
51305 +++ b/drivers/tty/sysrq.c
51306 @@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
51307 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
51308 size_t count, loff_t *ppos)
51309 {
51310 - if (count) {
51311 + if (count && capable(CAP_SYS_ADMIN)) {
51312 char c;
51313
51314 if (get_user(c, buf))
51315 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
51316 index c74a00a..02cf211a 100644
51317 --- a/drivers/tty/tty_io.c
51318 +++ b/drivers/tty/tty_io.c
51319 @@ -3474,7 +3474,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
51320
51321 void tty_default_fops(struct file_operations *fops)
51322 {
51323 - *fops = tty_fops;
51324 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
51325 }
51326
51327 /*
51328 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
51329 index 6458e11..6cfc218 100644
51330 --- a/drivers/tty/tty_ldisc.c
51331 +++ b/drivers/tty/tty_ldisc.c
51332 @@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
51333 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
51334 tty_ldiscs[disc] = new_ldisc;
51335 new_ldisc->num = disc;
51336 - new_ldisc->refcount = 0;
51337 + atomic_set(&new_ldisc->refcount, 0);
51338 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
51339
51340 return ret;
51341 @@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
51342 return -EINVAL;
51343
51344 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
51345 - if (tty_ldiscs[disc]->refcount)
51346 + if (atomic_read(&tty_ldiscs[disc]->refcount))
51347 ret = -EBUSY;
51348 else
51349 tty_ldiscs[disc] = NULL;
51350 @@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
51351 if (ldops) {
51352 ret = ERR_PTR(-EAGAIN);
51353 if (try_module_get(ldops->owner)) {
51354 - ldops->refcount++;
51355 + atomic_inc(&ldops->refcount);
51356 ret = ldops;
51357 }
51358 }
51359 @@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
51360 unsigned long flags;
51361
51362 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
51363 - ldops->refcount--;
51364 + atomic_dec(&ldops->refcount);
51365 module_put(ldops->owner);
51366 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
51367 }
51368 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
51369 index c94d234..8210f2d 100644
51370 --- a/drivers/tty/tty_port.c
51371 +++ b/drivers/tty/tty_port.c
51372 @@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
51373 unsigned long flags;
51374
51375 spin_lock_irqsave(&port->lock, flags);
51376 - port->count = 0;
51377 + atomic_set(&port->count, 0);
51378 port->flags &= ~ASYNC_NORMAL_ACTIVE;
51379 tty = port->tty;
51380 if (tty)
51381 @@ -394,7 +394,7 @@ int tty_port_block_til_ready(struct tty_port *port,
51382 /* The port lock protects the port counts */
51383 spin_lock_irqsave(&port->lock, flags);
51384 if (!tty_hung_up_p(filp))
51385 - port->count--;
51386 + atomic_dec(&port->count);
51387 port->blocked_open++;
51388 spin_unlock_irqrestore(&port->lock, flags);
51389
51390 @@ -436,7 +436,7 @@ int tty_port_block_til_ready(struct tty_port *port,
51391 we must not mess that up further */
51392 spin_lock_irqsave(&port->lock, flags);
51393 if (!tty_hung_up_p(filp))
51394 - port->count++;
51395 + atomic_inc(&port->count);
51396 port->blocked_open--;
51397 if (retval == 0)
51398 port->flags |= ASYNC_NORMAL_ACTIVE;
51399 @@ -470,19 +470,19 @@ int tty_port_close_start(struct tty_port *port,
51400 return 0;
51401 }
51402
51403 - if (tty->count == 1 && port->count != 1) {
51404 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
51405 printk(KERN_WARNING
51406 "tty_port_close_start: tty->count = 1 port count = %d.\n",
51407 - port->count);
51408 - port->count = 1;
51409 + atomic_read(&port->count));
51410 + atomic_set(&port->count, 1);
51411 }
51412 - if (--port->count < 0) {
51413 + if (atomic_dec_return(&port->count) < 0) {
51414 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
51415 - port->count);
51416 - port->count = 0;
51417 + atomic_read(&port->count));
51418 + atomic_set(&port->count, 0);
51419 }
51420
51421 - if (port->count) {
51422 + if (atomic_read(&port->count)) {
51423 spin_unlock_irqrestore(&port->lock, flags);
51424 return 0;
51425 }
51426 @@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
51427 {
51428 spin_lock_irq(&port->lock);
51429 if (!tty_hung_up_p(filp))
51430 - ++port->count;
51431 + atomic_inc(&port->count);
51432 spin_unlock_irq(&port->lock);
51433 tty_port_tty_set(port, tty);
51434
51435 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
51436 index d0e3a44..5f8b754 100644
51437 --- a/drivers/tty/vt/keyboard.c
51438 +++ b/drivers/tty/vt/keyboard.c
51439 @@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
51440 kbd->kbdmode == VC_OFF) &&
51441 value != KVAL(K_SAK))
51442 return; /* SAK is allowed even in raw mode */
51443 +
51444 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
51445 + {
51446 + void *func = fn_handler[value];
51447 + if (func == fn_show_state || func == fn_show_ptregs ||
51448 + func == fn_show_mem)
51449 + return;
51450 + }
51451 +#endif
51452 +
51453 fn_handler[value](vc);
51454 }
51455
51456 @@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
51457 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
51458 return -EFAULT;
51459
51460 - if (!capable(CAP_SYS_TTY_CONFIG))
51461 - perm = 0;
51462 -
51463 switch (cmd) {
51464 case KDGKBENT:
51465 /* Ensure another thread doesn't free it under us */
51466 @@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
51467 spin_unlock_irqrestore(&kbd_event_lock, flags);
51468 return put_user(val, &user_kbe->kb_value);
51469 case KDSKBENT:
51470 + if (!capable(CAP_SYS_TTY_CONFIG))
51471 + perm = 0;
51472 +
51473 if (!perm)
51474 return -EPERM;
51475 if (!i && v == K_NOSUCHMAP) {
51476 @@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
51477 int i, j, k;
51478 int ret;
51479
51480 - if (!capable(CAP_SYS_TTY_CONFIG))
51481 - perm = 0;
51482 -
51483 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
51484 if (!kbs) {
51485 ret = -ENOMEM;
51486 @@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
51487 kfree(kbs);
51488 return ((p && *p) ? -EOVERFLOW : 0);
51489 case KDSKBSENT:
51490 + if (!capable(CAP_SYS_TTY_CONFIG))
51491 + perm = 0;
51492 +
51493 if (!perm) {
51494 ret = -EPERM;
51495 goto reterr;
51496 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
51497 index a673e5b..36e5d32 100644
51498 --- a/drivers/uio/uio.c
51499 +++ b/drivers/uio/uio.c
51500 @@ -25,6 +25,7 @@
51501 #include <linux/kobject.h>
51502 #include <linux/cdev.h>
51503 #include <linux/uio_driver.h>
51504 +#include <asm/local.h>
51505
51506 #define UIO_MAX_DEVICES (1U << MINORBITS)
51507
51508 @@ -32,7 +33,7 @@ struct uio_device {
51509 struct module *owner;
51510 struct device *dev;
51511 int minor;
51512 - atomic_t event;
51513 + atomic_unchecked_t event;
51514 struct fasync_struct *async_queue;
51515 wait_queue_head_t wait;
51516 struct uio_info *info;
51517 @@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
51518 struct device_attribute *attr, char *buf)
51519 {
51520 struct uio_device *idev = dev_get_drvdata(dev);
51521 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
51522 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
51523 }
51524 static DEVICE_ATTR_RO(event);
51525
51526 @@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
51527 {
51528 struct uio_device *idev = info->uio_dev;
51529
51530 - atomic_inc(&idev->event);
51531 + atomic_inc_unchecked(&idev->event);
51532 wake_up_interruptible(&idev->wait);
51533 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
51534 }
51535 @@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
51536 }
51537
51538 listener->dev = idev;
51539 - listener->event_count = atomic_read(&idev->event);
51540 + listener->event_count = atomic_read_unchecked(&idev->event);
51541 filep->private_data = listener;
51542
51543 if (idev->info->open) {
51544 @@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
51545 return -EIO;
51546
51547 poll_wait(filep, &idev->wait, wait);
51548 - if (listener->event_count != atomic_read(&idev->event))
51549 + if (listener->event_count != atomic_read_unchecked(&idev->event))
51550 return POLLIN | POLLRDNORM;
51551 return 0;
51552 }
51553 @@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
51554 do {
51555 set_current_state(TASK_INTERRUPTIBLE);
51556
51557 - event_count = atomic_read(&idev->event);
51558 + event_count = atomic_read_unchecked(&idev->event);
51559 if (event_count != listener->event_count) {
51560 if (copy_to_user(buf, &event_count, count))
51561 retval = -EFAULT;
51562 @@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
51563 static int uio_find_mem_index(struct vm_area_struct *vma)
51564 {
51565 struct uio_device *idev = vma->vm_private_data;
51566 + unsigned long size;
51567
51568 if (vma->vm_pgoff < MAX_UIO_MAPS) {
51569 - if (idev->info->mem[vma->vm_pgoff].size == 0)
51570 + size = idev->info->mem[vma->vm_pgoff].size;
51571 + if (size == 0)
51572 + return -1;
51573 + if (vma->vm_end - vma->vm_start > size)
51574 return -1;
51575 return (int)vma->vm_pgoff;
51576 }
51577 @@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
51578 idev->owner = owner;
51579 idev->info = info;
51580 init_waitqueue_head(&idev->wait);
51581 - atomic_set(&idev->event, 0);
51582 + atomic_set_unchecked(&idev->event, 0);
51583
51584 ret = uio_get_minor(idev);
51585 if (ret)
51586 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
51587 index 8a7eb77..c00402f 100644
51588 --- a/drivers/usb/atm/cxacru.c
51589 +++ b/drivers/usb/atm/cxacru.c
51590 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
51591 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
51592 if (ret < 2)
51593 return -EINVAL;
51594 - if (index < 0 || index > 0x7f)
51595 + if (index > 0x7f)
51596 return -EINVAL;
51597 pos += tmp;
51598
51599 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
51600 index 25a7bfc..57f3cf5 100644
51601 --- a/drivers/usb/atm/usbatm.c
51602 +++ b/drivers/usb/atm/usbatm.c
51603 @@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51604 if (printk_ratelimit())
51605 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
51606 __func__, vpi, vci);
51607 - atomic_inc(&vcc->stats->rx_err);
51608 + atomic_inc_unchecked(&vcc->stats->rx_err);
51609 return;
51610 }
51611
51612 @@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51613 if (length > ATM_MAX_AAL5_PDU) {
51614 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
51615 __func__, length, vcc);
51616 - atomic_inc(&vcc->stats->rx_err);
51617 + atomic_inc_unchecked(&vcc->stats->rx_err);
51618 goto out;
51619 }
51620
51621 @@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51622 if (sarb->len < pdu_length) {
51623 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
51624 __func__, pdu_length, sarb->len, vcc);
51625 - atomic_inc(&vcc->stats->rx_err);
51626 + atomic_inc_unchecked(&vcc->stats->rx_err);
51627 goto out;
51628 }
51629
51630 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
51631 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
51632 __func__, vcc);
51633 - atomic_inc(&vcc->stats->rx_err);
51634 + atomic_inc_unchecked(&vcc->stats->rx_err);
51635 goto out;
51636 }
51637
51638 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51639 if (printk_ratelimit())
51640 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
51641 __func__, length);
51642 - atomic_inc(&vcc->stats->rx_drop);
51643 + atomic_inc_unchecked(&vcc->stats->rx_drop);
51644 goto out;
51645 }
51646
51647 @@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51648
51649 vcc->push(vcc, skb);
51650
51651 - atomic_inc(&vcc->stats->rx);
51652 + atomic_inc_unchecked(&vcc->stats->rx);
51653 out:
51654 skb_trim(sarb, 0);
51655 }
51656 @@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
51657 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
51658
51659 usbatm_pop(vcc, skb);
51660 - atomic_inc(&vcc->stats->tx);
51661 + atomic_inc_unchecked(&vcc->stats->tx);
51662
51663 skb = skb_dequeue(&instance->sndqueue);
51664 }
51665 @@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
51666 if (!left--)
51667 return sprintf(page,
51668 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
51669 - atomic_read(&atm_dev->stats.aal5.tx),
51670 - atomic_read(&atm_dev->stats.aal5.tx_err),
51671 - atomic_read(&atm_dev->stats.aal5.rx),
51672 - atomic_read(&atm_dev->stats.aal5.rx_err),
51673 - atomic_read(&atm_dev->stats.aal5.rx_drop));
51674 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
51675 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
51676 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
51677 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
51678 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
51679
51680 if (!left--) {
51681 if (instance->disconnected)
51682 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
51683 index 2a3bbdf..91d72cf 100644
51684 --- a/drivers/usb/core/devices.c
51685 +++ b/drivers/usb/core/devices.c
51686 @@ -126,7 +126,7 @@ static const char format_endpt[] =
51687 * time it gets called.
51688 */
51689 static struct device_connect_event {
51690 - atomic_t count;
51691 + atomic_unchecked_t count;
51692 wait_queue_head_t wait;
51693 } device_event = {
51694 .count = ATOMIC_INIT(1),
51695 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
51696
51697 void usbfs_conn_disc_event(void)
51698 {
51699 - atomic_add(2, &device_event.count);
51700 + atomic_add_unchecked(2, &device_event.count);
51701 wake_up(&device_event.wait);
51702 }
51703
51704 @@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
51705
51706 poll_wait(file, &device_event.wait, wait);
51707
51708 - event_count = atomic_read(&device_event.count);
51709 + event_count = atomic_read_unchecked(&device_event.count);
51710 if (file->f_version != event_count) {
51711 file->f_version = event_count;
51712 return POLLIN | POLLRDNORM;
51713 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
51714 index 967152a..16fa2e5 100644
51715 --- a/drivers/usb/core/devio.c
51716 +++ b/drivers/usb/core/devio.c
51717 @@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
51718 struct dev_state *ps = file->private_data;
51719 struct usb_device *dev = ps->dev;
51720 ssize_t ret = 0;
51721 - unsigned len;
51722 + size_t len;
51723 loff_t pos;
51724 int i;
51725
51726 @@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
51727 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
51728 struct usb_config_descriptor *config =
51729 (struct usb_config_descriptor *)dev->rawdescriptors[i];
51730 - unsigned int length = le16_to_cpu(config->wTotalLength);
51731 + size_t length = le16_to_cpu(config->wTotalLength);
51732
51733 if (*ppos < pos + length) {
51734
51735 /* The descriptor may claim to be longer than it
51736 * really is. Here is the actual allocated length. */
51737 - unsigned alloclen =
51738 + size_t alloclen =
51739 le16_to_cpu(dev->config[i].desc.wTotalLength);
51740
51741 - len = length - (*ppos - pos);
51742 + len = length + pos - *ppos;
51743 if (len > nbytes)
51744 len = nbytes;
51745
51746 /* Simply don't write (skip over) unallocated parts */
51747 if (alloclen > (*ppos - pos)) {
51748 - alloclen -= (*ppos - pos);
51749 + alloclen = alloclen + pos - *ppos;
51750 if (copy_to_user(buf,
51751 dev->rawdescriptors[i] + (*ppos - pos),
51752 min(len, alloclen))) {
51753 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
51754 index d39106c..bfe13a4 100644
51755 --- a/drivers/usb/core/hcd.c
51756 +++ b/drivers/usb/core/hcd.c
51757 @@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
51758 */
51759 usb_get_urb(urb);
51760 atomic_inc(&urb->use_count);
51761 - atomic_inc(&urb->dev->urbnum);
51762 + atomic_inc_unchecked(&urb->dev->urbnum);
51763 usbmon_urb_submit(&hcd->self, urb);
51764
51765 /* NOTE requirements on root-hub callers (usbfs and the hub
51766 @@ -1576,7 +1576,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
51767 urb->hcpriv = NULL;
51768 INIT_LIST_HEAD(&urb->urb_list);
51769 atomic_dec(&urb->use_count);
51770 - atomic_dec(&urb->dev->urbnum);
51771 + atomic_dec_unchecked(&urb->dev->urbnum);
51772 if (atomic_read(&urb->reject))
51773 wake_up(&usb_kill_urb_queue);
51774 usb_put_urb(urb);
51775 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
51776 index ebcd3bf..be93a64 100644
51777 --- a/drivers/usb/core/hub.c
51778 +++ b/drivers/usb/core/hub.c
51779 @@ -27,6 +27,7 @@
51780 #include <linux/freezer.h>
51781 #include <linux/random.h>
51782 #include <linux/pm_qos.h>
51783 +#include <linux/grsecurity.h>
51784
51785 #include <asm/uaccess.h>
51786 #include <asm/byteorder.h>
51787 @@ -4437,6 +4438,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
51788 goto done;
51789 return;
51790 }
51791 +
51792 + if (gr_handle_new_usb())
51793 + goto done;
51794 +
51795 if (hub_is_superspeed(hub->hdev))
51796 unit_load = 150;
51797 else
51798 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
51799 index bb31597..6c5ef8b 100644
51800 --- a/drivers/usb/core/message.c
51801 +++ b/drivers/usb/core/message.c
51802 @@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
51803 * Return: If successful, the number of bytes transferred. Otherwise, a negative
51804 * error number.
51805 */
51806 -int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51807 +int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51808 __u8 requesttype, __u16 value, __u16 index, void *data,
51809 __u16 size, int timeout)
51810 {
51811 @@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
51812 * If successful, 0. Otherwise a negative error number. The number of actual
51813 * bytes transferred will be stored in the @actual_length paramater.
51814 */
51815 -int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51816 +int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51817 void *data, int len, int *actual_length, int timeout)
51818 {
51819 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
51820 @@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
51821 * bytes transferred will be stored in the @actual_length paramater.
51822 *
51823 */
51824 -int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51825 +int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51826 void *data, int len, int *actual_length, int timeout)
51827 {
51828 struct urb *urb;
51829 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
51830 index 52a97ad..e73330f 100644
51831 --- a/drivers/usb/core/sysfs.c
51832 +++ b/drivers/usb/core/sysfs.c
51833 @@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
51834 struct usb_device *udev;
51835
51836 udev = to_usb_device(dev);
51837 - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
51838 + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
51839 }
51840 static DEVICE_ATTR_RO(urbnum);
51841
51842 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
51843 index 4d11449..f4ccabf 100644
51844 --- a/drivers/usb/core/usb.c
51845 +++ b/drivers/usb/core/usb.c
51846 @@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
51847 set_dev_node(&dev->dev, dev_to_node(bus->controller));
51848 dev->state = USB_STATE_ATTACHED;
51849 dev->lpm_disable_count = 1;
51850 - atomic_set(&dev->urbnum, 0);
51851 + atomic_set_unchecked(&dev->urbnum, 0);
51852
51853 INIT_LIST_HEAD(&dev->ep0.urb_list);
51854 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
51855 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
51856 index 02e44fc..3c4fe64 100644
51857 --- a/drivers/usb/dwc3/gadget.c
51858 +++ b/drivers/usb/dwc3/gadget.c
51859 @@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
51860 if (!usb_endpoint_xfer_isoc(desc))
51861 return 0;
51862
51863 - memset(&trb_link, 0, sizeof(trb_link));
51864 -
51865 /* Link TRB for ISOC. The HWO bit is never reset */
51866 trb_st_hw = &dep->trb_pool[0];
51867
51868 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
51869 index 8cfc319..4868255 100644
51870 --- a/drivers/usb/early/ehci-dbgp.c
51871 +++ b/drivers/usb/early/ehci-dbgp.c
51872 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
51873
51874 #ifdef CONFIG_KGDB
51875 static struct kgdb_io kgdbdbgp_io_ops;
51876 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
51877 +static struct kgdb_io kgdbdbgp_io_ops_console;
51878 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
51879 #else
51880 #define dbgp_kgdb_mode (0)
51881 #endif
51882 @@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
51883 .write_char = kgdbdbgp_write_char,
51884 };
51885
51886 +static struct kgdb_io kgdbdbgp_io_ops_console = {
51887 + .name = "kgdbdbgp",
51888 + .read_char = kgdbdbgp_read_char,
51889 + .write_char = kgdbdbgp_write_char,
51890 + .is_console = 1
51891 +};
51892 +
51893 static int kgdbdbgp_wait_time;
51894
51895 static int __init kgdbdbgp_parse_config(char *str)
51896 @@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
51897 ptr++;
51898 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
51899 }
51900 - kgdb_register_io_module(&kgdbdbgp_io_ops);
51901 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
51902 + if (early_dbgp_console.index != -1)
51903 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
51904 + else
51905 + kgdb_register_io_module(&kgdbdbgp_io_ops);
51906
51907 return 0;
51908 }
51909 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
51910 index b369292..9f3ba40 100644
51911 --- a/drivers/usb/gadget/u_serial.c
51912 +++ b/drivers/usb/gadget/u_serial.c
51913 @@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51914 spin_lock_irq(&port->port_lock);
51915
51916 /* already open? Great. */
51917 - if (port->port.count) {
51918 + if (atomic_read(&port->port.count)) {
51919 status = 0;
51920 - port->port.count++;
51921 + atomic_inc(&port->port.count);
51922
51923 /* currently opening/closing? wait ... */
51924 } else if (port->openclose) {
51925 @@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51926 tty->driver_data = port;
51927 port->port.tty = tty;
51928
51929 - port->port.count = 1;
51930 + atomic_set(&port->port.count, 1);
51931 port->openclose = false;
51932
51933 /* if connected, start the I/O stream */
51934 @@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51935
51936 spin_lock_irq(&port->port_lock);
51937
51938 - if (port->port.count != 1) {
51939 - if (port->port.count == 0)
51940 + if (atomic_read(&port->port.count) != 1) {
51941 + if (atomic_read(&port->port.count) == 0)
51942 WARN_ON(1);
51943 else
51944 - --port->port.count;
51945 + atomic_dec(&port->port.count);
51946 goto exit;
51947 }
51948
51949 @@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51950 * and sleep if necessary
51951 */
51952 port->openclose = true;
51953 - port->port.count = 0;
51954 + atomic_set(&port->port.count, 0);
51955
51956 gser = port->port_usb;
51957 if (gser && gser->disconnect)
51958 @@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
51959 int cond;
51960
51961 spin_lock_irq(&port->port_lock);
51962 - cond = (port->port.count == 0) && !port->openclose;
51963 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
51964 spin_unlock_irq(&port->port_lock);
51965 return cond;
51966 }
51967 @@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
51968 /* if it's already open, start I/O ... and notify the serial
51969 * protocol about open/close status (connect/disconnect).
51970 */
51971 - if (port->port.count) {
51972 + if (atomic_read(&port->port.count)) {
51973 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
51974 gs_start_io(port);
51975 if (gser->connect)
51976 @@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
51977
51978 port->port_usb = NULL;
51979 gser->ioport = NULL;
51980 - if (port->port.count > 0 || port->openclose) {
51981 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
51982 wake_up_interruptible(&port->drain_wait);
51983 if (port->port.tty)
51984 tty_hangup(port->port.tty);
51985 @@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
51986
51987 /* finally, free any unused/unusable I/O buffers */
51988 spin_lock_irqsave(&port->port_lock, flags);
51989 - if (port->port.count == 0 && !port->openclose)
51990 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
51991 gs_buf_free(&port->port_write_buf);
51992 gs_free_requests(gser->out, &port->read_pool, NULL);
51993 gs_free_requests(gser->out, &port->read_queue, NULL);
51994 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
51995 index 1bb85be..29e28d9 100644
51996 --- a/drivers/usb/host/ehci-hub.c
51997 +++ b/drivers/usb/host/ehci-hub.c
51998 @@ -780,7 +780,7 @@ static struct urb *request_single_step_set_feature_urb(
51999 urb->transfer_flags = URB_DIR_IN;
52000 usb_get_urb(urb);
52001 atomic_inc(&urb->use_count);
52002 - atomic_inc(&urb->dev->urbnum);
52003 + atomic_inc_unchecked(&urb->dev->urbnum);
52004 urb->setup_dma = dma_map_single(
52005 hcd->self.controller,
52006 urb->setup_packet,
52007 @@ -847,7 +847,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
52008 urb->status = -EINPROGRESS;
52009 usb_get_urb(urb);
52010 atomic_inc(&urb->use_count);
52011 - atomic_inc(&urb->dev->urbnum);
52012 + atomic_inc_unchecked(&urb->dev->urbnum);
52013 retval = submit_single_step_set_feature(hcd, urb, 0);
52014 if (!retval && !wait_for_completion_timeout(&done,
52015 msecs_to_jiffies(2000))) {
52016 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
52017 index ba6a5d6..f88f7f3 100644
52018 --- a/drivers/usb/misc/appledisplay.c
52019 +++ b/drivers/usb/misc/appledisplay.c
52020 @@ -83,7 +83,7 @@ struct appledisplay {
52021 spinlock_t lock;
52022 };
52023
52024 -static atomic_t count_displays = ATOMIC_INIT(0);
52025 +static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
52026 static struct workqueue_struct *wq;
52027
52028 static void appledisplay_complete(struct urb *urb)
52029 @@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
52030
52031 /* Register backlight device */
52032 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
52033 - atomic_inc_return(&count_displays) - 1);
52034 + atomic_inc_return_unchecked(&count_displays) - 1);
52035 memset(&props, 0, sizeof(struct backlight_properties));
52036 props.type = BACKLIGHT_RAW;
52037 props.max_brightness = 0xff;
52038 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
52039 index c69bb50..215ef37 100644
52040 --- a/drivers/usb/serial/console.c
52041 +++ b/drivers/usb/serial/console.c
52042 @@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
52043
52044 info->port = port;
52045
52046 - ++port->port.count;
52047 + atomic_inc(&port->port.count);
52048 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
52049 if (serial->type->set_termios) {
52050 /*
52051 @@ -170,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
52052 }
52053 /* Now that any required fake tty operations are completed restore
52054 * the tty port count */
52055 - --port->port.count;
52056 + atomic_dec(&port->port.count);
52057 /* The console is special in terms of closing the device so
52058 * indicate this port is now acting as a system console. */
52059 port->port.console = 1;
52060 @@ -183,7 +183,7 @@ static int usb_console_setup(struct console *co, char *options)
52061 free_tty:
52062 kfree(tty);
52063 reset_open_count:
52064 - port->port.count = 0;
52065 + atomic_set(&port->port.count, 0);
52066 usb_autopm_put_interface(serial->interface);
52067 error_get_interface:
52068 usb_serial_put(serial);
52069 @@ -194,7 +194,7 @@ static int usb_console_setup(struct console *co, char *options)
52070 static void usb_console_write(struct console *co,
52071 const char *buf, unsigned count)
52072 {
52073 - static struct usbcons_info *info = &usbcons_info;
52074 + struct usbcons_info *info = &usbcons_info;
52075 struct usb_serial_port *port = info->port;
52076 struct usb_serial *serial;
52077 int retval = -ENODEV;
52078 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
52079 index 75f70f0..d467e1a 100644
52080 --- a/drivers/usb/storage/usb.h
52081 +++ b/drivers/usb/storage/usb.h
52082 @@ -63,7 +63,7 @@ struct us_unusual_dev {
52083 __u8 useProtocol;
52084 __u8 useTransport;
52085 int (*initFunction)(struct us_data *);
52086 -};
52087 +} __do_const;
52088
52089
52090 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
52091 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
52092 index e614f02..3fd60e2 100644
52093 --- a/drivers/usb/wusbcore/wa-hc.h
52094 +++ b/drivers/usb/wusbcore/wa-hc.h
52095 @@ -225,7 +225,7 @@ struct wahc {
52096 spinlock_t xfer_list_lock;
52097 struct work_struct xfer_enqueue_work;
52098 struct work_struct xfer_error_work;
52099 - atomic_t xfer_id_count;
52100 + atomic_unchecked_t xfer_id_count;
52101
52102 kernel_ulong_t quirks;
52103 };
52104 @@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *wa)
52105 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
52106 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
52107 wa->dto_in_use = 0;
52108 - atomic_set(&wa->xfer_id_count, 1);
52109 + atomic_set_unchecked(&wa->xfer_id_count, 1);
52110 }
52111
52112 /**
52113 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
52114 index ed5abe8..7036400 100644
52115 --- a/drivers/usb/wusbcore/wa-xfer.c
52116 +++ b/drivers/usb/wusbcore/wa-xfer.c
52117 @@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
52118 */
52119 static void wa_xfer_id_init(struct wa_xfer *xfer)
52120 {
52121 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
52122 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
52123 }
52124
52125 /* Return the xfer's ID. */
52126 diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
52127 index 1eab4ac..e21efc9 100644
52128 --- a/drivers/vfio/vfio.c
52129 +++ b/drivers/vfio/vfio.c
52130 @@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
52131 return 0;
52132
52133 /* TODO Prevent device auto probing */
52134 - WARN("Device %s added to live group %d!\n", dev_name(dev),
52135 + WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
52136 iommu_group_id(group->iommu_group));
52137
52138 return 0;
52139 diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
52140 index 5174eba..451e6bc 100644
52141 --- a/drivers/vhost/vringh.c
52142 +++ b/drivers/vhost/vringh.c
52143 @@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
52144 /* Userspace access helpers: in this case, addresses are really userspace. */
52145 static inline int getu16_user(u16 *val, const u16 *p)
52146 {
52147 - return get_user(*val, (__force u16 __user *)p);
52148 + return get_user(*val, (u16 __force_user *)p);
52149 }
52150
52151 static inline int putu16_user(u16 *p, u16 val)
52152 {
52153 - return put_user(val, (__force u16 __user *)p);
52154 + return put_user(val, (u16 __force_user *)p);
52155 }
52156
52157 static inline int copydesc_user(void *dst, const void *src, size_t len)
52158 {
52159 - return copy_from_user(dst, (__force void __user *)src, len) ?
52160 + return copy_from_user(dst, (void __force_user *)src, len) ?
52161 -EFAULT : 0;
52162 }
52163
52164 @@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
52165 const struct vring_used_elem *src,
52166 unsigned int num)
52167 {
52168 - return copy_to_user((__force void __user *)dst, src,
52169 + return copy_to_user((void __force_user *)dst, src,
52170 sizeof(*dst) * num) ? -EFAULT : 0;
52171 }
52172
52173 static inline int xfer_from_user(void *src, void *dst, size_t len)
52174 {
52175 - return copy_from_user(dst, (__force void __user *)src, len) ?
52176 + return copy_from_user(dst, (void __force_user *)src, len) ?
52177 -EFAULT : 0;
52178 }
52179
52180 static inline int xfer_to_user(void *dst, void *src, size_t len)
52181 {
52182 - return copy_to_user((__force void __user *)dst, src, len) ?
52183 + return copy_to_user((void __force_user *)dst, src, len) ?
52184 -EFAULT : 0;
52185 }
52186
52187 @@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
52188 vrh->last_used_idx = 0;
52189 vrh->vring.num = num;
52190 /* vring expects kernel addresses, but only used via accessors. */
52191 - vrh->vring.desc = (__force struct vring_desc *)desc;
52192 - vrh->vring.avail = (__force struct vring_avail *)avail;
52193 - vrh->vring.used = (__force struct vring_used *)used;
52194 + vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
52195 + vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
52196 + vrh->vring.used = (__force_kernel struct vring_used *)used;
52197 return 0;
52198 }
52199 EXPORT_SYMBOL(vringh_init_user);
52200 @@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
52201
52202 static inline int putu16_kern(u16 *p, u16 val)
52203 {
52204 - ACCESS_ONCE(*p) = val;
52205 + ACCESS_ONCE_RW(*p) = val;
52206 return 0;
52207 }
52208
52209 diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
52210 index 1b0b233..6f34c2c 100644
52211 --- a/drivers/video/arcfb.c
52212 +++ b/drivers/video/arcfb.c
52213 @@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
52214 return -ENOSPC;
52215
52216 err = 0;
52217 - if ((count + p) > fbmemlength) {
52218 + if (count > (fbmemlength - p)) {
52219 count = fbmemlength - p;
52220 err = -ENOSPC;
52221 }
52222 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
52223 index 12ca031..84a8a74 100644
52224 --- a/drivers/video/aty/aty128fb.c
52225 +++ b/drivers/video/aty/aty128fb.c
52226 @@ -149,7 +149,7 @@ enum {
52227 };
52228
52229 /* Must match above enum */
52230 -static char * const r128_family[] = {
52231 +static const char * const r128_family[] = {
52232 "AGP",
52233 "PCI",
52234 "PRO AGP",
52235 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
52236 index 28fafbf..ae91651 100644
52237 --- a/drivers/video/aty/atyfb_base.c
52238 +++ b/drivers/video/aty/atyfb_base.c
52239 @@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
52240 par->accel_flags = var->accel_flags; /* hack */
52241
52242 if (var->accel_flags) {
52243 - info->fbops->fb_sync = atyfb_sync;
52244 + pax_open_kernel();
52245 + *(void **)&info->fbops->fb_sync = atyfb_sync;
52246 + pax_close_kernel();
52247 info->flags &= ~FBINFO_HWACCEL_DISABLED;
52248 } else {
52249 - info->fbops->fb_sync = NULL;
52250 + pax_open_kernel();
52251 + *(void **)&info->fbops->fb_sync = NULL;
52252 + pax_close_kernel();
52253 info->flags |= FBINFO_HWACCEL_DISABLED;
52254 }
52255
52256 diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
52257 index 95ec042..e6affdd 100644
52258 --- a/drivers/video/aty/mach64_cursor.c
52259 +++ b/drivers/video/aty/mach64_cursor.c
52260 @@ -7,6 +7,7 @@
52261 #include <linux/string.h>
52262
52263 #include <asm/io.h>
52264 +#include <asm/pgtable.h>
52265
52266 #ifdef __sparc__
52267 #include <asm/fbio.h>
52268 @@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
52269 info->sprite.buf_align = 16; /* and 64 lines tall. */
52270 info->sprite.flags = FB_PIXMAP_IO;
52271
52272 - info->fbops->fb_cursor = atyfb_cursor;
52273 + pax_open_kernel();
52274 + *(void **)&info->fbops->fb_cursor = atyfb_cursor;
52275 + pax_close_kernel();
52276
52277 return 0;
52278 }
52279 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
52280 index 7592cc2..92feb56 100644
52281 --- a/drivers/video/backlight/kb3886_bl.c
52282 +++ b/drivers/video/backlight/kb3886_bl.c
52283 @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
52284 static unsigned long kb3886bl_flags;
52285 #define KB3886BL_SUSPENDED 0x01
52286
52287 -static struct dmi_system_id __initdata kb3886bl_device_table[] = {
52288 +static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
52289 {
52290 .ident = "Sahara Touch-iT",
52291 .matches = {
52292 diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
52293 index 900aa4e..6d49418 100644
52294 --- a/drivers/video/fb_defio.c
52295 +++ b/drivers/video/fb_defio.c
52296 @@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
52297
52298 BUG_ON(!fbdefio);
52299 mutex_init(&fbdefio->lock);
52300 - info->fbops->fb_mmap = fb_deferred_io_mmap;
52301 + pax_open_kernel();
52302 + *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
52303 + pax_close_kernel();
52304 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
52305 INIT_LIST_HEAD(&fbdefio->pagelist);
52306 if (fbdefio->delay == 0) /* set a default of 1 s */
52307 @@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
52308 page->mapping = NULL;
52309 }
52310
52311 - info->fbops->fb_mmap = NULL;
52312 + *(void **)&info->fbops->fb_mmap = NULL;
52313 mutex_destroy(&fbdefio->lock);
52314 }
52315 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
52316 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
52317 index 010d191..7b8235a 100644
52318 --- a/drivers/video/fbmem.c
52319 +++ b/drivers/video/fbmem.c
52320 @@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
52321 image->dx += image->width + 8;
52322 }
52323 } else if (rotate == FB_ROTATE_UD) {
52324 - for (x = 0; x < num && image->dx >= 0; x++) {
52325 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
52326 info->fbops->fb_imageblit(info, image);
52327 image->dx -= image->width + 8;
52328 }
52329 @@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
52330 image->dy += image->height + 8;
52331 }
52332 } else if (rotate == FB_ROTATE_CCW) {
52333 - for (x = 0; x < num && image->dy >= 0; x++) {
52334 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
52335 info->fbops->fb_imageblit(info, image);
52336 image->dy -= image->height + 8;
52337 }
52338 @@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
52339 return -EFAULT;
52340 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
52341 return -EINVAL;
52342 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
52343 + if (con2fb.framebuffer >= FB_MAX)
52344 return -EINVAL;
52345 if (!registered_fb[con2fb.framebuffer])
52346 request_module("fb%d", con2fb.framebuffer);
52347 @@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
52348 __u32 data;
52349 int err;
52350
52351 - err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
52352 + err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
52353
52354 data = (__u32) (unsigned long) fix->smem_start;
52355 err |= put_user(data, &fix32->smem_start);
52356 diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
52357 index 130708f..cdac1a9 100644
52358 --- a/drivers/video/hyperv_fb.c
52359 +++ b/drivers/video/hyperv_fb.c
52360 @@ -233,7 +233,7 @@ static uint screen_fb_size;
52361 static inline int synthvid_send(struct hv_device *hdev,
52362 struct synthvid_msg *msg)
52363 {
52364 - static atomic64_t request_id = ATOMIC64_INIT(0);
52365 + static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
52366 int ret;
52367
52368 msg->pipe_hdr.type = PIPE_MSG_DATA;
52369 @@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
52370
52371 ret = vmbus_sendpacket(hdev->channel, msg,
52372 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
52373 - atomic64_inc_return(&request_id),
52374 + atomic64_inc_return_unchecked(&request_id),
52375 VM_PKT_DATA_INBAND, 0);
52376
52377 if (ret)
52378 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
52379 index 7672d2e..b56437f 100644
52380 --- a/drivers/video/i810/i810_accel.c
52381 +++ b/drivers/video/i810/i810_accel.c
52382 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
52383 }
52384 }
52385 printk("ringbuffer lockup!!!\n");
52386 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
52387 i810_report_error(mmio);
52388 par->dev_flags |= LOCKUP;
52389 info->pixmap.scan_align = 1;
52390 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
52391 index 3c14e43..2630570 100644
52392 --- a/drivers/video/logo/logo_linux_clut224.ppm
52393 +++ b/drivers/video/logo/logo_linux_clut224.ppm
52394 @@ -2,1603 +2,1123 @@ P3
52395 # Standard 224-color Linux logo
52396 80 80
52397 255
52398 - 0 0 0 0 0 0 0 0 0 0 0 0
52399 - 0 0 0 0 0 0 0 0 0 0 0 0
52400 - 0 0 0 0 0 0 0 0 0 0 0 0
52401 - 0 0 0 0 0 0 0 0 0 0 0 0
52402 - 0 0 0 0 0 0 0 0 0 0 0 0
52403 - 0 0 0 0 0 0 0 0 0 0 0 0
52404 - 0 0 0 0 0 0 0 0 0 0 0 0
52405 - 0 0 0 0 0 0 0 0 0 0 0 0
52406 - 0 0 0 0 0 0 0 0 0 0 0 0
52407 - 6 6 6 6 6 6 10 10 10 10 10 10
52408 - 10 10 10 6 6 6 6 6 6 6 6 6
52409 - 0 0 0 0 0 0 0 0 0 0 0 0
52410 - 0 0 0 0 0 0 0 0 0 0 0 0
52411 - 0 0 0 0 0 0 0 0 0 0 0 0
52412 - 0 0 0 0 0 0 0 0 0 0 0 0
52413 - 0 0 0 0 0 0 0 0 0 0 0 0
52414 - 0 0 0 0 0 0 0 0 0 0 0 0
52415 - 0 0 0 0 0 0 0 0 0 0 0 0
52416 - 0 0 0 0 0 0 0 0 0 0 0 0
52417 - 0 0 0 0 0 0 0 0 0 0 0 0
52418 - 0 0 0 0 0 0 0 0 0 0 0 0
52419 - 0 0 0 0 0 0 0 0 0 0 0 0
52420 - 0 0 0 0 0 0 0 0 0 0 0 0
52421 - 0 0 0 0 0 0 0 0 0 0 0 0
52422 - 0 0 0 0 0 0 0 0 0 0 0 0
52423 - 0 0 0 0 0 0 0 0 0 0 0 0
52424 - 0 0 0 0 0 0 0 0 0 0 0 0
52425 - 0 0 0 0 0 0 0 0 0 0 0 0
52426 - 0 0 0 6 6 6 10 10 10 14 14 14
52427 - 22 22 22 26 26 26 30 30 30 34 34 34
52428 - 30 30 30 30 30 30 26 26 26 18 18 18
52429 - 14 14 14 10 10 10 6 6 6 0 0 0
52430 - 0 0 0 0 0 0 0 0 0 0 0 0
52431 - 0 0 0 0 0 0 0 0 0 0 0 0
52432 - 0 0 0 0 0 0 0 0 0 0 0 0
52433 - 0 0 0 0 0 0 0 0 0 0 0 0
52434 - 0 0 0 0 0 0 0 0 0 0 0 0
52435 - 0 0 0 0 0 0 0 0 0 0 0 0
52436 - 0 0 0 0 0 0 0 0 0 0 0 0
52437 - 0 0 0 0 0 0 0 0 0 0 0 0
52438 - 0 0 0 0 0 0 0 0 0 0 0 0
52439 - 0 0 0 0 0 1 0 0 1 0 0 0
52440 - 0 0 0 0 0 0 0 0 0 0 0 0
52441 - 0 0 0 0 0 0 0 0 0 0 0 0
52442 - 0 0 0 0 0 0 0 0 0 0 0 0
52443 - 0 0 0 0 0 0 0 0 0 0 0 0
52444 - 0 0 0 0 0 0 0 0 0 0 0 0
52445 - 0 0 0 0 0 0 0 0 0 0 0 0
52446 - 6 6 6 14 14 14 26 26 26 42 42 42
52447 - 54 54 54 66 66 66 78 78 78 78 78 78
52448 - 78 78 78 74 74 74 66 66 66 54 54 54
52449 - 42 42 42 26 26 26 18 18 18 10 10 10
52450 - 6 6 6 0 0 0 0 0 0 0 0 0
52451 - 0 0 0 0 0 0 0 0 0 0 0 0
52452 - 0 0 0 0 0 0 0 0 0 0 0 0
52453 - 0 0 0 0 0 0 0 0 0 0 0 0
52454 - 0 0 0 0 0 0 0 0 0 0 0 0
52455 - 0 0 0 0 0 0 0 0 0 0 0 0
52456 - 0 0 0 0 0 0 0 0 0 0 0 0
52457 - 0 0 0 0 0 0 0 0 0 0 0 0
52458 - 0 0 0 0 0 0 0 0 0 0 0 0
52459 - 0 0 1 0 0 0 0 0 0 0 0 0
52460 - 0 0 0 0 0 0 0 0 0 0 0 0
52461 - 0 0 0 0 0 0 0 0 0 0 0 0
52462 - 0 0 0 0 0 0 0 0 0 0 0 0
52463 - 0 0 0 0 0 0 0 0 0 0 0 0
52464 - 0 0 0 0 0 0 0 0 0 0 0 0
52465 - 0 0 0 0 0 0 0 0 0 10 10 10
52466 - 22 22 22 42 42 42 66 66 66 86 86 86
52467 - 66 66 66 38 38 38 38 38 38 22 22 22
52468 - 26 26 26 34 34 34 54 54 54 66 66 66
52469 - 86 86 86 70 70 70 46 46 46 26 26 26
52470 - 14 14 14 6 6 6 0 0 0 0 0 0
52471 - 0 0 0 0 0 0 0 0 0 0 0 0
52472 - 0 0 0 0 0 0 0 0 0 0 0 0
52473 - 0 0 0 0 0 0 0 0 0 0 0 0
52474 - 0 0 0 0 0 0 0 0 0 0 0 0
52475 - 0 0 0 0 0 0 0 0 0 0 0 0
52476 - 0 0 0 0 0 0 0 0 0 0 0 0
52477 - 0 0 0 0 0 0 0 0 0 0 0 0
52478 - 0 0 0 0 0 0 0 0 0 0 0 0
52479 - 0 0 1 0 0 1 0 0 1 0 0 0
52480 - 0 0 0 0 0 0 0 0 0 0 0 0
52481 - 0 0 0 0 0 0 0 0 0 0 0 0
52482 - 0 0 0 0 0 0 0 0 0 0 0 0
52483 - 0 0 0 0 0 0 0 0 0 0 0 0
52484 - 0 0 0 0 0 0 0 0 0 0 0 0
52485 - 0 0 0 0 0 0 10 10 10 26 26 26
52486 - 50 50 50 82 82 82 58 58 58 6 6 6
52487 - 2 2 6 2 2 6 2 2 6 2 2 6
52488 - 2 2 6 2 2 6 2 2 6 2 2 6
52489 - 6 6 6 54 54 54 86 86 86 66 66 66
52490 - 38 38 38 18 18 18 6 6 6 0 0 0
52491 - 0 0 0 0 0 0 0 0 0 0 0 0
52492 - 0 0 0 0 0 0 0 0 0 0 0 0
52493 - 0 0 0 0 0 0 0 0 0 0 0 0
52494 - 0 0 0 0 0 0 0 0 0 0 0 0
52495 - 0 0 0 0 0 0 0 0 0 0 0 0
52496 - 0 0 0 0 0 0 0 0 0 0 0 0
52497 - 0 0 0 0 0 0 0 0 0 0 0 0
52498 - 0 0 0 0 0 0 0 0 0 0 0 0
52499 - 0 0 0 0 0 0 0 0 0 0 0 0
52500 - 0 0 0 0 0 0 0 0 0 0 0 0
52501 - 0 0 0 0 0 0 0 0 0 0 0 0
52502 - 0 0 0 0 0 0 0 0 0 0 0 0
52503 - 0 0 0 0 0 0 0 0 0 0 0 0
52504 - 0 0 0 0 0 0 0 0 0 0 0 0
52505 - 0 0 0 6 6 6 22 22 22 50 50 50
52506 - 78 78 78 34 34 34 2 2 6 2 2 6
52507 - 2 2 6 2 2 6 2 2 6 2 2 6
52508 - 2 2 6 2 2 6 2 2 6 2 2 6
52509 - 2 2 6 2 2 6 6 6 6 70 70 70
52510 - 78 78 78 46 46 46 22 22 22 6 6 6
52511 - 0 0 0 0 0 0 0 0 0 0 0 0
52512 - 0 0 0 0 0 0 0 0 0 0 0 0
52513 - 0 0 0 0 0 0 0 0 0 0 0 0
52514 - 0 0 0 0 0 0 0 0 0 0 0 0
52515 - 0 0 0 0 0 0 0 0 0 0 0 0
52516 - 0 0 0 0 0 0 0 0 0 0 0 0
52517 - 0 0 0 0 0 0 0 0 0 0 0 0
52518 - 0 0 0 0 0 0 0 0 0 0 0 0
52519 - 0 0 1 0 0 1 0 0 1 0 0 0
52520 - 0 0 0 0 0 0 0 0 0 0 0 0
52521 - 0 0 0 0 0 0 0 0 0 0 0 0
52522 - 0 0 0 0 0 0 0 0 0 0 0 0
52523 - 0 0 0 0 0 0 0 0 0 0 0 0
52524 - 0 0 0 0 0 0 0 0 0 0 0 0
52525 - 6 6 6 18 18 18 42 42 42 82 82 82
52526 - 26 26 26 2 2 6 2 2 6 2 2 6
52527 - 2 2 6 2 2 6 2 2 6 2 2 6
52528 - 2 2 6 2 2 6 2 2 6 14 14 14
52529 - 46 46 46 34 34 34 6 6 6 2 2 6
52530 - 42 42 42 78 78 78 42 42 42 18 18 18
52531 - 6 6 6 0 0 0 0 0 0 0 0 0
52532 - 0 0 0 0 0 0 0 0 0 0 0 0
52533 - 0 0 0 0 0 0 0 0 0 0 0 0
52534 - 0 0 0 0 0 0 0 0 0 0 0 0
52535 - 0 0 0 0 0 0 0 0 0 0 0 0
52536 - 0 0 0 0 0 0 0 0 0 0 0 0
52537 - 0 0 0 0 0 0 0 0 0 0 0 0
52538 - 0 0 0 0 0 0 0 0 0 0 0 0
52539 - 0 0 1 0 0 0 0 0 1 0 0 0
52540 - 0 0 0 0 0 0 0 0 0 0 0 0
52541 - 0 0 0 0 0 0 0 0 0 0 0 0
52542 - 0 0 0 0 0 0 0 0 0 0 0 0
52543 - 0 0 0 0 0 0 0 0 0 0 0 0
52544 - 0 0 0 0 0 0 0 0 0 0 0 0
52545 - 10 10 10 30 30 30 66 66 66 58 58 58
52546 - 2 2 6 2 2 6 2 2 6 2 2 6
52547 - 2 2 6 2 2 6 2 2 6 2 2 6
52548 - 2 2 6 2 2 6 2 2 6 26 26 26
52549 - 86 86 86 101 101 101 46 46 46 10 10 10
52550 - 2 2 6 58 58 58 70 70 70 34 34 34
52551 - 10 10 10 0 0 0 0 0 0 0 0 0
52552 - 0 0 0 0 0 0 0 0 0 0 0 0
52553 - 0 0 0 0 0 0 0 0 0 0 0 0
52554 - 0 0 0 0 0 0 0 0 0 0 0 0
52555 - 0 0 0 0 0 0 0 0 0 0 0 0
52556 - 0 0 0 0 0 0 0 0 0 0 0 0
52557 - 0 0 0 0 0 0 0 0 0 0 0 0
52558 - 0 0 0 0 0 0 0 0 0 0 0 0
52559 - 0 0 1 0 0 1 0 0 1 0 0 0
52560 - 0 0 0 0 0 0 0 0 0 0 0 0
52561 - 0 0 0 0 0 0 0 0 0 0 0 0
52562 - 0 0 0 0 0 0 0 0 0 0 0 0
52563 - 0 0 0 0 0 0 0 0 0 0 0 0
52564 - 0 0 0 0 0 0 0 0 0 0 0 0
52565 - 14 14 14 42 42 42 86 86 86 10 10 10
52566 - 2 2 6 2 2 6 2 2 6 2 2 6
52567 - 2 2 6 2 2 6 2 2 6 2 2 6
52568 - 2 2 6 2 2 6 2 2 6 30 30 30
52569 - 94 94 94 94 94 94 58 58 58 26 26 26
52570 - 2 2 6 6 6 6 78 78 78 54 54 54
52571 - 22 22 22 6 6 6 0 0 0 0 0 0
52572 - 0 0 0 0 0 0 0 0 0 0 0 0
52573 - 0 0 0 0 0 0 0 0 0 0 0 0
52574 - 0 0 0 0 0 0 0 0 0 0 0 0
52575 - 0 0 0 0 0 0 0 0 0 0 0 0
52576 - 0 0 0 0 0 0 0 0 0 0 0 0
52577 - 0 0 0 0 0 0 0 0 0 0 0 0
52578 - 0 0 0 0 0 0 0 0 0 0 0 0
52579 - 0 0 0 0 0 0 0 0 0 0 0 0
52580 - 0 0 0 0 0 0 0 0 0 0 0 0
52581 - 0 0 0 0 0 0 0 0 0 0 0 0
52582 - 0 0 0 0 0 0 0 0 0 0 0 0
52583 - 0 0 0 0 0 0 0 0 0 0 0 0
52584 - 0 0 0 0 0 0 0 0 0 6 6 6
52585 - 22 22 22 62 62 62 62 62 62 2 2 6
52586 - 2 2 6 2 2 6 2 2 6 2 2 6
52587 - 2 2 6 2 2 6 2 2 6 2 2 6
52588 - 2 2 6 2 2 6 2 2 6 26 26 26
52589 - 54 54 54 38 38 38 18 18 18 10 10 10
52590 - 2 2 6 2 2 6 34 34 34 82 82 82
52591 - 38 38 38 14 14 14 0 0 0 0 0 0
52592 - 0 0 0 0 0 0 0 0 0 0 0 0
52593 - 0 0 0 0 0 0 0 0 0 0 0 0
52594 - 0 0 0 0 0 0 0 0 0 0 0 0
52595 - 0 0 0 0 0 0 0 0 0 0 0 0
52596 - 0 0 0 0 0 0 0 0 0 0 0 0
52597 - 0 0 0 0 0 0 0 0 0 0 0 0
52598 - 0 0 0 0 0 0 0 0 0 0 0 0
52599 - 0 0 0 0 0 1 0 0 1 0 0 0
52600 - 0 0 0 0 0 0 0 0 0 0 0 0
52601 - 0 0 0 0 0 0 0 0 0 0 0 0
52602 - 0 0 0 0 0 0 0 0 0 0 0 0
52603 - 0 0 0 0 0 0 0 0 0 0 0 0
52604 - 0 0 0 0 0 0 0 0 0 6 6 6
52605 - 30 30 30 78 78 78 30 30 30 2 2 6
52606 - 2 2 6 2 2 6 2 2 6 2 2 6
52607 - 2 2 6 2 2 6 2 2 6 2 2 6
52608 - 2 2 6 2 2 6 2 2 6 10 10 10
52609 - 10 10 10 2 2 6 2 2 6 2 2 6
52610 - 2 2 6 2 2 6 2 2 6 78 78 78
52611 - 50 50 50 18 18 18 6 6 6 0 0 0
52612 - 0 0 0 0 0 0 0 0 0 0 0 0
52613 - 0 0 0 0 0 0 0 0 0 0 0 0
52614 - 0 0 0 0 0 0 0 0 0 0 0 0
52615 - 0 0 0 0 0 0 0 0 0 0 0 0
52616 - 0 0 0 0 0 0 0 0 0 0 0 0
52617 - 0 0 0 0 0 0 0 0 0 0 0 0
52618 - 0 0 0 0 0 0 0 0 0 0 0 0
52619 - 0 0 1 0 0 0 0 0 0 0 0 0
52620 - 0 0 0 0 0 0 0 0 0 0 0 0
52621 - 0 0 0 0 0 0 0 0 0 0 0 0
52622 - 0 0 0 0 0 0 0 0 0 0 0 0
52623 - 0 0 0 0 0 0 0 0 0 0 0 0
52624 - 0 0 0 0 0 0 0 0 0 10 10 10
52625 - 38 38 38 86 86 86 14 14 14 2 2 6
52626 - 2 2 6 2 2 6 2 2 6 2 2 6
52627 - 2 2 6 2 2 6 2 2 6 2 2 6
52628 - 2 2 6 2 2 6 2 2 6 2 2 6
52629 - 2 2 6 2 2 6 2 2 6 2 2 6
52630 - 2 2 6 2 2 6 2 2 6 54 54 54
52631 - 66 66 66 26 26 26 6 6 6 0 0 0
52632 - 0 0 0 0 0 0 0 0 0 0 0 0
52633 - 0 0 0 0 0 0 0 0 0 0 0 0
52634 - 0 0 0 0 0 0 0 0 0 0 0 0
52635 - 0 0 0 0 0 0 0 0 0 0 0 0
52636 - 0 0 0 0 0 0 0 0 0 0 0 0
52637 - 0 0 0 0 0 0 0 0 0 0 0 0
52638 - 0 0 0 0 0 0 0 0 0 0 0 0
52639 - 0 0 0 0 0 1 0 0 1 0 0 0
52640 - 0 0 0 0 0 0 0 0 0 0 0 0
52641 - 0 0 0 0 0 0 0 0 0 0 0 0
52642 - 0 0 0 0 0 0 0 0 0 0 0 0
52643 - 0 0 0 0 0 0 0 0 0 0 0 0
52644 - 0 0 0 0 0 0 0 0 0 14 14 14
52645 - 42 42 42 82 82 82 2 2 6 2 2 6
52646 - 2 2 6 6 6 6 10 10 10 2 2 6
52647 - 2 2 6 2 2 6 2 2 6 2 2 6
52648 - 2 2 6 2 2 6 2 2 6 6 6 6
52649 - 14 14 14 10 10 10 2 2 6 2 2 6
52650 - 2 2 6 2 2 6 2 2 6 18 18 18
52651 - 82 82 82 34 34 34 10 10 10 0 0 0
52652 - 0 0 0 0 0 0 0 0 0 0 0 0
52653 - 0 0 0 0 0 0 0 0 0 0 0 0
52654 - 0 0 0 0 0 0 0 0 0 0 0 0
52655 - 0 0 0 0 0 0 0 0 0 0 0 0
52656 - 0 0 0 0 0 0 0 0 0 0 0 0
52657 - 0 0 0 0 0 0 0 0 0 0 0 0
52658 - 0 0 0 0 0 0 0 0 0 0 0 0
52659 - 0 0 1 0 0 0 0 0 0 0 0 0
52660 - 0 0 0 0 0 0 0 0 0 0 0 0
52661 - 0 0 0 0 0 0 0 0 0 0 0 0
52662 - 0 0 0 0 0 0 0 0 0 0 0 0
52663 - 0 0 0 0 0 0 0 0 0 0 0 0
52664 - 0 0 0 0 0 0 0 0 0 14 14 14
52665 - 46 46 46 86 86 86 2 2 6 2 2 6
52666 - 6 6 6 6 6 6 22 22 22 34 34 34
52667 - 6 6 6 2 2 6 2 2 6 2 2 6
52668 - 2 2 6 2 2 6 18 18 18 34 34 34
52669 - 10 10 10 50 50 50 22 22 22 2 2 6
52670 - 2 2 6 2 2 6 2 2 6 10 10 10
52671 - 86 86 86 42 42 42 14 14 14 0 0 0
52672 - 0 0 0 0 0 0 0 0 0 0 0 0
52673 - 0 0 0 0 0 0 0 0 0 0 0 0
52674 - 0 0 0 0 0 0 0 0 0 0 0 0
52675 - 0 0 0 0 0 0 0 0 0 0 0 0
52676 - 0 0 0 0 0 0 0 0 0 0 0 0
52677 - 0 0 0 0 0 0 0 0 0 0 0 0
52678 - 0 0 0 0 0 0 0 0 0 0 0 0
52679 - 0 0 1 0 0 1 0 0 1 0 0 0
52680 - 0 0 0 0 0 0 0 0 0 0 0 0
52681 - 0 0 0 0 0 0 0 0 0 0 0 0
52682 - 0 0 0 0 0 0 0 0 0 0 0 0
52683 - 0 0 0 0 0 0 0 0 0 0 0 0
52684 - 0 0 0 0 0 0 0 0 0 14 14 14
52685 - 46 46 46 86 86 86 2 2 6 2 2 6
52686 - 38 38 38 116 116 116 94 94 94 22 22 22
52687 - 22 22 22 2 2 6 2 2 6 2 2 6
52688 - 14 14 14 86 86 86 138 138 138 162 162 162
52689 -154 154 154 38 38 38 26 26 26 6 6 6
52690 - 2 2 6 2 2 6 2 2 6 2 2 6
52691 - 86 86 86 46 46 46 14 14 14 0 0 0
52692 - 0 0 0 0 0 0 0 0 0 0 0 0
52693 - 0 0 0 0 0 0 0 0 0 0 0 0
52694 - 0 0 0 0 0 0 0 0 0 0 0 0
52695 - 0 0 0 0 0 0 0 0 0 0 0 0
52696 - 0 0 0 0 0 0 0 0 0 0 0 0
52697 - 0 0 0 0 0 0 0 0 0 0 0 0
52698 - 0 0 0 0 0 0 0 0 0 0 0 0
52699 - 0 0 0 0 0 0 0 0 0 0 0 0
52700 - 0 0 0 0 0 0 0 0 0 0 0 0
52701 - 0 0 0 0 0 0 0 0 0 0 0 0
52702 - 0 0 0 0 0 0 0 0 0 0 0 0
52703 - 0 0 0 0 0 0 0 0 0 0 0 0
52704 - 0 0 0 0 0 0 0 0 0 14 14 14
52705 - 46 46 46 86 86 86 2 2 6 14 14 14
52706 -134 134 134 198 198 198 195 195 195 116 116 116
52707 - 10 10 10 2 2 6 2 2 6 6 6 6
52708 -101 98 89 187 187 187 210 210 210 218 218 218
52709 -214 214 214 134 134 134 14 14 14 6 6 6
52710 - 2 2 6 2 2 6 2 2 6 2 2 6
52711 - 86 86 86 50 50 50 18 18 18 6 6 6
52712 - 0 0 0 0 0 0 0 0 0 0 0 0
52713 - 0 0 0 0 0 0 0 0 0 0 0 0
52714 - 0 0 0 0 0 0 0 0 0 0 0 0
52715 - 0 0 0 0 0 0 0 0 0 0 0 0
52716 - 0 0 0 0 0 0 0 0 0 0 0 0
52717 - 0 0 0 0 0 0 0 0 0 0 0 0
52718 - 0 0 0 0 0 0 0 0 1 0 0 0
52719 - 0 0 1 0 0 1 0 0 1 0 0 0
52720 - 0 0 0 0 0 0 0 0 0 0 0 0
52721 - 0 0 0 0 0 0 0 0 0 0 0 0
52722 - 0 0 0 0 0 0 0 0 0 0 0 0
52723 - 0 0 0 0 0 0 0 0 0 0 0 0
52724 - 0 0 0 0 0 0 0 0 0 14 14 14
52725 - 46 46 46 86 86 86 2 2 6 54 54 54
52726 -218 218 218 195 195 195 226 226 226 246 246 246
52727 - 58 58 58 2 2 6 2 2 6 30 30 30
52728 -210 210 210 253 253 253 174 174 174 123 123 123
52729 -221 221 221 234 234 234 74 74 74 2 2 6
52730 - 2 2 6 2 2 6 2 2 6 2 2 6
52731 - 70 70 70 58 58 58 22 22 22 6 6 6
52732 - 0 0 0 0 0 0 0 0 0 0 0 0
52733 - 0 0 0 0 0 0 0 0 0 0 0 0
52734 - 0 0 0 0 0 0 0 0 0 0 0 0
52735 - 0 0 0 0 0 0 0 0 0 0 0 0
52736 - 0 0 0 0 0 0 0 0 0 0 0 0
52737 - 0 0 0 0 0 0 0 0 0 0 0 0
52738 - 0 0 0 0 0 0 0 0 0 0 0 0
52739 - 0 0 0 0 0 0 0 0 0 0 0 0
52740 - 0 0 0 0 0 0 0 0 0 0 0 0
52741 - 0 0 0 0 0 0 0 0 0 0 0 0
52742 - 0 0 0 0 0 0 0 0 0 0 0 0
52743 - 0 0 0 0 0 0 0 0 0 0 0 0
52744 - 0 0 0 0 0 0 0 0 0 14 14 14
52745 - 46 46 46 82 82 82 2 2 6 106 106 106
52746 -170 170 170 26 26 26 86 86 86 226 226 226
52747 -123 123 123 10 10 10 14 14 14 46 46 46
52748 -231 231 231 190 190 190 6 6 6 70 70 70
52749 - 90 90 90 238 238 238 158 158 158 2 2 6
52750 - 2 2 6 2 2 6 2 2 6 2 2 6
52751 - 70 70 70 58 58 58 22 22 22 6 6 6
52752 - 0 0 0 0 0 0 0 0 0 0 0 0
52753 - 0 0 0 0 0 0 0 0 0 0 0 0
52754 - 0 0 0 0 0 0 0 0 0 0 0 0
52755 - 0 0 0 0 0 0 0 0 0 0 0 0
52756 - 0 0 0 0 0 0 0 0 0 0 0 0
52757 - 0 0 0 0 0 0 0 0 0 0 0 0
52758 - 0 0 0 0 0 0 0 0 1 0 0 0
52759 - 0 0 1 0 0 1 0 0 1 0 0 0
52760 - 0 0 0 0 0 0 0 0 0 0 0 0
52761 - 0 0 0 0 0 0 0 0 0 0 0 0
52762 - 0 0 0 0 0 0 0 0 0 0 0 0
52763 - 0 0 0 0 0 0 0 0 0 0 0 0
52764 - 0 0 0 0 0 0 0 0 0 14 14 14
52765 - 42 42 42 86 86 86 6 6 6 116 116 116
52766 -106 106 106 6 6 6 70 70 70 149 149 149
52767 -128 128 128 18 18 18 38 38 38 54 54 54
52768 -221 221 221 106 106 106 2 2 6 14 14 14
52769 - 46 46 46 190 190 190 198 198 198 2 2 6
52770 - 2 2 6 2 2 6 2 2 6 2 2 6
52771 - 74 74 74 62 62 62 22 22 22 6 6 6
52772 - 0 0 0 0 0 0 0 0 0 0 0 0
52773 - 0 0 0 0 0 0 0 0 0 0 0 0
52774 - 0 0 0 0 0 0 0 0 0 0 0 0
52775 - 0 0 0 0 0 0 0 0 0 0 0 0
52776 - 0 0 0 0 0 0 0 0 0 0 0 0
52777 - 0 0 0 0 0 0 0 0 0 0 0 0
52778 - 0 0 0 0 0 0 0 0 1 0 0 0
52779 - 0 0 1 0 0 0 0 0 1 0 0 0
52780 - 0 0 0 0 0 0 0 0 0 0 0 0
52781 - 0 0 0 0 0 0 0 0 0 0 0 0
52782 - 0 0 0 0 0 0 0 0 0 0 0 0
52783 - 0 0 0 0 0 0 0 0 0 0 0 0
52784 - 0 0 0 0 0 0 0 0 0 14 14 14
52785 - 42 42 42 94 94 94 14 14 14 101 101 101
52786 -128 128 128 2 2 6 18 18 18 116 116 116
52787 -118 98 46 121 92 8 121 92 8 98 78 10
52788 -162 162 162 106 106 106 2 2 6 2 2 6
52789 - 2 2 6 195 195 195 195 195 195 6 6 6
52790 - 2 2 6 2 2 6 2 2 6 2 2 6
52791 - 74 74 74 62 62 62 22 22 22 6 6 6
52792 - 0 0 0 0 0 0 0 0 0 0 0 0
52793 - 0 0 0 0 0 0 0 0 0 0 0 0
52794 - 0 0 0 0 0 0 0 0 0 0 0 0
52795 - 0 0 0 0 0 0 0 0 0 0 0 0
52796 - 0 0 0 0 0 0 0 0 0 0 0 0
52797 - 0 0 0 0 0 0 0 0 0 0 0 0
52798 - 0 0 0 0 0 0 0 0 1 0 0 1
52799 - 0 0 1 0 0 0 0 0 1 0 0 0
52800 - 0 0 0 0 0 0 0 0 0 0 0 0
52801 - 0 0 0 0 0 0 0 0 0 0 0 0
52802 - 0 0 0 0 0 0 0 0 0 0 0 0
52803 - 0 0 0 0 0 0 0 0 0 0 0 0
52804 - 0 0 0 0 0 0 0 0 0 10 10 10
52805 - 38 38 38 90 90 90 14 14 14 58 58 58
52806 -210 210 210 26 26 26 54 38 6 154 114 10
52807 -226 170 11 236 186 11 225 175 15 184 144 12
52808 -215 174 15 175 146 61 37 26 9 2 2 6
52809 - 70 70 70 246 246 246 138 138 138 2 2 6
52810 - 2 2 6 2 2 6 2 2 6 2 2 6
52811 - 70 70 70 66 66 66 26 26 26 6 6 6
52812 - 0 0 0 0 0 0 0 0 0 0 0 0
52813 - 0 0 0 0 0 0 0 0 0 0 0 0
52814 - 0 0 0 0 0 0 0 0 0 0 0 0
52815 - 0 0 0 0 0 0 0 0 0 0 0 0
52816 - 0 0 0 0 0 0 0 0 0 0 0 0
52817 - 0 0 0 0 0 0 0 0 0 0 0 0
52818 - 0 0 0 0 0 0 0 0 0 0 0 0
52819 - 0 0 0 0 0 0 0 0 0 0 0 0
52820 - 0 0 0 0 0 0 0 0 0 0 0 0
52821 - 0 0 0 0 0 0 0 0 0 0 0 0
52822 - 0 0 0 0 0 0 0 0 0 0 0 0
52823 - 0 0 0 0 0 0 0 0 0 0 0 0
52824 - 0 0 0 0 0 0 0 0 0 10 10 10
52825 - 38 38 38 86 86 86 14 14 14 10 10 10
52826 -195 195 195 188 164 115 192 133 9 225 175 15
52827 -239 182 13 234 190 10 232 195 16 232 200 30
52828 -245 207 45 241 208 19 232 195 16 184 144 12
52829 -218 194 134 211 206 186 42 42 42 2 2 6
52830 - 2 2 6 2 2 6 2 2 6 2 2 6
52831 - 50 50 50 74 74 74 30 30 30 6 6 6
52832 - 0 0 0 0 0 0 0 0 0 0 0 0
52833 - 0 0 0 0 0 0 0 0 0 0 0 0
52834 - 0 0 0 0 0 0 0 0 0 0 0 0
52835 - 0 0 0 0 0 0 0 0 0 0 0 0
52836 - 0 0 0 0 0 0 0 0 0 0 0 0
52837 - 0 0 0 0 0 0 0 0 0 0 0 0
52838 - 0 0 0 0 0 0 0 0 0 0 0 0
52839 - 0 0 0 0 0 0 0 0 0 0 0 0
52840 - 0 0 0 0 0 0 0 0 0 0 0 0
52841 - 0 0 0 0 0 0 0 0 0 0 0 0
52842 - 0 0 0 0 0 0 0 0 0 0 0 0
52843 - 0 0 0 0 0 0 0 0 0 0 0 0
52844 - 0 0 0 0 0 0 0 0 0 10 10 10
52845 - 34 34 34 86 86 86 14 14 14 2 2 6
52846 -121 87 25 192 133 9 219 162 10 239 182 13
52847 -236 186 11 232 195 16 241 208 19 244 214 54
52848 -246 218 60 246 218 38 246 215 20 241 208 19
52849 -241 208 19 226 184 13 121 87 25 2 2 6
52850 - 2 2 6 2 2 6 2 2 6 2 2 6
52851 - 50 50 50 82 82 82 34 34 34 10 10 10
52852 - 0 0 0 0 0 0 0 0 0 0 0 0
52853 - 0 0 0 0 0 0 0 0 0 0 0 0
52854 - 0 0 0 0 0 0 0 0 0 0 0 0
52855 - 0 0 0 0 0 0 0 0 0 0 0 0
52856 - 0 0 0 0 0 0 0 0 0 0 0 0
52857 - 0 0 0 0 0 0 0 0 0 0 0 0
52858 - 0 0 0 0 0 0 0 0 0 0 0 0
52859 - 0 0 0 0 0 0 0 0 0 0 0 0
52860 - 0 0 0 0 0 0 0 0 0 0 0 0
52861 - 0 0 0 0 0 0 0 0 0 0 0 0
52862 - 0 0 0 0 0 0 0 0 0 0 0 0
52863 - 0 0 0 0 0 0 0 0 0 0 0 0
52864 - 0 0 0 0 0 0 0 0 0 10 10 10
52865 - 34 34 34 82 82 82 30 30 30 61 42 6
52866 -180 123 7 206 145 10 230 174 11 239 182 13
52867 -234 190 10 238 202 15 241 208 19 246 218 74
52868 -246 218 38 246 215 20 246 215 20 246 215 20
52869 -226 184 13 215 174 15 184 144 12 6 6 6
52870 - 2 2 6 2 2 6 2 2 6 2 2 6
52871 - 26 26 26 94 94 94 42 42 42 14 14 14
52872 - 0 0 0 0 0 0 0 0 0 0 0 0
52873 - 0 0 0 0 0 0 0 0 0 0 0 0
52874 - 0 0 0 0 0 0 0 0 0 0 0 0
52875 - 0 0 0 0 0 0 0 0 0 0 0 0
52876 - 0 0 0 0 0 0 0 0 0 0 0 0
52877 - 0 0 0 0 0 0 0 0 0 0 0 0
52878 - 0 0 0 0 0 0 0 0 0 0 0 0
52879 - 0 0 0 0 0 0 0 0 0 0 0 0
52880 - 0 0 0 0 0 0 0 0 0 0 0 0
52881 - 0 0 0 0 0 0 0 0 0 0 0 0
52882 - 0 0 0 0 0 0 0 0 0 0 0 0
52883 - 0 0 0 0 0 0 0 0 0 0 0 0
52884 - 0 0 0 0 0 0 0 0 0 10 10 10
52885 - 30 30 30 78 78 78 50 50 50 104 69 6
52886 -192 133 9 216 158 10 236 178 12 236 186 11
52887 -232 195 16 241 208 19 244 214 54 245 215 43
52888 -246 215 20 246 215 20 241 208 19 198 155 10
52889 -200 144 11 216 158 10 156 118 10 2 2 6
52890 - 2 2 6 2 2 6 2 2 6 2 2 6
52891 - 6 6 6 90 90 90 54 54 54 18 18 18
52892 - 6 6 6 0 0 0 0 0 0 0 0 0
52893 - 0 0 0 0 0 0 0 0 0 0 0 0
52894 - 0 0 0 0 0 0 0 0 0 0 0 0
52895 - 0 0 0 0 0 0 0 0 0 0 0 0
52896 - 0 0 0 0 0 0 0 0 0 0 0 0
52897 - 0 0 0 0 0 0 0 0 0 0 0 0
52898 - 0 0 0 0 0 0 0 0 0 0 0 0
52899 - 0 0 0 0 0 0 0 0 0 0 0 0
52900 - 0 0 0 0 0 0 0 0 0 0 0 0
52901 - 0 0 0 0 0 0 0 0 0 0 0 0
52902 - 0 0 0 0 0 0 0 0 0 0 0 0
52903 - 0 0 0 0 0 0 0 0 0 0 0 0
52904 - 0 0 0 0 0 0 0 0 0 10 10 10
52905 - 30 30 30 78 78 78 46 46 46 22 22 22
52906 -137 92 6 210 162 10 239 182 13 238 190 10
52907 -238 202 15 241 208 19 246 215 20 246 215 20
52908 -241 208 19 203 166 17 185 133 11 210 150 10
52909 -216 158 10 210 150 10 102 78 10 2 2 6
52910 - 6 6 6 54 54 54 14 14 14 2 2 6
52911 - 2 2 6 62 62 62 74 74 74 30 30 30
52912 - 10 10 10 0 0 0 0 0 0 0 0 0
52913 - 0 0 0 0 0 0 0 0 0 0 0 0
52914 - 0 0 0 0 0 0 0 0 0 0 0 0
52915 - 0 0 0 0 0 0 0 0 0 0 0 0
52916 - 0 0 0 0 0 0 0 0 0 0 0 0
52917 - 0 0 0 0 0 0 0 0 0 0 0 0
52918 - 0 0 0 0 0 0 0 0 0 0 0 0
52919 - 0 0 0 0 0 0 0 0 0 0 0 0
52920 - 0 0 0 0 0 0 0 0 0 0 0 0
52921 - 0 0 0 0 0 0 0 0 0 0 0 0
52922 - 0 0 0 0 0 0 0 0 0 0 0 0
52923 - 0 0 0 0 0 0 0 0 0 0 0 0
52924 - 0 0 0 0 0 0 0 0 0 10 10 10
52925 - 34 34 34 78 78 78 50 50 50 6 6 6
52926 - 94 70 30 139 102 15 190 146 13 226 184 13
52927 -232 200 30 232 195 16 215 174 15 190 146 13
52928 -168 122 10 192 133 9 210 150 10 213 154 11
52929 -202 150 34 182 157 106 101 98 89 2 2 6
52930 - 2 2 6 78 78 78 116 116 116 58 58 58
52931 - 2 2 6 22 22 22 90 90 90 46 46 46
52932 - 18 18 18 6 6 6 0 0 0 0 0 0
52933 - 0 0 0 0 0 0 0 0 0 0 0 0
52934 - 0 0 0 0 0 0 0 0 0 0 0 0
52935 - 0 0 0 0 0 0 0 0 0 0 0 0
52936 - 0 0 0 0 0 0 0 0 0 0 0 0
52937 - 0 0 0 0 0 0 0 0 0 0 0 0
52938 - 0 0 0 0 0 0 0 0 0 0 0 0
52939 - 0 0 0 0 0 0 0 0 0 0 0 0
52940 - 0 0 0 0 0 0 0 0 0 0 0 0
52941 - 0 0 0 0 0 0 0 0 0 0 0 0
52942 - 0 0 0 0 0 0 0 0 0 0 0 0
52943 - 0 0 0 0 0 0 0 0 0 0 0 0
52944 - 0 0 0 0 0 0 0 0 0 10 10 10
52945 - 38 38 38 86 86 86 50 50 50 6 6 6
52946 -128 128 128 174 154 114 156 107 11 168 122 10
52947 -198 155 10 184 144 12 197 138 11 200 144 11
52948 -206 145 10 206 145 10 197 138 11 188 164 115
52949 -195 195 195 198 198 198 174 174 174 14 14 14
52950 - 2 2 6 22 22 22 116 116 116 116 116 116
52951 - 22 22 22 2 2 6 74 74 74 70 70 70
52952 - 30 30 30 10 10 10 0 0 0 0 0 0
52953 - 0 0 0 0 0 0 0 0 0 0 0 0
52954 - 0 0 0 0 0 0 0 0 0 0 0 0
52955 - 0 0 0 0 0 0 0 0 0 0 0 0
52956 - 0 0 0 0 0 0 0 0 0 0 0 0
52957 - 0 0 0 0 0 0 0 0 0 0 0 0
52958 - 0 0 0 0 0 0 0 0 0 0 0 0
52959 - 0 0 0 0 0 0 0 0 0 0 0 0
52960 - 0 0 0 0 0 0 0 0 0 0 0 0
52961 - 0 0 0 0 0 0 0 0 0 0 0 0
52962 - 0 0 0 0 0 0 0 0 0 0 0 0
52963 - 0 0 0 0 0 0 0 0 0 0 0 0
52964 - 0 0 0 0 0 0 6 6 6 18 18 18
52965 - 50 50 50 101 101 101 26 26 26 10 10 10
52966 -138 138 138 190 190 190 174 154 114 156 107 11
52967 -197 138 11 200 144 11 197 138 11 192 133 9
52968 -180 123 7 190 142 34 190 178 144 187 187 187
52969 -202 202 202 221 221 221 214 214 214 66 66 66
52970 - 2 2 6 2 2 6 50 50 50 62 62 62
52971 - 6 6 6 2 2 6 10 10 10 90 90 90
52972 - 50 50 50 18 18 18 6 6 6 0 0 0
52973 - 0 0 0 0 0 0 0 0 0 0 0 0
52974 - 0 0 0 0 0 0 0 0 0 0 0 0
52975 - 0 0 0 0 0 0 0 0 0 0 0 0
52976 - 0 0 0 0 0 0 0 0 0 0 0 0
52977 - 0 0 0 0 0 0 0 0 0 0 0 0
52978 - 0 0 0 0 0 0 0 0 0 0 0 0
52979 - 0 0 0 0 0 0 0 0 0 0 0 0
52980 - 0 0 0 0 0 0 0 0 0 0 0 0
52981 - 0 0 0 0 0 0 0 0 0 0 0 0
52982 - 0 0 0 0 0 0 0 0 0 0 0 0
52983 - 0 0 0 0 0 0 0 0 0 0 0 0
52984 - 0 0 0 0 0 0 10 10 10 34 34 34
52985 - 74 74 74 74 74 74 2 2 6 6 6 6
52986 -144 144 144 198 198 198 190 190 190 178 166 146
52987 -154 121 60 156 107 11 156 107 11 168 124 44
52988 -174 154 114 187 187 187 190 190 190 210 210 210
52989 -246 246 246 253 253 253 253 253 253 182 182 182
52990 - 6 6 6 2 2 6 2 2 6 2 2 6
52991 - 2 2 6 2 2 6 2 2 6 62 62 62
52992 - 74 74 74 34 34 34 14 14 14 0 0 0
52993 - 0 0 0 0 0 0 0 0 0 0 0 0
52994 - 0 0 0 0 0 0 0 0 0 0 0 0
52995 - 0 0 0 0 0 0 0 0 0 0 0 0
52996 - 0 0 0 0 0 0 0 0 0 0 0 0
52997 - 0 0 0 0 0 0 0 0 0 0 0 0
52998 - 0 0 0 0 0 0 0 0 0 0 0 0
52999 - 0 0 0 0 0 0 0 0 0 0 0 0
53000 - 0 0 0 0 0 0 0 0 0 0 0 0
53001 - 0 0 0 0 0 0 0 0 0 0 0 0
53002 - 0 0 0 0 0 0 0 0 0 0 0 0
53003 - 0 0 0 0 0 0 0 0 0 0 0 0
53004 - 0 0 0 10 10 10 22 22 22 54 54 54
53005 - 94 94 94 18 18 18 2 2 6 46 46 46
53006 -234 234 234 221 221 221 190 190 190 190 190 190
53007 -190 190 190 187 187 187 187 187 187 190 190 190
53008 -190 190 190 195 195 195 214 214 214 242 242 242
53009 -253 253 253 253 253 253 253 253 253 253 253 253
53010 - 82 82 82 2 2 6 2 2 6 2 2 6
53011 - 2 2 6 2 2 6 2 2 6 14 14 14
53012 - 86 86 86 54 54 54 22 22 22 6 6 6
53013 - 0 0 0 0 0 0 0 0 0 0 0 0
53014 - 0 0 0 0 0 0 0 0 0 0 0 0
53015 - 0 0 0 0 0 0 0 0 0 0 0 0
53016 - 0 0 0 0 0 0 0 0 0 0 0 0
53017 - 0 0 0 0 0 0 0 0 0 0 0 0
53018 - 0 0 0 0 0 0 0 0 0 0 0 0
53019 - 0 0 0 0 0 0 0 0 0 0 0 0
53020 - 0 0 0 0 0 0 0 0 0 0 0 0
53021 - 0 0 0 0 0 0 0 0 0 0 0 0
53022 - 0 0 0 0 0 0 0 0 0 0 0 0
53023 - 0 0 0 0 0 0 0 0 0 0 0 0
53024 - 6 6 6 18 18 18 46 46 46 90 90 90
53025 - 46 46 46 18 18 18 6 6 6 182 182 182
53026 -253 253 253 246 246 246 206 206 206 190 190 190
53027 -190 190 190 190 190 190 190 190 190 190 190 190
53028 -206 206 206 231 231 231 250 250 250 253 253 253
53029 -253 253 253 253 253 253 253 253 253 253 253 253
53030 -202 202 202 14 14 14 2 2 6 2 2 6
53031 - 2 2 6 2 2 6 2 2 6 2 2 6
53032 - 42 42 42 86 86 86 42 42 42 18 18 18
53033 - 6 6 6 0 0 0 0 0 0 0 0 0
53034 - 0 0 0 0 0 0 0 0 0 0 0 0
53035 - 0 0 0 0 0 0 0 0 0 0 0 0
53036 - 0 0 0 0 0 0 0 0 0 0 0 0
53037 - 0 0 0 0 0 0 0 0 0 0 0 0
53038 - 0 0 0 0 0 0 0 0 0 0 0 0
53039 - 0 0 0 0 0 0 0 0 0 0 0 0
53040 - 0 0 0 0 0 0 0 0 0 0 0 0
53041 - 0 0 0 0 0 0 0 0 0 0 0 0
53042 - 0 0 0 0 0 0 0 0 0 0 0 0
53043 - 0 0 0 0 0 0 0 0 0 6 6 6
53044 - 14 14 14 38 38 38 74 74 74 66 66 66
53045 - 2 2 6 6 6 6 90 90 90 250 250 250
53046 -253 253 253 253 253 253 238 238 238 198 198 198
53047 -190 190 190 190 190 190 195 195 195 221 221 221
53048 -246 246 246 253 253 253 253 253 253 253 253 253
53049 -253 253 253 253 253 253 253 253 253 253 253 253
53050 -253 253 253 82 82 82 2 2 6 2 2 6
53051 - 2 2 6 2 2 6 2 2 6 2 2 6
53052 - 2 2 6 78 78 78 70 70 70 34 34 34
53053 - 14 14 14 6 6 6 0 0 0 0 0 0
53054 - 0 0 0 0 0 0 0 0 0 0 0 0
53055 - 0 0 0 0 0 0 0 0 0 0 0 0
53056 - 0 0 0 0 0 0 0 0 0 0 0 0
53057 - 0 0 0 0 0 0 0 0 0 0 0 0
53058 - 0 0 0 0 0 0 0 0 0 0 0 0
53059 - 0 0 0 0 0 0 0 0 0 0 0 0
53060 - 0 0 0 0 0 0 0 0 0 0 0 0
53061 - 0 0 0 0 0 0 0 0 0 0 0 0
53062 - 0 0 0 0 0 0 0 0 0 0 0 0
53063 - 0 0 0 0 0 0 0 0 0 14 14 14
53064 - 34 34 34 66 66 66 78 78 78 6 6 6
53065 - 2 2 6 18 18 18 218 218 218 253 253 253
53066 -253 253 253 253 253 253 253 253 253 246 246 246
53067 -226 226 226 231 231 231 246 246 246 253 253 253
53068 -253 253 253 253 253 253 253 253 253 253 253 253
53069 -253 253 253 253 253 253 253 253 253 253 253 253
53070 -253 253 253 178 178 178 2 2 6 2 2 6
53071 - 2 2 6 2 2 6 2 2 6 2 2 6
53072 - 2 2 6 18 18 18 90 90 90 62 62 62
53073 - 30 30 30 10 10 10 0 0 0 0 0 0
53074 - 0 0 0 0 0 0 0 0 0 0 0 0
53075 - 0 0 0 0 0 0 0 0 0 0 0 0
53076 - 0 0 0 0 0 0 0 0 0 0 0 0
53077 - 0 0 0 0 0 0 0 0 0 0 0 0
53078 - 0 0 0 0 0 0 0 0 0 0 0 0
53079 - 0 0 0 0 0 0 0 0 0 0 0 0
53080 - 0 0 0 0 0 0 0 0 0 0 0 0
53081 - 0 0 0 0 0 0 0 0 0 0 0 0
53082 - 0 0 0 0 0 0 0 0 0 0 0 0
53083 - 0 0 0 0 0 0 10 10 10 26 26 26
53084 - 58 58 58 90 90 90 18 18 18 2 2 6
53085 - 2 2 6 110 110 110 253 253 253 253 253 253
53086 -253 253 253 253 253 253 253 253 253 253 253 253
53087 -250 250 250 253 253 253 253 253 253 253 253 253
53088 -253 253 253 253 253 253 253 253 253 253 253 253
53089 -253 253 253 253 253 253 253 253 253 253 253 253
53090 -253 253 253 231 231 231 18 18 18 2 2 6
53091 - 2 2 6 2 2 6 2 2 6 2 2 6
53092 - 2 2 6 2 2 6 18 18 18 94 94 94
53093 - 54 54 54 26 26 26 10 10 10 0 0 0
53094 - 0 0 0 0 0 0 0 0 0 0 0 0
53095 - 0 0 0 0 0 0 0 0 0 0 0 0
53096 - 0 0 0 0 0 0 0 0 0 0 0 0
53097 - 0 0 0 0 0 0 0 0 0 0 0 0
53098 - 0 0 0 0 0 0 0 0 0 0 0 0
53099 - 0 0 0 0 0 0 0 0 0 0 0 0
53100 - 0 0 0 0 0 0 0 0 0 0 0 0
53101 - 0 0 0 0 0 0 0 0 0 0 0 0
53102 - 0 0 0 0 0 0 0 0 0 0 0 0
53103 - 0 0 0 6 6 6 22 22 22 50 50 50
53104 - 90 90 90 26 26 26 2 2 6 2 2 6
53105 - 14 14 14 195 195 195 250 250 250 253 253 253
53106 -253 253 253 253 253 253 253 253 253 253 253 253
53107 -253 253 253 253 253 253 253 253 253 253 253 253
53108 -253 253 253 253 253 253 253 253 253 253 253 253
53109 -253 253 253 253 253 253 253 253 253 253 253 253
53110 -250 250 250 242 242 242 54 54 54 2 2 6
53111 - 2 2 6 2 2 6 2 2 6 2 2 6
53112 - 2 2 6 2 2 6 2 2 6 38 38 38
53113 - 86 86 86 50 50 50 22 22 22 6 6 6
53114 - 0 0 0 0 0 0 0 0 0 0 0 0
53115 - 0 0 0 0 0 0 0 0 0 0 0 0
53116 - 0 0 0 0 0 0 0 0 0 0 0 0
53117 - 0 0 0 0 0 0 0 0 0 0 0 0
53118 - 0 0 0 0 0 0 0 0 0 0 0 0
53119 - 0 0 0 0 0 0 0 0 0 0 0 0
53120 - 0 0 0 0 0 0 0 0 0 0 0 0
53121 - 0 0 0 0 0 0 0 0 0 0 0 0
53122 - 0 0 0 0 0 0 0 0 0 0 0 0
53123 - 6 6 6 14 14 14 38 38 38 82 82 82
53124 - 34 34 34 2 2 6 2 2 6 2 2 6
53125 - 42 42 42 195 195 195 246 246 246 253 253 253
53126 -253 253 253 253 253 253 253 253 253 250 250 250
53127 -242 242 242 242 242 242 250 250 250 253 253 253
53128 -253 253 253 253 253 253 253 253 253 253 253 253
53129 -253 253 253 250 250 250 246 246 246 238 238 238
53130 -226 226 226 231 231 231 101 101 101 6 6 6
53131 - 2 2 6 2 2 6 2 2 6 2 2 6
53132 - 2 2 6 2 2 6 2 2 6 2 2 6
53133 - 38 38 38 82 82 82 42 42 42 14 14 14
53134 - 6 6 6 0 0 0 0 0 0 0 0 0
53135 - 0 0 0 0 0 0 0 0 0 0 0 0
53136 - 0 0 0 0 0 0 0 0 0 0 0 0
53137 - 0 0 0 0 0 0 0 0 0 0 0 0
53138 - 0 0 0 0 0 0 0 0 0 0 0 0
53139 - 0 0 0 0 0 0 0 0 0 0 0 0
53140 - 0 0 0 0 0 0 0 0 0 0 0 0
53141 - 0 0 0 0 0 0 0 0 0 0 0 0
53142 - 0 0 0 0 0 0 0 0 0 0 0 0
53143 - 10 10 10 26 26 26 62 62 62 66 66 66
53144 - 2 2 6 2 2 6 2 2 6 6 6 6
53145 - 70 70 70 170 170 170 206 206 206 234 234 234
53146 -246 246 246 250 250 250 250 250 250 238 238 238
53147 -226 226 226 231 231 231 238 238 238 250 250 250
53148 -250 250 250 250 250 250 246 246 246 231 231 231
53149 -214 214 214 206 206 206 202 202 202 202 202 202
53150 -198 198 198 202 202 202 182 182 182 18 18 18
53151 - 2 2 6 2 2 6 2 2 6 2 2 6
53152 - 2 2 6 2 2 6 2 2 6 2 2 6
53153 - 2 2 6 62 62 62 66 66 66 30 30 30
53154 - 10 10 10 0 0 0 0 0 0 0 0 0
53155 - 0 0 0 0 0 0 0 0 0 0 0 0
53156 - 0 0 0 0 0 0 0 0 0 0 0 0
53157 - 0 0 0 0 0 0 0 0 0 0 0 0
53158 - 0 0 0 0 0 0 0 0 0 0 0 0
53159 - 0 0 0 0 0 0 0 0 0 0 0 0
53160 - 0 0 0 0 0 0 0 0 0 0 0 0
53161 - 0 0 0 0 0 0 0 0 0 0 0 0
53162 - 0 0 0 0 0 0 0 0 0 0 0 0
53163 - 14 14 14 42 42 42 82 82 82 18 18 18
53164 - 2 2 6 2 2 6 2 2 6 10 10 10
53165 - 94 94 94 182 182 182 218 218 218 242 242 242
53166 -250 250 250 253 253 253 253 253 253 250 250 250
53167 -234 234 234 253 253 253 253 253 253 253 253 253
53168 -253 253 253 253 253 253 253 253 253 246 246 246
53169 -238 238 238 226 226 226 210 210 210 202 202 202
53170 -195 195 195 195 195 195 210 210 210 158 158 158
53171 - 6 6 6 14 14 14 50 50 50 14 14 14
53172 - 2 2 6 2 2 6 2 2 6 2 2 6
53173 - 2 2 6 6 6 6 86 86 86 46 46 46
53174 - 18 18 18 6 6 6 0 0 0 0 0 0
53175 - 0 0 0 0 0 0 0 0 0 0 0 0
53176 - 0 0 0 0 0 0 0 0 0 0 0 0
53177 - 0 0 0 0 0 0 0 0 0 0 0 0
53178 - 0 0 0 0 0 0 0 0 0 0 0 0
53179 - 0 0 0 0 0 0 0 0 0 0 0 0
53180 - 0 0 0 0 0 0 0 0 0 0 0 0
53181 - 0 0 0 0 0 0 0 0 0 0 0 0
53182 - 0 0 0 0 0 0 0 0 0 6 6 6
53183 - 22 22 22 54 54 54 70 70 70 2 2 6
53184 - 2 2 6 10 10 10 2 2 6 22 22 22
53185 -166 166 166 231 231 231 250 250 250 253 253 253
53186 -253 253 253 253 253 253 253 253 253 250 250 250
53187 -242 242 242 253 253 253 253 253 253 253 253 253
53188 -253 253 253 253 253 253 253 253 253 253 253 253
53189 -253 253 253 253 253 253 253 253 253 246 246 246
53190 -231 231 231 206 206 206 198 198 198 226 226 226
53191 - 94 94 94 2 2 6 6 6 6 38 38 38
53192 - 30 30 30 2 2 6 2 2 6 2 2 6
53193 - 2 2 6 2 2 6 62 62 62 66 66 66
53194 - 26 26 26 10 10 10 0 0 0 0 0 0
53195 - 0 0 0 0 0 0 0 0 0 0 0 0
53196 - 0 0 0 0 0 0 0 0 0 0 0 0
53197 - 0 0 0 0 0 0 0 0 0 0 0 0
53198 - 0 0 0 0 0 0 0 0 0 0 0 0
53199 - 0 0 0 0 0 0 0 0 0 0 0 0
53200 - 0 0 0 0 0 0 0 0 0 0 0 0
53201 - 0 0 0 0 0 0 0 0 0 0 0 0
53202 - 0 0 0 0 0 0 0 0 0 10 10 10
53203 - 30 30 30 74 74 74 50 50 50 2 2 6
53204 - 26 26 26 26 26 26 2 2 6 106 106 106
53205 -238 238 238 253 253 253 253 253 253 253 253 253
53206 -253 253 253 253 253 253 253 253 253 253 253 253
53207 -253 253 253 253 253 253 253 253 253 253 253 253
53208 -253 253 253 253 253 253 253 253 253 253 253 253
53209 -253 253 253 253 253 253 253 253 253 253 253 253
53210 -253 253 253 246 246 246 218 218 218 202 202 202
53211 -210 210 210 14 14 14 2 2 6 2 2 6
53212 - 30 30 30 22 22 22 2 2 6 2 2 6
53213 - 2 2 6 2 2 6 18 18 18 86 86 86
53214 - 42 42 42 14 14 14 0 0 0 0 0 0
53215 - 0 0 0 0 0 0 0 0 0 0 0 0
53216 - 0 0 0 0 0 0 0 0 0 0 0 0
53217 - 0 0 0 0 0 0 0 0 0 0 0 0
53218 - 0 0 0 0 0 0 0 0 0 0 0 0
53219 - 0 0 0 0 0 0 0 0 0 0 0 0
53220 - 0 0 0 0 0 0 0 0 0 0 0 0
53221 - 0 0 0 0 0 0 0 0 0 0 0 0
53222 - 0 0 0 0 0 0 0 0 0 14 14 14
53223 - 42 42 42 90 90 90 22 22 22 2 2 6
53224 - 42 42 42 2 2 6 18 18 18 218 218 218
53225 -253 253 253 253 253 253 253 253 253 253 253 253
53226 -253 253 253 253 253 253 253 253 253 253 253 253
53227 -253 253 253 253 253 253 253 253 253 253 253 253
53228 -253 253 253 253 253 253 253 253 253 253 253 253
53229 -253 253 253 253 253 253 253 253 253 253 253 253
53230 -253 253 253 253 253 253 250 250 250 221 221 221
53231 -218 218 218 101 101 101 2 2 6 14 14 14
53232 - 18 18 18 38 38 38 10 10 10 2 2 6
53233 - 2 2 6 2 2 6 2 2 6 78 78 78
53234 - 58 58 58 22 22 22 6 6 6 0 0 0
53235 - 0 0 0 0 0 0 0 0 0 0 0 0
53236 - 0 0 0 0 0 0 0 0 0 0 0 0
53237 - 0 0 0 0 0 0 0 0 0 0 0 0
53238 - 0 0 0 0 0 0 0 0 0 0 0 0
53239 - 0 0 0 0 0 0 0 0 0 0 0 0
53240 - 0 0 0 0 0 0 0 0 0 0 0 0
53241 - 0 0 0 0 0 0 0 0 0 0 0 0
53242 - 0 0 0 0 0 0 6 6 6 18 18 18
53243 - 54 54 54 82 82 82 2 2 6 26 26 26
53244 - 22 22 22 2 2 6 123 123 123 253 253 253
53245 -253 253 253 253 253 253 253 253 253 253 253 253
53246 -253 253 253 253 253 253 253 253 253 253 253 253
53247 -253 253 253 253 253 253 253 253 253 253 253 253
53248 -253 253 253 253 253 253 253 253 253 253 253 253
53249 -253 253 253 253 253 253 253 253 253 253 253 253
53250 -253 253 253 253 253 253 253 253 253 250 250 250
53251 -238 238 238 198 198 198 6 6 6 38 38 38
53252 - 58 58 58 26 26 26 38 38 38 2 2 6
53253 - 2 2 6 2 2 6 2 2 6 46 46 46
53254 - 78 78 78 30 30 30 10 10 10 0 0 0
53255 - 0 0 0 0 0 0 0 0 0 0 0 0
53256 - 0 0 0 0 0 0 0 0 0 0 0 0
53257 - 0 0 0 0 0 0 0 0 0 0 0 0
53258 - 0 0 0 0 0 0 0 0 0 0 0 0
53259 - 0 0 0 0 0 0 0 0 0 0 0 0
53260 - 0 0 0 0 0 0 0 0 0 0 0 0
53261 - 0 0 0 0 0 0 0 0 0 0 0 0
53262 - 0 0 0 0 0 0 10 10 10 30 30 30
53263 - 74 74 74 58 58 58 2 2 6 42 42 42
53264 - 2 2 6 22 22 22 231 231 231 253 253 253
53265 -253 253 253 253 253 253 253 253 253 253 253 253
53266 -253 253 253 253 253 253 253 253 253 250 250 250
53267 -253 253 253 253 253 253 253 253 253 253 253 253
53268 -253 253 253 253 253 253 253 253 253 253 253 253
53269 -253 253 253 253 253 253 253 253 253 253 253 253
53270 -253 253 253 253 253 253 253 253 253 253 253 253
53271 -253 253 253 246 246 246 46 46 46 38 38 38
53272 - 42 42 42 14 14 14 38 38 38 14 14 14
53273 - 2 2 6 2 2 6 2 2 6 6 6 6
53274 - 86 86 86 46 46 46 14 14 14 0 0 0
53275 - 0 0 0 0 0 0 0 0 0 0 0 0
53276 - 0 0 0 0 0 0 0 0 0 0 0 0
53277 - 0 0 0 0 0 0 0 0 0 0 0 0
53278 - 0 0 0 0 0 0 0 0 0 0 0 0
53279 - 0 0 0 0 0 0 0 0 0 0 0 0
53280 - 0 0 0 0 0 0 0 0 0 0 0 0
53281 - 0 0 0 0 0 0 0 0 0 0 0 0
53282 - 0 0 0 6 6 6 14 14 14 42 42 42
53283 - 90 90 90 18 18 18 18 18 18 26 26 26
53284 - 2 2 6 116 116 116 253 253 253 253 253 253
53285 -253 253 253 253 253 253 253 253 253 253 253 253
53286 -253 253 253 253 253 253 250 250 250 238 238 238
53287 -253 253 253 253 253 253 253 253 253 253 253 253
53288 -253 253 253 253 253 253 253 253 253 253 253 253
53289 -253 253 253 253 253 253 253 253 253 253 253 253
53290 -253 253 253 253 253 253 253 253 253 253 253 253
53291 -253 253 253 253 253 253 94 94 94 6 6 6
53292 - 2 2 6 2 2 6 10 10 10 34 34 34
53293 - 2 2 6 2 2 6 2 2 6 2 2 6
53294 - 74 74 74 58 58 58 22 22 22 6 6 6
53295 - 0 0 0 0 0 0 0 0 0 0 0 0
53296 - 0 0 0 0 0 0 0 0 0 0 0 0
53297 - 0 0 0 0 0 0 0 0 0 0 0 0
53298 - 0 0 0 0 0 0 0 0 0 0 0 0
53299 - 0 0 0 0 0 0 0 0 0 0 0 0
53300 - 0 0 0 0 0 0 0 0 0 0 0 0
53301 - 0 0 0 0 0 0 0 0 0 0 0 0
53302 - 0 0 0 10 10 10 26 26 26 66 66 66
53303 - 82 82 82 2 2 6 38 38 38 6 6 6
53304 - 14 14 14 210 210 210 253 253 253 253 253 253
53305 -253 253 253 253 253 253 253 253 253 253 253 253
53306 -253 253 253 253 253 253 246 246 246 242 242 242
53307 -253 253 253 253 253 253 253 253 253 253 253 253
53308 -253 253 253 253 253 253 253 253 253 253 253 253
53309 -253 253 253 253 253 253 253 253 253 253 253 253
53310 -253 253 253 253 253 253 253 253 253 253 253 253
53311 -253 253 253 253 253 253 144 144 144 2 2 6
53312 - 2 2 6 2 2 6 2 2 6 46 46 46
53313 - 2 2 6 2 2 6 2 2 6 2 2 6
53314 - 42 42 42 74 74 74 30 30 30 10 10 10
53315 - 0 0 0 0 0 0 0 0 0 0 0 0
53316 - 0 0 0 0 0 0 0 0 0 0 0 0
53317 - 0 0 0 0 0 0 0 0 0 0 0 0
53318 - 0 0 0 0 0 0 0 0 0 0 0 0
53319 - 0 0 0 0 0 0 0 0 0 0 0 0
53320 - 0 0 0 0 0 0 0 0 0 0 0 0
53321 - 0 0 0 0 0 0 0 0 0 0 0 0
53322 - 6 6 6 14 14 14 42 42 42 90 90 90
53323 - 26 26 26 6 6 6 42 42 42 2 2 6
53324 - 74 74 74 250 250 250 253 253 253 253 253 253
53325 -253 253 253 253 253 253 253 253 253 253 253 253
53326 -253 253 253 253 253 253 242 242 242 242 242 242
53327 -253 253 253 253 253 253 253 253 253 253 253 253
53328 -253 253 253 253 253 253 253 253 253 253 253 253
53329 -253 253 253 253 253 253 253 253 253 253 253 253
53330 -253 253 253 253 253 253 253 253 253 253 253 253
53331 -253 253 253 253 253 253 182 182 182 2 2 6
53332 - 2 2 6 2 2 6 2 2 6 46 46 46
53333 - 2 2 6 2 2 6 2 2 6 2 2 6
53334 - 10 10 10 86 86 86 38 38 38 10 10 10
53335 - 0 0 0 0 0 0 0 0 0 0 0 0
53336 - 0 0 0 0 0 0 0 0 0 0 0 0
53337 - 0 0 0 0 0 0 0 0 0 0 0 0
53338 - 0 0 0 0 0 0 0 0 0 0 0 0
53339 - 0 0 0 0 0 0 0 0 0 0 0 0
53340 - 0 0 0 0 0 0 0 0 0 0 0 0
53341 - 0 0 0 0 0 0 0 0 0 0 0 0
53342 - 10 10 10 26 26 26 66 66 66 82 82 82
53343 - 2 2 6 22 22 22 18 18 18 2 2 6
53344 -149 149 149 253 253 253 253 253 253 253 253 253
53345 -253 253 253 253 253 253 253 253 253 253 253 253
53346 -253 253 253 253 253 253 234 234 234 242 242 242
53347 -253 253 253 253 253 253 253 253 253 253 253 253
53348 -253 253 253 253 253 253 253 253 253 253 253 253
53349 -253 253 253 253 253 253 253 253 253 253 253 253
53350 -253 253 253 253 253 253 253 253 253 253 253 253
53351 -253 253 253 253 253 253 206 206 206 2 2 6
53352 - 2 2 6 2 2 6 2 2 6 38 38 38
53353 - 2 2 6 2 2 6 2 2 6 2 2 6
53354 - 6 6 6 86 86 86 46 46 46 14 14 14
53355 - 0 0 0 0 0 0 0 0 0 0 0 0
53356 - 0 0 0 0 0 0 0 0 0 0 0 0
53357 - 0 0 0 0 0 0 0 0 0 0 0 0
53358 - 0 0 0 0 0 0 0 0 0 0 0 0
53359 - 0 0 0 0 0 0 0 0 0 0 0 0
53360 - 0 0 0 0 0 0 0 0 0 0 0 0
53361 - 0 0 0 0 0 0 0 0 0 6 6 6
53362 - 18 18 18 46 46 46 86 86 86 18 18 18
53363 - 2 2 6 34 34 34 10 10 10 6 6 6
53364 -210 210 210 253 253 253 253 253 253 253 253 253
53365 -253 253 253 253 253 253 253 253 253 253 253 253
53366 -253 253 253 253 253 253 234 234 234 242 242 242
53367 -253 253 253 253 253 253 253 253 253 253 253 253
53368 -253 253 253 253 253 253 253 253 253 253 253 253
53369 -253 253 253 253 253 253 253 253 253 253 253 253
53370 -253 253 253 253 253 253 253 253 253 253 253 253
53371 -253 253 253 253 253 253 221 221 221 6 6 6
53372 - 2 2 6 2 2 6 6 6 6 30 30 30
53373 - 2 2 6 2 2 6 2 2 6 2 2 6
53374 - 2 2 6 82 82 82 54 54 54 18 18 18
53375 - 6 6 6 0 0 0 0 0 0 0 0 0
53376 - 0 0 0 0 0 0 0 0 0 0 0 0
53377 - 0 0 0 0 0 0 0 0 0 0 0 0
53378 - 0 0 0 0 0 0 0 0 0 0 0 0
53379 - 0 0 0 0 0 0 0 0 0 0 0 0
53380 - 0 0 0 0 0 0 0 0 0 0 0 0
53381 - 0 0 0 0 0 0 0 0 0 10 10 10
53382 - 26 26 26 66 66 66 62 62 62 2 2 6
53383 - 2 2 6 38 38 38 10 10 10 26 26 26
53384 -238 238 238 253 253 253 253 253 253 253 253 253
53385 -253 253 253 253 253 253 253 253 253 253 253 253
53386 -253 253 253 253 253 253 231 231 231 238 238 238
53387 -253 253 253 253 253 253 253 253 253 253 253 253
53388 -253 253 253 253 253 253 253 253 253 253 253 253
53389 -253 253 253 253 253 253 253 253 253 253 253 253
53390 -253 253 253 253 253 253 253 253 253 253 253 253
53391 -253 253 253 253 253 253 231 231 231 6 6 6
53392 - 2 2 6 2 2 6 10 10 10 30 30 30
53393 - 2 2 6 2 2 6 2 2 6 2 2 6
53394 - 2 2 6 66 66 66 58 58 58 22 22 22
53395 - 6 6 6 0 0 0 0 0 0 0 0 0
53396 - 0 0 0 0 0 0 0 0 0 0 0 0
53397 - 0 0 0 0 0 0 0 0 0 0 0 0
53398 - 0 0 0 0 0 0 0 0 0 0 0 0
53399 - 0 0 0 0 0 0 0 0 0 0 0 0
53400 - 0 0 0 0 0 0 0 0 0 0 0 0
53401 - 0 0 0 0 0 0 0 0 0 10 10 10
53402 - 38 38 38 78 78 78 6 6 6 2 2 6
53403 - 2 2 6 46 46 46 14 14 14 42 42 42
53404 -246 246 246 253 253 253 253 253 253 253 253 253
53405 -253 253 253 253 253 253 253 253 253 253 253 253
53406 -253 253 253 253 253 253 231 231 231 242 242 242
53407 -253 253 253 253 253 253 253 253 253 253 253 253
53408 -253 253 253 253 253 253 253 253 253 253 253 253
53409 -253 253 253 253 253 253 253 253 253 253 253 253
53410 -253 253 253 253 253 253 253 253 253 253 253 253
53411 -253 253 253 253 253 253 234 234 234 10 10 10
53412 - 2 2 6 2 2 6 22 22 22 14 14 14
53413 - 2 2 6 2 2 6 2 2 6 2 2 6
53414 - 2 2 6 66 66 66 62 62 62 22 22 22
53415 - 6 6 6 0 0 0 0 0 0 0 0 0
53416 - 0 0 0 0 0 0 0 0 0 0 0 0
53417 - 0 0 0 0 0 0 0 0 0 0 0 0
53418 - 0 0 0 0 0 0 0 0 0 0 0 0
53419 - 0 0 0 0 0 0 0 0 0 0 0 0
53420 - 0 0 0 0 0 0 0 0 0 0 0 0
53421 - 0 0 0 0 0 0 6 6 6 18 18 18
53422 - 50 50 50 74 74 74 2 2 6 2 2 6
53423 - 14 14 14 70 70 70 34 34 34 62 62 62
53424 -250 250 250 253 253 253 253 253 253 253 253 253
53425 -253 253 253 253 253 253 253 253 253 253 253 253
53426 -253 253 253 253 253 253 231 231 231 246 246 246
53427 -253 253 253 253 253 253 253 253 253 253 253 253
53428 -253 253 253 253 253 253 253 253 253 253 253 253
53429 -253 253 253 253 253 253 253 253 253 253 253 253
53430 -253 253 253 253 253 253 253 253 253 253 253 253
53431 -253 253 253 253 253 253 234 234 234 14 14 14
53432 - 2 2 6 2 2 6 30 30 30 2 2 6
53433 - 2 2 6 2 2 6 2 2 6 2 2 6
53434 - 2 2 6 66 66 66 62 62 62 22 22 22
53435 - 6 6 6 0 0 0 0 0 0 0 0 0
53436 - 0 0 0 0 0 0 0 0 0 0 0 0
53437 - 0 0 0 0 0 0 0 0 0 0 0 0
53438 - 0 0 0 0 0 0 0 0 0 0 0 0
53439 - 0 0 0 0 0 0 0 0 0 0 0 0
53440 - 0 0 0 0 0 0 0 0 0 0 0 0
53441 - 0 0 0 0 0 0 6 6 6 18 18 18
53442 - 54 54 54 62 62 62 2 2 6 2 2 6
53443 - 2 2 6 30 30 30 46 46 46 70 70 70
53444 -250 250 250 253 253 253 253 253 253 253 253 253
53445 -253 253 253 253 253 253 253 253 253 253 253 253
53446 -253 253 253 253 253 253 231 231 231 246 246 246
53447 -253 253 253 253 253 253 253 253 253 253 253 253
53448 -253 253 253 253 253 253 253 253 253 253 253 253
53449 -253 253 253 253 253 253 253 253 253 253 253 253
53450 -253 253 253 253 253 253 253 253 253 253 253 253
53451 -253 253 253 253 253 253 226 226 226 10 10 10
53452 - 2 2 6 6 6 6 30 30 30 2 2 6
53453 - 2 2 6 2 2 6 2 2 6 2 2 6
53454 - 2 2 6 66 66 66 58 58 58 22 22 22
53455 - 6 6 6 0 0 0 0 0 0 0 0 0
53456 - 0 0 0 0 0 0 0 0 0 0 0 0
53457 - 0 0 0 0 0 0 0 0 0 0 0 0
53458 - 0 0 0 0 0 0 0 0 0 0 0 0
53459 - 0 0 0 0 0 0 0 0 0 0 0 0
53460 - 0 0 0 0 0 0 0 0 0 0 0 0
53461 - 0 0 0 0 0 0 6 6 6 22 22 22
53462 - 58 58 58 62 62 62 2 2 6 2 2 6
53463 - 2 2 6 2 2 6 30 30 30 78 78 78
53464 -250 250 250 253 253 253 253 253 253 253 253 253
53465 -253 253 253 253 253 253 253 253 253 253 253 253
53466 -253 253 253 253 253 253 231 231 231 246 246 246
53467 -253 253 253 253 253 253 253 253 253 253 253 253
53468 -253 253 253 253 253 253 253 253 253 253 253 253
53469 -253 253 253 253 253 253 253 253 253 253 253 253
53470 -253 253 253 253 253 253 253 253 253 253 253 253
53471 -253 253 253 253 253 253 206 206 206 2 2 6
53472 - 22 22 22 34 34 34 18 14 6 22 22 22
53473 - 26 26 26 18 18 18 6 6 6 2 2 6
53474 - 2 2 6 82 82 82 54 54 54 18 18 18
53475 - 6 6 6 0 0 0 0 0 0 0 0 0
53476 - 0 0 0 0 0 0 0 0 0 0 0 0
53477 - 0 0 0 0 0 0 0 0 0 0 0 0
53478 - 0 0 0 0 0 0 0 0 0 0 0 0
53479 - 0 0 0 0 0 0 0 0 0 0 0 0
53480 - 0 0 0 0 0 0 0 0 0 0 0 0
53481 - 0 0 0 0 0 0 6 6 6 26 26 26
53482 - 62 62 62 106 106 106 74 54 14 185 133 11
53483 -210 162 10 121 92 8 6 6 6 62 62 62
53484 -238 238 238 253 253 253 253 253 253 253 253 253
53485 -253 253 253 253 253 253 253 253 253 253 253 253
53486 -253 253 253 253 253 253 231 231 231 246 246 246
53487 -253 253 253 253 253 253 253 253 253 253 253 253
53488 -253 253 253 253 253 253 253 253 253 253 253 253
53489 -253 253 253 253 253 253 253 253 253 253 253 253
53490 -253 253 253 253 253 253 253 253 253 253 253 253
53491 -253 253 253 253 253 253 158 158 158 18 18 18
53492 - 14 14 14 2 2 6 2 2 6 2 2 6
53493 - 6 6 6 18 18 18 66 66 66 38 38 38
53494 - 6 6 6 94 94 94 50 50 50 18 18 18
53495 - 6 6 6 0 0 0 0 0 0 0 0 0
53496 - 0 0 0 0 0 0 0 0 0 0 0 0
53497 - 0 0 0 0 0 0 0 0 0 0 0 0
53498 - 0 0 0 0 0 0 0 0 0 0 0 0
53499 - 0 0 0 0 0 0 0 0 0 0 0 0
53500 - 0 0 0 0 0 0 0 0 0 6 6 6
53501 - 10 10 10 10 10 10 18 18 18 38 38 38
53502 - 78 78 78 142 134 106 216 158 10 242 186 14
53503 -246 190 14 246 190 14 156 118 10 10 10 10
53504 - 90 90 90 238 238 238 253 253 253 253 253 253
53505 -253 253 253 253 253 253 253 253 253 253 253 253
53506 -253 253 253 253 253 253 231 231 231 250 250 250
53507 -253 253 253 253 253 253 253 253 253 253 253 253
53508 -253 253 253 253 253 253 253 253 253 253 253 253
53509 -253 253 253 253 253 253 253 253 253 253 253 253
53510 -253 253 253 253 253 253 253 253 253 246 230 190
53511 -238 204 91 238 204 91 181 142 44 37 26 9
53512 - 2 2 6 2 2 6 2 2 6 2 2 6
53513 - 2 2 6 2 2 6 38 38 38 46 46 46
53514 - 26 26 26 106 106 106 54 54 54 18 18 18
53515 - 6 6 6 0 0 0 0 0 0 0 0 0
53516 - 0 0 0 0 0 0 0 0 0 0 0 0
53517 - 0 0 0 0 0 0 0 0 0 0 0 0
53518 - 0 0 0 0 0 0 0 0 0 0 0 0
53519 - 0 0 0 0 0 0 0 0 0 0 0 0
53520 - 0 0 0 6 6 6 14 14 14 22 22 22
53521 - 30 30 30 38 38 38 50 50 50 70 70 70
53522 -106 106 106 190 142 34 226 170 11 242 186 14
53523 -246 190 14 246 190 14 246 190 14 154 114 10
53524 - 6 6 6 74 74 74 226 226 226 253 253 253
53525 -253 253 253 253 253 253 253 253 253 253 253 253
53526 -253 253 253 253 253 253 231 231 231 250 250 250
53527 -253 253 253 253 253 253 253 253 253 253 253 253
53528 -253 253 253 253 253 253 253 253 253 253 253 253
53529 -253 253 253 253 253 253 253 253 253 253 253 253
53530 -253 253 253 253 253 253 253 253 253 228 184 62
53531 -241 196 14 241 208 19 232 195 16 38 30 10
53532 - 2 2 6 2 2 6 2 2 6 2 2 6
53533 - 2 2 6 6 6 6 30 30 30 26 26 26
53534 -203 166 17 154 142 90 66 66 66 26 26 26
53535 - 6 6 6 0 0 0 0 0 0 0 0 0
53536 - 0 0 0 0 0 0 0 0 0 0 0 0
53537 - 0 0 0 0 0 0 0 0 0 0 0 0
53538 - 0 0 0 0 0 0 0 0 0 0 0 0
53539 - 0 0 0 0 0 0 0 0 0 0 0 0
53540 - 6 6 6 18 18 18 38 38 38 58 58 58
53541 - 78 78 78 86 86 86 101 101 101 123 123 123
53542 -175 146 61 210 150 10 234 174 13 246 186 14
53543 -246 190 14 246 190 14 246 190 14 238 190 10
53544 -102 78 10 2 2 6 46 46 46 198 198 198
53545 -253 253 253 253 253 253 253 253 253 253 253 253
53546 -253 253 253 253 253 253 234 234 234 242 242 242
53547 -253 253 253 253 253 253 253 253 253 253 253 253
53548 -253 253 253 253 253 253 253 253 253 253 253 253
53549 -253 253 253 253 253 253 253 253 253 253 253 253
53550 -253 253 253 253 253 253 253 253 253 224 178 62
53551 -242 186 14 241 196 14 210 166 10 22 18 6
53552 - 2 2 6 2 2 6 2 2 6 2 2 6
53553 - 2 2 6 2 2 6 6 6 6 121 92 8
53554 -238 202 15 232 195 16 82 82 82 34 34 34
53555 - 10 10 10 0 0 0 0 0 0 0 0 0
53556 - 0 0 0 0 0 0 0 0 0 0 0 0
53557 - 0 0 0 0 0 0 0 0 0 0 0 0
53558 - 0 0 0 0 0 0 0 0 0 0 0 0
53559 - 0 0 0 0 0 0 0 0 0 0 0 0
53560 - 14 14 14 38 38 38 70 70 70 154 122 46
53561 -190 142 34 200 144 11 197 138 11 197 138 11
53562 -213 154 11 226 170 11 242 186 14 246 190 14
53563 -246 190 14 246 190 14 246 190 14 246 190 14
53564 -225 175 15 46 32 6 2 2 6 22 22 22
53565 -158 158 158 250 250 250 253 253 253 253 253 253
53566 -253 253 253 253 253 253 253 253 253 253 253 253
53567 -253 253 253 253 253 253 253 253 253 253 253 253
53568 -253 253 253 253 253 253 253 253 253 253 253 253
53569 -253 253 253 253 253 253 253 253 253 253 253 253
53570 -253 253 253 250 250 250 242 242 242 224 178 62
53571 -239 182 13 236 186 11 213 154 11 46 32 6
53572 - 2 2 6 2 2 6 2 2 6 2 2 6
53573 - 2 2 6 2 2 6 61 42 6 225 175 15
53574 -238 190 10 236 186 11 112 100 78 42 42 42
53575 - 14 14 14 0 0 0 0 0 0 0 0 0
53576 - 0 0 0 0 0 0 0 0 0 0 0 0
53577 - 0 0 0 0 0 0 0 0 0 0 0 0
53578 - 0 0 0 0 0 0 0 0 0 0 0 0
53579 - 0 0 0 0 0 0 0 0 0 6 6 6
53580 - 22 22 22 54 54 54 154 122 46 213 154 11
53581 -226 170 11 230 174 11 226 170 11 226 170 11
53582 -236 178 12 242 186 14 246 190 14 246 190 14
53583 -246 190 14 246 190 14 246 190 14 246 190 14
53584 -241 196 14 184 144 12 10 10 10 2 2 6
53585 - 6 6 6 116 116 116 242 242 242 253 253 253
53586 -253 253 253 253 253 253 253 253 253 253 253 253
53587 -253 253 253 253 253 253 253 253 253 253 253 253
53588 -253 253 253 253 253 253 253 253 253 253 253 253
53589 -253 253 253 253 253 253 253 253 253 253 253 253
53590 -253 253 253 231 231 231 198 198 198 214 170 54
53591 -236 178 12 236 178 12 210 150 10 137 92 6
53592 - 18 14 6 2 2 6 2 2 6 2 2 6
53593 - 6 6 6 70 47 6 200 144 11 236 178 12
53594 -239 182 13 239 182 13 124 112 88 58 58 58
53595 - 22 22 22 6 6 6 0 0 0 0 0 0
53596 - 0 0 0 0 0 0 0 0 0 0 0 0
53597 - 0 0 0 0 0 0 0 0 0 0 0 0
53598 - 0 0 0 0 0 0 0 0 0 0 0 0
53599 - 0 0 0 0 0 0 0 0 0 10 10 10
53600 - 30 30 30 70 70 70 180 133 36 226 170 11
53601 -239 182 13 242 186 14 242 186 14 246 186 14
53602 -246 190 14 246 190 14 246 190 14 246 190 14
53603 -246 190 14 246 190 14 246 190 14 246 190 14
53604 -246 190 14 232 195 16 98 70 6 2 2 6
53605 - 2 2 6 2 2 6 66 66 66 221 221 221
53606 -253 253 253 253 253 253 253 253 253 253 253 253
53607 -253 253 253 253 253 253 253 253 253 253 253 253
53608 -253 253 253 253 253 253 253 253 253 253 253 253
53609 -253 253 253 253 253 253 253 253 253 253 253 253
53610 -253 253 253 206 206 206 198 198 198 214 166 58
53611 -230 174 11 230 174 11 216 158 10 192 133 9
53612 -163 110 8 116 81 8 102 78 10 116 81 8
53613 -167 114 7 197 138 11 226 170 11 239 182 13
53614 -242 186 14 242 186 14 162 146 94 78 78 78
53615 - 34 34 34 14 14 14 6 6 6 0 0 0
53616 - 0 0 0 0 0 0 0 0 0 0 0 0
53617 - 0 0 0 0 0 0 0 0 0 0 0 0
53618 - 0 0 0 0 0 0 0 0 0 0 0 0
53619 - 0 0 0 0 0 0 0 0 0 6 6 6
53620 - 30 30 30 78 78 78 190 142 34 226 170 11
53621 -239 182 13 246 190 14 246 190 14 246 190 14
53622 -246 190 14 246 190 14 246 190 14 246 190 14
53623 -246 190 14 246 190 14 246 190 14 246 190 14
53624 -246 190 14 241 196 14 203 166 17 22 18 6
53625 - 2 2 6 2 2 6 2 2 6 38 38 38
53626 -218 218 218 253 253 253 253 253 253 253 253 253
53627 -253 253 253 253 253 253 253 253 253 253 253 253
53628 -253 253 253 253 253 253 253 253 253 253 253 253
53629 -253 253 253 253 253 253 253 253 253 253 253 253
53630 -250 250 250 206 206 206 198 198 198 202 162 69
53631 -226 170 11 236 178 12 224 166 10 210 150 10
53632 -200 144 11 197 138 11 192 133 9 197 138 11
53633 -210 150 10 226 170 11 242 186 14 246 190 14
53634 -246 190 14 246 186 14 225 175 15 124 112 88
53635 - 62 62 62 30 30 30 14 14 14 6 6 6
53636 - 0 0 0 0 0 0 0 0 0 0 0 0
53637 - 0 0 0 0 0 0 0 0 0 0 0 0
53638 - 0 0 0 0 0 0 0 0 0 0 0 0
53639 - 0 0 0 0 0 0 0 0 0 10 10 10
53640 - 30 30 30 78 78 78 174 135 50 224 166 10
53641 -239 182 13 246 190 14 246 190 14 246 190 14
53642 -246 190 14 246 190 14 246 190 14 246 190 14
53643 -246 190 14 246 190 14 246 190 14 246 190 14
53644 -246 190 14 246 190 14 241 196 14 139 102 15
53645 - 2 2 6 2 2 6 2 2 6 2 2 6
53646 - 78 78 78 250 250 250 253 253 253 253 253 253
53647 -253 253 253 253 253 253 253 253 253 253 253 253
53648 -253 253 253 253 253 253 253 253 253 253 253 253
53649 -253 253 253 253 253 253 253 253 253 253 253 253
53650 -250 250 250 214 214 214 198 198 198 190 150 46
53651 -219 162 10 236 178 12 234 174 13 224 166 10
53652 -216 158 10 213 154 11 213 154 11 216 158 10
53653 -226 170 11 239 182 13 246 190 14 246 190 14
53654 -246 190 14 246 190 14 242 186 14 206 162 42
53655 -101 101 101 58 58 58 30 30 30 14 14 14
53656 - 6 6 6 0 0 0 0 0 0 0 0 0
53657 - 0 0 0 0 0 0 0 0 0 0 0 0
53658 - 0 0 0 0 0 0 0 0 0 0 0 0
53659 - 0 0 0 0 0 0 0 0 0 10 10 10
53660 - 30 30 30 74 74 74 174 135 50 216 158 10
53661 -236 178 12 246 190 14 246 190 14 246 190 14
53662 -246 190 14 246 190 14 246 190 14 246 190 14
53663 -246 190 14 246 190 14 246 190 14 246 190 14
53664 -246 190 14 246 190 14 241 196 14 226 184 13
53665 - 61 42 6 2 2 6 2 2 6 2 2 6
53666 - 22 22 22 238 238 238 253 253 253 253 253 253
53667 -253 253 253 253 253 253 253 253 253 253 253 253
53668 -253 253 253 253 253 253 253 253 253 253 253 253
53669 -253 253 253 253 253 253 253 253 253 253 253 253
53670 -253 253 253 226 226 226 187 187 187 180 133 36
53671 -216 158 10 236 178 12 239 182 13 236 178 12
53672 -230 174 11 226 170 11 226 170 11 230 174 11
53673 -236 178 12 242 186 14 246 190 14 246 190 14
53674 -246 190 14 246 190 14 246 186 14 239 182 13
53675 -206 162 42 106 106 106 66 66 66 34 34 34
53676 - 14 14 14 6 6 6 0 0 0 0 0 0
53677 - 0 0 0 0 0 0 0 0 0 0 0 0
53678 - 0 0 0 0 0 0 0 0 0 0 0 0
53679 - 0 0 0 0 0 0 0 0 0 6 6 6
53680 - 26 26 26 70 70 70 163 133 67 213 154 11
53681 -236 178 12 246 190 14 246 190 14 246 190 14
53682 -246 190 14 246 190 14 246 190 14 246 190 14
53683 -246 190 14 246 190 14 246 190 14 246 190 14
53684 -246 190 14 246 190 14 246 190 14 241 196 14
53685 -190 146 13 18 14 6 2 2 6 2 2 6
53686 - 46 46 46 246 246 246 253 253 253 253 253 253
53687 -253 253 253 253 253 253 253 253 253 253 253 253
53688 -253 253 253 253 253 253 253 253 253 253 253 253
53689 -253 253 253 253 253 253 253 253 253 253 253 253
53690 -253 253 253 221 221 221 86 86 86 156 107 11
53691 -216 158 10 236 178 12 242 186 14 246 186 14
53692 -242 186 14 239 182 13 239 182 13 242 186 14
53693 -242 186 14 246 186 14 246 190 14 246 190 14
53694 -246 190 14 246 190 14 246 190 14 246 190 14
53695 -242 186 14 225 175 15 142 122 72 66 66 66
53696 - 30 30 30 10 10 10 0 0 0 0 0 0
53697 - 0 0 0 0 0 0 0 0 0 0 0 0
53698 - 0 0 0 0 0 0 0 0 0 0 0 0
53699 - 0 0 0 0 0 0 0 0 0 6 6 6
53700 - 26 26 26 70 70 70 163 133 67 210 150 10
53701 -236 178 12 246 190 14 246 190 14 246 190 14
53702 -246 190 14 246 190 14 246 190 14 246 190 14
53703 -246 190 14 246 190 14 246 190 14 246 190 14
53704 -246 190 14 246 190 14 246 190 14 246 190 14
53705 -232 195 16 121 92 8 34 34 34 106 106 106
53706 -221 221 221 253 253 253 253 253 253 253 253 253
53707 -253 253 253 253 253 253 253 253 253 253 253 253
53708 -253 253 253 253 253 253 253 253 253 253 253 253
53709 -253 253 253 253 253 253 253 253 253 253 253 253
53710 -242 242 242 82 82 82 18 14 6 163 110 8
53711 -216 158 10 236 178 12 242 186 14 246 190 14
53712 -246 190 14 246 190 14 246 190 14 246 190 14
53713 -246 190 14 246 190 14 246 190 14 246 190 14
53714 -246 190 14 246 190 14 246 190 14 246 190 14
53715 -246 190 14 246 190 14 242 186 14 163 133 67
53716 - 46 46 46 18 18 18 6 6 6 0 0 0
53717 - 0 0 0 0 0 0 0 0 0 0 0 0
53718 - 0 0 0 0 0 0 0 0 0 0 0 0
53719 - 0 0 0 0 0 0 0 0 0 10 10 10
53720 - 30 30 30 78 78 78 163 133 67 210 150 10
53721 -236 178 12 246 186 14 246 190 14 246 190 14
53722 -246 190 14 246 190 14 246 190 14 246 190 14
53723 -246 190 14 246 190 14 246 190 14 246 190 14
53724 -246 190 14 246 190 14 246 190 14 246 190 14
53725 -241 196 14 215 174 15 190 178 144 253 253 253
53726 -253 253 253 253 253 253 253 253 253 253 253 253
53727 -253 253 253 253 253 253 253 253 253 253 253 253
53728 -253 253 253 253 253 253 253 253 253 253 253 253
53729 -253 253 253 253 253 253 253 253 253 218 218 218
53730 - 58 58 58 2 2 6 22 18 6 167 114 7
53731 -216 158 10 236 178 12 246 186 14 246 190 14
53732 -246 190 14 246 190 14 246 190 14 246 190 14
53733 -246 190 14 246 190 14 246 190 14 246 190 14
53734 -246 190 14 246 190 14 246 190 14 246 190 14
53735 -246 190 14 246 186 14 242 186 14 190 150 46
53736 - 54 54 54 22 22 22 6 6 6 0 0 0
53737 - 0 0 0 0 0 0 0 0 0 0 0 0
53738 - 0 0 0 0 0 0 0 0 0 0 0 0
53739 - 0 0 0 0 0 0 0 0 0 14 14 14
53740 - 38 38 38 86 86 86 180 133 36 213 154 11
53741 -236 178 12 246 186 14 246 190 14 246 190 14
53742 -246 190 14 246 190 14 246 190 14 246 190 14
53743 -246 190 14 246 190 14 246 190 14 246 190 14
53744 -246 190 14 246 190 14 246 190 14 246 190 14
53745 -246 190 14 232 195 16 190 146 13 214 214 214
53746 -253 253 253 253 253 253 253 253 253 253 253 253
53747 -253 253 253 253 253 253 253 253 253 253 253 253
53748 -253 253 253 253 253 253 253 253 253 253 253 253
53749 -253 253 253 250 250 250 170 170 170 26 26 26
53750 - 2 2 6 2 2 6 37 26 9 163 110 8
53751 -219 162 10 239 182 13 246 186 14 246 190 14
53752 -246 190 14 246 190 14 246 190 14 246 190 14
53753 -246 190 14 246 190 14 246 190 14 246 190 14
53754 -246 190 14 246 190 14 246 190 14 246 190 14
53755 -246 186 14 236 178 12 224 166 10 142 122 72
53756 - 46 46 46 18 18 18 6 6 6 0 0 0
53757 - 0 0 0 0 0 0 0 0 0 0 0 0
53758 - 0 0 0 0 0 0 0 0 0 0 0 0
53759 - 0 0 0 0 0 0 6 6 6 18 18 18
53760 - 50 50 50 109 106 95 192 133 9 224 166 10
53761 -242 186 14 246 190 14 246 190 14 246 190 14
53762 -246 190 14 246 190 14 246 190 14 246 190 14
53763 -246 190 14 246 190 14 246 190 14 246 190 14
53764 -246 190 14 246 190 14 246 190 14 246 190 14
53765 -242 186 14 226 184 13 210 162 10 142 110 46
53766 -226 226 226 253 253 253 253 253 253 253 253 253
53767 -253 253 253 253 253 253 253 253 253 253 253 253
53768 -253 253 253 253 253 253 253 253 253 253 253 253
53769 -198 198 198 66 66 66 2 2 6 2 2 6
53770 - 2 2 6 2 2 6 50 34 6 156 107 11
53771 -219 162 10 239 182 13 246 186 14 246 190 14
53772 -246 190 14 246 190 14 246 190 14 246 190 14
53773 -246 190 14 246 190 14 246 190 14 246 190 14
53774 -246 190 14 246 190 14 246 190 14 242 186 14
53775 -234 174 13 213 154 11 154 122 46 66 66 66
53776 - 30 30 30 10 10 10 0 0 0 0 0 0
53777 - 0 0 0 0 0 0 0 0 0 0 0 0
53778 - 0 0 0 0 0 0 0 0 0 0 0 0
53779 - 0 0 0 0 0 0 6 6 6 22 22 22
53780 - 58 58 58 154 121 60 206 145 10 234 174 13
53781 -242 186 14 246 186 14 246 190 14 246 190 14
53782 -246 190 14 246 190 14 246 190 14 246 190 14
53783 -246 190 14 246 190 14 246 190 14 246 190 14
53784 -246 190 14 246 190 14 246 190 14 246 190 14
53785 -246 186 14 236 178 12 210 162 10 163 110 8
53786 - 61 42 6 138 138 138 218 218 218 250 250 250
53787 -253 253 253 253 253 253 253 253 253 250 250 250
53788 -242 242 242 210 210 210 144 144 144 66 66 66
53789 - 6 6 6 2 2 6 2 2 6 2 2 6
53790 - 2 2 6 2 2 6 61 42 6 163 110 8
53791 -216 158 10 236 178 12 246 190 14 246 190 14
53792 -246 190 14 246 190 14 246 190 14 246 190 14
53793 -246 190 14 246 190 14 246 190 14 246 190 14
53794 -246 190 14 239 182 13 230 174 11 216 158 10
53795 -190 142 34 124 112 88 70 70 70 38 38 38
53796 - 18 18 18 6 6 6 0 0 0 0 0 0
53797 - 0 0 0 0 0 0 0 0 0 0 0 0
53798 - 0 0 0 0 0 0 0 0 0 0 0 0
53799 - 0 0 0 0 0 0 6 6 6 22 22 22
53800 - 62 62 62 168 124 44 206 145 10 224 166 10
53801 -236 178 12 239 182 13 242 186 14 242 186 14
53802 -246 186 14 246 190 14 246 190 14 246 190 14
53803 -246 190 14 246 190 14 246 190 14 246 190 14
53804 -246 190 14 246 190 14 246 190 14 246 190 14
53805 -246 190 14 236 178 12 216 158 10 175 118 6
53806 - 80 54 7 2 2 6 6 6 6 30 30 30
53807 - 54 54 54 62 62 62 50 50 50 38 38 38
53808 - 14 14 14 2 2 6 2 2 6 2 2 6
53809 - 2 2 6 2 2 6 2 2 6 2 2 6
53810 - 2 2 6 6 6 6 80 54 7 167 114 7
53811 -213 154 11 236 178 12 246 190 14 246 190 14
53812 -246 190 14 246 190 14 246 190 14 246 190 14
53813 -246 190 14 242 186 14 239 182 13 239 182 13
53814 -230 174 11 210 150 10 174 135 50 124 112 88
53815 - 82 82 82 54 54 54 34 34 34 18 18 18
53816 - 6 6 6 0 0 0 0 0 0 0 0 0
53817 - 0 0 0 0 0 0 0 0 0 0 0 0
53818 - 0 0 0 0 0 0 0 0 0 0 0 0
53819 - 0 0 0 0 0 0 6 6 6 18 18 18
53820 - 50 50 50 158 118 36 192 133 9 200 144 11
53821 -216 158 10 219 162 10 224 166 10 226 170 11
53822 -230 174 11 236 178 12 239 182 13 239 182 13
53823 -242 186 14 246 186 14 246 190 14 246 190 14
53824 -246 190 14 246 190 14 246 190 14 246 190 14
53825 -246 186 14 230 174 11 210 150 10 163 110 8
53826 -104 69 6 10 10 10 2 2 6 2 2 6
53827 - 2 2 6 2 2 6 2 2 6 2 2 6
53828 - 2 2 6 2 2 6 2 2 6 2 2 6
53829 - 2 2 6 2 2 6 2 2 6 2 2 6
53830 - 2 2 6 6 6 6 91 60 6 167 114 7
53831 -206 145 10 230 174 11 242 186 14 246 190 14
53832 -246 190 14 246 190 14 246 186 14 242 186 14
53833 -239 182 13 230 174 11 224 166 10 213 154 11
53834 -180 133 36 124 112 88 86 86 86 58 58 58
53835 - 38 38 38 22 22 22 10 10 10 6 6 6
53836 - 0 0 0 0 0 0 0 0 0 0 0 0
53837 - 0 0 0 0 0 0 0 0 0 0 0 0
53838 - 0 0 0 0 0 0 0 0 0 0 0 0
53839 - 0 0 0 0 0 0 0 0 0 14 14 14
53840 - 34 34 34 70 70 70 138 110 50 158 118 36
53841 -167 114 7 180 123 7 192 133 9 197 138 11
53842 -200 144 11 206 145 10 213 154 11 219 162 10
53843 -224 166 10 230 174 11 239 182 13 242 186 14
53844 -246 186 14 246 186 14 246 186 14 246 186 14
53845 -239 182 13 216 158 10 185 133 11 152 99 6
53846 -104 69 6 18 14 6 2 2 6 2 2 6
53847 - 2 2 6 2 2 6 2 2 6 2 2 6
53848 - 2 2 6 2 2 6 2 2 6 2 2 6
53849 - 2 2 6 2 2 6 2 2 6 2 2 6
53850 - 2 2 6 6 6 6 80 54 7 152 99 6
53851 -192 133 9 219 162 10 236 178 12 239 182 13
53852 -246 186 14 242 186 14 239 182 13 236 178 12
53853 -224 166 10 206 145 10 192 133 9 154 121 60
53854 - 94 94 94 62 62 62 42 42 42 22 22 22
53855 - 14 14 14 6 6 6 0 0 0 0 0 0
53856 - 0 0 0 0 0 0 0 0 0 0 0 0
53857 - 0 0 0 0 0 0 0 0 0 0 0 0
53858 - 0 0 0 0 0 0 0 0 0 0 0 0
53859 - 0 0 0 0 0 0 0 0 0 6 6 6
53860 - 18 18 18 34 34 34 58 58 58 78 78 78
53861 -101 98 89 124 112 88 142 110 46 156 107 11
53862 -163 110 8 167 114 7 175 118 6 180 123 7
53863 -185 133 11 197 138 11 210 150 10 219 162 10
53864 -226 170 11 236 178 12 236 178 12 234 174 13
53865 -219 162 10 197 138 11 163 110 8 130 83 6
53866 - 91 60 6 10 10 10 2 2 6 2 2 6
53867 - 18 18 18 38 38 38 38 38 38 38 38 38
53868 - 38 38 38 38 38 38 38 38 38 38 38 38
53869 - 38 38 38 38 38 38 26 26 26 2 2 6
53870 - 2 2 6 6 6 6 70 47 6 137 92 6
53871 -175 118 6 200 144 11 219 162 10 230 174 11
53872 -234 174 13 230 174 11 219 162 10 210 150 10
53873 -192 133 9 163 110 8 124 112 88 82 82 82
53874 - 50 50 50 30 30 30 14 14 14 6 6 6
53875 - 0 0 0 0 0 0 0 0 0 0 0 0
53876 - 0 0 0 0 0 0 0 0 0 0 0 0
53877 - 0 0 0 0 0 0 0 0 0 0 0 0
53878 - 0 0 0 0 0 0 0 0 0 0 0 0
53879 - 0 0 0 0 0 0 0 0 0 0 0 0
53880 - 6 6 6 14 14 14 22 22 22 34 34 34
53881 - 42 42 42 58 58 58 74 74 74 86 86 86
53882 -101 98 89 122 102 70 130 98 46 121 87 25
53883 -137 92 6 152 99 6 163 110 8 180 123 7
53884 -185 133 11 197 138 11 206 145 10 200 144 11
53885 -180 123 7 156 107 11 130 83 6 104 69 6
53886 - 50 34 6 54 54 54 110 110 110 101 98 89
53887 - 86 86 86 82 82 82 78 78 78 78 78 78
53888 - 78 78 78 78 78 78 78 78 78 78 78 78
53889 - 78 78 78 82 82 82 86 86 86 94 94 94
53890 -106 106 106 101 101 101 86 66 34 124 80 6
53891 -156 107 11 180 123 7 192 133 9 200 144 11
53892 -206 145 10 200 144 11 192 133 9 175 118 6
53893 -139 102 15 109 106 95 70 70 70 42 42 42
53894 - 22 22 22 10 10 10 0 0 0 0 0 0
53895 - 0 0 0 0 0 0 0 0 0 0 0 0
53896 - 0 0 0 0 0 0 0 0 0 0 0 0
53897 - 0 0 0 0 0 0 0 0 0 0 0 0
53898 - 0 0 0 0 0 0 0 0 0 0 0 0
53899 - 0 0 0 0 0 0 0 0 0 0 0 0
53900 - 0 0 0 0 0 0 6 6 6 10 10 10
53901 - 14 14 14 22 22 22 30 30 30 38 38 38
53902 - 50 50 50 62 62 62 74 74 74 90 90 90
53903 -101 98 89 112 100 78 121 87 25 124 80 6
53904 -137 92 6 152 99 6 152 99 6 152 99 6
53905 -138 86 6 124 80 6 98 70 6 86 66 30
53906 -101 98 89 82 82 82 58 58 58 46 46 46
53907 - 38 38 38 34 34 34 34 34 34 34 34 34
53908 - 34 34 34 34 34 34 34 34 34 34 34 34
53909 - 34 34 34 34 34 34 38 38 38 42 42 42
53910 - 54 54 54 82 82 82 94 86 76 91 60 6
53911 -134 86 6 156 107 11 167 114 7 175 118 6
53912 -175 118 6 167 114 7 152 99 6 121 87 25
53913 -101 98 89 62 62 62 34 34 34 18 18 18
53914 - 6 6 6 0 0 0 0 0 0 0 0 0
53915 - 0 0 0 0 0 0 0 0 0 0 0 0
53916 - 0 0 0 0 0 0 0 0 0 0 0 0
53917 - 0 0 0 0 0 0 0 0 0 0 0 0
53918 - 0 0 0 0 0 0 0 0 0 0 0 0
53919 - 0 0 0 0 0 0 0 0 0 0 0 0
53920 - 0 0 0 0 0 0 0 0 0 0 0 0
53921 - 0 0 0 6 6 6 6 6 6 10 10 10
53922 - 18 18 18 22 22 22 30 30 30 42 42 42
53923 - 50 50 50 66 66 66 86 86 86 101 98 89
53924 -106 86 58 98 70 6 104 69 6 104 69 6
53925 -104 69 6 91 60 6 82 62 34 90 90 90
53926 - 62 62 62 38 38 38 22 22 22 14 14 14
53927 - 10 10 10 10 10 10 10 10 10 10 10 10
53928 - 10 10 10 10 10 10 6 6 6 10 10 10
53929 - 10 10 10 10 10 10 10 10 10 14 14 14
53930 - 22 22 22 42 42 42 70 70 70 89 81 66
53931 - 80 54 7 104 69 6 124 80 6 137 92 6
53932 -134 86 6 116 81 8 100 82 52 86 86 86
53933 - 58 58 58 30 30 30 14 14 14 6 6 6
53934 - 0 0 0 0 0 0 0 0 0 0 0 0
53935 - 0 0 0 0 0 0 0 0 0 0 0 0
53936 - 0 0 0 0 0 0 0 0 0 0 0 0
53937 - 0 0 0 0 0 0 0 0 0 0 0 0
53938 - 0 0 0 0 0 0 0 0 0 0 0 0
53939 - 0 0 0 0 0 0 0 0 0 0 0 0
53940 - 0 0 0 0 0 0 0 0 0 0 0 0
53941 - 0 0 0 0 0 0 0 0 0 0 0 0
53942 - 0 0 0 6 6 6 10 10 10 14 14 14
53943 - 18 18 18 26 26 26 38 38 38 54 54 54
53944 - 70 70 70 86 86 86 94 86 76 89 81 66
53945 - 89 81 66 86 86 86 74 74 74 50 50 50
53946 - 30 30 30 14 14 14 6 6 6 0 0 0
53947 - 0 0 0 0 0 0 0 0 0 0 0 0
53948 - 0 0 0 0 0 0 0 0 0 0 0 0
53949 - 0 0 0 0 0 0 0 0 0 0 0 0
53950 - 6 6 6 18 18 18 34 34 34 58 58 58
53951 - 82 82 82 89 81 66 89 81 66 89 81 66
53952 - 94 86 66 94 86 76 74 74 74 50 50 50
53953 - 26 26 26 14 14 14 6 6 6 0 0 0
53954 - 0 0 0 0 0 0 0 0 0 0 0 0
53955 - 0 0 0 0 0 0 0 0 0 0 0 0
53956 - 0 0 0 0 0 0 0 0 0 0 0 0
53957 - 0 0 0 0 0 0 0 0 0 0 0 0
53958 - 0 0 0 0 0 0 0 0 0 0 0 0
53959 - 0 0 0 0 0 0 0 0 0 0 0 0
53960 - 0 0 0 0 0 0 0 0 0 0 0 0
53961 - 0 0 0 0 0 0 0 0 0 0 0 0
53962 - 0 0 0 0 0 0 0 0 0 0 0 0
53963 - 6 6 6 6 6 6 14 14 14 18 18 18
53964 - 30 30 30 38 38 38 46 46 46 54 54 54
53965 - 50 50 50 42 42 42 30 30 30 18 18 18
53966 - 10 10 10 0 0 0 0 0 0 0 0 0
53967 - 0 0 0 0 0 0 0 0 0 0 0 0
53968 - 0 0 0 0 0 0 0 0 0 0 0 0
53969 - 0 0 0 0 0 0 0 0 0 0 0 0
53970 - 0 0 0 6 6 6 14 14 14 26 26 26
53971 - 38 38 38 50 50 50 58 58 58 58 58 58
53972 - 54 54 54 42 42 42 30 30 30 18 18 18
53973 - 10 10 10 0 0 0 0 0 0 0 0 0
53974 - 0 0 0 0 0 0 0 0 0 0 0 0
53975 - 0 0 0 0 0 0 0 0 0 0 0 0
53976 - 0 0 0 0 0 0 0 0 0 0 0 0
53977 - 0 0 0 0 0 0 0 0 0 0 0 0
53978 - 0 0 0 0 0 0 0 0 0 0 0 0
53979 - 0 0 0 0 0 0 0 0 0 0 0 0
53980 - 0 0 0 0 0 0 0 0 0 0 0 0
53981 - 0 0 0 0 0 0 0 0 0 0 0 0
53982 - 0 0 0 0 0 0 0 0 0 0 0 0
53983 - 0 0 0 0 0 0 0 0 0 6 6 6
53984 - 6 6 6 10 10 10 14 14 14 18 18 18
53985 - 18 18 18 14 14 14 10 10 10 6 6 6
53986 - 0 0 0 0 0 0 0 0 0 0 0 0
53987 - 0 0 0 0 0 0 0 0 0 0 0 0
53988 - 0 0 0 0 0 0 0 0 0 0 0 0
53989 - 0 0 0 0 0 0 0 0 0 0 0 0
53990 - 0 0 0 0 0 0 0 0 0 6 6 6
53991 - 14 14 14 18 18 18 22 22 22 22 22 22
53992 - 18 18 18 14 14 14 10 10 10 6 6 6
53993 - 0 0 0 0 0 0 0 0 0 0 0 0
53994 - 0 0 0 0 0 0 0 0 0 0 0 0
53995 - 0 0 0 0 0 0 0 0 0 0 0 0
53996 - 0 0 0 0 0 0 0 0 0 0 0 0
53997 - 0 0 0 0 0 0 0 0 0 0 0 0
53998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54011 +4 4 4 4 4 4
54012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54025 +4 4 4 4 4 4
54026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54039 +4 4 4 4 4 4
54040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54053 +4 4 4 4 4 4
54054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54067 +4 4 4 4 4 4
54068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54081 +4 4 4 4 4 4
54082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54086 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
54087 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
54088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54091 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
54092 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54093 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
54094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54095 +4 4 4 4 4 4
54096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54100 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
54101 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
54102 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54105 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
54106 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
54107 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
54108 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54109 +4 4 4 4 4 4
54110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54114 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
54115 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
54116 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54119 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
54120 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
54121 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
54122 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
54123 +4 4 4 4 4 4
54124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54127 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
54128 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
54129 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
54130 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
54131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54132 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
54133 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
54134 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
54135 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
54136 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
54137 +4 4 4 4 4 4
54138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54141 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
54142 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
54143 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
54144 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
54145 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54146 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
54147 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
54148 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
54149 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
54150 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
54151 +4 4 4 4 4 4
54152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54155 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
54156 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
54157 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
54158 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
54159 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
54160 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
54161 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
54162 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
54163 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
54164 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
54165 +4 4 4 4 4 4
54166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54168 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
54169 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
54170 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
54171 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
54172 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
54173 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
54174 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
54175 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
54176 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
54177 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
54178 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
54179 +4 4 4 4 4 4
54180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54182 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
54183 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
54184 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
54185 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
54186 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
54187 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
54188 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
54189 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
54190 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
54191 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
54192 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
54193 +4 4 4 4 4 4
54194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54196 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
54197 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
54198 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
54199 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
54200 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
54201 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
54202 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
54203 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
54204 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
54205 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
54206 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54207 +4 4 4 4 4 4
54208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54210 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
54211 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
54212 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
54213 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
54214 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
54215 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
54216 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
54217 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
54218 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
54219 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
54220 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
54221 +4 4 4 4 4 4
54222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54223 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
54224 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
54225 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
54226 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
54227 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
54228 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
54229 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
54230 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
54231 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
54232 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
54233 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
54234 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
54235 +4 4 4 4 4 4
54236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54237 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
54238 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
54239 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
54240 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
54241 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
54242 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
54243 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
54244 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
54245 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
54246 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
54247 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
54248 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
54249 +0 0 0 4 4 4
54250 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
54251 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
54252 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
54253 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
54254 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
54255 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
54256 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
54257 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
54258 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
54259 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
54260 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
54261 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
54262 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
54263 +2 0 0 0 0 0
54264 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
54265 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
54266 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
54267 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
54268 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
54269 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
54270 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
54271 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
54272 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
54273 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
54274 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
54275 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
54276 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
54277 +37 38 37 0 0 0
54278 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
54279 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
54280 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
54281 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
54282 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
54283 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
54284 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
54285 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
54286 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
54287 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
54288 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
54289 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
54290 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
54291 +85 115 134 4 0 0
54292 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
54293 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
54294 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
54295 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
54296 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
54297 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
54298 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
54299 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
54300 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
54301 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
54302 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
54303 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
54304 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
54305 +60 73 81 4 0 0
54306 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
54307 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
54308 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
54309 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
54310 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
54311 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
54312 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
54313 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
54314 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
54315 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
54316 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
54317 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
54318 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
54319 +16 19 21 4 0 0
54320 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
54321 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
54322 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
54323 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
54324 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
54325 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
54326 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
54327 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
54328 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
54329 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
54330 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
54331 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
54332 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
54333 +4 0 0 4 3 3
54334 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
54335 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
54336 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
54337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
54338 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
54339 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
54340 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
54341 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
54342 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
54343 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
54344 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
54345 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
54346 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
54347 +3 2 2 4 4 4
54348 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
54349 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
54350 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
54351 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
54352 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
54353 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
54354 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
54355 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
54356 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
54357 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
54358 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
54359 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
54360 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
54361 +4 4 4 4 4 4
54362 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
54363 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
54364 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
54365 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
54366 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
54367 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
54368 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
54369 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
54370 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
54371 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
54372 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
54373 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
54374 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
54375 +4 4 4 4 4 4
54376 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
54377 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
54378 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
54379 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
54380 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
54381 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
54382 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
54383 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
54384 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
54385 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
54386 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
54387 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
54388 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
54389 +5 5 5 5 5 5
54390 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
54391 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
54392 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
54393 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
54394 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
54395 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
54396 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
54397 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
54398 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
54399 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
54400 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
54401 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
54402 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
54403 +5 5 5 4 4 4
54404 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
54405 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
54406 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
54407 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
54408 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
54409 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
54410 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
54411 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
54412 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
54413 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
54414 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
54415 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
54416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54417 +4 4 4 4 4 4
54418 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
54419 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
54420 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
54421 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
54422 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
54423 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
54424 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
54425 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
54426 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
54427 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
54428 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
54429 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
54430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54431 +4 4 4 4 4 4
54432 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
54433 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
54434 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
54435 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
54436 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
54437 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
54438 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
54439 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
54440 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
54441 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
54442 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
54443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54445 +4 4 4 4 4 4
54446 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
54447 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
54448 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
54449 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
54450 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
54451 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
54452 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
54453 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
54454 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
54455 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
54456 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
54457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54459 +4 4 4 4 4 4
54460 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
54461 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
54462 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
54463 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
54464 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
54465 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
54466 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
54467 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
54468 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
54469 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
54470 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54473 +4 4 4 4 4 4
54474 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
54475 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
54476 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
54477 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
54478 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
54479 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
54480 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
54481 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
54482 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
54483 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
54484 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
54485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54487 +4 4 4 4 4 4
54488 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
54489 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
54490 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
54491 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
54492 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
54493 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
54494 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
54495 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
54496 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
54497 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
54498 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
54499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54501 +4 4 4 4 4 4
54502 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
54503 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
54504 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
54505 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
54506 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
54507 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
54508 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
54509 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
54510 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
54511 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
54512 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54515 +4 4 4 4 4 4
54516 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
54517 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
54518 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
54519 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
54520 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
54521 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
54522 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
54523 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
54524 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
54525 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
54526 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54529 +4 4 4 4 4 4
54530 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
54531 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
54532 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
54533 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
54534 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
54535 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
54536 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
54537 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
54538 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
54539 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
54540 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54543 +4 4 4 4 4 4
54544 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
54545 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
54546 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
54547 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
54548 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
54549 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
54550 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
54551 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
54552 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
54553 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54554 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54557 +4 4 4 4 4 4
54558 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
54559 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
54560 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
54561 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
54562 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
54563 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
54564 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
54565 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
54566 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
54567 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54568 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54571 +4 4 4 4 4 4
54572 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
54573 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
54574 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
54575 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
54576 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
54577 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
54578 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
54579 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
54580 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
54581 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54582 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54585 +4 4 4 4 4 4
54586 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
54587 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
54588 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
54589 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
54590 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
54591 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
54592 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
54593 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
54594 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
54595 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54596 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54599 +4 4 4 4 4 4
54600 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
54601 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
54602 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
54603 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
54604 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
54605 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
54606 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
54607 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
54608 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
54609 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54610 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54613 +4 4 4 4 4 4
54614 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
54615 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
54616 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
54617 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
54618 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
54619 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
54620 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
54621 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
54622 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
54623 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54624 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54627 +4 4 4 4 4 4
54628 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
54629 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
54630 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
54631 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
54632 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
54633 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
54634 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
54635 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
54636 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
54637 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54638 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54641 +4 4 4 4 4 4
54642 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
54643 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
54644 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
54645 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
54646 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
54647 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
54648 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
54649 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
54650 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
54651 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54652 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54655 +4 4 4 4 4 4
54656 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
54657 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
54658 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
54659 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
54660 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
54661 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
54662 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
54663 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
54664 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
54665 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54666 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54669 +4 4 4 4 4 4
54670 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
54671 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
54672 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
54673 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
54674 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
54675 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
54676 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
54677 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
54678 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
54679 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54680 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54683 +4 4 4 4 4 4
54684 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
54685 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
54686 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
54687 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
54688 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
54689 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
54690 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
54691 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
54692 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
54693 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54694 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54697 +4 4 4 4 4 4
54698 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
54699 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
54700 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
54701 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
54702 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
54703 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
54704 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
54705 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
54706 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
54707 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54708 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54711 +4 4 4 4 4 4
54712 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
54713 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
54714 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
54715 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
54716 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
54717 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
54718 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
54719 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
54720 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
54721 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54722 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54725 +4 4 4 4 4 4
54726 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
54727 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
54728 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
54729 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
54730 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
54731 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
54732 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
54733 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
54734 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
54735 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54736 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54739 +4 4 4 4 4 4
54740 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
54741 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
54742 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
54743 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
54744 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
54745 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
54746 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
54747 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
54748 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
54749 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54750 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54753 +4 4 4 4 4 4
54754 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
54755 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
54756 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
54757 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
54758 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
54759 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
54760 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
54761 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
54762 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
54763 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
54764 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54767 +4 4 4 4 4 4
54768 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
54769 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
54770 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
54771 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
54772 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
54773 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
54774 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
54775 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
54776 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
54777 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
54778 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54781 +4 4 4 4 4 4
54782 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
54783 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
54784 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
54785 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
54786 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
54787 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
54788 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54789 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
54790 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
54791 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
54792 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54795 +4 4 4 4 4 4
54796 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
54797 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
54798 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
54799 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
54800 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
54801 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
54802 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
54803 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
54804 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
54805 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
54806 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54809 +4 4 4 4 4 4
54810 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
54811 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
54812 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
54813 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
54814 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
54815 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
54816 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
54817 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
54818 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
54819 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
54820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54823 +4 4 4 4 4 4
54824 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
54825 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
54826 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
54827 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
54828 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
54829 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
54830 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
54831 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
54832 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
54833 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
54834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54837 +4 4 4 4 4 4
54838 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
54839 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
54840 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
54841 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
54842 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
54843 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
54844 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
54845 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
54846 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
54847 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
54848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54851 +4 4 4 4 4 4
54852 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
54853 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
54854 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
54855 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
54856 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
54857 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
54858 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
54859 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
54860 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
54861 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54865 +4 4 4 4 4 4
54866 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
54867 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
54868 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
54869 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
54870 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
54871 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
54872 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
54873 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
54874 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
54875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54879 +4 4 4 4 4 4
54880 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
54881 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
54882 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
54883 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
54884 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
54885 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
54886 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
54887 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
54888 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
54889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54893 +4 4 4 4 4 4
54894 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
54895 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
54896 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
54897 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
54898 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
54899 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
54900 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
54901 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
54902 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54907 +4 4 4 4 4 4
54908 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
54909 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
54910 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
54911 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
54912 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
54913 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
54914 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
54915 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
54916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54921 +4 4 4 4 4 4
54922 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
54923 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
54924 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
54925 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
54926 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
54927 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
54928 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
54929 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
54930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54935 +4 4 4 4 4 4
54936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54937 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
54938 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
54939 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
54940 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
54941 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
54942 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
54943 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
54944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54949 +4 4 4 4 4 4
54950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54951 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
54952 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
54953 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
54954 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
54955 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
54956 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
54957 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
54958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54963 +4 4 4 4 4 4
54964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54965 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
54966 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
54967 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
54968 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
54969 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
54970 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
54971 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54977 +4 4 4 4 4 4
54978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54980 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
54981 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
54982 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
54983 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
54984 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
54985 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54991 +4 4 4 4 4 4
54992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54995 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
54996 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
54997 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
54998 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
54999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55005 +4 4 4 4 4 4
55006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55009 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
55010 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
55011 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
55012 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
55013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55019 +4 4 4 4 4 4
55020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55023 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
55024 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
55025 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
55026 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
55027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55033 +4 4 4 4 4 4
55034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55037 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
55038 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
55039 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
55040 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
55041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55047 +4 4 4 4 4 4
55048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
55052 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
55053 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
55054 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55061 +4 4 4 4 4 4
55062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55066 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
55067 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
55068 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
55069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55075 +4 4 4 4 4 4
55076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55080 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
55081 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
55082 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55089 +4 4 4 4 4 4
55090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55094 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
55095 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
55096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55103 +4 4 4 4 4 4
55104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55108 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
55109 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
55110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55117 +4 4 4 4 4 4
55118 diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
55119 index a01147f..5d896f8 100644
55120 --- a/drivers/video/matrox/matroxfb_DAC1064.c
55121 +++ b/drivers/video/matrox/matroxfb_DAC1064.c
55122 @@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55123
55124 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55125 struct matrox_switch matrox_mystique = {
55126 - MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55127 + .preinit = MGA1064_preinit,
55128 + .reset = MGA1064_reset,
55129 + .init = MGA1064_init,
55130 + .restore = MGA1064_restore,
55131 };
55132 EXPORT_SYMBOL(matrox_mystique);
55133 #endif
55134
55135 #ifdef CONFIG_FB_MATROX_G
55136 struct matrox_switch matrox_G100 = {
55137 - MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55138 + .preinit = MGAG100_preinit,
55139 + .reset = MGAG100_reset,
55140 + .init = MGAG100_init,
55141 + .restore = MGAG100_restore,
55142 };
55143 EXPORT_SYMBOL(matrox_G100);
55144 #endif
55145 diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
55146 index 195ad7c..09743fc 100644
55147 --- a/drivers/video/matrox/matroxfb_Ti3026.c
55148 +++ b/drivers/video/matrox/matroxfb_Ti3026.c
55149 @@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55150 }
55151
55152 struct matrox_switch matrox_millennium = {
55153 - Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55154 + .preinit = Ti3026_preinit,
55155 + .reset = Ti3026_reset,
55156 + .init = Ti3026_init,
55157 + .restore = Ti3026_restore
55158 };
55159 EXPORT_SYMBOL(matrox_millennium);
55160 #endif
55161 diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
55162 index fe92eed..106e085 100644
55163 --- a/drivers/video/mb862xx/mb862xxfb_accel.c
55164 +++ b/drivers/video/mb862xx/mb862xxfb_accel.c
55165 @@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55166 struct mb862xxfb_par *par = info->par;
55167
55168 if (info->var.bits_per_pixel == 32) {
55169 - info->fbops->fb_fillrect = cfb_fillrect;
55170 - info->fbops->fb_copyarea = cfb_copyarea;
55171 - info->fbops->fb_imageblit = cfb_imageblit;
55172 + pax_open_kernel();
55173 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55174 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55175 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55176 + pax_close_kernel();
55177 } else {
55178 outreg(disp, GC_L0EM, 3);
55179 - info->fbops->fb_fillrect = mb86290fb_fillrect;
55180 - info->fbops->fb_copyarea = mb86290fb_copyarea;
55181 - info->fbops->fb_imageblit = mb86290fb_imageblit;
55182 + pax_open_kernel();
55183 + *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55184 + *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55185 + *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55186 + pax_close_kernel();
55187 }
55188 outreg(draw, GDC_REG_DRAW_BASE, 0);
55189 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55190 diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
55191 index ff22871..b129bed 100644
55192 --- a/drivers/video/nvidia/nvidia.c
55193 +++ b/drivers/video/nvidia/nvidia.c
55194 @@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55195 info->fix.line_length = (info->var.xres_virtual *
55196 info->var.bits_per_pixel) >> 3;
55197 if (info->var.accel_flags) {
55198 - info->fbops->fb_imageblit = nvidiafb_imageblit;
55199 - info->fbops->fb_fillrect = nvidiafb_fillrect;
55200 - info->fbops->fb_copyarea = nvidiafb_copyarea;
55201 - info->fbops->fb_sync = nvidiafb_sync;
55202 + pax_open_kernel();
55203 + *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55204 + *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55205 + *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55206 + *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55207 + pax_close_kernel();
55208 info->pixmap.scan_align = 4;
55209 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55210 info->flags |= FBINFO_READS_FAST;
55211 NVResetGraphics(info);
55212 } else {
55213 - info->fbops->fb_imageblit = cfb_imageblit;
55214 - info->fbops->fb_fillrect = cfb_fillrect;
55215 - info->fbops->fb_copyarea = cfb_copyarea;
55216 - info->fbops->fb_sync = NULL;
55217 + pax_open_kernel();
55218 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55219 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55220 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55221 + *(void **)&info->fbops->fb_sync = NULL;
55222 + pax_close_kernel();
55223 info->pixmap.scan_align = 1;
55224 info->flags |= FBINFO_HWACCEL_DISABLED;
55225 info->flags &= ~FBINFO_READS_FAST;
55226 @@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55227 info->pixmap.size = 8 * 1024;
55228 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55229
55230 - if (!hwcur)
55231 - info->fbops->fb_cursor = NULL;
55232 + if (!hwcur) {
55233 + pax_open_kernel();
55234 + *(void **)&info->fbops->fb_cursor = NULL;
55235 + pax_close_kernel();
55236 + }
55237
55238 info->var.accel_flags = (!noaccel);
55239
55240 diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
55241 index 669a81f..e216d76 100644
55242 --- a/drivers/video/omap2/dss/display.c
55243 +++ b/drivers/video/omap2/dss/display.c
55244 @@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55245 snprintf(dssdev->alias, sizeof(dssdev->alias),
55246 "display%d", disp_num_counter++);
55247
55248 + pax_open_kernel();
55249 if (drv && drv->get_resolution == NULL)
55250 - drv->get_resolution = omapdss_default_get_resolution;
55251 + *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55252 if (drv && drv->get_recommended_bpp == NULL)
55253 - drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55254 + *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55255 if (drv && drv->get_timings == NULL)
55256 - drv->get_timings = omapdss_default_get_timings;
55257 + *(void **)&drv->get_timings = omapdss_default_get_timings;
55258 + pax_close_kernel();
55259
55260 mutex_lock(&panel_list_mutex);
55261 list_add_tail(&dssdev->panel_list, &panel_list);
55262 diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
55263 index 83433cb..71e9b98 100644
55264 --- a/drivers/video/s1d13xxxfb.c
55265 +++ b/drivers/video/s1d13xxxfb.c
55266 @@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55267
55268 switch(prod_id) {
55269 case S1D13506_PROD_ID: /* activate acceleration */
55270 - s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55271 - s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55272 + pax_open_kernel();
55273 + *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55274 + *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55275 + pax_close_kernel();
55276 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55277 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55278 break;
55279 diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
55280 index d513ed6..90b0de9 100644
55281 --- a/drivers/video/smscufx.c
55282 +++ b/drivers/video/smscufx.c
55283 @@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55284 fb_deferred_io_cleanup(info);
55285 kfree(info->fbdefio);
55286 info->fbdefio = NULL;
55287 - info->fbops->fb_mmap = ufx_ops_mmap;
55288 + pax_open_kernel();
55289 + *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55290 + pax_close_kernel();
55291 }
55292
55293 pr_debug("released /dev/fb%d user=%d count=%d",
55294 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
55295 index 025f14e..20eb4db 100644
55296 --- a/drivers/video/udlfb.c
55297 +++ b/drivers/video/udlfb.c
55298 @@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55299 dlfb_urb_completion(urb);
55300
55301 error:
55302 - atomic_add(bytes_sent, &dev->bytes_sent);
55303 - atomic_add(bytes_identical, &dev->bytes_identical);
55304 - atomic_add(width*height*2, &dev->bytes_rendered);
55305 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55306 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55307 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55308 end_cycles = get_cycles();
55309 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
55310 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55311 >> 10)), /* Kcycles */
55312 &dev->cpu_kcycles_used);
55313
55314 @@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55315 dlfb_urb_completion(urb);
55316
55317 error:
55318 - atomic_add(bytes_sent, &dev->bytes_sent);
55319 - atomic_add(bytes_identical, &dev->bytes_identical);
55320 - atomic_add(bytes_rendered, &dev->bytes_rendered);
55321 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55322 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55323 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55324 end_cycles = get_cycles();
55325 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
55326 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55327 >> 10)), /* Kcycles */
55328 &dev->cpu_kcycles_used);
55329 }
55330 @@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55331 fb_deferred_io_cleanup(info);
55332 kfree(info->fbdefio);
55333 info->fbdefio = NULL;
55334 - info->fbops->fb_mmap = dlfb_ops_mmap;
55335 + pax_open_kernel();
55336 + *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55337 + pax_close_kernel();
55338 }
55339
55340 pr_warn("released /dev/fb%d user=%d count=%d\n",
55341 @@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55342 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55343 struct dlfb_data *dev = fb_info->par;
55344 return snprintf(buf, PAGE_SIZE, "%u\n",
55345 - atomic_read(&dev->bytes_rendered));
55346 + atomic_read_unchecked(&dev->bytes_rendered));
55347 }
55348
55349 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55350 @@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55351 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55352 struct dlfb_data *dev = fb_info->par;
55353 return snprintf(buf, PAGE_SIZE, "%u\n",
55354 - atomic_read(&dev->bytes_identical));
55355 + atomic_read_unchecked(&dev->bytes_identical));
55356 }
55357
55358 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55359 @@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55360 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55361 struct dlfb_data *dev = fb_info->par;
55362 return snprintf(buf, PAGE_SIZE, "%u\n",
55363 - atomic_read(&dev->bytes_sent));
55364 + atomic_read_unchecked(&dev->bytes_sent));
55365 }
55366
55367 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55368 @@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55369 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55370 struct dlfb_data *dev = fb_info->par;
55371 return snprintf(buf, PAGE_SIZE, "%u\n",
55372 - atomic_read(&dev->cpu_kcycles_used));
55373 + atomic_read_unchecked(&dev->cpu_kcycles_used));
55374 }
55375
55376 static ssize_t edid_show(
55377 @@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55378 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55379 struct dlfb_data *dev = fb_info->par;
55380
55381 - atomic_set(&dev->bytes_rendered, 0);
55382 - atomic_set(&dev->bytes_identical, 0);
55383 - atomic_set(&dev->bytes_sent, 0);
55384 - atomic_set(&dev->cpu_kcycles_used, 0);
55385 + atomic_set_unchecked(&dev->bytes_rendered, 0);
55386 + atomic_set_unchecked(&dev->bytes_identical, 0);
55387 + atomic_set_unchecked(&dev->bytes_sent, 0);
55388 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55389
55390 return count;
55391 }
55392 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
55393 index 256fba7..6e75516 100644
55394 --- a/drivers/video/uvesafb.c
55395 +++ b/drivers/video/uvesafb.c
55396 @@ -19,6 +19,7 @@
55397 #include <linux/io.h>
55398 #include <linux/mutex.h>
55399 #include <linux/slab.h>
55400 +#include <linux/moduleloader.h>
55401 #include <video/edid.h>
55402 #include <video/uvesafb.h>
55403 #ifdef CONFIG_X86
55404 @@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55405 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55406 par->pmi_setpal = par->ypan = 0;
55407 } else {
55408 +
55409 +#ifdef CONFIG_PAX_KERNEXEC
55410 +#ifdef CONFIG_MODULES
55411 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55412 +#endif
55413 + if (!par->pmi_code) {
55414 + par->pmi_setpal = par->ypan = 0;
55415 + return 0;
55416 + }
55417 +#endif
55418 +
55419 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55420 + task->t.regs.edi);
55421 +
55422 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55423 + pax_open_kernel();
55424 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55425 + pax_close_kernel();
55426 +
55427 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55428 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55429 +#else
55430 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55431 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55432 +#endif
55433 +
55434 printk(KERN_INFO "uvesafb: protected mode interface info at "
55435 "%04x:%04x\n",
55436 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55437 @@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55438 par->ypan = ypan;
55439
55440 if (par->pmi_setpal || par->ypan) {
55441 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55442 if (__supported_pte_mask & _PAGE_NX) {
55443 par->pmi_setpal = par->ypan = 0;
55444 printk(KERN_WARNING "uvesafb: NX protection is active, "
55445 "better not use the PMI.\n");
55446 - } else {
55447 + } else
55448 +#endif
55449 uvesafb_vbe_getpmi(task, par);
55450 - }
55451 }
55452 #else
55453 /* The protected mode interface is not available on non-x86. */
55454 @@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55455 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55456
55457 /* Disable blanking if the user requested so. */
55458 - if (!blank)
55459 - info->fbops->fb_blank = NULL;
55460 + if (!blank) {
55461 + pax_open_kernel();
55462 + *(void **)&info->fbops->fb_blank = NULL;
55463 + pax_close_kernel();
55464 + }
55465
55466 /*
55467 * Find out how much IO memory is required for the mode with
55468 @@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55469 info->flags = FBINFO_FLAG_DEFAULT |
55470 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55471
55472 - if (!par->ypan)
55473 - info->fbops->fb_pan_display = NULL;
55474 + if (!par->ypan) {
55475 + pax_open_kernel();
55476 + *(void **)&info->fbops->fb_pan_display = NULL;
55477 + pax_close_kernel();
55478 + }
55479 }
55480
55481 static void uvesafb_init_mtrr(struct fb_info *info)
55482 @@ -1792,6 +1822,11 @@ out_mode:
55483 out:
55484 kfree(par->vbe_modes);
55485
55486 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55487 + if (par->pmi_code)
55488 + module_free_exec(NULL, par->pmi_code);
55489 +#endif
55490 +
55491 framebuffer_release(info);
55492 return err;
55493 }
55494 @@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platform_device *dev)
55495 kfree(par->vbe_modes);
55496 kfree(par->vbe_state_orig);
55497 kfree(par->vbe_state_saved);
55498 +
55499 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55500 + if (par->pmi_code)
55501 + module_free_exec(NULL, par->pmi_code);
55502 +#endif
55503 +
55504 }
55505
55506 framebuffer_release(info);
55507 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
55508 index 1c7da3b..56ea0bd 100644
55509 --- a/drivers/video/vesafb.c
55510 +++ b/drivers/video/vesafb.c
55511 @@ -9,6 +9,7 @@
55512 */
55513
55514 #include <linux/module.h>
55515 +#include <linux/moduleloader.h>
55516 #include <linux/kernel.h>
55517 #include <linux/errno.h>
55518 #include <linux/string.h>
55519 @@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55520 static int vram_total; /* Set total amount of memory */
55521 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55522 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55523 -static void (*pmi_start)(void) __read_mostly;
55524 -static void (*pmi_pal) (void) __read_mostly;
55525 +static void (*pmi_start)(void) __read_only;
55526 +static void (*pmi_pal) (void) __read_only;
55527 static int depth __read_mostly;
55528 static int vga_compat __read_mostly;
55529 /* --------------------------------------------------------------------- */
55530 @@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
55531 unsigned int size_remap;
55532 unsigned int size_total;
55533 char *option = NULL;
55534 + void *pmi_code = NULL;
55535
55536 /* ignore error return of fb_get_options */
55537 fb_get_options("vesafb", &option);
55538 @@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
55539 size_remap = size_total;
55540 vesafb_fix.smem_len = size_remap;
55541
55542 -#ifndef __i386__
55543 - screen_info.vesapm_seg = 0;
55544 -#endif
55545 -
55546 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55547 printk(KERN_WARNING
55548 "vesafb: cannot reserve video memory at 0x%lx\n",
55549 @@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55550 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55551 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55552
55553 +#ifdef __i386__
55554 +
55555 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55556 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
55557 + if (!pmi_code)
55558 +#elif !defined(CONFIG_PAX_KERNEXEC)
55559 + if (0)
55560 +#endif
55561 +
55562 +#endif
55563 + screen_info.vesapm_seg = 0;
55564 +
55565 if (screen_info.vesapm_seg) {
55566 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55567 - screen_info.vesapm_seg,screen_info.vesapm_off);
55568 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55569 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55570 }
55571
55572 if (screen_info.vesapm_seg < 0xc000)
55573 @@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55574
55575 if (ypan || pmi_setpal) {
55576 unsigned short *pmi_base;
55577 +
55578 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55579 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55580 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55581 +
55582 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55583 + pax_open_kernel();
55584 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55585 +#else
55586 + pmi_code = pmi_base;
55587 +#endif
55588 +
55589 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55590 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55591 +
55592 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55593 + pmi_start = ktva_ktla(pmi_start);
55594 + pmi_pal = ktva_ktla(pmi_pal);
55595 + pax_close_kernel();
55596 +#endif
55597 +
55598 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55599 if (pmi_base[3]) {
55600 printk(KERN_INFO "vesafb: pmi: ports = ");
55601 @@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55602 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55603 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55604
55605 - if (!ypan)
55606 - info->fbops->fb_pan_display = NULL;
55607 + if (!ypan) {
55608 + pax_open_kernel();
55609 + *(void **)&info->fbops->fb_pan_display = NULL;
55610 + pax_close_kernel();
55611 + }
55612
55613 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55614 err = -ENOMEM;
55615 @@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55616 fb_info(info, "%s frame buffer device\n", info->fix.id);
55617 return 0;
55618 err:
55619 +
55620 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55621 + module_free_exec(NULL, pmi_code);
55622 +#endif
55623 +
55624 if (info->screen_base)
55625 iounmap(info->screen_base);
55626 framebuffer_release(info);
55627 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
55628 index 88714ae..16c2e11 100644
55629 --- a/drivers/video/via/via_clock.h
55630 +++ b/drivers/video/via/via_clock.h
55631 @@ -56,7 +56,7 @@ struct via_clock {
55632
55633 void (*set_engine_pll_state)(u8 state);
55634 void (*set_engine_pll)(struct via_pll_config config);
55635 -};
55636 +} __no_const;
55637
55638
55639 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55640 diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
55641 index 4c02e2b..2c85267 100644
55642 --- a/drivers/xen/balloon.c
55643 +++ b/drivers/xen/balloon.c
55644 @@ -406,12 +406,26 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
55645 state = BP_EAGAIN;
55646 break;
55647 }
55648 -
55649 - pfn = page_to_pfn(page);
55650 - frame_list[i] = pfn_to_mfn(pfn);
55651 -
55652 scrub_page(page);
55653
55654 + frame_list[i] = page_to_pfn(page);
55655 + }
55656 +
55657 + /*
55658 + * Ensure that ballooned highmem pages don't have kmaps.
55659 + *
55660 + * Do this before changing the p2m as kmap_flush_unused()
55661 + * reads PTEs to obtain pages (and hence needs the original
55662 + * p2m entry).
55663 + */
55664 + kmap_flush_unused();
55665 +
55666 + /* Update direct mapping, invalidate P2M, and add to balloon. */
55667 + for (i = 0; i < nr_pages; i++) {
55668 + pfn = frame_list[i];
55669 + frame_list[i] = pfn_to_mfn(pfn);
55670 + page = pfn_to_page(pfn);
55671 +
55672 #ifdef CONFIG_XEN_HAVE_PVMMU
55673 /*
55674 * Ballooned out frames are effectively replaced with
55675 @@ -436,11 +450,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
55676 }
55677 #endif
55678
55679 - balloon_append(pfn_to_page(pfn));
55680 + balloon_append(page);
55681 }
55682
55683 - /* Ensure that ballooned highmem pages don't have kmaps. */
55684 - kmap_flush_unused();
55685 flush_tlb_all();
55686
55687 set_xen_guest_handle(reservation.extent_start, frame_list);
55688 diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
55689 index fef20db..d28b1ab 100644
55690 --- a/drivers/xen/xenfs/xenstored.c
55691 +++ b/drivers/xen/xenfs/xenstored.c
55692 @@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
55693 static int xsd_kva_open(struct inode *inode, struct file *file)
55694 {
55695 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
55696 +#ifdef CONFIG_GRKERNSEC_HIDESYM
55697 + NULL);
55698 +#else
55699 xen_store_interface);
55700 +#endif
55701 +
55702 if (!file->private_data)
55703 return -ENOMEM;
55704 return 0;
55705 diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
55706 index 9ff073f..05cef23 100644
55707 --- a/fs/9p/vfs_addr.c
55708 +++ b/fs/9p/vfs_addr.c
55709 @@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
55710
55711 retval = v9fs_file_write_internal(inode,
55712 v9inode->writeback_fid,
55713 - (__force const char __user *)buffer,
55714 + (const char __force_user *)buffer,
55715 len, &offset, 0);
55716 if (retval > 0)
55717 retval = 0;
55718 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
55719 index 4e65aa9..043dc9a 100644
55720 --- a/fs/9p/vfs_inode.c
55721 +++ b/fs/9p/vfs_inode.c
55722 @@ -1306,7 +1306,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55723 void
55724 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
55725 {
55726 - char *s = nd_get_link(nd);
55727 + const char *s = nd_get_link(nd);
55728
55729 p9_debug(P9_DEBUG_VFS, " %s %s\n",
55730 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
55731 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
55732 index 370b24c..ff0be7b 100644
55733 --- a/fs/Kconfig.binfmt
55734 +++ b/fs/Kconfig.binfmt
55735 @@ -103,7 +103,7 @@ config HAVE_AOUT
55736
55737 config BINFMT_AOUT
55738 tristate "Kernel support for a.out and ECOFF binaries"
55739 - depends on HAVE_AOUT
55740 + depends on HAVE_AOUT && BROKEN
55741 ---help---
55742 A.out (Assembler.OUTput) is a set of formats for libraries and
55743 executables used in the earliest versions of UNIX. Linux used
55744 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
55745 index ce25d75..dc09eeb 100644
55746 --- a/fs/afs/inode.c
55747 +++ b/fs/afs/inode.c
55748 @@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
55749 struct afs_vnode *vnode;
55750 struct super_block *sb;
55751 struct inode *inode;
55752 - static atomic_t afs_autocell_ino;
55753 + static atomic_unchecked_t afs_autocell_ino;
55754
55755 _enter("{%x:%u},%*.*s,",
55756 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
55757 @@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
55758 data.fid.unique = 0;
55759 data.fid.vnode = 0;
55760
55761 - inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
55762 + inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
55763 afs_iget5_autocell_test, afs_iget5_set,
55764 &data);
55765 if (!inode) {
55766 diff --git a/fs/aio.c b/fs/aio.c
55767 index 062a5f6..e5618e0 100644
55768 --- a/fs/aio.c
55769 +++ b/fs/aio.c
55770 @@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx *ctx)
55771 size += sizeof(struct io_event) * nr_events;
55772
55773 nr_pages = PFN_UP(size);
55774 - if (nr_pages < 0)
55775 + if (nr_pages <= 0)
55776 return -EINVAL;
55777
55778 file = aio_private_file(ctx, nr_pages);
55779 diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
55780 index 2408473..80ef38c 100644
55781 --- a/fs/anon_inodes.c
55782 +++ b/fs/anon_inodes.c
55783 @@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
55784 static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
55785 int flags, const char *dev_name, void *data)
55786 {
55787 - struct dentry *root;
55788 - root = mount_pseudo(fs_type, "anon_inode:", NULL,
55789 + return mount_pseudo(fs_type, "anon_inode:", NULL,
55790 &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
55791 - if (!IS_ERR(root)) {
55792 - struct super_block *s = root->d_sb;
55793 - anon_inode_inode = alloc_anon_inode(s);
55794 - if (IS_ERR(anon_inode_inode)) {
55795 - dput(root);
55796 - deactivate_locked_super(s);
55797 - root = ERR_CAST(anon_inode_inode);
55798 - }
55799 - }
55800 - return root;
55801 }
55802
55803 static struct file_system_type anon_inode_fs_type = {
55804 @@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
55805
55806 static int __init anon_inode_init(void)
55807 {
55808 - int error;
55809 -
55810 - error = register_filesystem(&anon_inode_fs_type);
55811 - if (error)
55812 - goto err_exit;
55813 anon_inode_mnt = kern_mount(&anon_inode_fs_type);
55814 - if (IS_ERR(anon_inode_mnt)) {
55815 - error = PTR_ERR(anon_inode_mnt);
55816 - goto err_unregister_filesystem;
55817 - }
55818 + if (IS_ERR(anon_inode_mnt))
55819 + panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
55820 +
55821 + anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
55822 + if (IS_ERR(anon_inode_inode))
55823 + panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
55824 +
55825 return 0;
55826 -
55827 -err_unregister_filesystem:
55828 - unregister_filesystem(&anon_inode_fs_type);
55829 -err_exit:
55830 - panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
55831 }
55832
55833 fs_initcall(anon_inode_init);
55834 diff --git a/fs/attr.c b/fs/attr.c
55835 index 5d4e59d..fd02418 100644
55836 --- a/fs/attr.c
55837 +++ b/fs/attr.c
55838 @@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
55839 unsigned long limit;
55840
55841 limit = rlimit(RLIMIT_FSIZE);
55842 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
55843 if (limit != RLIM_INFINITY && offset > limit)
55844 goto out_sig;
55845 if (offset > inode->i_sb->s_maxbytes)
55846 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
55847 index 689e40d..515cac5 100644
55848 --- a/fs/autofs4/waitq.c
55849 +++ b/fs/autofs4/waitq.c
55850 @@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
55851 {
55852 unsigned long sigpipe, flags;
55853 mm_segment_t fs;
55854 - const char *data = (const char *)addr;
55855 + const char __user *data = (const char __force_user *)addr;
55856 ssize_t wr = 0;
55857
55858 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
55859 @@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
55860 return 1;
55861 }
55862
55863 +#ifdef CONFIG_GRKERNSEC_HIDESYM
55864 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
55865 +#endif
55866 +
55867 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
55868 enum autofs_notify notify)
55869 {
55870 @@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
55871
55872 /* If this is a direct mount request create a dummy name */
55873 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
55874 +#ifdef CONFIG_GRKERNSEC_HIDESYM
55875 + /* this name does get written to userland via autofs4_write() */
55876 + qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
55877 +#else
55878 qstr.len = sprintf(name, "%p", dentry);
55879 +#endif
55880 else {
55881 qstr.len = autofs4_getpath(sbi, dentry, &name);
55882 if (!qstr.len) {
55883 diff --git a/fs/befs/endian.h b/fs/befs/endian.h
55884 index 2722387..56059b5 100644
55885 --- a/fs/befs/endian.h
55886 +++ b/fs/befs/endian.h
55887 @@ -11,7 +11,7 @@
55888
55889 #include <asm/byteorder.h>
55890
55891 -static inline u64
55892 +static inline u64 __intentional_overflow(-1)
55893 fs64_to_cpu(const struct super_block *sb, fs64 n)
55894 {
55895 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55896 @@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
55897 return (__force fs64)cpu_to_be64(n);
55898 }
55899
55900 -static inline u32
55901 +static inline u32 __intentional_overflow(-1)
55902 fs32_to_cpu(const struct super_block *sb, fs32 n)
55903 {
55904 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55905 @@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
55906 return (__force fs32)cpu_to_be32(n);
55907 }
55908
55909 -static inline u16
55910 +static inline u16 __intentional_overflow(-1)
55911 fs16_to_cpu(const struct super_block *sb, fs16 n)
55912 {
55913 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55914 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
55915 index ca0ba15..0fa3257 100644
55916 --- a/fs/binfmt_aout.c
55917 +++ b/fs/binfmt_aout.c
55918 @@ -16,6 +16,7 @@
55919 #include <linux/string.h>
55920 #include <linux/fs.h>
55921 #include <linux/file.h>
55922 +#include <linux/security.h>
55923 #include <linux/stat.h>
55924 #include <linux/fcntl.h>
55925 #include <linux/ptrace.h>
55926 @@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
55927 #endif
55928 # define START_STACK(u) ((void __user *)u.start_stack)
55929
55930 + memset(&dump, 0, sizeof(dump));
55931 +
55932 fs = get_fs();
55933 set_fs(KERNEL_DS);
55934 has_dumped = 1;
55935 @@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
55936
55937 /* If the size of the dump file exceeds the rlimit, then see what would happen
55938 if we wrote the stack, but not the data area. */
55939 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
55940 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
55941 dump.u_dsize = 0;
55942
55943 /* Make sure we have enough room to write the stack and data areas. */
55944 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
55945 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
55946 dump.u_ssize = 0;
55947
55948 @@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
55949 rlim = rlimit(RLIMIT_DATA);
55950 if (rlim >= RLIM_INFINITY)
55951 rlim = ~0;
55952 +
55953 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
55954 if (ex.a_data + ex.a_bss > rlim)
55955 return -ENOMEM;
55956
55957 @@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
55958
55959 install_exec_creds(bprm);
55960
55961 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55962 + current->mm->pax_flags = 0UL;
55963 +#endif
55964 +
55965 +#ifdef CONFIG_PAX_PAGEEXEC
55966 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
55967 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
55968 +
55969 +#ifdef CONFIG_PAX_EMUTRAMP
55970 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
55971 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
55972 +#endif
55973 +
55974 +#ifdef CONFIG_PAX_MPROTECT
55975 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
55976 + current->mm->pax_flags |= MF_PAX_MPROTECT;
55977 +#endif
55978 +
55979 + }
55980 +#endif
55981 +
55982 if (N_MAGIC(ex) == OMAGIC) {
55983 unsigned long text_addr, map_size;
55984 loff_t pos;
55985 @@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
55986 }
55987
55988 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
55989 - PROT_READ | PROT_WRITE | PROT_EXEC,
55990 + PROT_READ | PROT_WRITE,
55991 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
55992 fd_offset + ex.a_text);
55993 if (error != N_DATADDR(ex)) {
55994 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
55995 index 571a423..eed5754 100644
55996 --- a/fs/binfmt_elf.c
55997 +++ b/fs/binfmt_elf.c
55998 @@ -34,6 +34,7 @@
55999 #include <linux/utsname.h>
56000 #include <linux/coredump.h>
56001 #include <linux/sched.h>
56002 +#include <linux/xattr.h>
56003 #include <asm/uaccess.h>
56004 #include <asm/param.h>
56005 #include <asm/page.h>
56006 @@ -48,7 +49,7 @@
56007 static int load_elf_binary(struct linux_binprm *bprm);
56008 static int load_elf_library(struct file *);
56009 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
56010 - int, int, unsigned long);
56011 + int, int, unsigned long) __intentional_overflow(-1);
56012
56013 /*
56014 * If we don't support core dumping, then supply a NULL so we
56015 @@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
56016 #define elf_core_dump NULL
56017 #endif
56018
56019 +#ifdef CONFIG_PAX_MPROTECT
56020 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
56021 +#endif
56022 +
56023 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56024 +static void elf_handle_mmap(struct file *file);
56025 +#endif
56026 +
56027 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
56028 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
56029 #else
56030 @@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
56031 .load_binary = load_elf_binary,
56032 .load_shlib = load_elf_library,
56033 .core_dump = elf_core_dump,
56034 +
56035 +#ifdef CONFIG_PAX_MPROTECT
56036 + .handle_mprotect= elf_handle_mprotect,
56037 +#endif
56038 +
56039 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56040 + .handle_mmap = elf_handle_mmap,
56041 +#endif
56042 +
56043 .min_coredump = ELF_EXEC_PAGESIZE,
56044 };
56045
56046 @@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
56047
56048 static int set_brk(unsigned long start, unsigned long end)
56049 {
56050 + unsigned long e = end;
56051 +
56052 start = ELF_PAGEALIGN(start);
56053 end = ELF_PAGEALIGN(end);
56054 if (end > start) {
56055 @@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
56056 if (BAD_ADDR(addr))
56057 return addr;
56058 }
56059 - current->mm->start_brk = current->mm->brk = end;
56060 + current->mm->start_brk = current->mm->brk = e;
56061 return 0;
56062 }
56063
56064 @@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
56065 elf_addr_t __user *u_rand_bytes;
56066 const char *k_platform = ELF_PLATFORM;
56067 const char *k_base_platform = ELF_BASE_PLATFORM;
56068 - unsigned char k_rand_bytes[16];
56069 + u32 k_rand_bytes[4];
56070 int items;
56071 elf_addr_t *elf_info;
56072 int ei_index = 0;
56073 const struct cred *cred = current_cred();
56074 struct vm_area_struct *vma;
56075 + unsigned long saved_auxv[AT_VECTOR_SIZE];
56076
56077 /*
56078 * In some cases (e.g. Hyper-Threading), we want to avoid L1
56079 @@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
56080 * Generate 16 random bytes for userspace PRNG seeding.
56081 */
56082 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
56083 - u_rand_bytes = (elf_addr_t __user *)
56084 - STACK_ALLOC(p, sizeof(k_rand_bytes));
56085 + prandom_seed(k_rand_bytes[0] ^ prandom_u32());
56086 + prandom_seed(k_rand_bytes[1] ^ prandom_u32());
56087 + prandom_seed(k_rand_bytes[2] ^ prandom_u32());
56088 + prandom_seed(k_rand_bytes[3] ^ prandom_u32());
56089 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
56090 + u_rand_bytes = (elf_addr_t __user *) p;
56091 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
56092 return -EFAULT;
56093
56094 @@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
56095 return -EFAULT;
56096 current->mm->env_end = p;
56097
56098 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
56099 +
56100 /* Put the elf_info on the stack in the right place. */
56101 sp = (elf_addr_t __user *)envp + 1;
56102 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
56103 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
56104 return -EFAULT;
56105 return 0;
56106 }
56107 @@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
56108 an ELF header */
56109
56110 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56111 - struct file *interpreter, unsigned long *interp_map_addr,
56112 - unsigned long no_base)
56113 + struct file *interpreter, unsigned long no_base)
56114 {
56115 struct elf_phdr *elf_phdata;
56116 struct elf_phdr *eppnt;
56117 - unsigned long load_addr = 0;
56118 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
56119 int load_addr_set = 0;
56120 unsigned long last_bss = 0, elf_bss = 0;
56121 - unsigned long error = ~0UL;
56122 + unsigned long error = -EINVAL;
56123 unsigned long total_size;
56124 int retval, i, size;
56125
56126 @@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56127 goto out_close;
56128 }
56129
56130 +#ifdef CONFIG_PAX_SEGMEXEC
56131 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
56132 + pax_task_size = SEGMEXEC_TASK_SIZE;
56133 +#endif
56134 +
56135 eppnt = elf_phdata;
56136 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
56137 if (eppnt->p_type == PT_LOAD) {
56138 @@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56139 map_addr = elf_map(interpreter, load_addr + vaddr,
56140 eppnt, elf_prot, elf_type, total_size);
56141 total_size = 0;
56142 - if (!*interp_map_addr)
56143 - *interp_map_addr = map_addr;
56144 error = map_addr;
56145 if (BAD_ADDR(map_addr))
56146 goto out_close;
56147 @@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56148 k = load_addr + eppnt->p_vaddr;
56149 if (BAD_ADDR(k) ||
56150 eppnt->p_filesz > eppnt->p_memsz ||
56151 - eppnt->p_memsz > TASK_SIZE ||
56152 - TASK_SIZE - eppnt->p_memsz < k) {
56153 + eppnt->p_memsz > pax_task_size ||
56154 + pax_task_size - eppnt->p_memsz < k) {
56155 error = -ENOMEM;
56156 goto out_close;
56157 }
56158 @@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56159 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
56160
56161 /* Map the last of the bss segment */
56162 - error = vm_brk(elf_bss, last_bss - elf_bss);
56163 - if (BAD_ADDR(error))
56164 - goto out_close;
56165 + if (last_bss > elf_bss) {
56166 + error = vm_brk(elf_bss, last_bss - elf_bss);
56167 + if (BAD_ADDR(error))
56168 + goto out_close;
56169 + }
56170 }
56171
56172 error = load_addr;
56173 @@ -538,6 +569,336 @@ out:
56174 return error;
56175 }
56176
56177 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
56178 +#ifdef CONFIG_PAX_SOFTMODE
56179 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
56180 +{
56181 + unsigned long pax_flags = 0UL;
56182 +
56183 +#ifdef CONFIG_PAX_PAGEEXEC
56184 + if (elf_phdata->p_flags & PF_PAGEEXEC)
56185 + pax_flags |= MF_PAX_PAGEEXEC;
56186 +#endif
56187 +
56188 +#ifdef CONFIG_PAX_SEGMEXEC
56189 + if (elf_phdata->p_flags & PF_SEGMEXEC)
56190 + pax_flags |= MF_PAX_SEGMEXEC;
56191 +#endif
56192 +
56193 +#ifdef CONFIG_PAX_EMUTRAMP
56194 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
56195 + pax_flags |= MF_PAX_EMUTRAMP;
56196 +#endif
56197 +
56198 +#ifdef CONFIG_PAX_MPROTECT
56199 + if (elf_phdata->p_flags & PF_MPROTECT)
56200 + pax_flags |= MF_PAX_MPROTECT;
56201 +#endif
56202 +
56203 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56204 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
56205 + pax_flags |= MF_PAX_RANDMMAP;
56206 +#endif
56207 +
56208 + return pax_flags;
56209 +}
56210 +#endif
56211 +
56212 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
56213 +{
56214 + unsigned long pax_flags = 0UL;
56215 +
56216 +#ifdef CONFIG_PAX_PAGEEXEC
56217 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
56218 + pax_flags |= MF_PAX_PAGEEXEC;
56219 +#endif
56220 +
56221 +#ifdef CONFIG_PAX_SEGMEXEC
56222 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
56223 + pax_flags |= MF_PAX_SEGMEXEC;
56224 +#endif
56225 +
56226 +#ifdef CONFIG_PAX_EMUTRAMP
56227 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
56228 + pax_flags |= MF_PAX_EMUTRAMP;
56229 +#endif
56230 +
56231 +#ifdef CONFIG_PAX_MPROTECT
56232 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
56233 + pax_flags |= MF_PAX_MPROTECT;
56234 +#endif
56235 +
56236 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56237 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
56238 + pax_flags |= MF_PAX_RANDMMAP;
56239 +#endif
56240 +
56241 + return pax_flags;
56242 +}
56243 +#endif
56244 +
56245 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
56246 +#ifdef CONFIG_PAX_SOFTMODE
56247 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
56248 +{
56249 + unsigned long pax_flags = 0UL;
56250 +
56251 +#ifdef CONFIG_PAX_PAGEEXEC
56252 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
56253 + pax_flags |= MF_PAX_PAGEEXEC;
56254 +#endif
56255 +
56256 +#ifdef CONFIG_PAX_SEGMEXEC
56257 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
56258 + pax_flags |= MF_PAX_SEGMEXEC;
56259 +#endif
56260 +
56261 +#ifdef CONFIG_PAX_EMUTRAMP
56262 + if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
56263 + pax_flags |= MF_PAX_EMUTRAMP;
56264 +#endif
56265 +
56266 +#ifdef CONFIG_PAX_MPROTECT
56267 + if (pax_flags_softmode & MF_PAX_MPROTECT)
56268 + pax_flags |= MF_PAX_MPROTECT;
56269 +#endif
56270 +
56271 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56272 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
56273 + pax_flags |= MF_PAX_RANDMMAP;
56274 +#endif
56275 +
56276 + return pax_flags;
56277 +}
56278 +#endif
56279 +
56280 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
56281 +{
56282 + unsigned long pax_flags = 0UL;
56283 +
56284 +#ifdef CONFIG_PAX_PAGEEXEC
56285 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
56286 + pax_flags |= MF_PAX_PAGEEXEC;
56287 +#endif
56288 +
56289 +#ifdef CONFIG_PAX_SEGMEXEC
56290 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
56291 + pax_flags |= MF_PAX_SEGMEXEC;
56292 +#endif
56293 +
56294 +#ifdef CONFIG_PAX_EMUTRAMP
56295 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
56296 + pax_flags |= MF_PAX_EMUTRAMP;
56297 +#endif
56298 +
56299 +#ifdef CONFIG_PAX_MPROTECT
56300 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
56301 + pax_flags |= MF_PAX_MPROTECT;
56302 +#endif
56303 +
56304 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56305 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
56306 + pax_flags |= MF_PAX_RANDMMAP;
56307 +#endif
56308 +
56309 + return pax_flags;
56310 +}
56311 +#endif
56312 +
56313 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56314 +static unsigned long pax_parse_defaults(void)
56315 +{
56316 + unsigned long pax_flags = 0UL;
56317 +
56318 +#ifdef CONFIG_PAX_SOFTMODE
56319 + if (pax_softmode)
56320 + return pax_flags;
56321 +#endif
56322 +
56323 +#ifdef CONFIG_PAX_PAGEEXEC
56324 + pax_flags |= MF_PAX_PAGEEXEC;
56325 +#endif
56326 +
56327 +#ifdef CONFIG_PAX_SEGMEXEC
56328 + pax_flags |= MF_PAX_SEGMEXEC;
56329 +#endif
56330 +
56331 +#ifdef CONFIG_PAX_MPROTECT
56332 + pax_flags |= MF_PAX_MPROTECT;
56333 +#endif
56334 +
56335 +#ifdef CONFIG_PAX_RANDMMAP
56336 + if (randomize_va_space)
56337 + pax_flags |= MF_PAX_RANDMMAP;
56338 +#endif
56339 +
56340 + return pax_flags;
56341 +}
56342 +
56343 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
56344 +{
56345 + unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
56346 +
56347 +#ifdef CONFIG_PAX_EI_PAX
56348 +
56349 +#ifdef CONFIG_PAX_SOFTMODE
56350 + if (pax_softmode)
56351 + return pax_flags;
56352 +#endif
56353 +
56354 + pax_flags = 0UL;
56355 +
56356 +#ifdef CONFIG_PAX_PAGEEXEC
56357 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
56358 + pax_flags |= MF_PAX_PAGEEXEC;
56359 +#endif
56360 +
56361 +#ifdef CONFIG_PAX_SEGMEXEC
56362 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
56363 + pax_flags |= MF_PAX_SEGMEXEC;
56364 +#endif
56365 +
56366 +#ifdef CONFIG_PAX_EMUTRAMP
56367 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
56368 + pax_flags |= MF_PAX_EMUTRAMP;
56369 +#endif
56370 +
56371 +#ifdef CONFIG_PAX_MPROTECT
56372 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
56373 + pax_flags |= MF_PAX_MPROTECT;
56374 +#endif
56375 +
56376 +#ifdef CONFIG_PAX_ASLR
56377 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
56378 + pax_flags |= MF_PAX_RANDMMAP;
56379 +#endif
56380 +
56381 +#endif
56382 +
56383 + return pax_flags;
56384 +
56385 +}
56386 +
56387 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
56388 +{
56389 +
56390 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
56391 + unsigned long i;
56392 +
56393 + for (i = 0UL; i < elf_ex->e_phnum; i++)
56394 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
56395 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
56396 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
56397 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
56398 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
56399 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
56400 + return PAX_PARSE_FLAGS_FALLBACK;
56401 +
56402 +#ifdef CONFIG_PAX_SOFTMODE
56403 + if (pax_softmode)
56404 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
56405 + else
56406 +#endif
56407 +
56408 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
56409 + break;
56410 + }
56411 +#endif
56412 +
56413 + return PAX_PARSE_FLAGS_FALLBACK;
56414 +}
56415 +
56416 +static unsigned long pax_parse_xattr_pax(struct file * const file)
56417 +{
56418 +
56419 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
56420 + ssize_t xattr_size, i;
56421 + unsigned char xattr_value[sizeof("pemrs") - 1];
56422 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
56423 +
56424 + xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
56425 + if (xattr_size < 0 || xattr_size > sizeof xattr_value)
56426 + return PAX_PARSE_FLAGS_FALLBACK;
56427 +
56428 + for (i = 0; i < xattr_size; i++)
56429 + switch (xattr_value[i]) {
56430 + default:
56431 + return PAX_PARSE_FLAGS_FALLBACK;
56432 +
56433 +#define parse_flag(option1, option2, flag) \
56434 + case option1: \
56435 + if (pax_flags_hardmode & MF_PAX_##flag) \
56436 + return PAX_PARSE_FLAGS_FALLBACK;\
56437 + pax_flags_hardmode |= MF_PAX_##flag; \
56438 + break; \
56439 + case option2: \
56440 + if (pax_flags_softmode & MF_PAX_##flag) \
56441 + return PAX_PARSE_FLAGS_FALLBACK;\
56442 + pax_flags_softmode |= MF_PAX_##flag; \
56443 + break;
56444 +
56445 + parse_flag('p', 'P', PAGEEXEC);
56446 + parse_flag('e', 'E', EMUTRAMP);
56447 + parse_flag('m', 'M', MPROTECT);
56448 + parse_flag('r', 'R', RANDMMAP);
56449 + parse_flag('s', 'S', SEGMEXEC);
56450 +
56451 +#undef parse_flag
56452 + }
56453 +
56454 + if (pax_flags_hardmode & pax_flags_softmode)
56455 + return PAX_PARSE_FLAGS_FALLBACK;
56456 +
56457 +#ifdef CONFIG_PAX_SOFTMODE
56458 + if (pax_softmode)
56459 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
56460 + else
56461 +#endif
56462 +
56463 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
56464 +#else
56465 + return PAX_PARSE_FLAGS_FALLBACK;
56466 +#endif
56467 +
56468 +}
56469 +
56470 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
56471 +{
56472 + unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
56473 +
56474 + pax_flags = pax_parse_defaults();
56475 + ei_pax_flags = pax_parse_ei_pax(elf_ex);
56476 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
56477 + xattr_pax_flags = pax_parse_xattr_pax(file);
56478 +
56479 + if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
56480 + xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
56481 + pt_pax_flags != xattr_pax_flags)
56482 + return -EINVAL;
56483 + if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
56484 + pax_flags = xattr_pax_flags;
56485 + else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
56486 + pax_flags = pt_pax_flags;
56487 + else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
56488 + pax_flags = ei_pax_flags;
56489 +
56490 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
56491 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56492 + if ((__supported_pte_mask & _PAGE_NX))
56493 + pax_flags &= ~MF_PAX_SEGMEXEC;
56494 + else
56495 + pax_flags &= ~MF_PAX_PAGEEXEC;
56496 + }
56497 +#endif
56498 +
56499 + if (0 > pax_check_flags(&pax_flags))
56500 + return -EINVAL;
56501 +
56502 + current->mm->pax_flags = pax_flags;
56503 + return 0;
56504 +}
56505 +#endif
56506 +
56507 /*
56508 * These are the functions used to load ELF style executables and shared
56509 * libraries. There is no binary dependent code anywhere else.
56510 @@ -554,6 +915,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
56511 {
56512 unsigned int random_variable = 0;
56513
56514 +#ifdef CONFIG_PAX_RANDUSTACK
56515 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
56516 + return stack_top - current->mm->delta_stack;
56517 +#endif
56518 +
56519 if ((current->flags & PF_RANDOMIZE) &&
56520 !(current->personality & ADDR_NO_RANDOMIZE)) {
56521 random_variable = get_random_int() & STACK_RND_MASK;
56522 @@ -572,7 +938,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
56523 unsigned long load_addr = 0, load_bias = 0;
56524 int load_addr_set = 0;
56525 char * elf_interpreter = NULL;
56526 - unsigned long error;
56527 + unsigned long error = 0;
56528 struct elf_phdr *elf_ppnt, *elf_phdata;
56529 unsigned long elf_bss, elf_brk;
56530 int retval, i;
56531 @@ -582,12 +948,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
56532 unsigned long start_code, end_code, start_data, end_data;
56533 unsigned long reloc_func_desc __maybe_unused = 0;
56534 int executable_stack = EXSTACK_DEFAULT;
56535 - unsigned long def_flags = 0;
56536 struct pt_regs *regs = current_pt_regs();
56537 struct {
56538 struct elfhdr elf_ex;
56539 struct elfhdr interp_elf_ex;
56540 } *loc;
56541 + unsigned long pax_task_size;
56542
56543 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
56544 if (!loc) {
56545 @@ -723,11 +1089,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
56546 goto out_free_dentry;
56547
56548 /* OK, This is the point of no return */
56549 - current->mm->def_flags = def_flags;
56550 + current->mm->def_flags = 0;
56551
56552 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
56553 may depend on the personality. */
56554 SET_PERSONALITY(loc->elf_ex);
56555 +
56556 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56557 + current->mm->pax_flags = 0UL;
56558 +#endif
56559 +
56560 +#ifdef CONFIG_PAX_DLRESOLVE
56561 + current->mm->call_dl_resolve = 0UL;
56562 +#endif
56563 +
56564 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56565 + current->mm->call_syscall = 0UL;
56566 +#endif
56567 +
56568 +#ifdef CONFIG_PAX_ASLR
56569 + current->mm->delta_mmap = 0UL;
56570 + current->mm->delta_stack = 0UL;
56571 +#endif
56572 +
56573 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56574 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
56575 + send_sig(SIGKILL, current, 0);
56576 + goto out_free_dentry;
56577 + }
56578 +#endif
56579 +
56580 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56581 + pax_set_initial_flags(bprm);
56582 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56583 + if (pax_set_initial_flags_func)
56584 + (pax_set_initial_flags_func)(bprm);
56585 +#endif
56586 +
56587 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56588 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
56589 + current->mm->context.user_cs_limit = PAGE_SIZE;
56590 + current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
56591 + }
56592 +#endif
56593 +
56594 +#ifdef CONFIG_PAX_SEGMEXEC
56595 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
56596 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
56597 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
56598 + pax_task_size = SEGMEXEC_TASK_SIZE;
56599 + current->mm->def_flags |= VM_NOHUGEPAGE;
56600 + } else
56601 +#endif
56602 +
56603 + pax_task_size = TASK_SIZE;
56604 +
56605 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
56606 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56607 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
56608 + put_cpu();
56609 + }
56610 +#endif
56611 +
56612 +#ifdef CONFIG_PAX_ASLR
56613 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
56614 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
56615 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
56616 + }
56617 +#endif
56618 +
56619 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
56620 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56621 + executable_stack = EXSTACK_DISABLE_X;
56622 + current->personality &= ~READ_IMPLIES_EXEC;
56623 + } else
56624 +#endif
56625 +
56626 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
56627 current->personality |= READ_IMPLIES_EXEC;
56628
56629 @@ -817,6 +1254,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
56630 #else
56631 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
56632 #endif
56633 +
56634 +#ifdef CONFIG_PAX_RANDMMAP
56635 + /* PaX: randomize base address at the default exe base if requested */
56636 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
56637 +#ifdef CONFIG_SPARC64
56638 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
56639 +#else
56640 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
56641 +#endif
56642 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
56643 + elf_flags |= MAP_FIXED;
56644 + }
56645 +#endif
56646 +
56647 }
56648
56649 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
56650 @@ -849,9 +1300,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
56651 * allowed task size. Note that p_filesz must always be
56652 * <= p_memsz so it is only necessary to check p_memsz.
56653 */
56654 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
56655 - elf_ppnt->p_memsz > TASK_SIZE ||
56656 - TASK_SIZE - elf_ppnt->p_memsz < k) {
56657 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
56658 + elf_ppnt->p_memsz > pax_task_size ||
56659 + pax_task_size - elf_ppnt->p_memsz < k) {
56660 /* set_brk can never work. Avoid overflows. */
56661 send_sig(SIGKILL, current, 0);
56662 retval = -EINVAL;
56663 @@ -890,17 +1341,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
56664 goto out_free_dentry;
56665 }
56666 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
56667 - send_sig(SIGSEGV, current, 0);
56668 - retval = -EFAULT; /* Nobody gets to see this, but.. */
56669 - goto out_free_dentry;
56670 + /*
56671 + * This bss-zeroing can fail if the ELF
56672 + * file specifies odd protections. So
56673 + * we don't check the return value
56674 + */
56675 }
56676
56677 +#ifdef CONFIG_PAX_RANDMMAP
56678 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
56679 + unsigned long start, size, flags;
56680 + vm_flags_t vm_flags;
56681 +
56682 + start = ELF_PAGEALIGN(elf_brk);
56683 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
56684 + flags = MAP_FIXED | MAP_PRIVATE;
56685 + vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
56686 +
56687 + down_write(&current->mm->mmap_sem);
56688 + start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
56689 + retval = -ENOMEM;
56690 + if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
56691 +// if (current->personality & ADDR_NO_RANDOMIZE)
56692 +// vm_flags |= VM_READ | VM_MAYREAD;
56693 + start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
56694 + retval = IS_ERR_VALUE(start) ? start : 0;
56695 + }
56696 + up_write(&current->mm->mmap_sem);
56697 + if (retval == 0)
56698 + retval = set_brk(start + size, start + size + PAGE_SIZE);
56699 + if (retval < 0) {
56700 + send_sig(SIGKILL, current, 0);
56701 + goto out_free_dentry;
56702 + }
56703 + }
56704 +#endif
56705 +
56706 if (elf_interpreter) {
56707 - unsigned long interp_map_addr = 0;
56708 -
56709 elf_entry = load_elf_interp(&loc->interp_elf_ex,
56710 interpreter,
56711 - &interp_map_addr,
56712 load_bias);
56713 if (!IS_ERR((void *)elf_entry)) {
56714 /*
56715 @@ -1122,7 +1601,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
56716 * Decide what to dump of a segment, part, all or none.
56717 */
56718 static unsigned long vma_dump_size(struct vm_area_struct *vma,
56719 - unsigned long mm_flags)
56720 + unsigned long mm_flags, long signr)
56721 {
56722 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
56723
56724 @@ -1160,7 +1639,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
56725 if (vma->vm_file == NULL)
56726 return 0;
56727
56728 - if (FILTER(MAPPED_PRIVATE))
56729 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
56730 goto whole;
56731
56732 /*
56733 @@ -1367,9 +1846,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
56734 {
56735 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
56736 int i = 0;
56737 - do
56738 + do {
56739 i += 2;
56740 - while (auxv[i - 2] != AT_NULL);
56741 + } while (auxv[i - 2] != AT_NULL);
56742 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
56743 }
56744
56745 @@ -1378,7 +1857,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
56746 {
56747 mm_segment_t old_fs = get_fs();
56748 set_fs(KERNEL_DS);
56749 - copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
56750 + copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
56751 set_fs(old_fs);
56752 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
56753 }
56754 @@ -2002,14 +2481,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
56755 }
56756
56757 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
56758 - unsigned long mm_flags)
56759 + struct coredump_params *cprm)
56760 {
56761 struct vm_area_struct *vma;
56762 size_t size = 0;
56763
56764 for (vma = first_vma(current, gate_vma); vma != NULL;
56765 vma = next_vma(vma, gate_vma))
56766 - size += vma_dump_size(vma, mm_flags);
56767 + size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
56768 return size;
56769 }
56770
56771 @@ -2100,7 +2579,7 @@ static int elf_core_dump(struct coredump_params *cprm)
56772
56773 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
56774
56775 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
56776 + offset += elf_core_vma_data_size(gate_vma, cprm);
56777 offset += elf_core_extra_data_size();
56778 e_shoff = offset;
56779
56780 @@ -2128,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
56781 phdr.p_offset = offset;
56782 phdr.p_vaddr = vma->vm_start;
56783 phdr.p_paddr = 0;
56784 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
56785 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
56786 phdr.p_memsz = vma->vm_end - vma->vm_start;
56787 offset += phdr.p_filesz;
56788 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
56789 @@ -2161,7 +2640,7 @@ static int elf_core_dump(struct coredump_params *cprm)
56790 unsigned long addr;
56791 unsigned long end;
56792
56793 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
56794 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
56795
56796 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
56797 struct page *page;
56798 @@ -2202,6 +2681,167 @@ out:
56799
56800 #endif /* CONFIG_ELF_CORE */
56801
56802 +#ifdef CONFIG_PAX_MPROTECT
56803 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
56804 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
56805 + * we'll remove VM_MAYWRITE for good on RELRO segments.
56806 + *
56807 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
56808 + * basis because we want to allow the common case and not the special ones.
56809 + */
56810 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
56811 +{
56812 + struct elfhdr elf_h;
56813 + struct elf_phdr elf_p;
56814 + unsigned long i;
56815 + unsigned long oldflags;
56816 + bool is_textrel_rw, is_textrel_rx, is_relro;
56817 +
56818 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
56819 + return;
56820 +
56821 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
56822 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
56823 +
56824 +#ifdef CONFIG_PAX_ELFRELOCS
56825 + /* possible TEXTREL */
56826 + is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
56827 + is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
56828 +#else
56829 + is_textrel_rw = false;
56830 + is_textrel_rx = false;
56831 +#endif
56832 +
56833 + /* possible RELRO */
56834 + is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
56835 +
56836 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
56837 + return;
56838 +
56839 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
56840 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
56841 +
56842 +#ifdef CONFIG_PAX_ETEXECRELOCS
56843 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
56844 +#else
56845 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
56846 +#endif
56847 +
56848 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
56849 + !elf_check_arch(&elf_h) ||
56850 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56851 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56852 + return;
56853 +
56854 + for (i = 0UL; i < elf_h.e_phnum; i++) {
56855 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56856 + return;
56857 + switch (elf_p.p_type) {
56858 + case PT_DYNAMIC:
56859 + if (!is_textrel_rw && !is_textrel_rx)
56860 + continue;
56861 + i = 0UL;
56862 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
56863 + elf_dyn dyn;
56864 +
56865 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
56866 + break;
56867 + if (dyn.d_tag == DT_NULL)
56868 + break;
56869 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
56870 + gr_log_textrel(vma);
56871 + if (is_textrel_rw)
56872 + vma->vm_flags |= VM_MAYWRITE;
56873 + else
56874 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
56875 + vma->vm_flags &= ~VM_MAYWRITE;
56876 + break;
56877 + }
56878 + i++;
56879 + }
56880 + is_textrel_rw = false;
56881 + is_textrel_rx = false;
56882 + continue;
56883 +
56884 + case PT_GNU_RELRO:
56885 + if (!is_relro)
56886 + continue;
56887 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
56888 + vma->vm_flags &= ~VM_MAYWRITE;
56889 + is_relro = false;
56890 + continue;
56891 +
56892 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
56893 + case PT_PAX_FLAGS: {
56894 + const char *msg_mprotect = "", *msg_emutramp = "";
56895 + char *buffer_lib, *buffer_exe;
56896 +
56897 + if (elf_p.p_flags & PF_NOMPROTECT)
56898 + msg_mprotect = "MPROTECT disabled";
56899 +
56900 +#ifdef CONFIG_PAX_EMUTRAMP
56901 + if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
56902 + msg_emutramp = "EMUTRAMP enabled";
56903 +#endif
56904 +
56905 + if (!msg_mprotect[0] && !msg_emutramp[0])
56906 + continue;
56907 +
56908 + if (!printk_ratelimit())
56909 + continue;
56910 +
56911 + buffer_lib = (char *)__get_free_page(GFP_KERNEL);
56912 + buffer_exe = (char *)__get_free_page(GFP_KERNEL);
56913 + if (buffer_lib && buffer_exe) {
56914 + char *path_lib, *path_exe;
56915 +
56916 + path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
56917 + path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
56918 +
56919 + pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
56920 + (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
56921 +
56922 + }
56923 + free_page((unsigned long)buffer_exe);
56924 + free_page((unsigned long)buffer_lib);
56925 + continue;
56926 + }
56927 +#endif
56928 +
56929 + }
56930 + }
56931 +}
56932 +#endif
56933 +
56934 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56935 +
56936 +extern int grsec_enable_log_rwxmaps;
56937 +
56938 +static void elf_handle_mmap(struct file *file)
56939 +{
56940 + struct elfhdr elf_h;
56941 + struct elf_phdr elf_p;
56942 + unsigned long i;
56943 +
56944 + if (!grsec_enable_log_rwxmaps)
56945 + return;
56946 +
56947 + if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
56948 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
56949 + (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
56950 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56951 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56952 + return;
56953 +
56954 + for (i = 0UL; i < elf_h.e_phnum; i++) {
56955 + if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56956 + return;
56957 + if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
56958 + gr_log_ptgnustack(file);
56959 + }
56960 +}
56961 +#endif
56962 +
56963 static int __init init_elf_binfmt(void)
56964 {
56965 register_binfmt(&elf_format);
56966 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
56967 index d50bbe5..af3b649 100644
56968 --- a/fs/binfmt_flat.c
56969 +++ b/fs/binfmt_flat.c
56970 @@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
56971 realdatastart = (unsigned long) -ENOMEM;
56972 printk("Unable to allocate RAM for process data, errno %d\n",
56973 (int)-realdatastart);
56974 + down_write(&current->mm->mmap_sem);
56975 vm_munmap(textpos, text_len);
56976 + up_write(&current->mm->mmap_sem);
56977 ret = realdatastart;
56978 goto err;
56979 }
56980 @@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56981 }
56982 if (IS_ERR_VALUE(result)) {
56983 printk("Unable to read data+bss, errno %d\n", (int)-result);
56984 + down_write(&current->mm->mmap_sem);
56985 vm_munmap(textpos, text_len);
56986 vm_munmap(realdatastart, len);
56987 + up_write(&current->mm->mmap_sem);
56988 ret = result;
56989 goto err;
56990 }
56991 @@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56992 }
56993 if (IS_ERR_VALUE(result)) {
56994 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
56995 + down_write(&current->mm->mmap_sem);
56996 vm_munmap(textpos, text_len + data_len + extra +
56997 MAX_SHARED_LIBS * sizeof(unsigned long));
56998 + up_write(&current->mm->mmap_sem);
56999 ret = result;
57000 goto err;
57001 }
57002 diff --git a/fs/bio.c b/fs/bio.c
57003 index 33d79a4..c3c9893 100644
57004 --- a/fs/bio.c
57005 +++ b/fs/bio.c
57006 @@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
57007 /*
57008 * Overflow, abort
57009 */
57010 - if (end < start)
57011 + if (end < start || end - start > INT_MAX - nr_pages)
57012 return ERR_PTR(-EINVAL);
57013
57014 nr_pages += end - start;
57015 @@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
57016 /*
57017 * Overflow, abort
57018 */
57019 - if (end < start)
57020 + if (end < start || end - start > INT_MAX - nr_pages)
57021 return ERR_PTR(-EINVAL);
57022
57023 nr_pages += end - start;
57024 @@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
57025 const int read = bio_data_dir(bio) == READ;
57026 struct bio_map_data *bmd = bio->bi_private;
57027 int i;
57028 - char *p = bmd->sgvecs[0].iov_base;
57029 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
57030
57031 bio_for_each_segment_all(bvec, bio, i) {
57032 char *addr = page_address(bvec->bv_page);
57033 diff --git a/fs/block_dev.c b/fs/block_dev.c
57034 index 1e86823..8e34695 100644
57035 --- a/fs/block_dev.c
57036 +++ b/fs/block_dev.c
57037 @@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
57038 else if (bdev->bd_contains == bdev)
57039 return true; /* is a whole device which isn't held */
57040
57041 - else if (whole->bd_holder == bd_may_claim)
57042 + else if (whole->bd_holder == (void *)bd_may_claim)
57043 return true; /* is a partition of a device that is being partitioned */
57044 else if (whole->bd_holder != NULL)
57045 return false; /* is a partition of a held device */
57046 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
57047 index 3de01b4..6547c39 100644
57048 --- a/fs/btrfs/ctree.c
57049 +++ b/fs/btrfs/ctree.c
57050 @@ -1217,9 +1217,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
57051 free_extent_buffer(buf);
57052 add_root_to_dirty_list(root);
57053 } else {
57054 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
57055 - parent_start = parent->start;
57056 - else
57057 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
57058 + if (parent)
57059 + parent_start = parent->start;
57060 + else
57061 + parent_start = 0;
57062 + } else
57063 parent_start = 0;
57064
57065 WARN_ON(trans->transid != btrfs_header_generation(parent));
57066 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
57067 index 8d292fb..bc205c2 100644
57068 --- a/fs/btrfs/delayed-inode.c
57069 +++ b/fs/btrfs/delayed-inode.c
57070 @@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
57071
57072 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
57073 {
57074 - int seq = atomic_inc_return(&delayed_root->items_seq);
57075 + int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
57076 if ((atomic_dec_return(&delayed_root->items) <
57077 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
57078 waitqueue_active(&delayed_root->wait))
57079 @@ -1379,7 +1379,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
57080 static int refs_newer(struct btrfs_delayed_root *delayed_root,
57081 int seq, int count)
57082 {
57083 - int val = atomic_read(&delayed_root->items_seq);
57084 + int val = atomic_read_unchecked(&delayed_root->items_seq);
57085
57086 if (val < seq || val >= seq + count)
57087 return 1;
57088 @@ -1396,7 +1396,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
57089 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
57090 return;
57091
57092 - seq = atomic_read(&delayed_root->items_seq);
57093 + seq = atomic_read_unchecked(&delayed_root->items_seq);
57094
57095 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
57096 int ret;
57097 diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
57098 index a4b38f9..f86a509 100644
57099 --- a/fs/btrfs/delayed-inode.h
57100 +++ b/fs/btrfs/delayed-inode.h
57101 @@ -43,7 +43,7 @@ struct btrfs_delayed_root {
57102 */
57103 struct list_head prepare_list;
57104 atomic_t items; /* for delayed items */
57105 - atomic_t items_seq; /* for delayed items */
57106 + atomic_unchecked_t items_seq; /* for delayed items */
57107 int nodes; /* for delayed nodes */
57108 wait_queue_head_t wait;
57109 };
57110 @@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
57111 struct btrfs_delayed_root *delayed_root)
57112 {
57113 atomic_set(&delayed_root->items, 0);
57114 - atomic_set(&delayed_root->items_seq, 0);
57115 + atomic_set_unchecked(&delayed_root->items_seq, 0);
57116 delayed_root->nodes = 0;
57117 spin_lock_init(&delayed_root->lock);
57118 init_waitqueue_head(&delayed_root->wait);
57119 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
57120 index 9f831bb..14afde5 100644
57121 --- a/fs/btrfs/ioctl.c
57122 +++ b/fs/btrfs/ioctl.c
57123 @@ -3457,9 +3457,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
57124 for (i = 0; i < num_types; i++) {
57125 struct btrfs_space_info *tmp;
57126
57127 + /* Don't copy in more than we allocated */
57128 if (!slot_count)
57129 break;
57130
57131 + slot_count--;
57132 +
57133 info = NULL;
57134 rcu_read_lock();
57135 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
57136 @@ -3481,10 +3484,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
57137 memcpy(dest, &space, sizeof(space));
57138 dest++;
57139 space_args.total_spaces++;
57140 - slot_count--;
57141 }
57142 - if (!slot_count)
57143 - break;
57144 }
57145 up_read(&info->groups_sem);
57146 }
57147 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
57148 index d71a11d..384e2c4 100644
57149 --- a/fs/btrfs/super.c
57150 +++ b/fs/btrfs/super.c
57151 @@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
57152 function, line, errstr);
57153 return;
57154 }
57155 - ACCESS_ONCE(trans->transaction->aborted) = errno;
57156 + ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
57157 /* Wake up anybody who may be waiting on this transaction */
57158 wake_up(&root->fs_info->transaction_wait);
57159 wake_up(&root->fs_info->transaction_blocked_wait);
57160 diff --git a/fs/buffer.c b/fs/buffer.c
57161 index aeeea65..7651d590 100644
57162 --- a/fs/buffer.c
57163 +++ b/fs/buffer.c
57164 @@ -3428,7 +3428,7 @@ void __init buffer_init(void)
57165 bh_cachep = kmem_cache_create("buffer_head",
57166 sizeof(struct buffer_head), 0,
57167 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
57168 - SLAB_MEM_SPREAD),
57169 + SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
57170 NULL);
57171
57172 /*
57173 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
57174 index 622f469..e8d2d55 100644
57175 --- a/fs/cachefiles/bind.c
57176 +++ b/fs/cachefiles/bind.c
57177 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
57178 args);
57179
57180 /* start by checking things over */
57181 - ASSERT(cache->fstop_percent >= 0 &&
57182 - cache->fstop_percent < cache->fcull_percent &&
57183 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
57184 cache->fcull_percent < cache->frun_percent &&
57185 cache->frun_percent < 100);
57186
57187 - ASSERT(cache->bstop_percent >= 0 &&
57188 - cache->bstop_percent < cache->bcull_percent &&
57189 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
57190 cache->bcull_percent < cache->brun_percent &&
57191 cache->brun_percent < 100);
57192
57193 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
57194 index 0a1467b..6a53245 100644
57195 --- a/fs/cachefiles/daemon.c
57196 +++ b/fs/cachefiles/daemon.c
57197 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
57198 if (n > buflen)
57199 return -EMSGSIZE;
57200
57201 - if (copy_to_user(_buffer, buffer, n) != 0)
57202 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
57203 return -EFAULT;
57204
57205 return n;
57206 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
57207 if (test_bit(CACHEFILES_DEAD, &cache->flags))
57208 return -EIO;
57209
57210 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
57211 + if (datalen > PAGE_SIZE - 1)
57212 return -EOPNOTSUPP;
57213
57214 /* drag the command string into the kernel so we can parse it */
57215 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
57216 if (args[0] != '%' || args[1] != '\0')
57217 return -EINVAL;
57218
57219 - if (fstop < 0 || fstop >= cache->fcull_percent)
57220 + if (fstop >= cache->fcull_percent)
57221 return cachefiles_daemon_range_error(cache, args);
57222
57223 cache->fstop_percent = fstop;
57224 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
57225 if (args[0] != '%' || args[1] != '\0')
57226 return -EINVAL;
57227
57228 - if (bstop < 0 || bstop >= cache->bcull_percent)
57229 + if (bstop >= cache->bcull_percent)
57230 return cachefiles_daemon_range_error(cache, args);
57231
57232 cache->bstop_percent = bstop;
57233 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
57234 index 5349473..d6c0b93 100644
57235 --- a/fs/cachefiles/internal.h
57236 +++ b/fs/cachefiles/internal.h
57237 @@ -59,7 +59,7 @@ struct cachefiles_cache {
57238 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
57239 struct rb_root active_nodes; /* active nodes (can't be culled) */
57240 rwlock_t active_lock; /* lock for active_nodes */
57241 - atomic_t gravecounter; /* graveyard uniquifier */
57242 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
57243 unsigned frun_percent; /* when to stop culling (% files) */
57244 unsigned fcull_percent; /* when to start culling (% files) */
57245 unsigned fstop_percent; /* when to stop allocating (% files) */
57246 @@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
57247 * proc.c
57248 */
57249 #ifdef CONFIG_CACHEFILES_HISTOGRAM
57250 -extern atomic_t cachefiles_lookup_histogram[HZ];
57251 -extern atomic_t cachefiles_mkdir_histogram[HZ];
57252 -extern atomic_t cachefiles_create_histogram[HZ];
57253 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
57254 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
57255 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
57256
57257 extern int __init cachefiles_proc_init(void);
57258 extern void cachefiles_proc_cleanup(void);
57259 static inline
57260 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
57261 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
57262 {
57263 unsigned long jif = jiffies - start_jif;
57264 if (jif >= HZ)
57265 jif = HZ - 1;
57266 - atomic_inc(&histogram[jif]);
57267 + atomic_inc_unchecked(&histogram[jif]);
57268 }
57269
57270 #else
57271 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
57272 index ca65f39..48921e3 100644
57273 --- a/fs/cachefiles/namei.c
57274 +++ b/fs/cachefiles/namei.c
57275 @@ -317,7 +317,7 @@ try_again:
57276 /* first step is to make up a grave dentry in the graveyard */
57277 sprintf(nbuffer, "%08x%08x",
57278 (uint32_t) get_seconds(),
57279 - (uint32_t) atomic_inc_return(&cache->gravecounter));
57280 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
57281
57282 /* do the multiway lock magic */
57283 trap = lock_rename(cache->graveyard, dir);
57284 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
57285 index eccd339..4c1d995 100644
57286 --- a/fs/cachefiles/proc.c
57287 +++ b/fs/cachefiles/proc.c
57288 @@ -14,9 +14,9 @@
57289 #include <linux/seq_file.h>
57290 #include "internal.h"
57291
57292 -atomic_t cachefiles_lookup_histogram[HZ];
57293 -atomic_t cachefiles_mkdir_histogram[HZ];
57294 -atomic_t cachefiles_create_histogram[HZ];
57295 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
57296 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
57297 +atomic_unchecked_t cachefiles_create_histogram[HZ];
57298
57299 /*
57300 * display the latency histogram
57301 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
57302 return 0;
57303 default:
57304 index = (unsigned long) v - 3;
57305 - x = atomic_read(&cachefiles_lookup_histogram[index]);
57306 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
57307 - z = atomic_read(&cachefiles_create_histogram[index]);
57308 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
57309 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
57310 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
57311 if (x == 0 && y == 0 && z == 0)
57312 return 0;
57313
57314 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
57315 index ebaff36..7e3ea26 100644
57316 --- a/fs/cachefiles/rdwr.c
57317 +++ b/fs/cachefiles/rdwr.c
57318 @@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
57319 old_fs = get_fs();
57320 set_fs(KERNEL_DS);
57321 ret = file->f_op->write(
57322 - file, (const void __user *) data, len, &pos);
57323 + file, (const void __force_user *) data, len, &pos);
57324 set_fs(old_fs);
57325 kunmap(page);
57326 file_end_write(file);
57327 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
57328 index 2a0bcae..34ec24e 100644
57329 --- a/fs/ceph/dir.c
57330 +++ b/fs/ceph/dir.c
57331 @@ -240,7 +240,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
57332 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
57333 struct ceph_mds_client *mdsc = fsc->mdsc;
57334 unsigned frag = fpos_frag(ctx->pos);
57335 - int off = fpos_off(ctx->pos);
57336 + unsigned int off = fpos_off(ctx->pos);
57337 int err;
57338 u32 ftype;
57339 struct ceph_mds_reply_info_parsed *rinfo;
57340 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
57341 index 6a0951e..03fac6d 100644
57342 --- a/fs/ceph/super.c
57343 +++ b/fs/ceph/super.c
57344 @@ -870,7 +870,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
57345 /*
57346 * construct our own bdi so we can control readahead, etc.
57347 */
57348 -static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
57349 +static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
57350
57351 static int ceph_register_bdi(struct super_block *sb,
57352 struct ceph_fs_client *fsc)
57353 @@ -887,7 +887,7 @@ static int ceph_register_bdi(struct super_block *sb,
57354 default_backing_dev_info.ra_pages;
57355
57356 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
57357 - atomic_long_inc_return(&bdi_seq));
57358 + atomic_long_inc_return_unchecked(&bdi_seq));
57359 if (!err)
57360 sb->s_bdi = &fsc->backing_dev_info;
57361 return err;
57362 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
57363 index f3ac415..3d2420c 100644
57364 --- a/fs/cifs/cifs_debug.c
57365 +++ b/fs/cifs/cifs_debug.c
57366 @@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
57367
57368 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
57369 #ifdef CONFIG_CIFS_STATS2
57370 - atomic_set(&totBufAllocCount, 0);
57371 - atomic_set(&totSmBufAllocCount, 0);
57372 + atomic_set_unchecked(&totBufAllocCount, 0);
57373 + atomic_set_unchecked(&totSmBufAllocCount, 0);
57374 #endif /* CONFIG_CIFS_STATS2 */
57375 spin_lock(&cifs_tcp_ses_lock);
57376 list_for_each(tmp1, &cifs_tcp_ses_list) {
57377 @@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
57378 tcon = list_entry(tmp3,
57379 struct cifs_tcon,
57380 tcon_list);
57381 - atomic_set(&tcon->num_smbs_sent, 0);
57382 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
57383 if (server->ops->clear_stats)
57384 server->ops->clear_stats(tcon);
57385 }
57386 @@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
57387 smBufAllocCount.counter, cifs_min_small);
57388 #ifdef CONFIG_CIFS_STATS2
57389 seq_printf(m, "Total Large %d Small %d Allocations\n",
57390 - atomic_read(&totBufAllocCount),
57391 - atomic_read(&totSmBufAllocCount));
57392 + atomic_read_unchecked(&totBufAllocCount),
57393 + atomic_read_unchecked(&totSmBufAllocCount));
57394 #endif /* CONFIG_CIFS_STATS2 */
57395
57396 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
57397 @@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
57398 if (tcon->need_reconnect)
57399 seq_puts(m, "\tDISCONNECTED ");
57400 seq_printf(m, "\nSMBs: %d",
57401 - atomic_read(&tcon->num_smbs_sent));
57402 + atomic_read_unchecked(&tcon->num_smbs_sent));
57403 if (server->ops->print_stats)
57404 server->ops->print_stats(m, tcon);
57405 }
57406 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
57407 index 849f613..eae6dec 100644
57408 --- a/fs/cifs/cifsfs.c
57409 +++ b/fs/cifs/cifsfs.c
57410 @@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
57411 */
57412 cifs_req_cachep = kmem_cache_create("cifs_request",
57413 CIFSMaxBufSize + max_hdr_size, 0,
57414 - SLAB_HWCACHE_ALIGN, NULL);
57415 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
57416 if (cifs_req_cachep == NULL)
57417 return -ENOMEM;
57418
57419 @@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
57420 efficient to alloc 1 per page off the slab compared to 17K (5page)
57421 alloc of large cifs buffers even when page debugging is on */
57422 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
57423 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
57424 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
57425 NULL);
57426 if (cifs_sm_req_cachep == NULL) {
57427 mempool_destroy(cifs_req_poolp);
57428 @@ -1168,8 +1168,8 @@ init_cifs(void)
57429 atomic_set(&bufAllocCount, 0);
57430 atomic_set(&smBufAllocCount, 0);
57431 #ifdef CONFIG_CIFS_STATS2
57432 - atomic_set(&totBufAllocCount, 0);
57433 - atomic_set(&totSmBufAllocCount, 0);
57434 + atomic_set_unchecked(&totBufAllocCount, 0);
57435 + atomic_set_unchecked(&totSmBufAllocCount, 0);
57436 #endif /* CONFIG_CIFS_STATS2 */
57437
57438 atomic_set(&midCount, 0);
57439 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
57440 index 579c6d5..95b6d03353 100644
57441 --- a/fs/cifs/cifsglob.h
57442 +++ b/fs/cifs/cifsglob.h
57443 @@ -797,35 +797,35 @@ struct cifs_tcon {
57444 __u16 Flags; /* optional support bits */
57445 enum statusEnum tidStatus;
57446 #ifdef CONFIG_CIFS_STATS
57447 - atomic_t num_smbs_sent;
57448 + atomic_unchecked_t num_smbs_sent;
57449 union {
57450 struct {
57451 - atomic_t num_writes;
57452 - atomic_t num_reads;
57453 - atomic_t num_flushes;
57454 - atomic_t num_oplock_brks;
57455 - atomic_t num_opens;
57456 - atomic_t num_closes;
57457 - atomic_t num_deletes;
57458 - atomic_t num_mkdirs;
57459 - atomic_t num_posixopens;
57460 - atomic_t num_posixmkdirs;
57461 - atomic_t num_rmdirs;
57462 - atomic_t num_renames;
57463 - atomic_t num_t2renames;
57464 - atomic_t num_ffirst;
57465 - atomic_t num_fnext;
57466 - atomic_t num_fclose;
57467 - atomic_t num_hardlinks;
57468 - atomic_t num_symlinks;
57469 - atomic_t num_locks;
57470 - atomic_t num_acl_get;
57471 - atomic_t num_acl_set;
57472 + atomic_unchecked_t num_writes;
57473 + atomic_unchecked_t num_reads;
57474 + atomic_unchecked_t num_flushes;
57475 + atomic_unchecked_t num_oplock_brks;
57476 + atomic_unchecked_t num_opens;
57477 + atomic_unchecked_t num_closes;
57478 + atomic_unchecked_t num_deletes;
57479 + atomic_unchecked_t num_mkdirs;
57480 + atomic_unchecked_t num_posixopens;
57481 + atomic_unchecked_t num_posixmkdirs;
57482 + atomic_unchecked_t num_rmdirs;
57483 + atomic_unchecked_t num_renames;
57484 + atomic_unchecked_t num_t2renames;
57485 + atomic_unchecked_t num_ffirst;
57486 + atomic_unchecked_t num_fnext;
57487 + atomic_unchecked_t num_fclose;
57488 + atomic_unchecked_t num_hardlinks;
57489 + atomic_unchecked_t num_symlinks;
57490 + atomic_unchecked_t num_locks;
57491 + atomic_unchecked_t num_acl_get;
57492 + atomic_unchecked_t num_acl_set;
57493 } cifs_stats;
57494 #ifdef CONFIG_CIFS_SMB2
57495 struct {
57496 - atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
57497 - atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
57498 + atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
57499 + atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
57500 } smb2_stats;
57501 #endif /* CONFIG_CIFS_SMB2 */
57502 } stats;
57503 @@ -1155,7 +1155,7 @@ convert_delimiter(char *path, char delim)
57504 }
57505
57506 #ifdef CONFIG_CIFS_STATS
57507 -#define cifs_stats_inc atomic_inc
57508 +#define cifs_stats_inc atomic_inc_unchecked
57509
57510 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
57511 unsigned int bytes)
57512 @@ -1521,8 +1521,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
57513 /* Various Debug counters */
57514 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
57515 #ifdef CONFIG_CIFS_STATS2
57516 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
57517 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
57518 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
57519 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
57520 #endif
57521 GLOBAL_EXTERN atomic_t smBufAllocCount;
57522 GLOBAL_EXTERN atomic_t midCount;
57523 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
57524 index a1c9ead..63e4c62 100644
57525 --- a/fs/cifs/file.c
57526 +++ b/fs/cifs/file.c
57527 @@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
57528 index = mapping->writeback_index; /* Start from prev offset */
57529 end = -1;
57530 } else {
57531 - index = wbc->range_start >> PAGE_CACHE_SHIFT;
57532 - end = wbc->range_end >> PAGE_CACHE_SHIFT;
57533 - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
57534 + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
57535 range_whole = true;
57536 + index = 0;
57537 + end = ULONG_MAX;
57538 + } else {
57539 + index = wbc->range_start >> PAGE_CACHE_SHIFT;
57540 + end = wbc->range_end >> PAGE_CACHE_SHIFT;
57541 + }
57542 scanned = true;
57543 }
57544 retry:
57545 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
57546 index 2f9f379..43f8025 100644
57547 --- a/fs/cifs/misc.c
57548 +++ b/fs/cifs/misc.c
57549 @@ -170,7 +170,7 @@ cifs_buf_get(void)
57550 memset(ret_buf, 0, buf_size + 3);
57551 atomic_inc(&bufAllocCount);
57552 #ifdef CONFIG_CIFS_STATS2
57553 - atomic_inc(&totBufAllocCount);
57554 + atomic_inc_unchecked(&totBufAllocCount);
57555 #endif /* CONFIG_CIFS_STATS2 */
57556 }
57557
57558 @@ -205,7 +205,7 @@ cifs_small_buf_get(void)
57559 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
57560 atomic_inc(&smBufAllocCount);
57561 #ifdef CONFIG_CIFS_STATS2
57562 - atomic_inc(&totSmBufAllocCount);
57563 + atomic_inc_unchecked(&totSmBufAllocCount);
57564 #endif /* CONFIG_CIFS_STATS2 */
57565
57566 }
57567 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
57568 index ffc9ef9..b3c992b 100644
57569 --- a/fs/cifs/smb1ops.c
57570 +++ b/fs/cifs/smb1ops.c
57571 @@ -609,27 +609,27 @@ static void
57572 cifs_clear_stats(struct cifs_tcon *tcon)
57573 {
57574 #ifdef CONFIG_CIFS_STATS
57575 - atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
57576 - atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
57577 - atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
57578 - atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
57579 - atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
57580 - atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
57581 - atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
57582 - atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
57583 - atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
57584 - atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
57585 - atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
57586 - atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
57587 - atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
57588 - atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
57589 - atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
57590 - atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
57591 - atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
57592 - atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
57593 - atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
57594 - atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
57595 - atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
57596 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
57597 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
57598 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
57599 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
57600 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
57601 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
57602 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
57603 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
57604 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
57605 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
57606 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
57607 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
57608 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
57609 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
57610 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
57611 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
57612 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
57613 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
57614 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
57615 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
57616 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
57617 #endif
57618 }
57619
57620 @@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
57621 {
57622 #ifdef CONFIG_CIFS_STATS
57623 seq_printf(m, " Oplocks breaks: %d",
57624 - atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
57625 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
57626 seq_printf(m, "\nReads: %d Bytes: %llu",
57627 - atomic_read(&tcon->stats.cifs_stats.num_reads),
57628 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
57629 (long long)(tcon->bytes_read));
57630 seq_printf(m, "\nWrites: %d Bytes: %llu",
57631 - atomic_read(&tcon->stats.cifs_stats.num_writes),
57632 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
57633 (long long)(tcon->bytes_written));
57634 seq_printf(m, "\nFlushes: %d",
57635 - atomic_read(&tcon->stats.cifs_stats.num_flushes));
57636 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
57637 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
57638 - atomic_read(&tcon->stats.cifs_stats.num_locks),
57639 - atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
57640 - atomic_read(&tcon->stats.cifs_stats.num_symlinks));
57641 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
57642 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
57643 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
57644 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
57645 - atomic_read(&tcon->stats.cifs_stats.num_opens),
57646 - atomic_read(&tcon->stats.cifs_stats.num_closes),
57647 - atomic_read(&tcon->stats.cifs_stats.num_deletes));
57648 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
57649 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
57650 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
57651 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
57652 - atomic_read(&tcon->stats.cifs_stats.num_posixopens),
57653 - atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
57654 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
57655 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
57656 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
57657 - atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
57658 - atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
57659 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
57660 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
57661 seq_printf(m, "\nRenames: %d T2 Renames %d",
57662 - atomic_read(&tcon->stats.cifs_stats.num_renames),
57663 - atomic_read(&tcon->stats.cifs_stats.num_t2renames));
57664 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
57665 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
57666 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
57667 - atomic_read(&tcon->stats.cifs_stats.num_ffirst),
57668 - atomic_read(&tcon->stats.cifs_stats.num_fnext),
57669 - atomic_read(&tcon->stats.cifs_stats.num_fclose));
57670 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
57671 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
57672 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
57673 #endif
57674 }
57675
57676 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
57677 index 192f51a..539307e 100644
57678 --- a/fs/cifs/smb2ops.c
57679 +++ b/fs/cifs/smb2ops.c
57680 @@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
57681 #ifdef CONFIG_CIFS_STATS
57682 int i;
57683 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
57684 - atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
57685 - atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
57686 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
57687 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
57688 }
57689 #endif
57690 }
57691 @@ -405,65 +405,65 @@ static void
57692 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
57693 {
57694 #ifdef CONFIG_CIFS_STATS
57695 - atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
57696 - atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
57697 + atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
57698 + atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
57699 seq_printf(m, "\nNegotiates: %d sent %d failed",
57700 - atomic_read(&sent[SMB2_NEGOTIATE_HE]),
57701 - atomic_read(&failed[SMB2_NEGOTIATE_HE]));
57702 + atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
57703 + atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
57704 seq_printf(m, "\nSessionSetups: %d sent %d failed",
57705 - atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
57706 - atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
57707 + atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
57708 + atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
57709 seq_printf(m, "\nLogoffs: %d sent %d failed",
57710 - atomic_read(&sent[SMB2_LOGOFF_HE]),
57711 - atomic_read(&failed[SMB2_LOGOFF_HE]));
57712 + atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
57713 + atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
57714 seq_printf(m, "\nTreeConnects: %d sent %d failed",
57715 - atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
57716 - atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
57717 + atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
57718 + atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
57719 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
57720 - atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
57721 - atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
57722 + atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
57723 + atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
57724 seq_printf(m, "\nCreates: %d sent %d failed",
57725 - atomic_read(&sent[SMB2_CREATE_HE]),
57726 - atomic_read(&failed[SMB2_CREATE_HE]));
57727 + atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
57728 + atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
57729 seq_printf(m, "\nCloses: %d sent %d failed",
57730 - atomic_read(&sent[SMB2_CLOSE_HE]),
57731 - atomic_read(&failed[SMB2_CLOSE_HE]));
57732 + atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
57733 + atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
57734 seq_printf(m, "\nFlushes: %d sent %d failed",
57735 - atomic_read(&sent[SMB2_FLUSH_HE]),
57736 - atomic_read(&failed[SMB2_FLUSH_HE]));
57737 + atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
57738 + atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
57739 seq_printf(m, "\nReads: %d sent %d failed",
57740 - atomic_read(&sent[SMB2_READ_HE]),
57741 - atomic_read(&failed[SMB2_READ_HE]));
57742 + atomic_read_unchecked(&sent[SMB2_READ_HE]),
57743 + atomic_read_unchecked(&failed[SMB2_READ_HE]));
57744 seq_printf(m, "\nWrites: %d sent %d failed",
57745 - atomic_read(&sent[SMB2_WRITE_HE]),
57746 - atomic_read(&failed[SMB2_WRITE_HE]));
57747 + atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
57748 + atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
57749 seq_printf(m, "\nLocks: %d sent %d failed",
57750 - atomic_read(&sent[SMB2_LOCK_HE]),
57751 - atomic_read(&failed[SMB2_LOCK_HE]));
57752 + atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
57753 + atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
57754 seq_printf(m, "\nIOCTLs: %d sent %d failed",
57755 - atomic_read(&sent[SMB2_IOCTL_HE]),
57756 - atomic_read(&failed[SMB2_IOCTL_HE]));
57757 + atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
57758 + atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
57759 seq_printf(m, "\nCancels: %d sent %d failed",
57760 - atomic_read(&sent[SMB2_CANCEL_HE]),
57761 - atomic_read(&failed[SMB2_CANCEL_HE]));
57762 + atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
57763 + atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
57764 seq_printf(m, "\nEchos: %d sent %d failed",
57765 - atomic_read(&sent[SMB2_ECHO_HE]),
57766 - atomic_read(&failed[SMB2_ECHO_HE]));
57767 + atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
57768 + atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
57769 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
57770 - atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
57771 - atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
57772 + atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
57773 + atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
57774 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
57775 - atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
57776 - atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
57777 + atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
57778 + atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
57779 seq_printf(m, "\nQueryInfos: %d sent %d failed",
57780 - atomic_read(&sent[SMB2_QUERY_INFO_HE]),
57781 - atomic_read(&failed[SMB2_QUERY_INFO_HE]));
57782 + atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
57783 + atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
57784 seq_printf(m, "\nSetInfos: %d sent %d failed",
57785 - atomic_read(&sent[SMB2_SET_INFO_HE]),
57786 - atomic_read(&failed[SMB2_SET_INFO_HE]));
57787 + atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
57788 + atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
57789 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
57790 - atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
57791 - atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
57792 + atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
57793 + atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
57794 #endif
57795 }
57796
57797 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
57798 index 787e171..31dcd0a 100644
57799 --- a/fs/cifs/smb2pdu.c
57800 +++ b/fs/cifs/smb2pdu.c
57801 @@ -2093,8 +2093,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
57802 default:
57803 cifs_dbg(VFS, "info level %u isn't supported\n",
57804 srch_inf->info_level);
57805 - rc = -EINVAL;
57806 - goto qdir_exit;
57807 + return -EINVAL;
57808 }
57809
57810 req->FileIndex = cpu_to_le32(index);
57811 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
57812 index 1da168c..8bc7ff6 100644
57813 --- a/fs/coda/cache.c
57814 +++ b/fs/coda/cache.c
57815 @@ -24,7 +24,7 @@
57816 #include "coda_linux.h"
57817 #include "coda_cache.h"
57818
57819 -static atomic_t permission_epoch = ATOMIC_INIT(0);
57820 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
57821
57822 /* replace or extend an acl cache hit */
57823 void coda_cache_enter(struct inode *inode, int mask)
57824 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
57825 struct coda_inode_info *cii = ITOC(inode);
57826
57827 spin_lock(&cii->c_lock);
57828 - cii->c_cached_epoch = atomic_read(&permission_epoch);
57829 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
57830 if (!uid_eq(cii->c_uid, current_fsuid())) {
57831 cii->c_uid = current_fsuid();
57832 cii->c_cached_perm = mask;
57833 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
57834 {
57835 struct coda_inode_info *cii = ITOC(inode);
57836 spin_lock(&cii->c_lock);
57837 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
57838 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
57839 spin_unlock(&cii->c_lock);
57840 }
57841
57842 /* remove all acl caches */
57843 void coda_cache_clear_all(struct super_block *sb)
57844 {
57845 - atomic_inc(&permission_epoch);
57846 + atomic_inc_unchecked(&permission_epoch);
57847 }
57848
57849
57850 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
57851 spin_lock(&cii->c_lock);
57852 hit = (mask & cii->c_cached_perm) == mask &&
57853 uid_eq(cii->c_uid, current_fsuid()) &&
57854 - cii->c_cached_epoch == atomic_read(&permission_epoch);
57855 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
57856 spin_unlock(&cii->c_lock);
57857
57858 return hit;
57859 diff --git a/fs/compat.c b/fs/compat.c
57860 index 6af20de..fec3fbb 100644
57861 --- a/fs/compat.c
57862 +++ b/fs/compat.c
57863 @@ -54,7 +54,7 @@
57864 #include <asm/ioctls.h>
57865 #include "internal.h"
57866
57867 -int compat_log = 1;
57868 +int compat_log = 0;
57869
57870 int compat_printk(const char *fmt, ...)
57871 {
57872 @@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
57873
57874 set_fs(KERNEL_DS);
57875 /* The __user pointer cast is valid because of the set_fs() */
57876 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
57877 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
57878 set_fs(oldfs);
57879 /* truncating is ok because it's a user address */
57880 if (!ret)
57881 @@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
57882 goto out;
57883
57884 ret = -EINVAL;
57885 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
57886 + if (nr_segs > UIO_MAXIOV)
57887 goto out;
57888 if (nr_segs > fast_segs) {
57889 ret = -ENOMEM;
57890 @@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
57891 struct compat_readdir_callback {
57892 struct dir_context ctx;
57893 struct compat_old_linux_dirent __user *dirent;
57894 + struct file * file;
57895 int result;
57896 };
57897
57898 @@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
57899 buf->result = -EOVERFLOW;
57900 return -EOVERFLOW;
57901 }
57902 +
57903 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57904 + return 0;
57905 +
57906 buf->result++;
57907 dirent = buf->dirent;
57908 if (!access_ok(VERIFY_WRITE, dirent,
57909 @@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
57910 if (!f.file)
57911 return -EBADF;
57912
57913 + buf.file = f.file;
57914 error = iterate_dir(f.file, &buf.ctx);
57915 if (buf.result)
57916 error = buf.result;
57917 @@ -901,6 +907,7 @@ struct compat_getdents_callback {
57918 struct dir_context ctx;
57919 struct compat_linux_dirent __user *current_dir;
57920 struct compat_linux_dirent __user *previous;
57921 + struct file * file;
57922 int count;
57923 int error;
57924 };
57925 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
57926 buf->error = -EOVERFLOW;
57927 return -EOVERFLOW;
57928 }
57929 +
57930 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57931 + return 0;
57932 +
57933 dirent = buf->previous;
57934 if (dirent) {
57935 if (__put_user(offset, &dirent->d_off))
57936 @@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
57937 if (!f.file)
57938 return -EBADF;
57939
57940 + buf.file = f.file;
57941 error = iterate_dir(f.file, &buf.ctx);
57942 if (error >= 0)
57943 error = buf.error;
57944 @@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
57945 struct dir_context ctx;
57946 struct linux_dirent64 __user *current_dir;
57947 struct linux_dirent64 __user *previous;
57948 + struct file * file;
57949 int count;
57950 int error;
57951 };
57952 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
57953 buf->error = -EINVAL; /* only used if we fail.. */
57954 if (reclen > buf->count)
57955 return -EINVAL;
57956 +
57957 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57958 + return 0;
57959 +
57960 dirent = buf->previous;
57961
57962 if (dirent) {
57963 @@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
57964 if (!f.file)
57965 return -EBADF;
57966
57967 + buf.file = f.file;
57968 error = iterate_dir(f.file, &buf.ctx);
57969 if (error >= 0)
57970 error = buf.error;
57971 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
57972 index a81147e..20bf2b5 100644
57973 --- a/fs/compat_binfmt_elf.c
57974 +++ b/fs/compat_binfmt_elf.c
57975 @@ -30,11 +30,13 @@
57976 #undef elf_phdr
57977 #undef elf_shdr
57978 #undef elf_note
57979 +#undef elf_dyn
57980 #undef elf_addr_t
57981 #define elfhdr elf32_hdr
57982 #define elf_phdr elf32_phdr
57983 #define elf_shdr elf32_shdr
57984 #define elf_note elf32_note
57985 +#define elf_dyn Elf32_Dyn
57986 #define elf_addr_t Elf32_Addr
57987
57988 /*
57989 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
57990 index dc52e13..ec61057 100644
57991 --- a/fs/compat_ioctl.c
57992 +++ b/fs/compat_ioctl.c
57993 @@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
57994 return -EFAULT;
57995 if (__get_user(udata, &ss32->iomem_base))
57996 return -EFAULT;
57997 - ss.iomem_base = compat_ptr(udata);
57998 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
57999 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
58000 __get_user(ss.port_high, &ss32->port_high))
58001 return -EFAULT;
58002 @@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
58003 for (i = 0; i < nmsgs; i++) {
58004 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
58005 return -EFAULT;
58006 - if (get_user(datap, &umsgs[i].buf) ||
58007 - put_user(compat_ptr(datap), &tmsgs[i].buf))
58008 + if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
58009 + put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
58010 return -EFAULT;
58011 }
58012 return sys_ioctl(fd, cmd, (unsigned long)tdata);
58013 @@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(struct file *file,
58014 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
58015 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
58016 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
58017 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
58018 + copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
58019 return -EFAULT;
58020
58021 return ioctl_preallocate(file, p);
58022 @@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
58023 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
58024 {
58025 unsigned int a, b;
58026 - a = *(unsigned int *)p;
58027 - b = *(unsigned int *)q;
58028 + a = *(const unsigned int *)p;
58029 + b = *(const unsigned int *)q;
58030 if (a > b)
58031 return 1;
58032 if (a < b)
58033 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
58034 index e081acb..911df21 100644
58035 --- a/fs/configfs/dir.c
58036 +++ b/fs/configfs/dir.c
58037 @@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
58038 }
58039 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
58040 struct configfs_dirent *next;
58041 - const char *name;
58042 + const unsigned char * name;
58043 + char d_name[sizeof(next->s_dentry->d_iname)];
58044 int len;
58045 struct inode *inode = NULL;
58046
58047 @@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
58048 continue;
58049
58050 name = configfs_get_name(next);
58051 - len = strlen(name);
58052 + if (next->s_dentry && name == next->s_dentry->d_iname) {
58053 + len = next->s_dentry->d_name.len;
58054 + memcpy(d_name, name, len);
58055 + name = d_name;
58056 + } else
58057 + len = strlen(name);
58058
58059 /*
58060 * We'll have a dentry and an inode for
58061 diff --git a/fs/coredump.c b/fs/coredump.c
58062 index bc3fbcd..6031650 100644
58063 --- a/fs/coredump.c
58064 +++ b/fs/coredump.c
58065 @@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct file *file)
58066 struct pipe_inode_info *pipe = file->private_data;
58067
58068 pipe_lock(pipe);
58069 - pipe->readers++;
58070 - pipe->writers--;
58071 + atomic_inc(&pipe->readers);
58072 + atomic_dec(&pipe->writers);
58073 wake_up_interruptible_sync(&pipe->wait);
58074 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58075 pipe_unlock(pipe);
58076 @@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct file *file)
58077 * We actually want wait_event_freezable() but then we need
58078 * to clear TIF_SIGPENDING and improve dump_interrupted().
58079 */
58080 - wait_event_interruptible(pipe->wait, pipe->readers == 1);
58081 + wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
58082
58083 pipe_lock(pipe);
58084 - pipe->readers--;
58085 - pipe->writers++;
58086 + atomic_dec(&pipe->readers);
58087 + atomic_inc(&pipe->writers);
58088 pipe_unlock(pipe);
58089 }
58090
58091 @@ -499,7 +499,9 @@ void do_coredump(const siginfo_t *siginfo)
58092 struct files_struct *displaced;
58093 bool need_nonrelative = false;
58094 bool core_dumped = false;
58095 - static atomic_t core_dump_count = ATOMIC_INIT(0);
58096 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
58097 + long signr = siginfo->si_signo;
58098 + int dumpable;
58099 struct coredump_params cprm = {
58100 .siginfo = siginfo,
58101 .regs = signal_pt_regs(),
58102 @@ -512,12 +514,17 @@ void do_coredump(const siginfo_t *siginfo)
58103 .mm_flags = mm->flags,
58104 };
58105
58106 - audit_core_dumps(siginfo->si_signo);
58107 + audit_core_dumps(signr);
58108 +
58109 + dumpable = __get_dumpable(cprm.mm_flags);
58110 +
58111 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
58112 + gr_handle_brute_attach(dumpable);
58113
58114 binfmt = mm->binfmt;
58115 if (!binfmt || !binfmt->core_dump)
58116 goto fail;
58117 - if (!__get_dumpable(cprm.mm_flags))
58118 + if (!dumpable)
58119 goto fail;
58120
58121 cred = prepare_creds();
58122 @@ -536,7 +543,7 @@ void do_coredump(const siginfo_t *siginfo)
58123 need_nonrelative = true;
58124 }
58125
58126 - retval = coredump_wait(siginfo->si_signo, &core_state);
58127 + retval = coredump_wait(signr, &core_state);
58128 if (retval < 0)
58129 goto fail_creds;
58130
58131 @@ -579,7 +586,7 @@ void do_coredump(const siginfo_t *siginfo)
58132 }
58133 cprm.limit = RLIM_INFINITY;
58134
58135 - dump_count = atomic_inc_return(&core_dump_count);
58136 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
58137 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
58138 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
58139 task_tgid_vnr(current), current->comm);
58140 @@ -611,6 +618,8 @@ void do_coredump(const siginfo_t *siginfo)
58141 } else {
58142 struct inode *inode;
58143
58144 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
58145 +
58146 if (cprm.limit < binfmt->min_coredump)
58147 goto fail_unlock;
58148
58149 @@ -669,7 +678,7 @@ close_fail:
58150 filp_close(cprm.file, NULL);
58151 fail_dropcount:
58152 if (ispipe)
58153 - atomic_dec(&core_dump_count);
58154 + atomic_dec_unchecked(&core_dump_count);
58155 fail_unlock:
58156 kfree(cn.corename);
58157 coredump_finish(mm, core_dumped);
58158 @@ -690,6 +699,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
58159 struct file *file = cprm->file;
58160 loff_t pos = file->f_pos;
58161 ssize_t n;
58162 +
58163 + gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
58164 if (cprm->written + nr > cprm->limit)
58165 return 0;
58166 while (nr) {
58167 diff --git a/fs/dcache.c b/fs/dcache.c
58168 index fdbe230..d852932 100644
58169 --- a/fs/dcache.c
58170 +++ b/fs/dcache.c
58171 @@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
58172 */
58173 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
58174 if (name->len > DNAME_INLINE_LEN-1) {
58175 - dname = kmalloc(name->len + 1, GFP_KERNEL);
58176 + dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
58177 if (!dname) {
58178 kmem_cache_free(dentry_cache, dentry);
58179 return NULL;
58180 @@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
58181 u32 dlen = ACCESS_ONCE(name->len);
58182 char *p;
58183
58184 - if (*buflen < dlen + 1)
58185 - return -ENAMETOOLONG;
58186 *buflen -= dlen + 1;
58187 + if (*buflen < 0)
58188 + return -ENAMETOOLONG;
58189 p = *buffer -= dlen + 1;
58190 *p++ = '/';
58191 while (dlen--) {
58192 @@ -3428,7 +3428,8 @@ void __init vfs_caches_init(unsigned long mempages)
58193 mempages -= reserve;
58194
58195 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
58196 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
58197 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
58198 + SLAB_NO_SANITIZE, NULL);
58199
58200 dcache_init();
58201 inode_init();
58202 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
58203 index 9c0444c..628490c 100644
58204 --- a/fs/debugfs/inode.c
58205 +++ b/fs/debugfs/inode.c
58206 @@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
58207 */
58208 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
58209 {
58210 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
58211 + return __create_file(name, S_IFDIR | S_IRWXU,
58212 +#else
58213 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
58214 +#endif
58215 parent, NULL, NULL);
58216 }
58217 EXPORT_SYMBOL_GPL(debugfs_create_dir);
58218 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
58219 index c36c448..fc96710 100644
58220 --- a/fs/ecryptfs/inode.c
58221 +++ b/fs/ecryptfs/inode.c
58222 @@ -675,7 +675,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
58223 old_fs = get_fs();
58224 set_fs(get_ds());
58225 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
58226 - (char __user *)lower_buf,
58227 + (char __force_user *)lower_buf,
58228 PATH_MAX);
58229 set_fs(old_fs);
58230 if (rc < 0)
58231 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
58232 index e4141f2..d8263e8 100644
58233 --- a/fs/ecryptfs/miscdev.c
58234 +++ b/fs/ecryptfs/miscdev.c
58235 @@ -304,7 +304,7 @@ check_list:
58236 goto out_unlock_msg_ctx;
58237 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
58238 if (msg_ctx->msg) {
58239 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
58240 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
58241 goto out_unlock_msg_ctx;
58242 i += packet_length_size;
58243 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
58244 diff --git a/fs/exec.c b/fs/exec.c
58245 index 7ea097f..0158d8a 100644
58246 --- a/fs/exec.c
58247 +++ b/fs/exec.c
58248 @@ -55,8 +55,20 @@
58249 #include <linux/pipe_fs_i.h>
58250 #include <linux/oom.h>
58251 #include <linux/compat.h>
58252 +#include <linux/random.h>
58253 +#include <linux/seq_file.h>
58254 +#include <linux/coredump.h>
58255 +#include <linux/mman.h>
58256 +
58257 +#ifdef CONFIG_PAX_REFCOUNT
58258 +#include <linux/kallsyms.h>
58259 +#include <linux/kdebug.h>
58260 +#endif
58261 +
58262 +#include <trace/events/fs.h>
58263
58264 #include <asm/uaccess.h>
58265 +#include <asm/sections.h>
58266 #include <asm/mmu_context.h>
58267 #include <asm/tlb.h>
58268
58269 @@ -66,19 +78,34 @@
58270
58271 #include <trace/events/sched.h>
58272
58273 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58274 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
58275 +{
58276 + pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
58277 +}
58278 +#endif
58279 +
58280 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
58281 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
58282 +EXPORT_SYMBOL(pax_set_initial_flags_func);
58283 +#endif
58284 +
58285 int suid_dumpable = 0;
58286
58287 static LIST_HEAD(formats);
58288 static DEFINE_RWLOCK(binfmt_lock);
58289
58290 +extern int gr_process_kernel_exec_ban(void);
58291 +extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
58292 +
58293 void __register_binfmt(struct linux_binfmt * fmt, int insert)
58294 {
58295 BUG_ON(!fmt);
58296 if (WARN_ON(!fmt->load_binary))
58297 return;
58298 write_lock(&binfmt_lock);
58299 - insert ? list_add(&fmt->lh, &formats) :
58300 - list_add_tail(&fmt->lh, &formats);
58301 + insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
58302 + pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
58303 write_unlock(&binfmt_lock);
58304 }
58305
58306 @@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
58307 void unregister_binfmt(struct linux_binfmt * fmt)
58308 {
58309 write_lock(&binfmt_lock);
58310 - list_del(&fmt->lh);
58311 + pax_list_del((struct list_head *)&fmt->lh);
58312 write_unlock(&binfmt_lock);
58313 }
58314
58315 @@ -181,18 +208,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
58316 int write)
58317 {
58318 struct page *page;
58319 - int ret;
58320
58321 -#ifdef CONFIG_STACK_GROWSUP
58322 - if (write) {
58323 - ret = expand_downwards(bprm->vma, pos);
58324 - if (ret < 0)
58325 - return NULL;
58326 - }
58327 -#endif
58328 - ret = get_user_pages(current, bprm->mm, pos,
58329 - 1, write, 1, &page, NULL);
58330 - if (ret <= 0)
58331 + if (0 > expand_downwards(bprm->vma, pos))
58332 + return NULL;
58333 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
58334 return NULL;
58335
58336 if (write) {
58337 @@ -208,6 +227,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
58338 if (size <= ARG_MAX)
58339 return page;
58340
58341 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58342 + // only allow 512KB for argv+env on suid/sgid binaries
58343 + // to prevent easy ASLR exhaustion
58344 + if (((!uid_eq(bprm->cred->euid, current_euid())) ||
58345 + (!gid_eq(bprm->cred->egid, current_egid()))) &&
58346 + (size > (512 * 1024))) {
58347 + put_page(page);
58348 + return NULL;
58349 + }
58350 +#endif
58351 +
58352 /*
58353 * Limit to 1/4-th the stack size for the argv+env strings.
58354 * This ensures that:
58355 @@ -267,6 +297,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
58356 vma->vm_end = STACK_TOP_MAX;
58357 vma->vm_start = vma->vm_end - PAGE_SIZE;
58358 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
58359 +
58360 +#ifdef CONFIG_PAX_SEGMEXEC
58361 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
58362 +#endif
58363 +
58364 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
58365 INIT_LIST_HEAD(&vma->anon_vma_chain);
58366
58367 @@ -277,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
58368 mm->stack_vm = mm->total_vm = 1;
58369 up_write(&mm->mmap_sem);
58370 bprm->p = vma->vm_end - sizeof(void *);
58371 +
58372 +#ifdef CONFIG_PAX_RANDUSTACK
58373 + if (randomize_va_space)
58374 + bprm->p ^= prandom_u32() & ~PAGE_MASK;
58375 +#endif
58376 +
58377 return 0;
58378 err:
58379 up_write(&mm->mmap_sem);
58380 @@ -397,7 +438,7 @@ struct user_arg_ptr {
58381 } ptr;
58382 };
58383
58384 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58385 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58386 {
58387 const char __user *native;
58388
58389 @@ -406,14 +447,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58390 compat_uptr_t compat;
58391
58392 if (get_user(compat, argv.ptr.compat + nr))
58393 - return ERR_PTR(-EFAULT);
58394 + return (const char __force_user *)ERR_PTR(-EFAULT);
58395
58396 return compat_ptr(compat);
58397 }
58398 #endif
58399
58400 if (get_user(native, argv.ptr.native + nr))
58401 - return ERR_PTR(-EFAULT);
58402 + return (const char __force_user *)ERR_PTR(-EFAULT);
58403
58404 return native;
58405 }
58406 @@ -432,7 +473,7 @@ static int count(struct user_arg_ptr argv, int max)
58407 if (!p)
58408 break;
58409
58410 - if (IS_ERR(p))
58411 + if (IS_ERR((const char __force_kernel *)p))
58412 return -EFAULT;
58413
58414 if (i >= max)
58415 @@ -467,7 +508,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
58416
58417 ret = -EFAULT;
58418 str = get_user_arg_ptr(argv, argc);
58419 - if (IS_ERR(str))
58420 + if (IS_ERR((const char __force_kernel *)str))
58421 goto out;
58422
58423 len = strnlen_user(str, MAX_ARG_STRLEN);
58424 @@ -549,7 +590,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
58425 int r;
58426 mm_segment_t oldfs = get_fs();
58427 struct user_arg_ptr argv = {
58428 - .ptr.native = (const char __user *const __user *)__argv,
58429 + .ptr.native = (const char __user * const __force_user *)__argv,
58430 };
58431
58432 set_fs(KERNEL_DS);
58433 @@ -584,7 +625,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58434 unsigned long new_end = old_end - shift;
58435 struct mmu_gather tlb;
58436
58437 - BUG_ON(new_start > new_end);
58438 + if (new_start >= new_end || new_start < mmap_min_addr)
58439 + return -ENOMEM;
58440
58441 /*
58442 * ensure there are no vmas between where we want to go
58443 @@ -593,6 +635,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58444 if (vma != find_vma(mm, new_start))
58445 return -EFAULT;
58446
58447 +#ifdef CONFIG_PAX_SEGMEXEC
58448 + BUG_ON(pax_find_mirror_vma(vma));
58449 +#endif
58450 +
58451 /*
58452 * cover the whole range: [new_start, old_end)
58453 */
58454 @@ -673,10 +719,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
58455 stack_top = arch_align_stack(stack_top);
58456 stack_top = PAGE_ALIGN(stack_top);
58457
58458 - if (unlikely(stack_top < mmap_min_addr) ||
58459 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
58460 - return -ENOMEM;
58461 -
58462 stack_shift = vma->vm_end - stack_top;
58463
58464 bprm->p -= stack_shift;
58465 @@ -688,8 +730,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
58466 bprm->exec -= stack_shift;
58467
58468 down_write(&mm->mmap_sem);
58469 +
58470 + /* Move stack pages down in memory. */
58471 + if (stack_shift) {
58472 + ret = shift_arg_pages(vma, stack_shift);
58473 + if (ret)
58474 + goto out_unlock;
58475 + }
58476 +
58477 vm_flags = VM_STACK_FLAGS;
58478
58479 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58480 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58481 + vm_flags &= ~VM_EXEC;
58482 +
58483 +#ifdef CONFIG_PAX_MPROTECT
58484 + if (mm->pax_flags & MF_PAX_MPROTECT)
58485 + vm_flags &= ~VM_MAYEXEC;
58486 +#endif
58487 +
58488 + }
58489 +#endif
58490 +
58491 /*
58492 * Adjust stack execute permissions; explicitly enable for
58493 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
58494 @@ -708,13 +770,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
58495 goto out_unlock;
58496 BUG_ON(prev != vma);
58497
58498 - /* Move stack pages down in memory. */
58499 - if (stack_shift) {
58500 - ret = shift_arg_pages(vma, stack_shift);
58501 - if (ret)
58502 - goto out_unlock;
58503 - }
58504 -
58505 /* mprotect_fixup is overkill to remove the temporary stack flags */
58506 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
58507
58508 @@ -738,6 +793,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
58509 #endif
58510 current->mm->start_stack = bprm->p;
58511 ret = expand_stack(vma, stack_base);
58512 +
58513 +#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
58514 + if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
58515 + unsigned long size;
58516 + vm_flags_t vm_flags;
58517 +
58518 + size = STACK_TOP - vma->vm_end;
58519 + vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
58520 +
58521 + ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
58522 +
58523 +#ifdef CONFIG_X86
58524 + if (!ret) {
58525 + size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
58526 + ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
58527 + }
58528 +#endif
58529 +
58530 + }
58531 +#endif
58532 +
58533 if (ret)
58534 ret = -EFAULT;
58535
58536 @@ -774,6 +850,8 @@ struct file *open_exec(const char *name)
58537
58538 fsnotify_open(file);
58539
58540 + trace_open_exec(name);
58541 +
58542 err = deny_write_access(file);
58543 if (err)
58544 goto exit;
58545 @@ -797,7 +875,7 @@ int kernel_read(struct file *file, loff_t offset,
58546 old_fs = get_fs();
58547 set_fs(get_ds());
58548 /* The cast to a user pointer is valid due to the set_fs() */
58549 - result = vfs_read(file, (void __user *)addr, count, &pos);
58550 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
58551 set_fs(old_fs);
58552 return result;
58553 }
58554 @@ -1253,7 +1331,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
58555 }
58556 rcu_read_unlock();
58557
58558 - if (p->fs->users > n_fs) {
58559 + if (atomic_read(&p->fs->users) > n_fs) {
58560 bprm->unsafe |= LSM_UNSAFE_SHARE;
58561 } else {
58562 res = -EAGAIN;
58563 @@ -1443,6 +1521,31 @@ static int exec_binprm(struct linux_binprm *bprm)
58564 return ret;
58565 }
58566
58567 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58568 +static DEFINE_PER_CPU(u64, exec_counter);
58569 +static int __init init_exec_counters(void)
58570 +{
58571 + unsigned int cpu;
58572 +
58573 + for_each_possible_cpu(cpu) {
58574 + per_cpu(exec_counter, cpu) = (u64)cpu;
58575 + }
58576 +
58577 + return 0;
58578 +}
58579 +early_initcall(init_exec_counters);
58580 +static inline void increment_exec_counter(void)
58581 +{
58582 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
58583 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
58584 +}
58585 +#else
58586 +static inline void increment_exec_counter(void) {}
58587 +#endif
58588 +
58589 +extern void gr_handle_exec_args(struct linux_binprm *bprm,
58590 + struct user_arg_ptr argv);
58591 +
58592 /*
58593 * sys_execve() executes a new program.
58594 */
58595 @@ -1450,12 +1553,19 @@ static int do_execve_common(const char *filename,
58596 struct user_arg_ptr argv,
58597 struct user_arg_ptr envp)
58598 {
58599 +#ifdef CONFIG_GRKERNSEC
58600 + struct file *old_exec_file;
58601 + struct acl_subject_label *old_acl;
58602 + struct rlimit old_rlim[RLIM_NLIMITS];
58603 +#endif
58604 struct linux_binprm *bprm;
58605 struct file *file;
58606 struct files_struct *displaced;
58607 bool clear_in_exec;
58608 int retval;
58609
58610 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
58611 +
58612 /*
58613 * We move the actual failure in case of RLIMIT_NPROC excess from
58614 * set*uid() to execve() because too many poorly written programs
58615 @@ -1496,12 +1606,22 @@ static int do_execve_common(const char *filename,
58616 if (IS_ERR(file))
58617 goto out_unmark;
58618
58619 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
58620 + retval = -EPERM;
58621 + goto out_file;
58622 + }
58623 +
58624 sched_exec();
58625
58626 bprm->file = file;
58627 bprm->filename = filename;
58628 bprm->interp = filename;
58629
58630 + if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
58631 + retval = -EACCES;
58632 + goto out_file;
58633 + }
58634 +
58635 retval = bprm_mm_init(bprm);
58636 if (retval)
58637 goto out_file;
58638 @@ -1518,24 +1638,70 @@ static int do_execve_common(const char *filename,
58639 if (retval < 0)
58640 goto out;
58641
58642 +#ifdef CONFIG_GRKERNSEC
58643 + old_acl = current->acl;
58644 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
58645 + old_exec_file = current->exec_file;
58646 + get_file(file);
58647 + current->exec_file = file;
58648 +#endif
58649 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58650 + /* limit suid stack to 8MB
58651 + * we saved the old limits above and will restore them if this exec fails
58652 + */
58653 + if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
58654 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
58655 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
58656 +#endif
58657 +
58658 + if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
58659 + retval = -EPERM;
58660 + goto out_fail;
58661 + }
58662 +
58663 + if (!gr_tpe_allow(file)) {
58664 + retval = -EACCES;
58665 + goto out_fail;
58666 + }
58667 +
58668 + if (gr_check_crash_exec(file)) {
58669 + retval = -EACCES;
58670 + goto out_fail;
58671 + }
58672 +
58673 + retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
58674 + bprm->unsafe);
58675 + if (retval < 0)
58676 + goto out_fail;
58677 +
58678 retval = copy_strings_kernel(1, &bprm->filename, bprm);
58679 if (retval < 0)
58680 - goto out;
58681 + goto out_fail;
58682
58683 bprm->exec = bprm->p;
58684 retval = copy_strings(bprm->envc, envp, bprm);
58685 if (retval < 0)
58686 - goto out;
58687 + goto out_fail;
58688
58689 retval = copy_strings(bprm->argc, argv, bprm);
58690 if (retval < 0)
58691 - goto out;
58692 + goto out_fail;
58693 +
58694 + gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
58695 +
58696 + gr_handle_exec_args(bprm, argv);
58697
58698 retval = exec_binprm(bprm);
58699 if (retval < 0)
58700 - goto out;
58701 + goto out_fail;
58702 +#ifdef CONFIG_GRKERNSEC
58703 + if (old_exec_file)
58704 + fput(old_exec_file);
58705 +#endif
58706
58707 /* execve succeeded */
58708 +
58709 + increment_exec_counter();
58710 current->fs->in_exec = 0;
58711 current->in_execve = 0;
58712 acct_update_integrals(current);
58713 @@ -1545,6 +1711,14 @@ static int do_execve_common(const char *filename,
58714 put_files_struct(displaced);
58715 return retval;
58716
58717 +out_fail:
58718 +#ifdef CONFIG_GRKERNSEC
58719 + current->acl = old_acl;
58720 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
58721 + fput(current->exec_file);
58722 + current->exec_file = old_exec_file;
58723 +#endif
58724 +
58725 out:
58726 if (bprm->mm) {
58727 acct_arg_size(bprm, 0);
58728 @@ -1699,3 +1873,295 @@ asmlinkage long compat_sys_execve(const char __user * filename,
58729 return error;
58730 }
58731 #endif
58732 +
58733 +int pax_check_flags(unsigned long *flags)
58734 +{
58735 + int retval = 0;
58736 +
58737 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
58738 + if (*flags & MF_PAX_SEGMEXEC)
58739 + {
58740 + *flags &= ~MF_PAX_SEGMEXEC;
58741 + retval = -EINVAL;
58742 + }
58743 +#endif
58744 +
58745 + if ((*flags & MF_PAX_PAGEEXEC)
58746 +
58747 +#ifdef CONFIG_PAX_PAGEEXEC
58748 + && (*flags & MF_PAX_SEGMEXEC)
58749 +#endif
58750 +
58751 + )
58752 + {
58753 + *flags &= ~MF_PAX_PAGEEXEC;
58754 + retval = -EINVAL;
58755 + }
58756 +
58757 + if ((*flags & MF_PAX_MPROTECT)
58758 +
58759 +#ifdef CONFIG_PAX_MPROTECT
58760 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
58761 +#endif
58762 +
58763 + )
58764 + {
58765 + *flags &= ~MF_PAX_MPROTECT;
58766 + retval = -EINVAL;
58767 + }
58768 +
58769 + if ((*flags & MF_PAX_EMUTRAMP)
58770 +
58771 +#ifdef CONFIG_PAX_EMUTRAMP
58772 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
58773 +#endif
58774 +
58775 + )
58776 + {
58777 + *flags &= ~MF_PAX_EMUTRAMP;
58778 + retval = -EINVAL;
58779 + }
58780 +
58781 + return retval;
58782 +}
58783 +
58784 +EXPORT_SYMBOL(pax_check_flags);
58785 +
58786 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58787 +char *pax_get_path(const struct path *path, char *buf, int buflen)
58788 +{
58789 + char *pathname = d_path(path, buf, buflen);
58790 +
58791 + if (IS_ERR(pathname))
58792 + goto toolong;
58793 +
58794 + pathname = mangle_path(buf, pathname, "\t\n\\");
58795 + if (!pathname)
58796 + goto toolong;
58797 +
58798 + *pathname = 0;
58799 + return buf;
58800 +
58801 +toolong:
58802 + return "<path too long>";
58803 +}
58804 +EXPORT_SYMBOL(pax_get_path);
58805 +
58806 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
58807 +{
58808 + struct task_struct *tsk = current;
58809 + struct mm_struct *mm = current->mm;
58810 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
58811 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
58812 + char *path_exec = NULL;
58813 + char *path_fault = NULL;
58814 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
58815 + siginfo_t info = { };
58816 +
58817 + if (buffer_exec && buffer_fault) {
58818 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
58819 +
58820 + down_read(&mm->mmap_sem);
58821 + vma = mm->mmap;
58822 + while (vma && (!vma_exec || !vma_fault)) {
58823 + if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
58824 + vma_exec = vma;
58825 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
58826 + vma_fault = vma;
58827 + vma = vma->vm_next;
58828 + }
58829 + if (vma_exec)
58830 + path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
58831 + if (vma_fault) {
58832 + start = vma_fault->vm_start;
58833 + end = vma_fault->vm_end;
58834 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
58835 + if (vma_fault->vm_file)
58836 + path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
58837 + else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
58838 + path_fault = "<heap>";
58839 + else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
58840 + path_fault = "<stack>";
58841 + else
58842 + path_fault = "<anonymous mapping>";
58843 + }
58844 + up_read(&mm->mmap_sem);
58845 + }
58846 + if (tsk->signal->curr_ip)
58847 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
58848 + else
58849 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
58850 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
58851 + from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
58852 + free_page((unsigned long)buffer_exec);
58853 + free_page((unsigned long)buffer_fault);
58854 + pax_report_insns(regs, pc, sp);
58855 + info.si_signo = SIGKILL;
58856 + info.si_errno = 0;
58857 + info.si_code = SI_KERNEL;
58858 + info.si_pid = 0;
58859 + info.si_uid = 0;
58860 + do_coredump(&info);
58861 +}
58862 +#endif
58863 +
58864 +#ifdef CONFIG_PAX_REFCOUNT
58865 +void pax_report_refcount_overflow(struct pt_regs *regs)
58866 +{
58867 + if (current->signal->curr_ip)
58868 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
58869 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
58870 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
58871 + else
58872 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
58873 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
58874 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
58875 + preempt_disable();
58876 + show_regs(regs);
58877 + preempt_enable();
58878 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
58879 +}
58880 +#endif
58881 +
58882 +#ifdef CONFIG_PAX_USERCOPY
58883 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
58884 +static noinline int check_stack_object(const void *obj, unsigned long len)
58885 +{
58886 + const void * const stack = task_stack_page(current);
58887 + const void * const stackend = stack + THREAD_SIZE;
58888 +
58889 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58890 + const void *frame = NULL;
58891 + const void *oldframe;
58892 +#endif
58893 +
58894 + if (obj + len < obj)
58895 + return -1;
58896 +
58897 + if (obj + len <= stack || stackend <= obj)
58898 + return 0;
58899 +
58900 + if (obj < stack || stackend < obj + len)
58901 + return -1;
58902 +
58903 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58904 + oldframe = __builtin_frame_address(1);
58905 + if (oldframe)
58906 + frame = __builtin_frame_address(2);
58907 + /*
58908 + low ----------------------------------------------> high
58909 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
58910 + ^----------------^
58911 + allow copies only within here
58912 + */
58913 + while (stack <= frame && frame < stackend) {
58914 + /* if obj + len extends past the last frame, this
58915 + check won't pass and the next frame will be 0,
58916 + causing us to bail out and correctly report
58917 + the copy as invalid
58918 + */
58919 + if (obj + len <= frame)
58920 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
58921 + oldframe = frame;
58922 + frame = *(const void * const *)frame;
58923 + }
58924 + return -1;
58925 +#else
58926 + return 1;
58927 +#endif
58928 +}
58929 +
58930 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
58931 +{
58932 + if (current->signal->curr_ip)
58933 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58934 + &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58935 + else
58936 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58937 + to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58938 + dump_stack();
58939 + gr_handle_kernel_exploit();
58940 + do_group_exit(SIGKILL);
58941 +}
58942 +#endif
58943 +
58944 +#ifdef CONFIG_PAX_USERCOPY
58945 +static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
58946 +{
58947 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
58948 + unsigned long textlow = ktla_ktva((unsigned long)_stext);
58949 +#ifdef CONFIG_MODULES
58950 + unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
58951 +#else
58952 + unsigned long texthigh = ktla_ktva((unsigned long)_etext);
58953 +#endif
58954 +
58955 +#else
58956 + unsigned long textlow = (unsigned long)_stext;
58957 + unsigned long texthigh = (unsigned long)_etext;
58958 +
58959 +#ifdef CONFIG_X86_64
58960 + /* check against linear mapping as well */
58961 + if (high > (unsigned long)__va(__pa(textlow)) &&
58962 + low <= (unsigned long)__va(__pa(texthigh)))
58963 + return true;
58964 +#endif
58965 +
58966 +#endif
58967 +
58968 + if (high <= textlow || low > texthigh)
58969 + return false;
58970 + else
58971 + return true;
58972 +}
58973 +#endif
58974 +
58975 +void __check_object_size(const void *ptr, unsigned long n, bool to_user)
58976 +{
58977 +
58978 +#ifdef CONFIG_PAX_USERCOPY
58979 + const char *type;
58980 +
58981 + if (!n)
58982 + return;
58983 +
58984 + type = check_heap_object(ptr, n);
58985 + if (!type) {
58986 + int ret = check_stack_object(ptr, n);
58987 + if (ret == 1 || ret == 2)
58988 + return;
58989 + if (ret == 0) {
58990 + if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
58991 + type = "<kernel text>";
58992 + else
58993 + return;
58994 + } else
58995 + type = "<process stack>";
58996 + }
58997 +
58998 + pax_report_usercopy(ptr, n, to_user, type);
58999 +#endif
59000 +
59001 +}
59002 +EXPORT_SYMBOL(__check_object_size);
59003 +
59004 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
59005 +void pax_track_stack(void)
59006 +{
59007 + unsigned long sp = (unsigned long)&sp;
59008 + if (sp < current_thread_info()->lowest_stack &&
59009 + sp > (unsigned long)task_stack_page(current))
59010 + current_thread_info()->lowest_stack = sp;
59011 +}
59012 +EXPORT_SYMBOL(pax_track_stack);
59013 +#endif
59014 +
59015 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
59016 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
59017 +{
59018 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
59019 + dump_stack();
59020 + do_group_exit(SIGKILL);
59021 +}
59022 +EXPORT_SYMBOL(report_size_overflow);
59023 +#endif
59024 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
59025 index 9f9992b..8b59411 100644
59026 --- a/fs/ext2/balloc.c
59027 +++ b/fs/ext2/balloc.c
59028 @@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
59029
59030 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
59031 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
59032 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
59033 + if (free_blocks < root_blocks + 1 &&
59034 !uid_eq(sbi->s_resuid, current_fsuid()) &&
59035 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
59036 - !in_group_p (sbi->s_resgid))) {
59037 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
59038 return 0;
59039 }
59040 return 1;
59041 diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
59042 index 2d7557d..14e38f94 100644
59043 --- a/fs/ext2/xattr.c
59044 +++ b/fs/ext2/xattr.c
59045 @@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
59046 struct buffer_head *bh = NULL;
59047 struct ext2_xattr_entry *entry;
59048 char *end;
59049 - size_t rest = buffer_size;
59050 + size_t rest = buffer_size, total_size = 0;
59051 int error;
59052
59053 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
59054 @@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
59055 buffer += size;
59056 }
59057 rest -= size;
59058 + total_size += size;
59059 }
59060 }
59061 - error = buffer_size - rest; /* total size */
59062 + error = total_size;
59063
59064 cleanup:
59065 brelse(bh);
59066 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
59067 index 22548f5..41521d8 100644
59068 --- a/fs/ext3/balloc.c
59069 +++ b/fs/ext3/balloc.c
59070 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
59071
59072 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
59073 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
59074 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
59075 + if (free_blocks < root_blocks + 1 &&
59076 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
59077 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
59078 - !in_group_p (sbi->s_resgid))) {
59079 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
59080 return 0;
59081 }
59082 return 1;
59083 diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
59084 index b1fc963..881228c 100644
59085 --- a/fs/ext3/xattr.c
59086 +++ b/fs/ext3/xattr.c
59087 @@ -330,7 +330,7 @@ static int
59088 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
59089 char *buffer, size_t buffer_size)
59090 {
59091 - size_t rest = buffer_size;
59092 + size_t rest = buffer_size, total_size = 0;
59093
59094 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
59095 const struct xattr_handler *handler =
59096 @@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
59097 buffer += size;
59098 }
59099 rest -= size;
59100 + total_size += size;
59101 }
59102 }
59103 - return buffer_size - rest;
59104 + return total_size;
59105 }
59106
59107 static int
59108 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
59109 index 6ea7b14..8fa16d9 100644
59110 --- a/fs/ext4/balloc.c
59111 +++ b/fs/ext4/balloc.c
59112 @@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
59113 /* Hm, nope. Are (enough) root reserved clusters available? */
59114 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
59115 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
59116 - capable(CAP_SYS_RESOURCE) ||
59117 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
59118 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
59119 + capable_nolog(CAP_SYS_RESOURCE)) {
59120
59121 if (free_clusters >= (nclusters + dirty_clusters +
59122 resv_clusters))
59123 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
59124 index d3a534f..242c50a 100644
59125 --- a/fs/ext4/ext4.h
59126 +++ b/fs/ext4/ext4.h
59127 @@ -1269,19 +1269,19 @@ struct ext4_sb_info {
59128 unsigned long s_mb_last_start;
59129
59130 /* stats for buddy allocator */
59131 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
59132 - atomic_t s_bal_success; /* we found long enough chunks */
59133 - atomic_t s_bal_allocated; /* in blocks */
59134 - atomic_t s_bal_ex_scanned; /* total extents scanned */
59135 - atomic_t s_bal_goals; /* goal hits */
59136 - atomic_t s_bal_breaks; /* too long searches */
59137 - atomic_t s_bal_2orders; /* 2^order hits */
59138 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
59139 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
59140 + atomic_unchecked_t s_bal_allocated; /* in blocks */
59141 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
59142 + atomic_unchecked_t s_bal_goals; /* goal hits */
59143 + atomic_unchecked_t s_bal_breaks; /* too long searches */
59144 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
59145 spinlock_t s_bal_lock;
59146 unsigned long s_mb_buddies_generated;
59147 unsigned long long s_mb_generation_time;
59148 - atomic_t s_mb_lost_chunks;
59149 - atomic_t s_mb_preallocated;
59150 - atomic_t s_mb_discarded;
59151 + atomic_unchecked_t s_mb_lost_chunks;
59152 + atomic_unchecked_t s_mb_preallocated;
59153 + atomic_unchecked_t s_mb_discarded;
59154 atomic_t s_lock_busy;
59155
59156 /* locality groups */
59157 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
59158 index 04a5c75..09894fa 100644
59159 --- a/fs/ext4/mballoc.c
59160 +++ b/fs/ext4/mballoc.c
59161 @@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
59162 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
59163
59164 if (EXT4_SB(sb)->s_mb_stats)
59165 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
59166 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
59167
59168 break;
59169 }
59170 @@ -2189,7 +2189,7 @@ repeat:
59171 ac->ac_status = AC_STATUS_CONTINUE;
59172 ac->ac_flags |= EXT4_MB_HINT_FIRST;
59173 cr = 3;
59174 - atomic_inc(&sbi->s_mb_lost_chunks);
59175 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
59176 goto repeat;
59177 }
59178 }
59179 @@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
59180 if (sbi->s_mb_stats) {
59181 ext4_msg(sb, KERN_INFO,
59182 "mballoc: %u blocks %u reqs (%u success)",
59183 - atomic_read(&sbi->s_bal_allocated),
59184 - atomic_read(&sbi->s_bal_reqs),
59185 - atomic_read(&sbi->s_bal_success));
59186 + atomic_read_unchecked(&sbi->s_bal_allocated),
59187 + atomic_read_unchecked(&sbi->s_bal_reqs),
59188 + atomic_read_unchecked(&sbi->s_bal_success));
59189 ext4_msg(sb, KERN_INFO,
59190 "mballoc: %u extents scanned, %u goal hits, "
59191 "%u 2^N hits, %u breaks, %u lost",
59192 - atomic_read(&sbi->s_bal_ex_scanned),
59193 - atomic_read(&sbi->s_bal_goals),
59194 - atomic_read(&sbi->s_bal_2orders),
59195 - atomic_read(&sbi->s_bal_breaks),
59196 - atomic_read(&sbi->s_mb_lost_chunks));
59197 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
59198 + atomic_read_unchecked(&sbi->s_bal_goals),
59199 + atomic_read_unchecked(&sbi->s_bal_2orders),
59200 + atomic_read_unchecked(&sbi->s_bal_breaks),
59201 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
59202 ext4_msg(sb, KERN_INFO,
59203 "mballoc: %lu generated and it took %Lu",
59204 sbi->s_mb_buddies_generated,
59205 sbi->s_mb_generation_time);
59206 ext4_msg(sb, KERN_INFO,
59207 "mballoc: %u preallocated, %u discarded",
59208 - atomic_read(&sbi->s_mb_preallocated),
59209 - atomic_read(&sbi->s_mb_discarded));
59210 + atomic_read_unchecked(&sbi->s_mb_preallocated),
59211 + atomic_read_unchecked(&sbi->s_mb_discarded));
59212 }
59213
59214 free_percpu(sbi->s_locality_groups);
59215 @@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
59216 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
59217
59218 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
59219 - atomic_inc(&sbi->s_bal_reqs);
59220 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
59221 + atomic_inc_unchecked(&sbi->s_bal_reqs);
59222 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
59223 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
59224 - atomic_inc(&sbi->s_bal_success);
59225 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
59226 + atomic_inc_unchecked(&sbi->s_bal_success);
59227 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
59228 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
59229 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
59230 - atomic_inc(&sbi->s_bal_goals);
59231 + atomic_inc_unchecked(&sbi->s_bal_goals);
59232 if (ac->ac_found > sbi->s_mb_max_to_scan)
59233 - atomic_inc(&sbi->s_bal_breaks);
59234 + atomic_inc_unchecked(&sbi->s_bal_breaks);
59235 }
59236
59237 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
59238 @@ -3583,7 +3583,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
59239 trace_ext4_mb_new_inode_pa(ac, pa);
59240
59241 ext4_mb_use_inode_pa(ac, pa);
59242 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
59243 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
59244
59245 ei = EXT4_I(ac->ac_inode);
59246 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
59247 @@ -3643,7 +3643,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
59248 trace_ext4_mb_new_group_pa(ac, pa);
59249
59250 ext4_mb_use_group_pa(ac, pa);
59251 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
59252 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
59253
59254 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
59255 lg = ac->ac_lg;
59256 @@ -3732,7 +3732,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
59257 * from the bitmap and continue.
59258 */
59259 }
59260 - atomic_add(free, &sbi->s_mb_discarded);
59261 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
59262
59263 return err;
59264 }
59265 @@ -3750,7 +3750,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
59266 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
59267 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
59268 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
59269 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
59270 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
59271 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
59272
59273 return 0;
59274 diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
59275 index 04434ad..6404663 100644
59276 --- a/fs/ext4/mmp.c
59277 +++ b/fs/ext4/mmp.c
59278 @@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
59279 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
59280 const char *function, unsigned int line, const char *msg)
59281 {
59282 - __ext4_warning(sb, function, line, msg);
59283 + __ext4_warning(sb, function, line, "%s", msg);
59284 __ext4_warning(sb, function, line,
59285 "MMP failure info: last update time: %llu, last update "
59286 "node: %s, last update device: %s\n",
59287 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
59288 index 710fed2..a82e4e8 100644
59289 --- a/fs/ext4/super.c
59290 +++ b/fs/ext4/super.c
59291 @@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data)
59292 }
59293
59294 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
59295 -static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
59296 +static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
59297 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
59298
59299 #ifdef CONFIG_QUOTA
59300 @@ -2450,7 +2450,7 @@ struct ext4_attr {
59301 int offset;
59302 int deprecated_val;
59303 } u;
59304 -};
59305 +} __do_const;
59306
59307 static int parse_strtoull(const char *buf,
59308 unsigned long long max, unsigned long long *value)
59309 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
59310 index 1423c48..9c0c6dc 100644
59311 --- a/fs/ext4/xattr.c
59312 +++ b/fs/ext4/xattr.c
59313 @@ -381,7 +381,7 @@ static int
59314 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
59315 char *buffer, size_t buffer_size)
59316 {
59317 - size_t rest = buffer_size;
59318 + size_t rest = buffer_size, total_size = 0;
59319
59320 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
59321 const struct xattr_handler *handler =
59322 @@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
59323 buffer += size;
59324 }
59325 rest -= size;
59326 + total_size += size;
59327 }
59328 }
59329 - return buffer_size - rest;
59330 + return total_size;
59331 }
59332
59333 static int
59334 diff --git a/fs/fcntl.c b/fs/fcntl.c
59335 index ef68665..5deacdc 100644
59336 --- a/fs/fcntl.c
59337 +++ b/fs/fcntl.c
59338 @@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
59339 if (err)
59340 return err;
59341
59342 + if (gr_handle_chroot_fowner(pid, type))
59343 + return -ENOENT;
59344 + if (gr_check_protected_task_fowner(pid, type))
59345 + return -EACCES;
59346 +
59347 f_modown(filp, pid, type, force);
59348 return 0;
59349 }
59350 diff --git a/fs/fhandle.c b/fs/fhandle.c
59351 index 999ff5c..41f4109 100644
59352 --- a/fs/fhandle.c
59353 +++ b/fs/fhandle.c
59354 @@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
59355 } else
59356 retval = 0;
59357 /* copy the mount id */
59358 - if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
59359 - sizeof(*mnt_id)) ||
59360 + if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
59361 copy_to_user(ufh, handle,
59362 sizeof(struct file_handle) + handle_bytes))
59363 retval = -EFAULT;
59364 diff --git a/fs/file.c b/fs/file.c
59365 index 9de2026..8e334ca 100644
59366 --- a/fs/file.c
59367 +++ b/fs/file.c
59368 @@ -16,6 +16,7 @@
59369 #include <linux/slab.h>
59370 #include <linux/vmalloc.h>
59371 #include <linux/file.h>
59372 +#include <linux/security.h>
59373 #include <linux/fdtable.h>
59374 #include <linux/bitops.h>
59375 #include <linux/interrupt.h>
59376 @@ -141,7 +142,7 @@ out:
59377 * Return <0 error code on error; 1 on successful completion.
59378 * The files->file_lock should be held on entry, and will be held on exit.
59379 */
59380 -static int expand_fdtable(struct files_struct *files, int nr)
59381 +static int expand_fdtable(struct files_struct *files, unsigned int nr)
59382 __releases(files->file_lock)
59383 __acquires(files->file_lock)
59384 {
59385 @@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
59386 * expanded and execution may have blocked.
59387 * The files->file_lock should be held on entry, and will be held on exit.
59388 */
59389 -static int expand_files(struct files_struct *files, int nr)
59390 +static int expand_files(struct files_struct *files, unsigned int nr)
59391 {
59392 struct fdtable *fdt;
59393
59394 @@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
59395 if (!file)
59396 return __close_fd(files, fd);
59397
59398 + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
59399 if (fd >= rlimit(RLIMIT_NOFILE))
59400 return -EBADF;
59401
59402 @@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
59403 if (unlikely(oldfd == newfd))
59404 return -EINVAL;
59405
59406 + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
59407 if (newfd >= rlimit(RLIMIT_NOFILE))
59408 return -EBADF;
59409
59410 @@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
59411 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
59412 {
59413 int err;
59414 + gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
59415 if (from >= rlimit(RLIMIT_NOFILE))
59416 return -EINVAL;
59417 err = alloc_fd(from, flags);
59418 diff --git a/fs/filesystems.c b/fs/filesystems.c
59419 index 92567d9..fcd8cbf 100644
59420 --- a/fs/filesystems.c
59421 +++ b/fs/filesystems.c
59422 @@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
59423 int len = dot ? dot - name : strlen(name);
59424
59425 fs = __get_fs_type(name, len);
59426 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59427 + if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
59428 +#else
59429 if (!fs && (request_module("fs-%.*s", len, name) == 0))
59430 +#endif
59431 fs = __get_fs_type(name, len);
59432
59433 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
59434 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
59435 index 7dca743..543d620 100644
59436 --- a/fs/fs_struct.c
59437 +++ b/fs/fs_struct.c
59438 @@ -4,6 +4,7 @@
59439 #include <linux/path.h>
59440 #include <linux/slab.h>
59441 #include <linux/fs_struct.h>
59442 +#include <linux/grsecurity.h>
59443 #include "internal.h"
59444
59445 /*
59446 @@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
59447 write_seqcount_begin(&fs->seq);
59448 old_root = fs->root;
59449 fs->root = *path;
59450 + gr_set_chroot_entries(current, path);
59451 write_seqcount_end(&fs->seq);
59452 spin_unlock(&fs->lock);
59453 if (old_root.dentry)
59454 @@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
59455 int hits = 0;
59456 spin_lock(&fs->lock);
59457 write_seqcount_begin(&fs->seq);
59458 + /* this root replacement is only done by pivot_root,
59459 + leave grsec's chroot tagging alone for this task
59460 + so that a pivoted root isn't treated as a chroot
59461 + */
59462 hits += replace_path(&fs->root, old_root, new_root);
59463 hits += replace_path(&fs->pwd, old_root, new_root);
59464 write_seqcount_end(&fs->seq);
59465 @@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
59466 task_lock(tsk);
59467 spin_lock(&fs->lock);
59468 tsk->fs = NULL;
59469 - kill = !--fs->users;
59470 + gr_clear_chroot_entries(tsk);
59471 + kill = !atomic_dec_return(&fs->users);
59472 spin_unlock(&fs->lock);
59473 task_unlock(tsk);
59474 if (kill)
59475 @@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
59476 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
59477 /* We don't need to lock fs - think why ;-) */
59478 if (fs) {
59479 - fs->users = 1;
59480 + atomic_set(&fs->users, 1);
59481 fs->in_exec = 0;
59482 spin_lock_init(&fs->lock);
59483 seqcount_init(&fs->seq);
59484 @@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
59485 spin_lock(&old->lock);
59486 fs->root = old->root;
59487 path_get(&fs->root);
59488 + /* instead of calling gr_set_chroot_entries here,
59489 + we call it from every caller of this function
59490 + */
59491 fs->pwd = old->pwd;
59492 path_get(&fs->pwd);
59493 spin_unlock(&old->lock);
59494 @@ -139,8 +149,9 @@ int unshare_fs_struct(void)
59495
59496 task_lock(current);
59497 spin_lock(&fs->lock);
59498 - kill = !--fs->users;
59499 + kill = !atomic_dec_return(&fs->users);
59500 current->fs = new_fs;
59501 + gr_set_chroot_entries(current, &new_fs->root);
59502 spin_unlock(&fs->lock);
59503 task_unlock(current);
59504
59505 @@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
59506
59507 int current_umask(void)
59508 {
59509 - return current->fs->umask;
59510 + return current->fs->umask | gr_acl_umask();
59511 }
59512 EXPORT_SYMBOL(current_umask);
59513
59514 /* to be mentioned only in INIT_TASK */
59515 struct fs_struct init_fs = {
59516 - .users = 1,
59517 + .users = ATOMIC_INIT(1),
59518 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
59519 .seq = SEQCNT_ZERO(init_fs.seq),
59520 .umask = 0022,
59521 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
59522 index 29d7feb..303644d 100644
59523 --- a/fs/fscache/cookie.c
59524 +++ b/fs/fscache/cookie.c
59525 @@ -19,7 +19,7 @@
59526
59527 struct kmem_cache *fscache_cookie_jar;
59528
59529 -static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
59530 +static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
59531
59532 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
59533 static int fscache_alloc_object(struct fscache_cache *cache,
59534 @@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
59535 parent ? (char *) parent->def->name : "<no-parent>",
59536 def->name, netfs_data, enable);
59537
59538 - fscache_stat(&fscache_n_acquires);
59539 + fscache_stat_unchecked(&fscache_n_acquires);
59540
59541 /* if there's no parent cookie, then we don't create one here either */
59542 if (!parent) {
59543 - fscache_stat(&fscache_n_acquires_null);
59544 + fscache_stat_unchecked(&fscache_n_acquires_null);
59545 _leave(" [no parent]");
59546 return NULL;
59547 }
59548 @@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
59549 /* allocate and initialise a cookie */
59550 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
59551 if (!cookie) {
59552 - fscache_stat(&fscache_n_acquires_oom);
59553 + fscache_stat_unchecked(&fscache_n_acquires_oom);
59554 _leave(" [ENOMEM]");
59555 return NULL;
59556 }
59557 @@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
59558
59559 switch (cookie->def->type) {
59560 case FSCACHE_COOKIE_TYPE_INDEX:
59561 - fscache_stat(&fscache_n_cookie_index);
59562 + fscache_stat_unchecked(&fscache_n_cookie_index);
59563 break;
59564 case FSCACHE_COOKIE_TYPE_DATAFILE:
59565 - fscache_stat(&fscache_n_cookie_data);
59566 + fscache_stat_unchecked(&fscache_n_cookie_data);
59567 break;
59568 default:
59569 - fscache_stat(&fscache_n_cookie_special);
59570 + fscache_stat_unchecked(&fscache_n_cookie_special);
59571 break;
59572 }
59573
59574 @@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
59575 } else {
59576 atomic_dec(&parent->n_children);
59577 __fscache_cookie_put(cookie);
59578 - fscache_stat(&fscache_n_acquires_nobufs);
59579 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
59580 _leave(" = NULL");
59581 return NULL;
59582 }
59583 @@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
59584 }
59585 }
59586
59587 - fscache_stat(&fscache_n_acquires_ok);
59588 + fscache_stat_unchecked(&fscache_n_acquires_ok);
59589 _leave(" = %p", cookie);
59590 return cookie;
59591 }
59592 @@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
59593 cache = fscache_select_cache_for_object(cookie->parent);
59594 if (!cache) {
59595 up_read(&fscache_addremove_sem);
59596 - fscache_stat(&fscache_n_acquires_no_cache);
59597 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
59598 _leave(" = -ENOMEDIUM [no cache]");
59599 return -ENOMEDIUM;
59600 }
59601 @@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
59602 object = cache->ops->alloc_object(cache, cookie);
59603 fscache_stat_d(&fscache_n_cop_alloc_object);
59604 if (IS_ERR(object)) {
59605 - fscache_stat(&fscache_n_object_no_alloc);
59606 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
59607 ret = PTR_ERR(object);
59608 goto error;
59609 }
59610
59611 - fscache_stat(&fscache_n_object_alloc);
59612 + fscache_stat_unchecked(&fscache_n_object_alloc);
59613
59614 - object->debug_id = atomic_inc_return(&fscache_object_debug_id);
59615 + object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
59616
59617 _debug("ALLOC OBJ%x: %s {%lx}",
59618 object->debug_id, cookie->def->name, object->events);
59619 @@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
59620
59621 _enter("{%s}", cookie->def->name);
59622
59623 - fscache_stat(&fscache_n_invalidates);
59624 + fscache_stat_unchecked(&fscache_n_invalidates);
59625
59626 /* Only permit invalidation of data files. Invalidating an index will
59627 * require the caller to release all its attachments to the tree rooted
59628 @@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
59629 {
59630 struct fscache_object *object;
59631
59632 - fscache_stat(&fscache_n_updates);
59633 + fscache_stat_unchecked(&fscache_n_updates);
59634
59635 if (!cookie) {
59636 - fscache_stat(&fscache_n_updates_null);
59637 + fscache_stat_unchecked(&fscache_n_updates_null);
59638 _leave(" [no cookie]");
59639 return;
59640 }
59641 @@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
59642 */
59643 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
59644 {
59645 - fscache_stat(&fscache_n_relinquishes);
59646 + fscache_stat_unchecked(&fscache_n_relinquishes);
59647 if (retire)
59648 - fscache_stat(&fscache_n_relinquishes_retire);
59649 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
59650
59651 if (!cookie) {
59652 - fscache_stat(&fscache_n_relinquishes_null);
59653 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
59654 _leave(" [no cookie]");
59655 return;
59656 }
59657 @@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
59658 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
59659 goto inconsistent;
59660
59661 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59662 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59663
59664 __fscache_use_cookie(cookie);
59665 if (fscache_submit_op(object, op) < 0)
59666 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
59667 index 4226f66..0fb3f45 100644
59668 --- a/fs/fscache/internal.h
59669 +++ b/fs/fscache/internal.h
59670 @@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
59671 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
59672 extern int fscache_wait_for_operation_activation(struct fscache_object *,
59673 struct fscache_operation *,
59674 - atomic_t *,
59675 - atomic_t *,
59676 + atomic_unchecked_t *,
59677 + atomic_unchecked_t *,
59678 void (*)(struct fscache_operation *));
59679 extern void fscache_invalidate_writes(struct fscache_cookie *);
59680
59681 @@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
59682 * stats.c
59683 */
59684 #ifdef CONFIG_FSCACHE_STATS
59685 -extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
59686 -extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
59687 +extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
59688 +extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
59689
59690 -extern atomic_t fscache_n_op_pend;
59691 -extern atomic_t fscache_n_op_run;
59692 -extern atomic_t fscache_n_op_enqueue;
59693 -extern atomic_t fscache_n_op_deferred_release;
59694 -extern atomic_t fscache_n_op_release;
59695 -extern atomic_t fscache_n_op_gc;
59696 -extern atomic_t fscache_n_op_cancelled;
59697 -extern atomic_t fscache_n_op_rejected;
59698 +extern atomic_unchecked_t fscache_n_op_pend;
59699 +extern atomic_unchecked_t fscache_n_op_run;
59700 +extern atomic_unchecked_t fscache_n_op_enqueue;
59701 +extern atomic_unchecked_t fscache_n_op_deferred_release;
59702 +extern atomic_unchecked_t fscache_n_op_release;
59703 +extern atomic_unchecked_t fscache_n_op_gc;
59704 +extern atomic_unchecked_t fscache_n_op_cancelled;
59705 +extern atomic_unchecked_t fscache_n_op_rejected;
59706
59707 -extern atomic_t fscache_n_attr_changed;
59708 -extern atomic_t fscache_n_attr_changed_ok;
59709 -extern atomic_t fscache_n_attr_changed_nobufs;
59710 -extern atomic_t fscache_n_attr_changed_nomem;
59711 -extern atomic_t fscache_n_attr_changed_calls;
59712 +extern atomic_unchecked_t fscache_n_attr_changed;
59713 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
59714 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
59715 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
59716 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
59717
59718 -extern atomic_t fscache_n_allocs;
59719 -extern atomic_t fscache_n_allocs_ok;
59720 -extern atomic_t fscache_n_allocs_wait;
59721 -extern atomic_t fscache_n_allocs_nobufs;
59722 -extern atomic_t fscache_n_allocs_intr;
59723 -extern atomic_t fscache_n_allocs_object_dead;
59724 -extern atomic_t fscache_n_alloc_ops;
59725 -extern atomic_t fscache_n_alloc_op_waits;
59726 +extern atomic_unchecked_t fscache_n_allocs;
59727 +extern atomic_unchecked_t fscache_n_allocs_ok;
59728 +extern atomic_unchecked_t fscache_n_allocs_wait;
59729 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
59730 +extern atomic_unchecked_t fscache_n_allocs_intr;
59731 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
59732 +extern atomic_unchecked_t fscache_n_alloc_ops;
59733 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
59734
59735 -extern atomic_t fscache_n_retrievals;
59736 -extern atomic_t fscache_n_retrievals_ok;
59737 -extern atomic_t fscache_n_retrievals_wait;
59738 -extern atomic_t fscache_n_retrievals_nodata;
59739 -extern atomic_t fscache_n_retrievals_nobufs;
59740 -extern atomic_t fscache_n_retrievals_intr;
59741 -extern atomic_t fscache_n_retrievals_nomem;
59742 -extern atomic_t fscache_n_retrievals_object_dead;
59743 -extern atomic_t fscache_n_retrieval_ops;
59744 -extern atomic_t fscache_n_retrieval_op_waits;
59745 +extern atomic_unchecked_t fscache_n_retrievals;
59746 +extern atomic_unchecked_t fscache_n_retrievals_ok;
59747 +extern atomic_unchecked_t fscache_n_retrievals_wait;
59748 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
59749 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
59750 +extern atomic_unchecked_t fscache_n_retrievals_intr;
59751 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
59752 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
59753 +extern atomic_unchecked_t fscache_n_retrieval_ops;
59754 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
59755
59756 -extern atomic_t fscache_n_stores;
59757 -extern atomic_t fscache_n_stores_ok;
59758 -extern atomic_t fscache_n_stores_again;
59759 -extern atomic_t fscache_n_stores_nobufs;
59760 -extern atomic_t fscache_n_stores_oom;
59761 -extern atomic_t fscache_n_store_ops;
59762 -extern atomic_t fscache_n_store_calls;
59763 -extern atomic_t fscache_n_store_pages;
59764 -extern atomic_t fscache_n_store_radix_deletes;
59765 -extern atomic_t fscache_n_store_pages_over_limit;
59766 +extern atomic_unchecked_t fscache_n_stores;
59767 +extern atomic_unchecked_t fscache_n_stores_ok;
59768 +extern atomic_unchecked_t fscache_n_stores_again;
59769 +extern atomic_unchecked_t fscache_n_stores_nobufs;
59770 +extern atomic_unchecked_t fscache_n_stores_oom;
59771 +extern atomic_unchecked_t fscache_n_store_ops;
59772 +extern atomic_unchecked_t fscache_n_store_calls;
59773 +extern atomic_unchecked_t fscache_n_store_pages;
59774 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
59775 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
59776
59777 -extern atomic_t fscache_n_store_vmscan_not_storing;
59778 -extern atomic_t fscache_n_store_vmscan_gone;
59779 -extern atomic_t fscache_n_store_vmscan_busy;
59780 -extern atomic_t fscache_n_store_vmscan_cancelled;
59781 -extern atomic_t fscache_n_store_vmscan_wait;
59782 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
59783 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
59784 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
59785 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
59786 +extern atomic_unchecked_t fscache_n_store_vmscan_wait;
59787
59788 -extern atomic_t fscache_n_marks;
59789 -extern atomic_t fscache_n_uncaches;
59790 +extern atomic_unchecked_t fscache_n_marks;
59791 +extern atomic_unchecked_t fscache_n_uncaches;
59792
59793 -extern atomic_t fscache_n_acquires;
59794 -extern atomic_t fscache_n_acquires_null;
59795 -extern atomic_t fscache_n_acquires_no_cache;
59796 -extern atomic_t fscache_n_acquires_ok;
59797 -extern atomic_t fscache_n_acquires_nobufs;
59798 -extern atomic_t fscache_n_acquires_oom;
59799 +extern atomic_unchecked_t fscache_n_acquires;
59800 +extern atomic_unchecked_t fscache_n_acquires_null;
59801 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
59802 +extern atomic_unchecked_t fscache_n_acquires_ok;
59803 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
59804 +extern atomic_unchecked_t fscache_n_acquires_oom;
59805
59806 -extern atomic_t fscache_n_invalidates;
59807 -extern atomic_t fscache_n_invalidates_run;
59808 +extern atomic_unchecked_t fscache_n_invalidates;
59809 +extern atomic_unchecked_t fscache_n_invalidates_run;
59810
59811 -extern atomic_t fscache_n_updates;
59812 -extern atomic_t fscache_n_updates_null;
59813 -extern atomic_t fscache_n_updates_run;
59814 +extern atomic_unchecked_t fscache_n_updates;
59815 +extern atomic_unchecked_t fscache_n_updates_null;
59816 +extern atomic_unchecked_t fscache_n_updates_run;
59817
59818 -extern atomic_t fscache_n_relinquishes;
59819 -extern atomic_t fscache_n_relinquishes_null;
59820 -extern atomic_t fscache_n_relinquishes_waitcrt;
59821 -extern atomic_t fscache_n_relinquishes_retire;
59822 +extern atomic_unchecked_t fscache_n_relinquishes;
59823 +extern atomic_unchecked_t fscache_n_relinquishes_null;
59824 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
59825 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
59826
59827 -extern atomic_t fscache_n_cookie_index;
59828 -extern atomic_t fscache_n_cookie_data;
59829 -extern atomic_t fscache_n_cookie_special;
59830 +extern atomic_unchecked_t fscache_n_cookie_index;
59831 +extern atomic_unchecked_t fscache_n_cookie_data;
59832 +extern atomic_unchecked_t fscache_n_cookie_special;
59833
59834 -extern atomic_t fscache_n_object_alloc;
59835 -extern atomic_t fscache_n_object_no_alloc;
59836 -extern atomic_t fscache_n_object_lookups;
59837 -extern atomic_t fscache_n_object_lookups_negative;
59838 -extern atomic_t fscache_n_object_lookups_positive;
59839 -extern atomic_t fscache_n_object_lookups_timed_out;
59840 -extern atomic_t fscache_n_object_created;
59841 -extern atomic_t fscache_n_object_avail;
59842 -extern atomic_t fscache_n_object_dead;
59843 +extern atomic_unchecked_t fscache_n_object_alloc;
59844 +extern atomic_unchecked_t fscache_n_object_no_alloc;
59845 +extern atomic_unchecked_t fscache_n_object_lookups;
59846 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
59847 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
59848 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
59849 +extern atomic_unchecked_t fscache_n_object_created;
59850 +extern atomic_unchecked_t fscache_n_object_avail;
59851 +extern atomic_unchecked_t fscache_n_object_dead;
59852
59853 -extern atomic_t fscache_n_checkaux_none;
59854 -extern atomic_t fscache_n_checkaux_okay;
59855 -extern atomic_t fscache_n_checkaux_update;
59856 -extern atomic_t fscache_n_checkaux_obsolete;
59857 +extern atomic_unchecked_t fscache_n_checkaux_none;
59858 +extern atomic_unchecked_t fscache_n_checkaux_okay;
59859 +extern atomic_unchecked_t fscache_n_checkaux_update;
59860 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
59861
59862 extern atomic_t fscache_n_cop_alloc_object;
59863 extern atomic_t fscache_n_cop_lookup_object;
59864 @@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
59865 atomic_inc(stat);
59866 }
59867
59868 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
59869 +{
59870 + atomic_inc_unchecked(stat);
59871 +}
59872 +
59873 static inline void fscache_stat_d(atomic_t *stat)
59874 {
59875 atomic_dec(stat);
59876 @@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
59877
59878 #define __fscache_stat(stat) (NULL)
59879 #define fscache_stat(stat) do {} while (0)
59880 +#define fscache_stat_unchecked(stat) do {} while (0)
59881 #define fscache_stat_d(stat) do {} while (0)
59882 #endif
59883
59884 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
59885 index 53d35c5..5d68ed4 100644
59886 --- a/fs/fscache/object.c
59887 +++ b/fs/fscache/object.c
59888 @@ -451,7 +451,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59889 _debug("LOOKUP \"%s\" in \"%s\"",
59890 cookie->def->name, object->cache->tag->name);
59891
59892 - fscache_stat(&fscache_n_object_lookups);
59893 + fscache_stat_unchecked(&fscache_n_object_lookups);
59894 fscache_stat(&fscache_n_cop_lookup_object);
59895 ret = object->cache->ops->lookup_object(object);
59896 fscache_stat_d(&fscache_n_cop_lookup_object);
59897 @@ -461,7 +461,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59898 if (ret == -ETIMEDOUT) {
59899 /* probably stuck behind another object, so move this one to
59900 * the back of the queue */
59901 - fscache_stat(&fscache_n_object_lookups_timed_out);
59902 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
59903 _leave(" [timeout]");
59904 return NO_TRANSIT;
59905 }
59906 @@ -489,7 +489,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
59907 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
59908
59909 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59910 - fscache_stat(&fscache_n_object_lookups_negative);
59911 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
59912
59913 /* Allow write requests to begin stacking up and read requests to begin
59914 * returning ENODATA.
59915 @@ -524,7 +524,7 @@ void fscache_obtained_object(struct fscache_object *object)
59916 /* if we were still looking up, then we must have a positive lookup
59917 * result, in which case there may be data available */
59918 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59919 - fscache_stat(&fscache_n_object_lookups_positive);
59920 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
59921
59922 /* We do (presumably) have data */
59923 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
59924 @@ -536,7 +536,7 @@ void fscache_obtained_object(struct fscache_object *object)
59925 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
59926 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
59927 } else {
59928 - fscache_stat(&fscache_n_object_created);
59929 + fscache_stat_unchecked(&fscache_n_object_created);
59930 }
59931
59932 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
59933 @@ -572,7 +572,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
59934 fscache_stat_d(&fscache_n_cop_lookup_complete);
59935
59936 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
59937 - fscache_stat(&fscache_n_object_avail);
59938 + fscache_stat_unchecked(&fscache_n_object_avail);
59939
59940 _leave("");
59941 return transit_to(JUMPSTART_DEPS);
59942 @@ -719,7 +719,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
59943
59944 /* this just shifts the object release to the work processor */
59945 fscache_put_object(object);
59946 - fscache_stat(&fscache_n_object_dead);
59947 + fscache_stat_unchecked(&fscache_n_object_dead);
59948
59949 _leave("");
59950 return transit_to(OBJECT_DEAD);
59951 @@ -884,7 +884,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59952 enum fscache_checkaux result;
59953
59954 if (!object->cookie->def->check_aux) {
59955 - fscache_stat(&fscache_n_checkaux_none);
59956 + fscache_stat_unchecked(&fscache_n_checkaux_none);
59957 return FSCACHE_CHECKAUX_OKAY;
59958 }
59959
59960 @@ -893,17 +893,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59961 switch (result) {
59962 /* entry okay as is */
59963 case FSCACHE_CHECKAUX_OKAY:
59964 - fscache_stat(&fscache_n_checkaux_okay);
59965 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
59966 break;
59967
59968 /* entry requires update */
59969 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
59970 - fscache_stat(&fscache_n_checkaux_update);
59971 + fscache_stat_unchecked(&fscache_n_checkaux_update);
59972 break;
59973
59974 /* entry requires deletion */
59975 case FSCACHE_CHECKAUX_OBSOLETE:
59976 - fscache_stat(&fscache_n_checkaux_obsolete);
59977 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
59978 break;
59979
59980 default:
59981 @@ -989,7 +989,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
59982 {
59983 const struct fscache_state *s;
59984
59985 - fscache_stat(&fscache_n_invalidates_run);
59986 + fscache_stat_unchecked(&fscache_n_invalidates_run);
59987 fscache_stat(&fscache_n_cop_invalidate_object);
59988 s = _fscache_invalidate_object(object, event);
59989 fscache_stat_d(&fscache_n_cop_invalidate_object);
59990 @@ -1004,7 +1004,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
59991 {
59992 _enter("{OBJ%x},%d", object->debug_id, event);
59993
59994 - fscache_stat(&fscache_n_updates_run);
59995 + fscache_stat_unchecked(&fscache_n_updates_run);
59996 fscache_stat(&fscache_n_cop_update_object);
59997 object->cache->ops->update_object(object);
59998 fscache_stat_d(&fscache_n_cop_update_object);
59999 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
60000 index 318071a..379938b 100644
60001 --- a/fs/fscache/operation.c
60002 +++ b/fs/fscache/operation.c
60003 @@ -17,7 +17,7 @@
60004 #include <linux/slab.h>
60005 #include "internal.h"
60006
60007 -atomic_t fscache_op_debug_id;
60008 +atomic_unchecked_t fscache_op_debug_id;
60009 EXPORT_SYMBOL(fscache_op_debug_id);
60010
60011 /**
60012 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
60013 ASSERTCMP(atomic_read(&op->usage), >, 0);
60014 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
60015
60016 - fscache_stat(&fscache_n_op_enqueue);
60017 + fscache_stat_unchecked(&fscache_n_op_enqueue);
60018 switch (op->flags & FSCACHE_OP_TYPE) {
60019 case FSCACHE_OP_ASYNC:
60020 _debug("queue async");
60021 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
60022 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
60023 if (op->processor)
60024 fscache_enqueue_operation(op);
60025 - fscache_stat(&fscache_n_op_run);
60026 + fscache_stat_unchecked(&fscache_n_op_run);
60027 }
60028
60029 /*
60030 @@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
60031 if (object->n_in_progress > 0) {
60032 atomic_inc(&op->usage);
60033 list_add_tail(&op->pend_link, &object->pending_ops);
60034 - fscache_stat(&fscache_n_op_pend);
60035 + fscache_stat_unchecked(&fscache_n_op_pend);
60036 } else if (!list_empty(&object->pending_ops)) {
60037 atomic_inc(&op->usage);
60038 list_add_tail(&op->pend_link, &object->pending_ops);
60039 - fscache_stat(&fscache_n_op_pend);
60040 + fscache_stat_unchecked(&fscache_n_op_pend);
60041 fscache_start_operations(object);
60042 } else {
60043 ASSERTCMP(object->n_in_progress, ==, 0);
60044 @@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
60045 object->n_exclusive++; /* reads and writes must wait */
60046 atomic_inc(&op->usage);
60047 list_add_tail(&op->pend_link, &object->pending_ops);
60048 - fscache_stat(&fscache_n_op_pend);
60049 + fscache_stat_unchecked(&fscache_n_op_pend);
60050 ret = 0;
60051 } else {
60052 /* If we're in any other state, there must have been an I/O
60053 @@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
60054 if (object->n_exclusive > 0) {
60055 atomic_inc(&op->usage);
60056 list_add_tail(&op->pend_link, &object->pending_ops);
60057 - fscache_stat(&fscache_n_op_pend);
60058 + fscache_stat_unchecked(&fscache_n_op_pend);
60059 } else if (!list_empty(&object->pending_ops)) {
60060 atomic_inc(&op->usage);
60061 list_add_tail(&op->pend_link, &object->pending_ops);
60062 - fscache_stat(&fscache_n_op_pend);
60063 + fscache_stat_unchecked(&fscache_n_op_pend);
60064 fscache_start_operations(object);
60065 } else {
60066 ASSERTCMP(object->n_exclusive, ==, 0);
60067 @@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
60068 object->n_ops++;
60069 atomic_inc(&op->usage);
60070 list_add_tail(&op->pend_link, &object->pending_ops);
60071 - fscache_stat(&fscache_n_op_pend);
60072 + fscache_stat_unchecked(&fscache_n_op_pend);
60073 ret = 0;
60074 } else if (fscache_object_is_dying(object)) {
60075 - fscache_stat(&fscache_n_op_rejected);
60076 + fscache_stat_unchecked(&fscache_n_op_rejected);
60077 op->state = FSCACHE_OP_ST_CANCELLED;
60078 ret = -ENOBUFS;
60079 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
60080 @@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
60081 ret = -EBUSY;
60082 if (op->state == FSCACHE_OP_ST_PENDING) {
60083 ASSERT(!list_empty(&op->pend_link));
60084 - fscache_stat(&fscache_n_op_cancelled);
60085 + fscache_stat_unchecked(&fscache_n_op_cancelled);
60086 list_del_init(&op->pend_link);
60087 if (do_cancel)
60088 do_cancel(op);
60089 @@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
60090 while (!list_empty(&object->pending_ops)) {
60091 op = list_entry(object->pending_ops.next,
60092 struct fscache_operation, pend_link);
60093 - fscache_stat(&fscache_n_op_cancelled);
60094 + fscache_stat_unchecked(&fscache_n_op_cancelled);
60095 list_del_init(&op->pend_link);
60096
60097 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
60098 @@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
60099 op->state, ==, FSCACHE_OP_ST_CANCELLED);
60100 op->state = FSCACHE_OP_ST_DEAD;
60101
60102 - fscache_stat(&fscache_n_op_release);
60103 + fscache_stat_unchecked(&fscache_n_op_release);
60104
60105 if (op->release) {
60106 op->release(op);
60107 @@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
60108 * lock, and defer it otherwise */
60109 if (!spin_trylock(&object->lock)) {
60110 _debug("defer put");
60111 - fscache_stat(&fscache_n_op_deferred_release);
60112 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
60113
60114 cache = object->cache;
60115 spin_lock(&cache->op_gc_list_lock);
60116 @@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
60117
60118 _debug("GC DEFERRED REL OBJ%x OP%x",
60119 object->debug_id, op->debug_id);
60120 - fscache_stat(&fscache_n_op_gc);
60121 + fscache_stat_unchecked(&fscache_n_op_gc);
60122
60123 ASSERTCMP(atomic_read(&op->usage), ==, 0);
60124 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
60125 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
60126 index 7f5c658..6c1e164 100644
60127 --- a/fs/fscache/page.c
60128 +++ b/fs/fscache/page.c
60129 @@ -61,7 +61,7 @@ try_again:
60130 val = radix_tree_lookup(&cookie->stores, page->index);
60131 if (!val) {
60132 rcu_read_unlock();
60133 - fscache_stat(&fscache_n_store_vmscan_not_storing);
60134 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
60135 __fscache_uncache_page(cookie, page);
60136 return true;
60137 }
60138 @@ -91,11 +91,11 @@ try_again:
60139 spin_unlock(&cookie->stores_lock);
60140
60141 if (xpage) {
60142 - fscache_stat(&fscache_n_store_vmscan_cancelled);
60143 - fscache_stat(&fscache_n_store_radix_deletes);
60144 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
60145 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
60146 ASSERTCMP(xpage, ==, page);
60147 } else {
60148 - fscache_stat(&fscache_n_store_vmscan_gone);
60149 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
60150 }
60151
60152 wake_up_bit(&cookie->flags, 0);
60153 @@ -110,11 +110,11 @@ page_busy:
60154 * sleeping on memory allocation, so we may need to impose a timeout
60155 * too. */
60156 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
60157 - fscache_stat(&fscache_n_store_vmscan_busy);
60158 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
60159 return false;
60160 }
60161
60162 - fscache_stat(&fscache_n_store_vmscan_wait);
60163 + fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
60164 __fscache_wait_on_page_write(cookie, page);
60165 gfp &= ~__GFP_WAIT;
60166 goto try_again;
60167 @@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
60168 FSCACHE_COOKIE_STORING_TAG);
60169 if (!radix_tree_tag_get(&cookie->stores, page->index,
60170 FSCACHE_COOKIE_PENDING_TAG)) {
60171 - fscache_stat(&fscache_n_store_radix_deletes);
60172 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
60173 xpage = radix_tree_delete(&cookie->stores, page->index);
60174 }
60175 spin_unlock(&cookie->stores_lock);
60176 @@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
60177
60178 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
60179
60180 - fscache_stat(&fscache_n_attr_changed_calls);
60181 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
60182
60183 if (fscache_object_is_active(object)) {
60184 fscache_stat(&fscache_n_cop_attr_changed);
60185 @@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
60186
60187 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60188
60189 - fscache_stat(&fscache_n_attr_changed);
60190 + fscache_stat_unchecked(&fscache_n_attr_changed);
60191
60192 op = kzalloc(sizeof(*op), GFP_KERNEL);
60193 if (!op) {
60194 - fscache_stat(&fscache_n_attr_changed_nomem);
60195 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
60196 _leave(" = -ENOMEM");
60197 return -ENOMEM;
60198 }
60199 @@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
60200 if (fscache_submit_exclusive_op(object, op) < 0)
60201 goto nobufs;
60202 spin_unlock(&cookie->lock);
60203 - fscache_stat(&fscache_n_attr_changed_ok);
60204 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
60205 fscache_put_operation(op);
60206 _leave(" = 0");
60207 return 0;
60208 @@ -225,7 +225,7 @@ nobufs:
60209 kfree(op);
60210 if (wake_cookie)
60211 __fscache_wake_unused_cookie(cookie);
60212 - fscache_stat(&fscache_n_attr_changed_nobufs);
60213 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
60214 _leave(" = %d", -ENOBUFS);
60215 return -ENOBUFS;
60216 }
60217 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
60218 /* allocate a retrieval operation and attempt to submit it */
60219 op = kzalloc(sizeof(*op), GFP_NOIO);
60220 if (!op) {
60221 - fscache_stat(&fscache_n_retrievals_nomem);
60222 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60223 return NULL;
60224 }
60225
60226 @@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
60227 return 0;
60228 }
60229
60230 - fscache_stat(&fscache_n_retrievals_wait);
60231 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
60232
60233 jif = jiffies;
60234 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
60235 fscache_wait_bit_interruptible,
60236 TASK_INTERRUPTIBLE) != 0) {
60237 - fscache_stat(&fscache_n_retrievals_intr);
60238 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
60239 _leave(" = -ERESTARTSYS");
60240 return -ERESTARTSYS;
60241 }
60242 @@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
60243 */
60244 int fscache_wait_for_operation_activation(struct fscache_object *object,
60245 struct fscache_operation *op,
60246 - atomic_t *stat_op_waits,
60247 - atomic_t *stat_object_dead,
60248 + atomic_unchecked_t *stat_op_waits,
60249 + atomic_unchecked_t *stat_object_dead,
60250 void (*do_cancel)(struct fscache_operation *))
60251 {
60252 int ret;
60253 @@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
60254
60255 _debug(">>> WT");
60256 if (stat_op_waits)
60257 - fscache_stat(stat_op_waits);
60258 + fscache_stat_unchecked(stat_op_waits);
60259 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
60260 fscache_wait_bit_interruptible,
60261 TASK_INTERRUPTIBLE) != 0) {
60262 @@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
60263 check_if_dead:
60264 if (op->state == FSCACHE_OP_ST_CANCELLED) {
60265 if (stat_object_dead)
60266 - fscache_stat(stat_object_dead);
60267 + fscache_stat_unchecked(stat_object_dead);
60268 _leave(" = -ENOBUFS [cancelled]");
60269 return -ENOBUFS;
60270 }
60271 @@ -366,7 +366,7 @@ check_if_dead:
60272 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
60273 fscache_cancel_op(op, do_cancel);
60274 if (stat_object_dead)
60275 - fscache_stat(stat_object_dead);
60276 + fscache_stat_unchecked(stat_object_dead);
60277 return -ENOBUFS;
60278 }
60279 return 0;
60280 @@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60281
60282 _enter("%p,%p,,,", cookie, page);
60283
60284 - fscache_stat(&fscache_n_retrievals);
60285 + fscache_stat_unchecked(&fscache_n_retrievals);
60286
60287 if (hlist_empty(&cookie->backing_objects))
60288 goto nobufs;
60289 @@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60290 goto nobufs_unlock_dec;
60291 spin_unlock(&cookie->lock);
60292
60293 - fscache_stat(&fscache_n_retrieval_ops);
60294 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
60295
60296 /* pin the netfs read context in case we need to do the actual netfs
60297 * read because we've encountered a cache read failure */
60298 @@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60299
60300 error:
60301 if (ret == -ENOMEM)
60302 - fscache_stat(&fscache_n_retrievals_nomem);
60303 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60304 else if (ret == -ERESTARTSYS)
60305 - fscache_stat(&fscache_n_retrievals_intr);
60306 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
60307 else if (ret == -ENODATA)
60308 - fscache_stat(&fscache_n_retrievals_nodata);
60309 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
60310 else if (ret < 0)
60311 - fscache_stat(&fscache_n_retrievals_nobufs);
60312 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60313 else
60314 - fscache_stat(&fscache_n_retrievals_ok);
60315 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
60316
60317 fscache_put_retrieval(op);
60318 _leave(" = %d", ret);
60319 @@ -490,7 +490,7 @@ nobufs_unlock:
60320 __fscache_wake_unused_cookie(cookie);
60321 kfree(op);
60322 nobufs:
60323 - fscache_stat(&fscache_n_retrievals_nobufs);
60324 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60325 _leave(" = -ENOBUFS");
60326 return -ENOBUFS;
60327 }
60328 @@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60329
60330 _enter("%p,,%d,,,", cookie, *nr_pages);
60331
60332 - fscache_stat(&fscache_n_retrievals);
60333 + fscache_stat_unchecked(&fscache_n_retrievals);
60334
60335 if (hlist_empty(&cookie->backing_objects))
60336 goto nobufs;
60337 @@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60338 goto nobufs_unlock_dec;
60339 spin_unlock(&cookie->lock);
60340
60341 - fscache_stat(&fscache_n_retrieval_ops);
60342 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
60343
60344 /* pin the netfs read context in case we need to do the actual netfs
60345 * read because we've encountered a cache read failure */
60346 @@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60347
60348 error:
60349 if (ret == -ENOMEM)
60350 - fscache_stat(&fscache_n_retrievals_nomem);
60351 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60352 else if (ret == -ERESTARTSYS)
60353 - fscache_stat(&fscache_n_retrievals_intr);
60354 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
60355 else if (ret == -ENODATA)
60356 - fscache_stat(&fscache_n_retrievals_nodata);
60357 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
60358 else if (ret < 0)
60359 - fscache_stat(&fscache_n_retrievals_nobufs);
60360 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60361 else
60362 - fscache_stat(&fscache_n_retrievals_ok);
60363 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
60364
60365 fscache_put_retrieval(op);
60366 _leave(" = %d", ret);
60367 @@ -621,7 +621,7 @@ nobufs_unlock:
60368 if (wake_cookie)
60369 __fscache_wake_unused_cookie(cookie);
60370 nobufs:
60371 - fscache_stat(&fscache_n_retrievals_nobufs);
60372 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60373 _leave(" = -ENOBUFS");
60374 return -ENOBUFS;
60375 }
60376 @@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
60377
60378 _enter("%p,%p,,,", cookie, page);
60379
60380 - fscache_stat(&fscache_n_allocs);
60381 + fscache_stat_unchecked(&fscache_n_allocs);
60382
60383 if (hlist_empty(&cookie->backing_objects))
60384 goto nobufs;
60385 @@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
60386 goto nobufs_unlock_dec;
60387 spin_unlock(&cookie->lock);
60388
60389 - fscache_stat(&fscache_n_alloc_ops);
60390 + fscache_stat_unchecked(&fscache_n_alloc_ops);
60391
60392 ret = fscache_wait_for_operation_activation(
60393 object, &op->op,
60394 @@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
60395
60396 error:
60397 if (ret == -ERESTARTSYS)
60398 - fscache_stat(&fscache_n_allocs_intr);
60399 + fscache_stat_unchecked(&fscache_n_allocs_intr);
60400 else if (ret < 0)
60401 - fscache_stat(&fscache_n_allocs_nobufs);
60402 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
60403 else
60404 - fscache_stat(&fscache_n_allocs_ok);
60405 + fscache_stat_unchecked(&fscache_n_allocs_ok);
60406
60407 fscache_put_retrieval(op);
60408 _leave(" = %d", ret);
60409 @@ -715,7 +715,7 @@ nobufs_unlock:
60410 if (wake_cookie)
60411 __fscache_wake_unused_cookie(cookie);
60412 nobufs:
60413 - fscache_stat(&fscache_n_allocs_nobufs);
60414 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
60415 _leave(" = -ENOBUFS");
60416 return -ENOBUFS;
60417 }
60418 @@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
60419
60420 spin_lock(&cookie->stores_lock);
60421
60422 - fscache_stat(&fscache_n_store_calls);
60423 + fscache_stat_unchecked(&fscache_n_store_calls);
60424
60425 /* find a page to store */
60426 page = NULL;
60427 @@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
60428 page = results[0];
60429 _debug("gang %d [%lx]", n, page->index);
60430 if (page->index > op->store_limit) {
60431 - fscache_stat(&fscache_n_store_pages_over_limit);
60432 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
60433 goto superseded;
60434 }
60435
60436 @@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
60437 spin_unlock(&cookie->stores_lock);
60438 spin_unlock(&object->lock);
60439
60440 - fscache_stat(&fscache_n_store_pages);
60441 + fscache_stat_unchecked(&fscache_n_store_pages);
60442 fscache_stat(&fscache_n_cop_write_page);
60443 ret = object->cache->ops->write_page(op, page);
60444 fscache_stat_d(&fscache_n_cop_write_page);
60445 @@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60446 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60447 ASSERT(PageFsCache(page));
60448
60449 - fscache_stat(&fscache_n_stores);
60450 + fscache_stat_unchecked(&fscache_n_stores);
60451
60452 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
60453 _leave(" = -ENOBUFS [invalidating]");
60454 @@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60455 spin_unlock(&cookie->stores_lock);
60456 spin_unlock(&object->lock);
60457
60458 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
60459 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60460 op->store_limit = object->store_limit;
60461
60462 __fscache_use_cookie(cookie);
60463 @@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60464
60465 spin_unlock(&cookie->lock);
60466 radix_tree_preload_end();
60467 - fscache_stat(&fscache_n_store_ops);
60468 - fscache_stat(&fscache_n_stores_ok);
60469 + fscache_stat_unchecked(&fscache_n_store_ops);
60470 + fscache_stat_unchecked(&fscache_n_stores_ok);
60471
60472 /* the work queue now carries its own ref on the object */
60473 fscache_put_operation(&op->op);
60474 @@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60475 return 0;
60476
60477 already_queued:
60478 - fscache_stat(&fscache_n_stores_again);
60479 + fscache_stat_unchecked(&fscache_n_stores_again);
60480 already_pending:
60481 spin_unlock(&cookie->stores_lock);
60482 spin_unlock(&object->lock);
60483 spin_unlock(&cookie->lock);
60484 radix_tree_preload_end();
60485 kfree(op);
60486 - fscache_stat(&fscache_n_stores_ok);
60487 + fscache_stat_unchecked(&fscache_n_stores_ok);
60488 _leave(" = 0");
60489 return 0;
60490
60491 @@ -1024,14 +1024,14 @@ nobufs:
60492 kfree(op);
60493 if (wake_cookie)
60494 __fscache_wake_unused_cookie(cookie);
60495 - fscache_stat(&fscache_n_stores_nobufs);
60496 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
60497 _leave(" = -ENOBUFS");
60498 return -ENOBUFS;
60499
60500 nomem_free:
60501 kfree(op);
60502 nomem:
60503 - fscache_stat(&fscache_n_stores_oom);
60504 + fscache_stat_unchecked(&fscache_n_stores_oom);
60505 _leave(" = -ENOMEM");
60506 return -ENOMEM;
60507 }
60508 @@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
60509 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60510 ASSERTCMP(page, !=, NULL);
60511
60512 - fscache_stat(&fscache_n_uncaches);
60513 + fscache_stat_unchecked(&fscache_n_uncaches);
60514
60515 /* cache withdrawal may beat us to it */
60516 if (!PageFsCache(page))
60517 @@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
60518 struct fscache_cookie *cookie = op->op.object->cookie;
60519
60520 #ifdef CONFIG_FSCACHE_STATS
60521 - atomic_inc(&fscache_n_marks);
60522 + atomic_inc_unchecked(&fscache_n_marks);
60523 #endif
60524
60525 _debug("- mark %p{%lx}", page, page->index);
60526 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
60527 index 40d13c7..ddf52b9 100644
60528 --- a/fs/fscache/stats.c
60529 +++ b/fs/fscache/stats.c
60530 @@ -18,99 +18,99 @@
60531 /*
60532 * operation counters
60533 */
60534 -atomic_t fscache_n_op_pend;
60535 -atomic_t fscache_n_op_run;
60536 -atomic_t fscache_n_op_enqueue;
60537 -atomic_t fscache_n_op_requeue;
60538 -atomic_t fscache_n_op_deferred_release;
60539 -atomic_t fscache_n_op_release;
60540 -atomic_t fscache_n_op_gc;
60541 -atomic_t fscache_n_op_cancelled;
60542 -atomic_t fscache_n_op_rejected;
60543 +atomic_unchecked_t fscache_n_op_pend;
60544 +atomic_unchecked_t fscache_n_op_run;
60545 +atomic_unchecked_t fscache_n_op_enqueue;
60546 +atomic_unchecked_t fscache_n_op_requeue;
60547 +atomic_unchecked_t fscache_n_op_deferred_release;
60548 +atomic_unchecked_t fscache_n_op_release;
60549 +atomic_unchecked_t fscache_n_op_gc;
60550 +atomic_unchecked_t fscache_n_op_cancelled;
60551 +atomic_unchecked_t fscache_n_op_rejected;
60552
60553 -atomic_t fscache_n_attr_changed;
60554 -atomic_t fscache_n_attr_changed_ok;
60555 -atomic_t fscache_n_attr_changed_nobufs;
60556 -atomic_t fscache_n_attr_changed_nomem;
60557 -atomic_t fscache_n_attr_changed_calls;
60558 +atomic_unchecked_t fscache_n_attr_changed;
60559 +atomic_unchecked_t fscache_n_attr_changed_ok;
60560 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
60561 +atomic_unchecked_t fscache_n_attr_changed_nomem;
60562 +atomic_unchecked_t fscache_n_attr_changed_calls;
60563
60564 -atomic_t fscache_n_allocs;
60565 -atomic_t fscache_n_allocs_ok;
60566 -atomic_t fscache_n_allocs_wait;
60567 -atomic_t fscache_n_allocs_nobufs;
60568 -atomic_t fscache_n_allocs_intr;
60569 -atomic_t fscache_n_allocs_object_dead;
60570 -atomic_t fscache_n_alloc_ops;
60571 -atomic_t fscache_n_alloc_op_waits;
60572 +atomic_unchecked_t fscache_n_allocs;
60573 +atomic_unchecked_t fscache_n_allocs_ok;
60574 +atomic_unchecked_t fscache_n_allocs_wait;
60575 +atomic_unchecked_t fscache_n_allocs_nobufs;
60576 +atomic_unchecked_t fscache_n_allocs_intr;
60577 +atomic_unchecked_t fscache_n_allocs_object_dead;
60578 +atomic_unchecked_t fscache_n_alloc_ops;
60579 +atomic_unchecked_t fscache_n_alloc_op_waits;
60580
60581 -atomic_t fscache_n_retrievals;
60582 -atomic_t fscache_n_retrievals_ok;
60583 -atomic_t fscache_n_retrievals_wait;
60584 -atomic_t fscache_n_retrievals_nodata;
60585 -atomic_t fscache_n_retrievals_nobufs;
60586 -atomic_t fscache_n_retrievals_intr;
60587 -atomic_t fscache_n_retrievals_nomem;
60588 -atomic_t fscache_n_retrievals_object_dead;
60589 -atomic_t fscache_n_retrieval_ops;
60590 -atomic_t fscache_n_retrieval_op_waits;
60591 +atomic_unchecked_t fscache_n_retrievals;
60592 +atomic_unchecked_t fscache_n_retrievals_ok;
60593 +atomic_unchecked_t fscache_n_retrievals_wait;
60594 +atomic_unchecked_t fscache_n_retrievals_nodata;
60595 +atomic_unchecked_t fscache_n_retrievals_nobufs;
60596 +atomic_unchecked_t fscache_n_retrievals_intr;
60597 +atomic_unchecked_t fscache_n_retrievals_nomem;
60598 +atomic_unchecked_t fscache_n_retrievals_object_dead;
60599 +atomic_unchecked_t fscache_n_retrieval_ops;
60600 +atomic_unchecked_t fscache_n_retrieval_op_waits;
60601
60602 -atomic_t fscache_n_stores;
60603 -atomic_t fscache_n_stores_ok;
60604 -atomic_t fscache_n_stores_again;
60605 -atomic_t fscache_n_stores_nobufs;
60606 -atomic_t fscache_n_stores_oom;
60607 -atomic_t fscache_n_store_ops;
60608 -atomic_t fscache_n_store_calls;
60609 -atomic_t fscache_n_store_pages;
60610 -atomic_t fscache_n_store_radix_deletes;
60611 -atomic_t fscache_n_store_pages_over_limit;
60612 +atomic_unchecked_t fscache_n_stores;
60613 +atomic_unchecked_t fscache_n_stores_ok;
60614 +atomic_unchecked_t fscache_n_stores_again;
60615 +atomic_unchecked_t fscache_n_stores_nobufs;
60616 +atomic_unchecked_t fscache_n_stores_oom;
60617 +atomic_unchecked_t fscache_n_store_ops;
60618 +atomic_unchecked_t fscache_n_store_calls;
60619 +atomic_unchecked_t fscache_n_store_pages;
60620 +atomic_unchecked_t fscache_n_store_radix_deletes;
60621 +atomic_unchecked_t fscache_n_store_pages_over_limit;
60622
60623 -atomic_t fscache_n_store_vmscan_not_storing;
60624 -atomic_t fscache_n_store_vmscan_gone;
60625 -atomic_t fscache_n_store_vmscan_busy;
60626 -atomic_t fscache_n_store_vmscan_cancelled;
60627 -atomic_t fscache_n_store_vmscan_wait;
60628 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
60629 +atomic_unchecked_t fscache_n_store_vmscan_gone;
60630 +atomic_unchecked_t fscache_n_store_vmscan_busy;
60631 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
60632 +atomic_unchecked_t fscache_n_store_vmscan_wait;
60633
60634 -atomic_t fscache_n_marks;
60635 -atomic_t fscache_n_uncaches;
60636 +atomic_unchecked_t fscache_n_marks;
60637 +atomic_unchecked_t fscache_n_uncaches;
60638
60639 -atomic_t fscache_n_acquires;
60640 -atomic_t fscache_n_acquires_null;
60641 -atomic_t fscache_n_acquires_no_cache;
60642 -atomic_t fscache_n_acquires_ok;
60643 -atomic_t fscache_n_acquires_nobufs;
60644 -atomic_t fscache_n_acquires_oom;
60645 +atomic_unchecked_t fscache_n_acquires;
60646 +atomic_unchecked_t fscache_n_acquires_null;
60647 +atomic_unchecked_t fscache_n_acquires_no_cache;
60648 +atomic_unchecked_t fscache_n_acquires_ok;
60649 +atomic_unchecked_t fscache_n_acquires_nobufs;
60650 +atomic_unchecked_t fscache_n_acquires_oom;
60651
60652 -atomic_t fscache_n_invalidates;
60653 -atomic_t fscache_n_invalidates_run;
60654 +atomic_unchecked_t fscache_n_invalidates;
60655 +atomic_unchecked_t fscache_n_invalidates_run;
60656
60657 -atomic_t fscache_n_updates;
60658 -atomic_t fscache_n_updates_null;
60659 -atomic_t fscache_n_updates_run;
60660 +atomic_unchecked_t fscache_n_updates;
60661 +atomic_unchecked_t fscache_n_updates_null;
60662 +atomic_unchecked_t fscache_n_updates_run;
60663
60664 -atomic_t fscache_n_relinquishes;
60665 -atomic_t fscache_n_relinquishes_null;
60666 -atomic_t fscache_n_relinquishes_waitcrt;
60667 -atomic_t fscache_n_relinquishes_retire;
60668 +atomic_unchecked_t fscache_n_relinquishes;
60669 +atomic_unchecked_t fscache_n_relinquishes_null;
60670 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
60671 +atomic_unchecked_t fscache_n_relinquishes_retire;
60672
60673 -atomic_t fscache_n_cookie_index;
60674 -atomic_t fscache_n_cookie_data;
60675 -atomic_t fscache_n_cookie_special;
60676 +atomic_unchecked_t fscache_n_cookie_index;
60677 +atomic_unchecked_t fscache_n_cookie_data;
60678 +atomic_unchecked_t fscache_n_cookie_special;
60679
60680 -atomic_t fscache_n_object_alloc;
60681 -atomic_t fscache_n_object_no_alloc;
60682 -atomic_t fscache_n_object_lookups;
60683 -atomic_t fscache_n_object_lookups_negative;
60684 -atomic_t fscache_n_object_lookups_positive;
60685 -atomic_t fscache_n_object_lookups_timed_out;
60686 -atomic_t fscache_n_object_created;
60687 -atomic_t fscache_n_object_avail;
60688 -atomic_t fscache_n_object_dead;
60689 +atomic_unchecked_t fscache_n_object_alloc;
60690 +atomic_unchecked_t fscache_n_object_no_alloc;
60691 +atomic_unchecked_t fscache_n_object_lookups;
60692 +atomic_unchecked_t fscache_n_object_lookups_negative;
60693 +atomic_unchecked_t fscache_n_object_lookups_positive;
60694 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
60695 +atomic_unchecked_t fscache_n_object_created;
60696 +atomic_unchecked_t fscache_n_object_avail;
60697 +atomic_unchecked_t fscache_n_object_dead;
60698
60699 -atomic_t fscache_n_checkaux_none;
60700 -atomic_t fscache_n_checkaux_okay;
60701 -atomic_t fscache_n_checkaux_update;
60702 -atomic_t fscache_n_checkaux_obsolete;
60703 +atomic_unchecked_t fscache_n_checkaux_none;
60704 +atomic_unchecked_t fscache_n_checkaux_okay;
60705 +atomic_unchecked_t fscache_n_checkaux_update;
60706 +atomic_unchecked_t fscache_n_checkaux_obsolete;
60707
60708 atomic_t fscache_n_cop_alloc_object;
60709 atomic_t fscache_n_cop_lookup_object;
60710 @@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
60711 seq_puts(m, "FS-Cache statistics\n");
60712
60713 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
60714 - atomic_read(&fscache_n_cookie_index),
60715 - atomic_read(&fscache_n_cookie_data),
60716 - atomic_read(&fscache_n_cookie_special));
60717 + atomic_read_unchecked(&fscache_n_cookie_index),
60718 + atomic_read_unchecked(&fscache_n_cookie_data),
60719 + atomic_read_unchecked(&fscache_n_cookie_special));
60720
60721 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
60722 - atomic_read(&fscache_n_object_alloc),
60723 - atomic_read(&fscache_n_object_no_alloc),
60724 - atomic_read(&fscache_n_object_avail),
60725 - atomic_read(&fscache_n_object_dead));
60726 + atomic_read_unchecked(&fscache_n_object_alloc),
60727 + atomic_read_unchecked(&fscache_n_object_no_alloc),
60728 + atomic_read_unchecked(&fscache_n_object_avail),
60729 + atomic_read_unchecked(&fscache_n_object_dead));
60730 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
60731 - atomic_read(&fscache_n_checkaux_none),
60732 - atomic_read(&fscache_n_checkaux_okay),
60733 - atomic_read(&fscache_n_checkaux_update),
60734 - atomic_read(&fscache_n_checkaux_obsolete));
60735 + atomic_read_unchecked(&fscache_n_checkaux_none),
60736 + atomic_read_unchecked(&fscache_n_checkaux_okay),
60737 + atomic_read_unchecked(&fscache_n_checkaux_update),
60738 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
60739
60740 seq_printf(m, "Pages : mrk=%u unc=%u\n",
60741 - atomic_read(&fscache_n_marks),
60742 - atomic_read(&fscache_n_uncaches));
60743 + atomic_read_unchecked(&fscache_n_marks),
60744 + atomic_read_unchecked(&fscache_n_uncaches));
60745
60746 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
60747 " oom=%u\n",
60748 - atomic_read(&fscache_n_acquires),
60749 - atomic_read(&fscache_n_acquires_null),
60750 - atomic_read(&fscache_n_acquires_no_cache),
60751 - atomic_read(&fscache_n_acquires_ok),
60752 - atomic_read(&fscache_n_acquires_nobufs),
60753 - atomic_read(&fscache_n_acquires_oom));
60754 + atomic_read_unchecked(&fscache_n_acquires),
60755 + atomic_read_unchecked(&fscache_n_acquires_null),
60756 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
60757 + atomic_read_unchecked(&fscache_n_acquires_ok),
60758 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
60759 + atomic_read_unchecked(&fscache_n_acquires_oom));
60760
60761 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
60762 - atomic_read(&fscache_n_object_lookups),
60763 - atomic_read(&fscache_n_object_lookups_negative),
60764 - atomic_read(&fscache_n_object_lookups_positive),
60765 - atomic_read(&fscache_n_object_created),
60766 - atomic_read(&fscache_n_object_lookups_timed_out));
60767 + atomic_read_unchecked(&fscache_n_object_lookups),
60768 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
60769 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
60770 + atomic_read_unchecked(&fscache_n_object_created),
60771 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
60772
60773 seq_printf(m, "Invals : n=%u run=%u\n",
60774 - atomic_read(&fscache_n_invalidates),
60775 - atomic_read(&fscache_n_invalidates_run));
60776 + atomic_read_unchecked(&fscache_n_invalidates),
60777 + atomic_read_unchecked(&fscache_n_invalidates_run));
60778
60779 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
60780 - atomic_read(&fscache_n_updates),
60781 - atomic_read(&fscache_n_updates_null),
60782 - atomic_read(&fscache_n_updates_run));
60783 + atomic_read_unchecked(&fscache_n_updates),
60784 + atomic_read_unchecked(&fscache_n_updates_null),
60785 + atomic_read_unchecked(&fscache_n_updates_run));
60786
60787 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
60788 - atomic_read(&fscache_n_relinquishes),
60789 - atomic_read(&fscache_n_relinquishes_null),
60790 - atomic_read(&fscache_n_relinquishes_waitcrt),
60791 - atomic_read(&fscache_n_relinquishes_retire));
60792 + atomic_read_unchecked(&fscache_n_relinquishes),
60793 + atomic_read_unchecked(&fscache_n_relinquishes_null),
60794 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
60795 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
60796
60797 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
60798 - atomic_read(&fscache_n_attr_changed),
60799 - atomic_read(&fscache_n_attr_changed_ok),
60800 - atomic_read(&fscache_n_attr_changed_nobufs),
60801 - atomic_read(&fscache_n_attr_changed_nomem),
60802 - atomic_read(&fscache_n_attr_changed_calls));
60803 + atomic_read_unchecked(&fscache_n_attr_changed),
60804 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
60805 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
60806 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
60807 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
60808
60809 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
60810 - atomic_read(&fscache_n_allocs),
60811 - atomic_read(&fscache_n_allocs_ok),
60812 - atomic_read(&fscache_n_allocs_wait),
60813 - atomic_read(&fscache_n_allocs_nobufs),
60814 - atomic_read(&fscache_n_allocs_intr));
60815 + atomic_read_unchecked(&fscache_n_allocs),
60816 + atomic_read_unchecked(&fscache_n_allocs_ok),
60817 + atomic_read_unchecked(&fscache_n_allocs_wait),
60818 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
60819 + atomic_read_unchecked(&fscache_n_allocs_intr));
60820 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
60821 - atomic_read(&fscache_n_alloc_ops),
60822 - atomic_read(&fscache_n_alloc_op_waits),
60823 - atomic_read(&fscache_n_allocs_object_dead));
60824 + atomic_read_unchecked(&fscache_n_alloc_ops),
60825 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
60826 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
60827
60828 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
60829 " int=%u oom=%u\n",
60830 - atomic_read(&fscache_n_retrievals),
60831 - atomic_read(&fscache_n_retrievals_ok),
60832 - atomic_read(&fscache_n_retrievals_wait),
60833 - atomic_read(&fscache_n_retrievals_nodata),
60834 - atomic_read(&fscache_n_retrievals_nobufs),
60835 - atomic_read(&fscache_n_retrievals_intr),
60836 - atomic_read(&fscache_n_retrievals_nomem));
60837 + atomic_read_unchecked(&fscache_n_retrievals),
60838 + atomic_read_unchecked(&fscache_n_retrievals_ok),
60839 + atomic_read_unchecked(&fscache_n_retrievals_wait),
60840 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
60841 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
60842 + atomic_read_unchecked(&fscache_n_retrievals_intr),
60843 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
60844 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
60845 - atomic_read(&fscache_n_retrieval_ops),
60846 - atomic_read(&fscache_n_retrieval_op_waits),
60847 - atomic_read(&fscache_n_retrievals_object_dead));
60848 + atomic_read_unchecked(&fscache_n_retrieval_ops),
60849 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
60850 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
60851
60852 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
60853 - atomic_read(&fscache_n_stores),
60854 - atomic_read(&fscache_n_stores_ok),
60855 - atomic_read(&fscache_n_stores_again),
60856 - atomic_read(&fscache_n_stores_nobufs),
60857 - atomic_read(&fscache_n_stores_oom));
60858 + atomic_read_unchecked(&fscache_n_stores),
60859 + atomic_read_unchecked(&fscache_n_stores_ok),
60860 + atomic_read_unchecked(&fscache_n_stores_again),
60861 + atomic_read_unchecked(&fscache_n_stores_nobufs),
60862 + atomic_read_unchecked(&fscache_n_stores_oom));
60863 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
60864 - atomic_read(&fscache_n_store_ops),
60865 - atomic_read(&fscache_n_store_calls),
60866 - atomic_read(&fscache_n_store_pages),
60867 - atomic_read(&fscache_n_store_radix_deletes),
60868 - atomic_read(&fscache_n_store_pages_over_limit));
60869 + atomic_read_unchecked(&fscache_n_store_ops),
60870 + atomic_read_unchecked(&fscache_n_store_calls),
60871 + atomic_read_unchecked(&fscache_n_store_pages),
60872 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
60873 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
60874
60875 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
60876 - atomic_read(&fscache_n_store_vmscan_not_storing),
60877 - atomic_read(&fscache_n_store_vmscan_gone),
60878 - atomic_read(&fscache_n_store_vmscan_busy),
60879 - atomic_read(&fscache_n_store_vmscan_cancelled),
60880 - atomic_read(&fscache_n_store_vmscan_wait));
60881 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
60882 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
60883 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
60884 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
60885 + atomic_read_unchecked(&fscache_n_store_vmscan_wait));
60886
60887 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
60888 - atomic_read(&fscache_n_op_pend),
60889 - atomic_read(&fscache_n_op_run),
60890 - atomic_read(&fscache_n_op_enqueue),
60891 - atomic_read(&fscache_n_op_cancelled),
60892 - atomic_read(&fscache_n_op_rejected));
60893 + atomic_read_unchecked(&fscache_n_op_pend),
60894 + atomic_read_unchecked(&fscache_n_op_run),
60895 + atomic_read_unchecked(&fscache_n_op_enqueue),
60896 + atomic_read_unchecked(&fscache_n_op_cancelled),
60897 + atomic_read_unchecked(&fscache_n_op_rejected));
60898 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
60899 - atomic_read(&fscache_n_op_deferred_release),
60900 - atomic_read(&fscache_n_op_release),
60901 - atomic_read(&fscache_n_op_gc));
60902 + atomic_read_unchecked(&fscache_n_op_deferred_release),
60903 + atomic_read_unchecked(&fscache_n_op_release),
60904 + atomic_read_unchecked(&fscache_n_op_gc));
60905
60906 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
60907 atomic_read(&fscache_n_cop_alloc_object),
60908 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
60909 index b96a49b..9bfdc47 100644
60910 --- a/fs/fuse/cuse.c
60911 +++ b/fs/fuse/cuse.c
60912 @@ -606,10 +606,12 @@ static int __init cuse_init(void)
60913 INIT_LIST_HEAD(&cuse_conntbl[i]);
60914
60915 /* inherit and extend fuse_dev_operations */
60916 - cuse_channel_fops = fuse_dev_operations;
60917 - cuse_channel_fops.owner = THIS_MODULE;
60918 - cuse_channel_fops.open = cuse_channel_open;
60919 - cuse_channel_fops.release = cuse_channel_release;
60920 + pax_open_kernel();
60921 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
60922 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
60923 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
60924 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
60925 + pax_close_kernel();
60926
60927 cuse_class = class_create(THIS_MODULE, "cuse");
60928 if (IS_ERR(cuse_class))
60929 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
60930 index fa8cb4b..4acb935 100644
60931 --- a/fs/fuse/dev.c
60932 +++ b/fs/fuse/dev.c
60933 @@ -1323,7 +1323,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
60934 ret = 0;
60935 pipe_lock(pipe);
60936
60937 - if (!pipe->readers) {
60938 + if (!atomic_read(&pipe->readers)) {
60939 send_sig(SIGPIPE, current, 0);
60940 if (!ret)
60941 ret = -EPIPE;
60942 @@ -1352,7 +1352,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
60943 page_nr++;
60944 ret += buf->len;
60945
60946 - if (pipe->files)
60947 + if (atomic_read(&pipe->files))
60948 do_wakeup = 1;
60949 }
60950
60951 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
60952 index c3eb2c4..98007d4 100644
60953 --- a/fs/fuse/dir.c
60954 +++ b/fs/fuse/dir.c
60955 @@ -1408,7 +1408,7 @@ static char *read_link(struct dentry *dentry)
60956 return link;
60957 }
60958
60959 -static void free_link(char *link)
60960 +static void free_link(const char *link)
60961 {
60962 if (!IS_ERR(link))
60963 free_page((unsigned long) link);
60964 diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
60965 index db23ce1..9e6cd9d 100644
60966 --- a/fs/hostfs/hostfs_kern.c
60967 +++ b/fs/hostfs/hostfs_kern.c
60968 @@ -895,7 +895,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
60969
60970 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
60971 {
60972 - char *s = nd_get_link(nd);
60973 + const char *s = nd_get_link(nd);
60974 if (!IS_ERR(s))
60975 __putname(s);
60976 }
60977 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
60978 index d19b30a..ef89c36 100644
60979 --- a/fs/hugetlbfs/inode.c
60980 +++ b/fs/hugetlbfs/inode.c
60981 @@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60982 struct mm_struct *mm = current->mm;
60983 struct vm_area_struct *vma;
60984 struct hstate *h = hstate_file(file);
60985 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
60986 struct vm_unmapped_area_info info;
60987
60988 if (len & ~huge_page_mask(h))
60989 @@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60990 return addr;
60991 }
60992
60993 +#ifdef CONFIG_PAX_RANDMMAP
60994 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
60995 +#endif
60996 +
60997 if (addr) {
60998 addr = ALIGN(addr, huge_page_size(h));
60999 vma = find_vma(mm, addr);
61000 - if (TASK_SIZE - len >= addr &&
61001 - (!vma || addr + len <= vma->vm_start))
61002 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
61003 return addr;
61004 }
61005
61006 info.flags = 0;
61007 info.length = len;
61008 info.low_limit = TASK_UNMAPPED_BASE;
61009 +
61010 +#ifdef CONFIG_PAX_RANDMMAP
61011 + if (mm->pax_flags & MF_PAX_RANDMMAP)
61012 + info.low_limit += mm->delta_mmap;
61013 +#endif
61014 +
61015 info.high_limit = TASK_SIZE;
61016 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
61017 info.align_offset = 0;
61018 @@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
61019 };
61020 MODULE_ALIAS_FS("hugetlbfs");
61021
61022 -static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
61023 +struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
61024
61025 static int can_do_hugetlb_shm(void)
61026 {
61027 diff --git a/fs/inode.c b/fs/inode.c
61028 index 4bcdad3..1883822 100644
61029 --- a/fs/inode.c
61030 +++ b/fs/inode.c
61031 @@ -841,8 +841,8 @@ unsigned int get_next_ino(void)
61032
61033 #ifdef CONFIG_SMP
61034 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
61035 - static atomic_t shared_last_ino;
61036 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
61037 + static atomic_unchecked_t shared_last_ino;
61038 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
61039
61040 res = next - LAST_INO_BATCH;
61041 }
61042 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
61043 index 4a6cf28..d3a29d3 100644
61044 --- a/fs/jffs2/erase.c
61045 +++ b/fs/jffs2/erase.c
61046 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
61047 struct jffs2_unknown_node marker = {
61048 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
61049 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
61050 - .totlen = cpu_to_je32(c->cleanmarker_size)
61051 + .totlen = cpu_to_je32(c->cleanmarker_size),
61052 + .hdr_crc = cpu_to_je32(0)
61053 };
61054
61055 jffs2_prealloc_raw_node_refs(c, jeb, 1);
61056 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
61057 index a6597d6..41b30ec 100644
61058 --- a/fs/jffs2/wbuf.c
61059 +++ b/fs/jffs2/wbuf.c
61060 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
61061 {
61062 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
61063 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
61064 - .totlen = constant_cpu_to_je32(8)
61065 + .totlen = constant_cpu_to_je32(8),
61066 + .hdr_crc = constant_cpu_to_je32(0)
61067 };
61068
61069 /*
61070 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
61071 index 6669aa2..36b033d 100644
61072 --- a/fs/jfs/super.c
61073 +++ b/fs/jfs/super.c
61074 @@ -882,7 +882,7 @@ static int __init init_jfs_fs(void)
61075
61076 jfs_inode_cachep =
61077 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
61078 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
61079 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
61080 init_once);
61081 if (jfs_inode_cachep == NULL)
61082 return -ENOMEM;
61083 diff --git a/fs/libfs.c b/fs/libfs.c
61084 index a184424..944ddce 100644
61085 --- a/fs/libfs.c
61086 +++ b/fs/libfs.c
61087 @@ -159,6 +159,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
61088
61089 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
61090 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
61091 + char d_name[sizeof(next->d_iname)];
61092 + const unsigned char *name;
61093 +
61094 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
61095 if (!simple_positive(next)) {
61096 spin_unlock(&next->d_lock);
61097 @@ -167,7 +170,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
61098
61099 spin_unlock(&next->d_lock);
61100 spin_unlock(&dentry->d_lock);
61101 - if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
61102 + name = next->d_name.name;
61103 + if (name == next->d_iname) {
61104 + memcpy(d_name, name, next->d_name.len);
61105 + name = d_name;
61106 + }
61107 + if (!dir_emit(ctx, name, next->d_name.len,
61108 next->d_inode->i_ino, dt_type(next->d_inode)))
61109 return 0;
61110 spin_lock(&dentry->d_lock);
61111 @@ -999,7 +1007,7 @@ EXPORT_SYMBOL(noop_fsync);
61112 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
61113 void *cookie)
61114 {
61115 - char *s = nd_get_link(nd);
61116 + const char *s = nd_get_link(nd);
61117 if (!IS_ERR(s))
61118 kfree(s);
61119 }
61120 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
61121 index acd3947..1f896e2 100644
61122 --- a/fs/lockd/clntproc.c
61123 +++ b/fs/lockd/clntproc.c
61124 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
61125 /*
61126 * Cookie counter for NLM requests
61127 */
61128 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
61129 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
61130
61131 void nlmclnt_next_cookie(struct nlm_cookie *c)
61132 {
61133 - u32 cookie = atomic_inc_return(&nlm_cookie);
61134 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
61135
61136 memcpy(c->data, &cookie, 4);
61137 c->len=4;
61138 diff --git a/fs/locks.c b/fs/locks.c
61139 index 92a0f0a..45a48f0 100644
61140 --- a/fs/locks.c
61141 +++ b/fs/locks.c
61142 @@ -2219,16 +2219,16 @@ void locks_remove_flock(struct file *filp)
61143 return;
61144
61145 if (filp->f_op->flock) {
61146 - struct file_lock fl = {
61147 + struct file_lock flock = {
61148 .fl_pid = current->tgid,
61149 .fl_file = filp,
61150 .fl_flags = FL_FLOCK,
61151 .fl_type = F_UNLCK,
61152 .fl_end = OFFSET_MAX,
61153 };
61154 - filp->f_op->flock(filp, F_SETLKW, &fl);
61155 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
61156 - fl.fl_ops->fl_release_private(&fl);
61157 + filp->f_op->flock(filp, F_SETLKW, &flock);
61158 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
61159 + flock.fl_ops->fl_release_private(&flock);
61160 }
61161
61162 spin_lock(&inode->i_lock);
61163 diff --git a/fs/mount.h b/fs/mount.h
61164 index a17458c..e69fb5b 100644
61165 --- a/fs/mount.h
61166 +++ b/fs/mount.h
61167 @@ -11,7 +11,7 @@ struct mnt_namespace {
61168 u64 seq; /* Sequence number to prevent loops */
61169 wait_queue_head_t poll;
61170 int event;
61171 -};
61172 +} __randomize_layout;
61173
61174 struct mnt_pcp {
61175 int mnt_count;
61176 @@ -57,7 +57,7 @@ struct mount {
61177 int mnt_expiry_mark; /* true if marked for expiry */
61178 int mnt_pinned;
61179 struct path mnt_ex_mountpoint;
61180 -};
61181 +} __randomize_layout;
61182
61183 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
61184
61185 diff --git a/fs/namei.c b/fs/namei.c
61186 index cfe6608..a24748c 100644
61187 --- a/fs/namei.c
61188 +++ b/fs/namei.c
61189 @@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
61190 if (ret != -EACCES)
61191 return ret;
61192
61193 +#ifdef CONFIG_GRKERNSEC
61194 + /* we'll block if we have to log due to a denied capability use */
61195 + if (mask & MAY_NOT_BLOCK)
61196 + return -ECHILD;
61197 +#endif
61198 +
61199 if (S_ISDIR(inode->i_mode)) {
61200 /* DACs are overridable for directories */
61201 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
61202 - return 0;
61203 if (!(mask & MAY_WRITE))
61204 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
61205 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
61206 + inode_capable(inode, CAP_DAC_READ_SEARCH))
61207 return 0;
61208 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
61209 + return 0;
61210 return -EACCES;
61211 }
61212 /*
61213 + * Searching includes executable on directories, else just read.
61214 + */
61215 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
61216 + if (mask == MAY_READ)
61217 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
61218 + inode_capable(inode, CAP_DAC_READ_SEARCH))
61219 + return 0;
61220 +
61221 + /*
61222 * Read/write DACs are always overridable.
61223 * Executable DACs are overridable when there is
61224 * at least one exec bit set.
61225 @@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
61226 if (inode_capable(inode, CAP_DAC_OVERRIDE))
61227 return 0;
61228
61229 - /*
61230 - * Searching includes executable on directories, else just read.
61231 - */
61232 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
61233 - if (mask == MAY_READ)
61234 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
61235 - return 0;
61236 -
61237 return -EACCES;
61238 }
61239
61240 @@ -810,7 +818,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
61241 {
61242 struct dentry *dentry = link->dentry;
61243 int error;
61244 - char *s;
61245 + const char *s;
61246
61247 BUG_ON(nd->flags & LOOKUP_RCU);
61248
61249 @@ -831,6 +839,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
61250 if (error)
61251 goto out_put_nd_path;
61252
61253 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
61254 + dentry->d_inode, dentry, nd->path.mnt)) {
61255 + error = -EACCES;
61256 + goto out_put_nd_path;
61257 + }
61258 +
61259 nd->last_type = LAST_BIND;
61260 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
61261 error = PTR_ERR(*p);
61262 @@ -1098,7 +1112,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
61263 return false;
61264
61265 if (!d_mountpoint(path->dentry))
61266 - break;
61267 + return true;
61268
61269 mounted = __lookup_mnt(path->mnt, path->dentry);
61270 if (!mounted)
61271 @@ -1114,20 +1128,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
61272 */
61273 *inode = path->dentry->d_inode;
61274 }
61275 - return true;
61276 -}
61277 -
61278 -static void follow_mount_rcu(struct nameidata *nd)
61279 -{
61280 - while (d_mountpoint(nd->path.dentry)) {
61281 - struct mount *mounted;
61282 - mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
61283 - if (!mounted)
61284 - break;
61285 - nd->path.mnt = &mounted->mnt;
61286 - nd->path.dentry = mounted->mnt.mnt_root;
61287 - nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
61288 - }
61289 + return read_seqretry(&mount_lock, nd->m_seq);
61290 }
61291
61292 static int follow_dotdot_rcu(struct nameidata *nd)
61293 @@ -1155,7 +1156,17 @@ static int follow_dotdot_rcu(struct nameidata *nd)
61294 break;
61295 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
61296 }
61297 - follow_mount_rcu(nd);
61298 + while (d_mountpoint(nd->path.dentry)) {
61299 + struct mount *mounted;
61300 + mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
61301 + if (!mounted)
61302 + break;
61303 + nd->path.mnt = &mounted->mnt;
61304 + nd->path.dentry = mounted->mnt.mnt_root;
61305 + nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
61306 + if (!read_seqretry(&mount_lock, nd->m_seq))
61307 + goto failed;
61308 + }
61309 nd->inode = nd->path.dentry->d_inode;
61310 return 0;
61311
61312 @@ -1582,6 +1593,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
61313 if (res)
61314 break;
61315 res = walk_component(nd, path, LOOKUP_FOLLOW);
61316 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
61317 + res = -EACCES;
61318 put_link(nd, &link, cookie);
61319 } while (res > 0);
61320
61321 @@ -1655,7 +1668,7 @@ EXPORT_SYMBOL(full_name_hash);
61322 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
61323 {
61324 unsigned long a, b, adata, bdata, mask, hash, len;
61325 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
61326 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
61327
61328 hash = a = 0;
61329 len = -sizeof(unsigned long);
61330 @@ -1939,6 +1952,8 @@ static int path_lookupat(int dfd, const char *name,
61331 if (err)
61332 break;
61333 err = lookup_last(nd, &path);
61334 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
61335 + err = -EACCES;
61336 put_link(nd, &link, cookie);
61337 }
61338 }
61339 @@ -1946,6 +1961,13 @@ static int path_lookupat(int dfd, const char *name,
61340 if (!err)
61341 err = complete_walk(nd);
61342
61343 + if (!err && !(nd->flags & LOOKUP_PARENT)) {
61344 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
61345 + path_put(&nd->path);
61346 + err = -ENOENT;
61347 + }
61348 + }
61349 +
61350 if (!err && nd->flags & LOOKUP_DIRECTORY) {
61351 if (!d_is_directory(nd->path.dentry)) {
61352 path_put(&nd->path);
61353 @@ -1973,8 +1995,15 @@ static int filename_lookup(int dfd, struct filename *name,
61354 retval = path_lookupat(dfd, name->name,
61355 flags | LOOKUP_REVAL, nd);
61356
61357 - if (likely(!retval))
61358 + if (likely(!retval)) {
61359 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
61360 + if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
61361 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
61362 + path_put(&nd->path);
61363 + return -ENOENT;
61364 + }
61365 + }
61366 + }
61367 return retval;
61368 }
61369
61370 @@ -2548,6 +2577,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
61371 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
61372 return -EPERM;
61373
61374 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
61375 + return -EPERM;
61376 + if (gr_handle_rawio(inode))
61377 + return -EPERM;
61378 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
61379 + return -EACCES;
61380 +
61381 return 0;
61382 }
61383
61384 @@ -2779,7 +2815,7 @@ looked_up:
61385 * cleared otherwise prior to returning.
61386 */
61387 static int lookup_open(struct nameidata *nd, struct path *path,
61388 - struct file *file,
61389 + struct path *link, struct file *file,
61390 const struct open_flags *op,
61391 bool got_write, int *opened)
61392 {
61393 @@ -2814,6 +2850,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
61394 /* Negative dentry, just create the file */
61395 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
61396 umode_t mode = op->mode;
61397 +
61398 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
61399 + error = -EACCES;
61400 + goto out_dput;
61401 + }
61402 +
61403 + if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
61404 + error = -EACCES;
61405 + goto out_dput;
61406 + }
61407 +
61408 if (!IS_POSIXACL(dir->d_inode))
61409 mode &= ~current_umask();
61410 /*
61411 @@ -2835,6 +2882,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
61412 nd->flags & LOOKUP_EXCL);
61413 if (error)
61414 goto out_dput;
61415 + else
61416 + gr_handle_create(dentry, nd->path.mnt);
61417 }
61418 out_no_open:
61419 path->dentry = dentry;
61420 @@ -2849,7 +2898,7 @@ out_dput:
61421 /*
61422 * Handle the last step of open()
61423 */
61424 -static int do_last(struct nameidata *nd, struct path *path,
61425 +static int do_last(struct nameidata *nd, struct path *path, struct path *link,
61426 struct file *file, const struct open_flags *op,
61427 int *opened, struct filename *name)
61428 {
61429 @@ -2899,6 +2948,15 @@ static int do_last(struct nameidata *nd, struct path *path,
61430 if (error)
61431 return error;
61432
61433 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
61434 + error = -ENOENT;
61435 + goto out;
61436 + }
61437 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
61438 + error = -EACCES;
61439 + goto out;
61440 + }
61441 +
61442 audit_inode(name, dir, LOOKUP_PARENT);
61443 error = -EISDIR;
61444 /* trailing slashes? */
61445 @@ -2918,7 +2976,7 @@ retry_lookup:
61446 */
61447 }
61448 mutex_lock(&dir->d_inode->i_mutex);
61449 - error = lookup_open(nd, path, file, op, got_write, opened);
61450 + error = lookup_open(nd, path, link, file, op, got_write, opened);
61451 mutex_unlock(&dir->d_inode->i_mutex);
61452
61453 if (error <= 0) {
61454 @@ -2942,11 +3000,28 @@ retry_lookup:
61455 goto finish_open_created;
61456 }
61457
61458 + if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
61459 + error = -ENOENT;
61460 + goto exit_dput;
61461 + }
61462 + if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
61463 + error = -EACCES;
61464 + goto exit_dput;
61465 + }
61466 +
61467 /*
61468 * create/update audit record if it already exists.
61469 */
61470 - if (d_is_positive(path->dentry))
61471 + if (d_is_positive(path->dentry)) {
61472 + /* only check if O_CREAT is specified, all other checks need to go
61473 + into may_open */
61474 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
61475 + error = -EACCES;
61476 + goto exit_dput;
61477 + }
61478 +
61479 audit_inode(name, path->dentry, 0);
61480 + }
61481
61482 /*
61483 * If atomic_open() acquired write access it is dropped now due to
61484 @@ -2987,6 +3062,11 @@ finish_lookup:
61485 }
61486 }
61487 BUG_ON(inode != path->dentry->d_inode);
61488 + /* if we're resolving a symlink to another symlink */
61489 + if (link && gr_handle_symlink_owner(link, inode)) {
61490 + error = -EACCES;
61491 + goto out;
61492 + }
61493 return 1;
61494 }
61495
61496 @@ -2996,7 +3076,6 @@ finish_lookup:
61497 save_parent.dentry = nd->path.dentry;
61498 save_parent.mnt = mntget(path->mnt);
61499 nd->path.dentry = path->dentry;
61500 -
61501 }
61502 nd->inode = inode;
61503 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
61504 @@ -3006,7 +3085,18 @@ finish_open:
61505 path_put(&save_parent);
61506 return error;
61507 }
61508 +
61509 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
61510 + error = -ENOENT;
61511 + goto out;
61512 + }
61513 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
61514 + error = -EACCES;
61515 + goto out;
61516 + }
61517 +
61518 audit_inode(name, nd->path.dentry, 0);
61519 +
61520 error = -EISDIR;
61521 if ((open_flag & O_CREAT) &&
61522 (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
61523 @@ -3170,7 +3260,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
61524 if (unlikely(error))
61525 goto out;
61526
61527 - error = do_last(nd, &path, file, op, &opened, pathname);
61528 + error = do_last(nd, &path, NULL, file, op, &opened, pathname);
61529 while (unlikely(error > 0)) { /* trailing symlink */
61530 struct path link = path;
61531 void *cookie;
61532 @@ -3188,7 +3278,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
61533 error = follow_link(&link, nd, &cookie);
61534 if (unlikely(error))
61535 break;
61536 - error = do_last(nd, &path, file, op, &opened, pathname);
61537 + error = do_last(nd, &path, &link, file, op, &opened, pathname);
61538 put_link(nd, &link, cookie);
61539 }
61540 out:
61541 @@ -3288,9 +3378,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
61542 goto unlock;
61543
61544 error = -EEXIST;
61545 - if (d_is_positive(dentry))
61546 + if (d_is_positive(dentry)) {
61547 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
61548 + error = -ENOENT;
61549 goto fail;
61550 -
61551 + }
61552 /*
61553 * Special case - lookup gave negative, but... we had foo/bar/
61554 * From the vfs_mknod() POV we just have a negative dentry -
61555 @@ -3342,6 +3434,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
61556 }
61557 EXPORT_SYMBOL(user_path_create);
61558
61559 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
61560 +{
61561 + struct filename *tmp = getname(pathname);
61562 + struct dentry *res;
61563 + if (IS_ERR(tmp))
61564 + return ERR_CAST(tmp);
61565 + res = kern_path_create(dfd, tmp->name, path, lookup_flags);
61566 + if (IS_ERR(res))
61567 + putname(tmp);
61568 + else
61569 + *to = tmp;
61570 + return res;
61571 +}
61572 +
61573 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
61574 {
61575 int error = may_create(dir, dentry);
61576 @@ -3404,6 +3510,17 @@ retry:
61577
61578 if (!IS_POSIXACL(path.dentry->d_inode))
61579 mode &= ~current_umask();
61580 +
61581 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
61582 + error = -EPERM;
61583 + goto out;
61584 + }
61585 +
61586 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
61587 + error = -EACCES;
61588 + goto out;
61589 + }
61590 +
61591 error = security_path_mknod(&path, dentry, mode, dev);
61592 if (error)
61593 goto out;
61594 @@ -3420,6 +3537,8 @@ retry:
61595 break;
61596 }
61597 out:
61598 + if (!error)
61599 + gr_handle_create(dentry, path.mnt);
61600 done_path_create(&path, dentry);
61601 if (retry_estale(error, lookup_flags)) {
61602 lookup_flags |= LOOKUP_REVAL;
61603 @@ -3472,9 +3591,16 @@ retry:
61604
61605 if (!IS_POSIXACL(path.dentry->d_inode))
61606 mode &= ~current_umask();
61607 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
61608 + error = -EACCES;
61609 + goto out;
61610 + }
61611 error = security_path_mkdir(&path, dentry, mode);
61612 if (!error)
61613 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
61614 + if (!error)
61615 + gr_handle_create(dentry, path.mnt);
61616 +out:
61617 done_path_create(&path, dentry);
61618 if (retry_estale(error, lookup_flags)) {
61619 lookup_flags |= LOOKUP_REVAL;
61620 @@ -3555,6 +3681,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
61621 struct filename *name;
61622 struct dentry *dentry;
61623 struct nameidata nd;
61624 + ino_t saved_ino = 0;
61625 + dev_t saved_dev = 0;
61626 unsigned int lookup_flags = 0;
61627 retry:
61628 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
61629 @@ -3587,10 +3715,21 @@ retry:
61630 error = -ENOENT;
61631 goto exit3;
61632 }
61633 +
61634 + saved_ino = dentry->d_inode->i_ino;
61635 + saved_dev = gr_get_dev_from_dentry(dentry);
61636 +
61637 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
61638 + error = -EACCES;
61639 + goto exit3;
61640 + }
61641 +
61642 error = security_path_rmdir(&nd.path, dentry);
61643 if (error)
61644 goto exit3;
61645 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
61646 + if (!error && (saved_dev || saved_ino))
61647 + gr_handle_delete(saved_ino, saved_dev);
61648 exit3:
61649 dput(dentry);
61650 exit2:
61651 @@ -3680,6 +3819,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
61652 struct nameidata nd;
61653 struct inode *inode = NULL;
61654 struct inode *delegated_inode = NULL;
61655 + ino_t saved_ino = 0;
61656 + dev_t saved_dev = 0;
61657 unsigned int lookup_flags = 0;
61658 retry:
61659 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
61660 @@ -3706,10 +3847,22 @@ retry_deleg:
61661 if (d_is_negative(dentry))
61662 goto slashes;
61663 ihold(inode);
61664 +
61665 + if (inode->i_nlink <= 1) {
61666 + saved_ino = inode->i_ino;
61667 + saved_dev = gr_get_dev_from_dentry(dentry);
61668 + }
61669 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
61670 + error = -EACCES;
61671 + goto exit2;
61672 + }
61673 +
61674 error = security_path_unlink(&nd.path, dentry);
61675 if (error)
61676 goto exit2;
61677 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
61678 + if (!error && (saved_ino || saved_dev))
61679 + gr_handle_delete(saved_ino, saved_dev);
61680 exit2:
61681 dput(dentry);
61682 }
61683 @@ -3797,9 +3950,17 @@ retry:
61684 if (IS_ERR(dentry))
61685 goto out_putname;
61686
61687 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
61688 + error = -EACCES;
61689 + goto out;
61690 + }
61691 +
61692 error = security_path_symlink(&path, dentry, from->name);
61693 if (!error)
61694 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
61695 + if (!error)
61696 + gr_handle_create(dentry, path.mnt);
61697 +out:
61698 done_path_create(&path, dentry);
61699 if (retry_estale(error, lookup_flags)) {
61700 lookup_flags |= LOOKUP_REVAL;
61701 @@ -3902,6 +4063,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
61702 struct dentry *new_dentry;
61703 struct path old_path, new_path;
61704 struct inode *delegated_inode = NULL;
61705 + struct filename *to = NULL;
61706 int how = 0;
61707 int error;
61708
61709 @@ -3925,7 +4087,7 @@ retry:
61710 if (error)
61711 return error;
61712
61713 - new_dentry = user_path_create(newdfd, newname, &new_path,
61714 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
61715 (how & LOOKUP_REVAL));
61716 error = PTR_ERR(new_dentry);
61717 if (IS_ERR(new_dentry))
61718 @@ -3937,11 +4099,28 @@ retry:
61719 error = may_linkat(&old_path);
61720 if (unlikely(error))
61721 goto out_dput;
61722 +
61723 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
61724 + old_path.dentry->d_inode,
61725 + old_path.dentry->d_inode->i_mode, to)) {
61726 + error = -EACCES;
61727 + goto out_dput;
61728 + }
61729 +
61730 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
61731 + old_path.dentry, old_path.mnt, to)) {
61732 + error = -EACCES;
61733 + goto out_dput;
61734 + }
61735 +
61736 error = security_path_link(old_path.dentry, &new_path, new_dentry);
61737 if (error)
61738 goto out_dput;
61739 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
61740 + if (!error)
61741 + gr_handle_create(new_dentry, new_path.mnt);
61742 out_dput:
61743 + putname(to);
61744 done_path_create(&new_path, new_dentry);
61745 if (delegated_inode) {
61746 error = break_deleg_wait(&delegated_inode);
61747 @@ -4228,6 +4407,12 @@ retry_deleg:
61748 if (new_dentry == trap)
61749 goto exit5;
61750
61751 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
61752 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
61753 + to);
61754 + if (error)
61755 + goto exit5;
61756 +
61757 error = security_path_rename(&oldnd.path, old_dentry,
61758 &newnd.path, new_dentry);
61759 if (error)
61760 @@ -4235,6 +4420,9 @@ retry_deleg:
61761 error = vfs_rename(old_dir->d_inode, old_dentry,
61762 new_dir->d_inode, new_dentry,
61763 &delegated_inode);
61764 + if (!error)
61765 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
61766 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
61767 exit5:
61768 dput(new_dentry);
61769 exit4:
61770 @@ -4271,6 +4459,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
61771
61772 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
61773 {
61774 + char tmpbuf[64];
61775 + const char *newlink;
61776 int len;
61777
61778 len = PTR_ERR(link);
61779 @@ -4280,7 +4470,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
61780 len = strlen(link);
61781 if (len > (unsigned) buflen)
61782 len = buflen;
61783 - if (copy_to_user(buffer, link, len))
61784 +
61785 + if (len < sizeof(tmpbuf)) {
61786 + memcpy(tmpbuf, link, len);
61787 + newlink = tmpbuf;
61788 + } else
61789 + newlink = link;
61790 +
61791 + if (copy_to_user(buffer, newlink, len))
61792 len = -EFAULT;
61793 out:
61794 return len;
61795 diff --git a/fs/namespace.c b/fs/namespace.c
61796 index be32ebc..c595734 100644
61797 --- a/fs/namespace.c
61798 +++ b/fs/namespace.c
61799 @@ -1293,6 +1293,9 @@ static int do_umount(struct mount *mnt, int flags)
61800 if (!(sb->s_flags & MS_RDONLY))
61801 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
61802 up_write(&sb->s_umount);
61803 +
61804 + gr_log_remount(mnt->mnt_devname, retval);
61805 +
61806 return retval;
61807 }
61808
61809 @@ -1315,6 +1318,9 @@ static int do_umount(struct mount *mnt, int flags)
61810 }
61811 unlock_mount_hash();
61812 namespace_unlock();
61813 +
61814 + gr_log_unmount(mnt->mnt_devname, retval);
61815 +
61816 return retval;
61817 }
61818
61819 @@ -1334,7 +1340,7 @@ static inline bool may_mount(void)
61820 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
61821 */
61822
61823 -SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
61824 +SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
61825 {
61826 struct path path;
61827 struct mount *mnt;
61828 @@ -1376,7 +1382,7 @@ out:
61829 /*
61830 * The 2.0 compatible umount. No flags.
61831 */
61832 -SYSCALL_DEFINE1(oldumount, char __user *, name)
61833 +SYSCALL_DEFINE1(oldumount, const char __user *, name)
61834 {
61835 return sys_umount(name, 0);
61836 }
61837 @@ -2379,6 +2385,16 @@ long do_mount(const char *dev_name, const char *dir_name,
61838 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
61839 MS_STRICTATIME);
61840
61841 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
61842 + retval = -EPERM;
61843 + goto dput_out;
61844 + }
61845 +
61846 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
61847 + retval = -EPERM;
61848 + goto dput_out;
61849 + }
61850 +
61851 if (flags & MS_REMOUNT)
61852 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
61853 data_page);
61854 @@ -2393,6 +2409,9 @@ long do_mount(const char *dev_name, const char *dir_name,
61855 dev_name, data_page);
61856 dput_out:
61857 path_put(&path);
61858 +
61859 + gr_log_mount(dev_name, dir_name, retval);
61860 +
61861 return retval;
61862 }
61863
61864 @@ -2410,7 +2429,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
61865 * number incrementing at 10Ghz will take 12,427 years to wrap which
61866 * is effectively never, so we can ignore the possibility.
61867 */
61868 -static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
61869 +static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
61870
61871 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
61872 {
61873 @@ -2425,7 +2444,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
61874 kfree(new_ns);
61875 return ERR_PTR(ret);
61876 }
61877 - new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
61878 + new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
61879 atomic_set(&new_ns->count, 1);
61880 new_ns->root = NULL;
61881 INIT_LIST_HEAD(&new_ns->list);
61882 @@ -2435,7 +2454,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
61883 return new_ns;
61884 }
61885
61886 -struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
61887 +__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
61888 struct user_namespace *user_ns, struct fs_struct *new_fs)
61889 {
61890 struct mnt_namespace *new_ns;
61891 @@ -2556,8 +2575,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
61892 }
61893 EXPORT_SYMBOL(mount_subtree);
61894
61895 -SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
61896 - char __user *, type, unsigned long, flags, void __user *, data)
61897 +SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
61898 + const char __user *, type, unsigned long, flags, void __user *, data)
61899 {
61900 int ret;
61901 char *kernel_type;
61902 @@ -2670,6 +2689,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
61903 if (error)
61904 goto out2;
61905
61906 + if (gr_handle_chroot_pivot()) {
61907 + error = -EPERM;
61908 + goto out2;
61909 + }
61910 +
61911 get_fs_root(current->fs, &root);
61912 old_mp = lock_mount(&old);
61913 error = PTR_ERR(old_mp);
61914 @@ -2930,7 +2954,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
61915 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
61916 return -EPERM;
61917
61918 - if (fs->users != 1)
61919 + if (atomic_read(&fs->users) != 1)
61920 return -EINVAL;
61921
61922 get_mnt_ns(mnt_ns);
61923 diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
61924 index f4ccfe6..a5cf064 100644
61925 --- a/fs/nfs/callback_xdr.c
61926 +++ b/fs/nfs/callback_xdr.c
61927 @@ -51,7 +51,7 @@ struct callback_op {
61928 callback_decode_arg_t decode_args;
61929 callback_encode_res_t encode_res;
61930 long res_maxsize;
61931 -};
61932 +} __do_const;
61933
61934 static struct callback_op callback_ops[];
61935
61936 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
61937 index 5d94c02..630214f 100644
61938 --- a/fs/nfs/inode.c
61939 +++ b/fs/nfs/inode.c
61940 @@ -1153,16 +1153,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
61941 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
61942 }
61943
61944 -static atomic_long_t nfs_attr_generation_counter;
61945 +static atomic_long_unchecked_t nfs_attr_generation_counter;
61946
61947 static unsigned long nfs_read_attr_generation_counter(void)
61948 {
61949 - return atomic_long_read(&nfs_attr_generation_counter);
61950 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
61951 }
61952
61953 unsigned long nfs_inc_attr_generation_counter(void)
61954 {
61955 - return atomic_long_inc_return(&nfs_attr_generation_counter);
61956 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
61957 }
61958
61959 void nfs_fattr_init(struct nfs_fattr *fattr)
61960 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
61961 index 419572f..5414a23 100644
61962 --- a/fs/nfsd/nfs4proc.c
61963 +++ b/fs/nfsd/nfs4proc.c
61964 @@ -1168,7 +1168,7 @@ struct nfsd4_operation {
61965 nfsd4op_rsize op_rsize_bop;
61966 stateid_getter op_get_currentstateid;
61967 stateid_setter op_set_currentstateid;
61968 -};
61969 +} __do_const;
61970
61971 static struct nfsd4_operation nfsd4_ops[];
61972
61973 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
61974 index ee7237f..e3ae60a 100644
61975 --- a/fs/nfsd/nfs4xdr.c
61976 +++ b/fs/nfsd/nfs4xdr.c
61977 @@ -1523,7 +1523,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
61978
61979 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
61980
61981 -static nfsd4_dec nfsd4_dec_ops[] = {
61982 +static const nfsd4_dec nfsd4_dec_ops[] = {
61983 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
61984 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
61985 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
61986 diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
61987 index b6af150..f6ec5e3 100644
61988 --- a/fs/nfsd/nfscache.c
61989 +++ b/fs/nfsd/nfscache.c
61990 @@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
61991 {
61992 struct svc_cacherep *rp = rqstp->rq_cacherep;
61993 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
61994 - int len;
61995 + long len;
61996 size_t bufsize = 0;
61997
61998 if (!rp)
61999 return;
62000
62001 - len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
62002 - len >>= 2;
62003 + if (statp) {
62004 + len = (char*)statp - (char*)resv->iov_base;
62005 + len = resv->iov_len - len;
62006 + len >>= 2;
62007 + }
62008
62009 /* Don't cache excessive amounts of data and XDR failures */
62010 if (!statp || len > (256 >> 2)) {
62011 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
62012 index 7eea63c..a35f4fb 100644
62013 --- a/fs/nfsd/vfs.c
62014 +++ b/fs/nfsd/vfs.c
62015 @@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
62016 } else {
62017 oldfs = get_fs();
62018 set_fs(KERNEL_DS);
62019 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
62020 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
62021 set_fs(oldfs);
62022 }
62023
62024 @@ -1084,7 +1084,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
62025
62026 /* Write the data. */
62027 oldfs = get_fs(); set_fs(KERNEL_DS);
62028 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
62029 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
62030 set_fs(oldfs);
62031 if (host_err < 0)
62032 goto out_nfserr;
62033 @@ -1629,7 +1629,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
62034 */
62035
62036 oldfs = get_fs(); set_fs(KERNEL_DS);
62037 - host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
62038 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
62039 set_fs(oldfs);
62040
62041 if (host_err < 0)
62042 diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
62043 index fea6bd5..8ee9d81 100644
62044 --- a/fs/nls/nls_base.c
62045 +++ b/fs/nls/nls_base.c
62046 @@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
62047
62048 int register_nls(struct nls_table * nls)
62049 {
62050 - struct nls_table ** tmp = &tables;
62051 + struct nls_table *tmp = tables;
62052
62053 if (nls->next)
62054 return -EBUSY;
62055
62056 spin_lock(&nls_lock);
62057 - while (*tmp) {
62058 - if (nls == *tmp) {
62059 + while (tmp) {
62060 + if (nls == tmp) {
62061 spin_unlock(&nls_lock);
62062 return -EBUSY;
62063 }
62064 - tmp = &(*tmp)->next;
62065 + tmp = tmp->next;
62066 }
62067 - nls->next = tables;
62068 + pax_open_kernel();
62069 + *(struct nls_table **)&nls->next = tables;
62070 + pax_close_kernel();
62071 tables = nls;
62072 spin_unlock(&nls_lock);
62073 return 0;
62074 @@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
62075
62076 int unregister_nls(struct nls_table * nls)
62077 {
62078 - struct nls_table ** tmp = &tables;
62079 + struct nls_table * const * tmp = &tables;
62080
62081 spin_lock(&nls_lock);
62082 while (*tmp) {
62083 if (nls == *tmp) {
62084 - *tmp = nls->next;
62085 + pax_open_kernel();
62086 + *(struct nls_table **)tmp = nls->next;
62087 + pax_close_kernel();
62088 spin_unlock(&nls_lock);
62089 return 0;
62090 }
62091 diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
62092 index 7424929..35f6be5 100644
62093 --- a/fs/nls/nls_euc-jp.c
62094 +++ b/fs/nls/nls_euc-jp.c
62095 @@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
62096 p_nls = load_nls("cp932");
62097
62098 if (p_nls) {
62099 - table.charset2upper = p_nls->charset2upper;
62100 - table.charset2lower = p_nls->charset2lower;
62101 + pax_open_kernel();
62102 + *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
62103 + *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
62104 + pax_close_kernel();
62105 return register_nls(&table);
62106 }
62107
62108 diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
62109 index e7bc1d7..06bd4bb 100644
62110 --- a/fs/nls/nls_koi8-ru.c
62111 +++ b/fs/nls/nls_koi8-ru.c
62112 @@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
62113 p_nls = load_nls("koi8-u");
62114
62115 if (p_nls) {
62116 - table.charset2upper = p_nls->charset2upper;
62117 - table.charset2lower = p_nls->charset2lower;
62118 + pax_open_kernel();
62119 + *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
62120 + *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
62121 + pax_close_kernel();
62122 return register_nls(&table);
62123 }
62124
62125 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
62126 index 6663511..7668ca4 100644
62127 --- a/fs/notify/fanotify/fanotify_user.c
62128 +++ b/fs/notify/fanotify/fanotify_user.c
62129 @@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
62130
62131 fd = fanotify_event_metadata.fd;
62132 ret = -EFAULT;
62133 - if (copy_to_user(buf, &fanotify_event_metadata,
62134 - fanotify_event_metadata.event_len))
62135 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
62136 + copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
62137 goto out_close_fd;
62138
62139 ret = prepare_for_access_response(group, event, fd);
62140 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
62141 index 7b51b05..5ea5ef6 100644
62142 --- a/fs/notify/notification.c
62143 +++ b/fs/notify/notification.c
62144 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
62145 * get set to 0 so it will never get 'freed'
62146 */
62147 static struct fsnotify_event *q_overflow_event;
62148 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
62149 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
62150
62151 /**
62152 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
62153 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
62154 */
62155 u32 fsnotify_get_cookie(void)
62156 {
62157 - return atomic_inc_return(&fsnotify_sync_cookie);
62158 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
62159 }
62160 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
62161
62162 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
62163 index 9e38daf..5727cae 100644
62164 --- a/fs/ntfs/dir.c
62165 +++ b/fs/ntfs/dir.c
62166 @@ -1310,7 +1310,7 @@ find_next_index_buffer:
62167 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
62168 ~(s64)(ndir->itype.index.block_size - 1)));
62169 /* Bounds checks. */
62170 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
62171 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
62172 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
62173 "inode 0x%lx or driver bug.", vdir->i_ino);
62174 goto err_out;
62175 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
62176 index ea4ba9d..1e13d34 100644
62177 --- a/fs/ntfs/file.c
62178 +++ b/fs/ntfs/file.c
62179 @@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
62180 char *addr;
62181 size_t total = 0;
62182 unsigned len;
62183 - int left;
62184 + unsigned left;
62185
62186 do {
62187 len = PAGE_CACHE_SIZE - ofs;
62188 diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
62189 index 82650d5..db37dcf 100644
62190 --- a/fs/ntfs/super.c
62191 +++ b/fs/ntfs/super.c
62192 @@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62193 if (!silent)
62194 ntfs_error(sb, "Primary boot sector is invalid.");
62195 } else if (!silent)
62196 - ntfs_error(sb, read_err_str, "primary");
62197 + ntfs_error(sb, read_err_str, "%s", "primary");
62198 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
62199 if (bh_primary)
62200 brelse(bh_primary);
62201 @@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62202 goto hotfix_primary_boot_sector;
62203 brelse(bh_backup);
62204 } else if (!silent)
62205 - ntfs_error(sb, read_err_str, "backup");
62206 + ntfs_error(sb, read_err_str, "%s", "backup");
62207 /* Try to read NT3.51- backup boot sector. */
62208 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
62209 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
62210 @@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62211 "sector.");
62212 brelse(bh_backup);
62213 } else if (!silent)
62214 - ntfs_error(sb, read_err_str, "backup");
62215 + ntfs_error(sb, read_err_str, "%s", "backup");
62216 /* We failed. Cleanup and return. */
62217 if (bh_primary)
62218 brelse(bh_primary);
62219 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
62220 index cd5496b..26a1055 100644
62221 --- a/fs/ocfs2/localalloc.c
62222 +++ b/fs/ocfs2/localalloc.c
62223 @@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
62224 goto bail;
62225 }
62226
62227 - atomic_inc(&osb->alloc_stats.moves);
62228 + atomic_inc_unchecked(&osb->alloc_stats.moves);
62229
62230 bail:
62231 if (handle)
62232 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
62233 index 3a90347..c40bef8 100644
62234 --- a/fs/ocfs2/ocfs2.h
62235 +++ b/fs/ocfs2/ocfs2.h
62236 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
62237
62238 struct ocfs2_alloc_stats
62239 {
62240 - atomic_t moves;
62241 - atomic_t local_data;
62242 - atomic_t bitmap_data;
62243 - atomic_t bg_allocs;
62244 - atomic_t bg_extends;
62245 + atomic_unchecked_t moves;
62246 + atomic_unchecked_t local_data;
62247 + atomic_unchecked_t bitmap_data;
62248 + atomic_unchecked_t bg_allocs;
62249 + atomic_unchecked_t bg_extends;
62250 };
62251
62252 enum ocfs2_local_alloc_state
62253 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
62254 index 2c91452..77a3cd2 100644
62255 --- a/fs/ocfs2/suballoc.c
62256 +++ b/fs/ocfs2/suballoc.c
62257 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
62258 mlog_errno(status);
62259 goto bail;
62260 }
62261 - atomic_inc(&osb->alloc_stats.bg_extends);
62262 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
62263
62264 /* You should never ask for this much metadata */
62265 BUG_ON(bits_wanted >
62266 @@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handle,
62267 mlog_errno(status);
62268 goto bail;
62269 }
62270 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62271 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62272
62273 *suballoc_loc = res.sr_bg_blkno;
62274 *suballoc_bit_start = res.sr_bit_offset;
62275 @@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
62276 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
62277 res->sr_bits);
62278
62279 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62280 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62281
62282 BUG_ON(res->sr_bits != 1);
62283
62284 @@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
62285 mlog_errno(status);
62286 goto bail;
62287 }
62288 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62289 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62290
62291 BUG_ON(res.sr_bits != 1);
62292
62293 @@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
62294 cluster_start,
62295 num_clusters);
62296 if (!status)
62297 - atomic_inc(&osb->alloc_stats.local_data);
62298 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
62299 } else {
62300 if (min_clusters > (osb->bitmap_cpg - 1)) {
62301 /* The only paths asking for contiguousness
62302 @@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
62303 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
62304 res.sr_bg_blkno,
62305 res.sr_bit_offset);
62306 - atomic_inc(&osb->alloc_stats.bitmap_data);
62307 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
62308 *num_clusters = res.sr_bits;
62309 }
62310 }
62311 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
62312 index c414929..5c9ee542 100644
62313 --- a/fs/ocfs2/super.c
62314 +++ b/fs/ocfs2/super.c
62315 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
62316 "%10s => GlobalAllocs: %d LocalAllocs: %d "
62317 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
62318 "Stats",
62319 - atomic_read(&osb->alloc_stats.bitmap_data),
62320 - atomic_read(&osb->alloc_stats.local_data),
62321 - atomic_read(&osb->alloc_stats.bg_allocs),
62322 - atomic_read(&osb->alloc_stats.moves),
62323 - atomic_read(&osb->alloc_stats.bg_extends));
62324 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
62325 + atomic_read_unchecked(&osb->alloc_stats.local_data),
62326 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
62327 + atomic_read_unchecked(&osb->alloc_stats.moves),
62328 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
62329
62330 out += snprintf(buf + out, len - out,
62331 "%10s => State: %u Descriptor: %llu Size: %u bits "
62332 @@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
62333 spin_lock_init(&osb->osb_xattr_lock);
62334 ocfs2_init_steal_slots(osb);
62335
62336 - atomic_set(&osb->alloc_stats.moves, 0);
62337 - atomic_set(&osb->alloc_stats.local_data, 0);
62338 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
62339 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
62340 - atomic_set(&osb->alloc_stats.bg_extends, 0);
62341 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
62342 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
62343 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
62344 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
62345 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
62346
62347 /* Copy the blockcheck stats from the superblock probe */
62348 osb->osb_ecc_stats = *stats;
62349 diff --git a/fs/open.c b/fs/open.c
62350 index 4b3e1ed..1c84599 100644
62351 --- a/fs/open.c
62352 +++ b/fs/open.c
62353 @@ -32,6 +32,8 @@
62354 #include <linux/dnotify.h>
62355 #include <linux/compat.h>
62356
62357 +#define CREATE_TRACE_POINTS
62358 +#include <trace/events/fs.h>
62359 #include "internal.h"
62360
62361 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
62362 @@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
62363 error = locks_verify_truncate(inode, NULL, length);
62364 if (!error)
62365 error = security_path_truncate(path);
62366 + if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
62367 + error = -EACCES;
62368 if (!error)
62369 error = do_truncate(path->dentry, length, 0, NULL);
62370
62371 @@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
62372 error = locks_verify_truncate(inode, f.file, length);
62373 if (!error)
62374 error = security_path_truncate(&f.file->f_path);
62375 + if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
62376 + error = -EACCES;
62377 if (!error)
62378 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
62379 sb_end_write(inode->i_sb);
62380 @@ -361,6 +367,9 @@ retry:
62381 if (__mnt_is_readonly(path.mnt))
62382 res = -EROFS;
62383
62384 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
62385 + res = -EACCES;
62386 +
62387 out_path_release:
62388 path_put(&path);
62389 if (retry_estale(res, lookup_flags)) {
62390 @@ -392,6 +401,8 @@ retry:
62391 if (error)
62392 goto dput_and_out;
62393
62394 + gr_log_chdir(path.dentry, path.mnt);
62395 +
62396 set_fs_pwd(current->fs, &path);
62397
62398 dput_and_out:
62399 @@ -421,6 +432,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
62400 goto out_putf;
62401
62402 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
62403 +
62404 + if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
62405 + error = -EPERM;
62406 +
62407 + if (!error)
62408 + gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
62409 +
62410 if (!error)
62411 set_fs_pwd(current->fs, &f.file->f_path);
62412 out_putf:
62413 @@ -450,7 +468,13 @@ retry:
62414 if (error)
62415 goto dput_and_out;
62416
62417 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
62418 + goto dput_and_out;
62419 +
62420 set_fs_root(current->fs, &path);
62421 +
62422 + gr_handle_chroot_chdir(&path);
62423 +
62424 error = 0;
62425 dput_and_out:
62426 path_put(&path);
62427 @@ -474,6 +498,16 @@ static int chmod_common(struct path *path, umode_t mode)
62428 return error;
62429 retry_deleg:
62430 mutex_lock(&inode->i_mutex);
62431 +
62432 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
62433 + error = -EACCES;
62434 + goto out_unlock;
62435 + }
62436 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
62437 + error = -EACCES;
62438 + goto out_unlock;
62439 + }
62440 +
62441 error = security_path_chmod(path, mode);
62442 if (error)
62443 goto out_unlock;
62444 @@ -539,6 +573,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
62445 uid = make_kuid(current_user_ns(), user);
62446 gid = make_kgid(current_user_ns(), group);
62447
62448 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
62449 + return -EACCES;
62450 +
62451 newattrs.ia_valid = ATTR_CTIME;
62452 if (user != (uid_t) -1) {
62453 if (!uid_valid(uid))
62454 @@ -990,6 +1027,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
62455 } else {
62456 fsnotify_open(f);
62457 fd_install(fd, f);
62458 + trace_do_sys_open(tmp->name, flags, mode);
62459 }
62460 }
62461 putname(tmp);
62462 diff --git a/fs/pipe.c b/fs/pipe.c
62463 index 0e0752e..7cfdd50 100644
62464 --- a/fs/pipe.c
62465 +++ b/fs/pipe.c
62466 @@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
62467
62468 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
62469 {
62470 - if (pipe->files)
62471 + if (atomic_read(&pipe->files))
62472 mutex_lock_nested(&pipe->mutex, subclass);
62473 }
62474
62475 @@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
62476
62477 void pipe_unlock(struct pipe_inode_info *pipe)
62478 {
62479 - if (pipe->files)
62480 + if (atomic_read(&pipe->files))
62481 mutex_unlock(&pipe->mutex);
62482 }
62483 EXPORT_SYMBOL(pipe_unlock);
62484 @@ -449,9 +449,9 @@ redo:
62485 }
62486 if (bufs) /* More to do? */
62487 continue;
62488 - if (!pipe->writers)
62489 + if (!atomic_read(&pipe->writers))
62490 break;
62491 - if (!pipe->waiting_writers) {
62492 + if (!atomic_read(&pipe->waiting_writers)) {
62493 /* syscall merging: Usually we must not sleep
62494 * if O_NONBLOCK is set, or if we got some data.
62495 * But if a writer sleeps in kernel space, then
62496 @@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
62497 ret = 0;
62498 __pipe_lock(pipe);
62499
62500 - if (!pipe->readers) {
62501 + if (!atomic_read(&pipe->readers)) {
62502 send_sig(SIGPIPE, current, 0);
62503 ret = -EPIPE;
62504 goto out;
62505 @@ -562,7 +562,7 @@ redo1:
62506 for (;;) {
62507 int bufs;
62508
62509 - if (!pipe->readers) {
62510 + if (!atomic_read(&pipe->readers)) {
62511 send_sig(SIGPIPE, current, 0);
62512 if (!ret)
62513 ret = -EPIPE;
62514 @@ -653,9 +653,9 @@ redo2:
62515 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
62516 do_wakeup = 0;
62517 }
62518 - pipe->waiting_writers++;
62519 + atomic_inc(&pipe->waiting_writers);
62520 pipe_wait(pipe);
62521 - pipe->waiting_writers--;
62522 + atomic_dec(&pipe->waiting_writers);
62523 }
62524 out:
62525 __pipe_unlock(pipe);
62526 @@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
62527 mask = 0;
62528 if (filp->f_mode & FMODE_READ) {
62529 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
62530 - if (!pipe->writers && filp->f_version != pipe->w_counter)
62531 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
62532 mask |= POLLHUP;
62533 }
62534
62535 @@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
62536 * Most Unices do not set POLLERR for FIFOs but on Linux they
62537 * behave exactly like pipes for poll().
62538 */
62539 - if (!pipe->readers)
62540 + if (!atomic_read(&pipe->readers))
62541 mask |= POLLERR;
62542 }
62543
62544 @@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
62545 int kill = 0;
62546
62547 spin_lock(&inode->i_lock);
62548 - if (!--pipe->files) {
62549 + if (atomic_dec_and_test(&pipe->files)) {
62550 inode->i_pipe = NULL;
62551 kill = 1;
62552 }
62553 @@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
62554
62555 __pipe_lock(pipe);
62556 if (file->f_mode & FMODE_READ)
62557 - pipe->readers--;
62558 + atomic_dec(&pipe->readers);
62559 if (file->f_mode & FMODE_WRITE)
62560 - pipe->writers--;
62561 + atomic_dec(&pipe->writers);
62562
62563 - if (pipe->readers || pipe->writers) {
62564 + if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
62565 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
62566 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
62567 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
62568 @@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
62569 kfree(pipe);
62570 }
62571
62572 -static struct vfsmount *pipe_mnt __read_mostly;
62573 +struct vfsmount *pipe_mnt __read_mostly;
62574
62575 /*
62576 * pipefs_dname() is called from d_path().
62577 @@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
62578 goto fail_iput;
62579
62580 inode->i_pipe = pipe;
62581 - pipe->files = 2;
62582 - pipe->readers = pipe->writers = 1;
62583 + atomic_set(&pipe->files, 2);
62584 + atomic_set(&pipe->readers, 1);
62585 + atomic_set(&pipe->writers, 1);
62586 inode->i_fop = &pipefifo_fops;
62587
62588 /*
62589 @@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
62590 spin_lock(&inode->i_lock);
62591 if (inode->i_pipe) {
62592 pipe = inode->i_pipe;
62593 - pipe->files++;
62594 + atomic_inc(&pipe->files);
62595 spin_unlock(&inode->i_lock);
62596 } else {
62597 spin_unlock(&inode->i_lock);
62598 pipe = alloc_pipe_info();
62599 if (!pipe)
62600 return -ENOMEM;
62601 - pipe->files = 1;
62602 + atomic_set(&pipe->files, 1);
62603 spin_lock(&inode->i_lock);
62604 if (unlikely(inode->i_pipe)) {
62605 - inode->i_pipe->files++;
62606 + atomic_inc(&inode->i_pipe->files);
62607 spin_unlock(&inode->i_lock);
62608 free_pipe_info(pipe);
62609 pipe = inode->i_pipe;
62610 @@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
62611 * opened, even when there is no process writing the FIFO.
62612 */
62613 pipe->r_counter++;
62614 - if (pipe->readers++ == 0)
62615 + if (atomic_inc_return(&pipe->readers) == 1)
62616 wake_up_partner(pipe);
62617
62618 - if (!is_pipe && !pipe->writers) {
62619 + if (!is_pipe && !atomic_read(&pipe->writers)) {
62620 if ((filp->f_flags & O_NONBLOCK)) {
62621 /* suppress POLLHUP until we have
62622 * seen a writer */
62623 @@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
62624 * errno=ENXIO when there is no process reading the FIFO.
62625 */
62626 ret = -ENXIO;
62627 - if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
62628 + if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
62629 goto err;
62630
62631 pipe->w_counter++;
62632 - if (!pipe->writers++)
62633 + if (atomic_inc_return(&pipe->writers) == 1)
62634 wake_up_partner(pipe);
62635
62636 - if (!is_pipe && !pipe->readers) {
62637 + if (!is_pipe && !atomic_read(&pipe->readers)) {
62638 if (wait_for_partner(pipe, &pipe->r_counter))
62639 goto err_wr;
62640 }
62641 @@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
62642 * the process can at least talk to itself.
62643 */
62644
62645 - pipe->readers++;
62646 - pipe->writers++;
62647 + atomic_inc(&pipe->readers);
62648 + atomic_inc(&pipe->writers);
62649 pipe->r_counter++;
62650 pipe->w_counter++;
62651 - if (pipe->readers == 1 || pipe->writers == 1)
62652 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
62653 wake_up_partner(pipe);
62654 break;
62655
62656 @@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
62657 return 0;
62658
62659 err_rd:
62660 - if (!--pipe->readers)
62661 + if (atomic_dec_and_test(&pipe->readers))
62662 wake_up_interruptible(&pipe->wait);
62663 ret = -ERESTARTSYS;
62664 goto err;
62665
62666 err_wr:
62667 - if (!--pipe->writers)
62668 + if (atomic_dec_and_test(&pipe->writers))
62669 wake_up_interruptible(&pipe->wait);
62670 ret = -ERESTARTSYS;
62671 goto err;
62672 diff --git a/fs/posix_acl.c b/fs/posix_acl.c
62673 index 8bd2135..eab9adb 100644
62674 --- a/fs/posix_acl.c
62675 +++ b/fs/posix_acl.c
62676 @@ -19,6 +19,7 @@
62677 #include <linux/sched.h>
62678 #include <linux/posix_acl.h>
62679 #include <linux/export.h>
62680 +#include <linux/grsecurity.h>
62681
62682 #include <linux/errno.h>
62683
62684 @@ -183,7 +184,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
62685 }
62686 }
62687 if (mode_p)
62688 - *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
62689 + *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
62690 return not_equiv;
62691 }
62692
62693 @@ -331,7 +332,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
62694 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
62695 }
62696
62697 - *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
62698 + *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
62699 return not_equiv;
62700 }
62701
62702 @@ -389,6 +390,8 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
62703 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
62704 int err = -ENOMEM;
62705 if (clone) {
62706 + *mode_p &= ~gr_acl_umask();
62707 +
62708 err = posix_acl_create_masq(clone, mode_p);
62709 if (err < 0) {
62710 posix_acl_release(clone);
62711 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
62712 index 2183fcf..3c32a98 100644
62713 --- a/fs/proc/Kconfig
62714 +++ b/fs/proc/Kconfig
62715 @@ -30,7 +30,7 @@ config PROC_FS
62716
62717 config PROC_KCORE
62718 bool "/proc/kcore support" if !ARM
62719 - depends on PROC_FS && MMU
62720 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
62721 help
62722 Provides a virtual ELF core file of the live kernel. This can
62723 be read with gdb and other ELF tools. No modifications can be
62724 @@ -38,8 +38,8 @@ config PROC_KCORE
62725
62726 config PROC_VMCORE
62727 bool "/proc/vmcore support"
62728 - depends on PROC_FS && CRASH_DUMP
62729 - default y
62730 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
62731 + default n
62732 help
62733 Exports the dump image of crashed kernel in ELF format.
62734
62735 @@ -63,8 +63,8 @@ config PROC_SYSCTL
62736 limited in memory.
62737
62738 config PROC_PAGE_MONITOR
62739 - default y
62740 - depends on PROC_FS && MMU
62741 + default n
62742 + depends on PROC_FS && MMU && !GRKERNSEC
62743 bool "Enable /proc page monitoring" if EXPERT
62744 help
62745 Various /proc files exist to monitor process memory utilization:
62746 diff --git a/fs/proc/array.c b/fs/proc/array.c
62747 index 1bd2077..2f7cfd5 100644
62748 --- a/fs/proc/array.c
62749 +++ b/fs/proc/array.c
62750 @@ -60,6 +60,7 @@
62751 #include <linux/tty.h>
62752 #include <linux/string.h>
62753 #include <linux/mman.h>
62754 +#include <linux/grsecurity.h>
62755 #include <linux/proc_fs.h>
62756 #include <linux/ioport.h>
62757 #include <linux/uaccess.h>
62758 @@ -365,6 +366,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
62759 seq_putc(m, '\n');
62760 }
62761
62762 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62763 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
62764 +{
62765 + if (p->mm)
62766 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
62767 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
62768 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
62769 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
62770 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
62771 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
62772 + else
62773 + seq_printf(m, "PaX:\t-----\n");
62774 +}
62775 +#endif
62776 +
62777 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
62778 struct pid *pid, struct task_struct *task)
62779 {
62780 @@ -383,9 +399,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
62781 task_cpus_allowed(m, task);
62782 cpuset_task_status_allowed(m, task);
62783 task_context_switch_counts(m, task);
62784 +
62785 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62786 + task_pax(m, task);
62787 +#endif
62788 +
62789 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
62790 + task_grsec_rbac(m, task);
62791 +#endif
62792 +
62793 return 0;
62794 }
62795
62796 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62797 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
62798 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
62799 + _mm->pax_flags & MF_PAX_SEGMEXEC))
62800 +#endif
62801 +
62802 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62803 struct pid *pid, struct task_struct *task, int whole)
62804 {
62805 @@ -407,6 +438,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62806 char tcomm[sizeof(task->comm)];
62807 unsigned long flags;
62808
62809 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62810 + if (current->exec_id != m->exec_id) {
62811 + gr_log_badprocpid("stat");
62812 + return 0;
62813 + }
62814 +#endif
62815 +
62816 state = *get_task_state(task);
62817 vsize = eip = esp = 0;
62818 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
62819 @@ -478,6 +516,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62820 gtime = task_gtime(task);
62821 }
62822
62823 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62824 + if (PAX_RAND_FLAGS(mm)) {
62825 + eip = 0;
62826 + esp = 0;
62827 + wchan = 0;
62828 + }
62829 +#endif
62830 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62831 + wchan = 0;
62832 + eip =0;
62833 + esp =0;
62834 +#endif
62835 +
62836 /* scale priority and nice values from timeslices to -20..20 */
62837 /* to make it look like a "normal" Unix priority/nice value */
62838 priority = task_prio(task);
62839 @@ -514,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62840 seq_put_decimal_ull(m, ' ', vsize);
62841 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
62842 seq_put_decimal_ull(m, ' ', rsslim);
62843 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62844 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
62845 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
62846 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
62847 +#else
62848 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
62849 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
62850 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
62851 +#endif
62852 seq_put_decimal_ull(m, ' ', esp);
62853 seq_put_decimal_ull(m, ' ', eip);
62854 /* The signal information here is obsolete.
62855 @@ -538,7 +595,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62856 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
62857 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
62858
62859 - if (mm && permitted) {
62860 + if (mm && permitted
62861 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62862 + && !PAX_RAND_FLAGS(mm)
62863 +#endif
62864 + ) {
62865 seq_put_decimal_ull(m, ' ', mm->start_data);
62866 seq_put_decimal_ull(m, ' ', mm->end_data);
62867 seq_put_decimal_ull(m, ' ', mm->start_brk);
62868 @@ -576,8 +637,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
62869 struct pid *pid, struct task_struct *task)
62870 {
62871 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
62872 - struct mm_struct *mm = get_task_mm(task);
62873 + struct mm_struct *mm;
62874
62875 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62876 + if (current->exec_id != m->exec_id) {
62877 + gr_log_badprocpid("statm");
62878 + return 0;
62879 + }
62880 +#endif
62881 + mm = get_task_mm(task);
62882 if (mm) {
62883 size = task_statm(mm, &shared, &text, &data, &resident);
62884 mmput(mm);
62885 @@ -600,6 +668,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
62886 return 0;
62887 }
62888
62889 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62890 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
62891 +{
62892 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
62893 +}
62894 +#endif
62895 +
62896 #ifdef CONFIG_CHECKPOINT_RESTORE
62897 static struct pid *
62898 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
62899 diff --git a/fs/proc/base.c b/fs/proc/base.c
62900 index b59a34b..68a79e8 100644
62901 --- a/fs/proc/base.c
62902 +++ b/fs/proc/base.c
62903 @@ -113,6 +113,14 @@ struct pid_entry {
62904 union proc_op op;
62905 };
62906
62907 +struct getdents_callback {
62908 + struct linux_dirent __user * current_dir;
62909 + struct linux_dirent __user * previous;
62910 + struct file * file;
62911 + int count;
62912 + int error;
62913 +};
62914 +
62915 #define NOD(NAME, MODE, IOP, FOP, OP) { \
62916 .name = (NAME), \
62917 .len = sizeof(NAME) - 1, \
62918 @@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
62919 if (!mm->arg_end)
62920 goto out_mm; /* Shh! No looking before we're done */
62921
62922 + if (gr_acl_handle_procpidmem(task))
62923 + goto out_mm;
62924 +
62925 len = mm->arg_end - mm->arg_start;
62926
62927 if (len > PAGE_SIZE)
62928 @@ -237,12 +248,28 @@ out:
62929 return res;
62930 }
62931
62932 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62933 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
62934 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
62935 + _mm->pax_flags & MF_PAX_SEGMEXEC))
62936 +#endif
62937 +
62938 static int proc_pid_auxv(struct task_struct *task, char *buffer)
62939 {
62940 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
62941 int res = PTR_ERR(mm);
62942 if (mm && !IS_ERR(mm)) {
62943 unsigned int nwords = 0;
62944 +
62945 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62946 + /* allow if we're currently ptracing this task */
62947 + if (PAX_RAND_FLAGS(mm) &&
62948 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
62949 + mmput(mm);
62950 + return 0;
62951 + }
62952 +#endif
62953 +
62954 do {
62955 nwords += 2;
62956 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
62957 @@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
62958 }
62959
62960
62961 -#ifdef CONFIG_KALLSYMS
62962 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62963 /*
62964 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
62965 * Returns the resolved symbol. If that fails, simply return the address.
62966 @@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
62967 mutex_unlock(&task->signal->cred_guard_mutex);
62968 }
62969
62970 -#ifdef CONFIG_STACKTRACE
62971 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62972
62973 #define MAX_STACK_TRACE_DEPTH 64
62974
62975 @@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
62976 return count;
62977 }
62978
62979 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62980 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62981 static int proc_pid_syscall(struct task_struct *task, char *buffer)
62982 {
62983 long nr;
62984 @@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
62985 /************************************************************************/
62986
62987 /* permission checks */
62988 -static int proc_fd_access_allowed(struct inode *inode)
62989 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
62990 {
62991 struct task_struct *task;
62992 int allowed = 0;
62993 @@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
62994 */
62995 task = get_proc_task(inode);
62996 if (task) {
62997 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62998 + if (log)
62999 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
63000 + else
63001 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
63002 put_task_struct(task);
63003 }
63004 return allowed;
63005 @@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
63006 struct task_struct *task,
63007 int hide_pid_min)
63008 {
63009 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63010 + return false;
63011 +
63012 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63013 + rcu_read_lock();
63014 + {
63015 + const struct cred *tmpcred = current_cred();
63016 + const struct cred *cred = __task_cred(task);
63017 +
63018 + if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
63019 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63020 + || in_group_p(grsec_proc_gid)
63021 +#endif
63022 + ) {
63023 + rcu_read_unlock();
63024 + return true;
63025 + }
63026 + }
63027 + rcu_read_unlock();
63028 +
63029 + if (!pid->hide_pid)
63030 + return false;
63031 +#endif
63032 +
63033 if (pid->hide_pid < hide_pid_min)
63034 return true;
63035 if (in_group_p(pid->pid_gid))
63036 return true;
63037 +
63038 return ptrace_may_access(task, PTRACE_MODE_READ);
63039 }
63040
63041 @@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
63042 put_task_struct(task);
63043
63044 if (!has_perms) {
63045 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63046 + {
63047 +#else
63048 if (pid->hide_pid == 2) {
63049 +#endif
63050 /*
63051 * Let's make getdents(), stat(), and open()
63052 * consistent with each other. If a process
63053 @@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
63054 if (!task)
63055 return -ESRCH;
63056
63057 + if (gr_acl_handle_procpidmem(task)) {
63058 + put_task_struct(task);
63059 + return -EPERM;
63060 + }
63061 +
63062 mm = mm_access(task, mode);
63063 put_task_struct(task);
63064
63065 @@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
63066
63067 file->private_data = mm;
63068
63069 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63070 + file->f_version = current->exec_id;
63071 +#endif
63072 +
63073 return 0;
63074 }
63075
63076 @@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
63077 ssize_t copied;
63078 char *page;
63079
63080 +#ifdef CONFIG_GRKERNSEC
63081 + if (write)
63082 + return -EPERM;
63083 +#endif
63084 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63085 + if (file->f_version != current->exec_id) {
63086 + gr_log_badprocpid("mem");
63087 + return 0;
63088 + }
63089 +#endif
63090 +
63091 if (!mm)
63092 return 0;
63093
63094 @@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
63095 goto free;
63096
63097 while (count > 0) {
63098 - int this_len = min_t(int, count, PAGE_SIZE);
63099 + ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
63100
63101 if (write && copy_from_user(page, buf, this_len)) {
63102 copied = -EFAULT;
63103 @@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
63104 if (!mm)
63105 return 0;
63106
63107 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63108 + if (file->f_version != current->exec_id) {
63109 + gr_log_badprocpid("environ");
63110 + return 0;
63111 + }
63112 +#endif
63113 +
63114 page = (char *)__get_free_page(GFP_TEMPORARY);
63115 if (!page)
63116 return -ENOMEM;
63117 @@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
63118 goto free;
63119 while (count > 0) {
63120 size_t this_len, max_len;
63121 - int retval;
63122 + ssize_t retval;
63123
63124 if (src >= (mm->env_end - mm->env_start))
63125 break;
63126 @@ -1467,7 +1553,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
63127 int error = -EACCES;
63128
63129 /* Are we allowed to snoop on the tasks file descriptors? */
63130 - if (!proc_fd_access_allowed(inode))
63131 + if (!proc_fd_access_allowed(inode, 0))
63132 goto out;
63133
63134 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
63135 @@ -1511,8 +1597,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
63136 struct path path;
63137
63138 /* Are we allowed to snoop on the tasks file descriptors? */
63139 - if (!proc_fd_access_allowed(inode))
63140 - goto out;
63141 + /* logging this is needed for learning on chromium to work properly,
63142 + but we don't want to flood the logs from 'ps' which does a readlink
63143 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
63144 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
63145 + */
63146 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
63147 + if (!proc_fd_access_allowed(inode,0))
63148 + goto out;
63149 + } else {
63150 + if (!proc_fd_access_allowed(inode,1))
63151 + goto out;
63152 + }
63153
63154 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
63155 if (error)
63156 @@ -1562,7 +1658,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
63157 rcu_read_lock();
63158 cred = __task_cred(task);
63159 inode->i_uid = cred->euid;
63160 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63161 + inode->i_gid = grsec_proc_gid;
63162 +#else
63163 inode->i_gid = cred->egid;
63164 +#endif
63165 rcu_read_unlock();
63166 }
63167 security_task_to_inode(task, inode);
63168 @@ -1598,10 +1698,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
63169 return -ENOENT;
63170 }
63171 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
63172 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63173 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
63174 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63175 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
63176 +#endif
63177 task_dumpable(task)) {
63178 cred = __task_cred(task);
63179 stat->uid = cred->euid;
63180 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63181 + stat->gid = grsec_proc_gid;
63182 +#else
63183 stat->gid = cred->egid;
63184 +#endif
63185 }
63186 }
63187 rcu_read_unlock();
63188 @@ -1639,11 +1748,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
63189
63190 if (task) {
63191 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
63192 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63193 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
63194 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63195 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
63196 +#endif
63197 task_dumpable(task)) {
63198 rcu_read_lock();
63199 cred = __task_cred(task);
63200 inode->i_uid = cred->euid;
63201 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63202 + inode->i_gid = grsec_proc_gid;
63203 +#else
63204 inode->i_gid = cred->egid;
63205 +#endif
63206 rcu_read_unlock();
63207 } else {
63208 inode->i_uid = GLOBAL_ROOT_UID;
63209 @@ -2173,6 +2291,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
63210 if (!task)
63211 goto out_no_task;
63212
63213 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63214 + goto out;
63215 +
63216 /*
63217 * Yes, it does not scale. And it should not. Don't add
63218 * new entries into /proc/<tgid>/ without very good reasons.
63219 @@ -2203,6 +2324,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
63220 if (!task)
63221 return -ENOENT;
63222
63223 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63224 + goto out;
63225 +
63226 if (!dir_emit_dots(file, ctx))
63227 goto out;
63228
63229 @@ -2592,7 +2716,7 @@ static const struct pid_entry tgid_base_stuff[] = {
63230 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
63231 #endif
63232 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
63233 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
63234 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
63235 INF("syscall", S_IRUGO, proc_pid_syscall),
63236 #endif
63237 INF("cmdline", S_IRUGO, proc_pid_cmdline),
63238 @@ -2617,10 +2741,10 @@ static const struct pid_entry tgid_base_stuff[] = {
63239 #ifdef CONFIG_SECURITY
63240 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
63241 #endif
63242 -#ifdef CONFIG_KALLSYMS
63243 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63244 INF("wchan", S_IRUGO, proc_pid_wchan),
63245 #endif
63246 -#ifdef CONFIG_STACKTRACE
63247 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63248 ONE("stack", S_IRUGO, proc_pid_stack),
63249 #endif
63250 #ifdef CONFIG_SCHEDSTATS
63251 @@ -2654,6 +2778,9 @@ static const struct pid_entry tgid_base_stuff[] = {
63252 #ifdef CONFIG_HARDWALL
63253 INF("hardwall", S_IRUGO, proc_pid_hardwall),
63254 #endif
63255 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
63256 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
63257 +#endif
63258 #ifdef CONFIG_USER_NS
63259 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
63260 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
63261 @@ -2784,7 +2911,14 @@ static int proc_pid_instantiate(struct inode *dir,
63262 if (!inode)
63263 goto out;
63264
63265 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63266 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
63267 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63268 + inode->i_gid = grsec_proc_gid;
63269 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
63270 +#else
63271 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
63272 +#endif
63273 inode->i_op = &proc_tgid_base_inode_operations;
63274 inode->i_fop = &proc_tgid_base_operations;
63275 inode->i_flags|=S_IMMUTABLE;
63276 @@ -2822,7 +2956,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
63277 if (!task)
63278 goto out;
63279
63280 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63281 + goto out_put_task;
63282 +
63283 result = proc_pid_instantiate(dir, dentry, task, NULL);
63284 +out_put_task:
63285 put_task_struct(task);
63286 out:
63287 return ERR_PTR(result);
63288 @@ -2928,7 +3066,7 @@ static const struct pid_entry tid_base_stuff[] = {
63289 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
63290 #endif
63291 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
63292 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
63293 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
63294 INF("syscall", S_IRUGO, proc_pid_syscall),
63295 #endif
63296 INF("cmdline", S_IRUGO, proc_pid_cmdline),
63297 @@ -2955,10 +3093,10 @@ static const struct pid_entry tid_base_stuff[] = {
63298 #ifdef CONFIG_SECURITY
63299 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
63300 #endif
63301 -#ifdef CONFIG_KALLSYMS
63302 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63303 INF("wchan", S_IRUGO, proc_pid_wchan),
63304 #endif
63305 -#ifdef CONFIG_STACKTRACE
63306 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63307 ONE("stack", S_IRUGO, proc_pid_stack),
63308 #endif
63309 #ifdef CONFIG_SCHEDSTATS
63310 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
63311 index 82676e3..5f8518a 100644
63312 --- a/fs/proc/cmdline.c
63313 +++ b/fs/proc/cmdline.c
63314 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
63315
63316 static int __init proc_cmdline_init(void)
63317 {
63318 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63319 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
63320 +#else
63321 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
63322 +#endif
63323 return 0;
63324 }
63325 module_init(proc_cmdline_init);
63326 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
63327 index b143471..bb105e5 100644
63328 --- a/fs/proc/devices.c
63329 +++ b/fs/proc/devices.c
63330 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
63331
63332 static int __init proc_devices_init(void)
63333 {
63334 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63335 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
63336 +#else
63337 proc_create("devices", 0, NULL, &proc_devinfo_operations);
63338 +#endif
63339 return 0;
63340 }
63341 module_init(proc_devices_init);
63342 diff --git a/fs/proc/fd.c b/fs/proc/fd.c
63343 index 985ea88..d118a0a 100644
63344 --- a/fs/proc/fd.c
63345 +++ b/fs/proc/fd.c
63346 @@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
63347 if (!task)
63348 return -ENOENT;
63349
63350 - files = get_files_struct(task);
63351 + if (!gr_acl_handle_procpidmem(task))
63352 + files = get_files_struct(task);
63353 put_task_struct(task);
63354
63355 if (files) {
63356 @@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
63357 */
63358 int proc_fd_permission(struct inode *inode, int mask)
63359 {
63360 + struct task_struct *task;
63361 int rv = generic_permission(inode, mask);
63362 - if (rv == 0)
63363 - return 0;
63364 +
63365 if (task_tgid(current) == proc_pid(inode))
63366 rv = 0;
63367 +
63368 + task = get_proc_task(inode);
63369 + if (task == NULL)
63370 + return rv;
63371 +
63372 + if (gr_acl_handle_procpidmem(task))
63373 + rv = -EACCES;
63374 +
63375 + put_task_struct(task);
63376 +
63377 return rv;
63378 }
63379
63380 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
63381 index 124fc43..8afbb02 100644
63382 --- a/fs/proc/inode.c
63383 +++ b/fs/proc/inode.c
63384 @@ -23,11 +23,17 @@
63385 #include <linux/slab.h>
63386 #include <linux/mount.h>
63387 #include <linux/magic.h>
63388 +#include <linux/grsecurity.h>
63389
63390 #include <asm/uaccess.h>
63391
63392 #include "internal.h"
63393
63394 +#ifdef CONFIG_PROC_SYSCTL
63395 +extern const struct inode_operations proc_sys_inode_operations;
63396 +extern const struct inode_operations proc_sys_dir_operations;
63397 +#endif
63398 +
63399 static void proc_evict_inode(struct inode *inode)
63400 {
63401 struct proc_dir_entry *de;
63402 @@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
63403 ns = PROC_I(inode)->ns.ns;
63404 if (ns_ops && ns)
63405 ns_ops->put(ns);
63406 +
63407 +#ifdef CONFIG_PROC_SYSCTL
63408 + if (inode->i_op == &proc_sys_inode_operations ||
63409 + inode->i_op == &proc_sys_dir_operations)
63410 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
63411 +#endif
63412 +
63413 }
63414
63415 static struct kmem_cache * proc_inode_cachep;
63416 @@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
63417 if (de->mode) {
63418 inode->i_mode = de->mode;
63419 inode->i_uid = de->uid;
63420 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63421 + inode->i_gid = grsec_proc_gid;
63422 +#else
63423 inode->i_gid = de->gid;
63424 +#endif
63425 }
63426 if (de->size)
63427 inode->i_size = de->size;
63428 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
63429 index 651d09a..3d7f0bf 100644
63430 --- a/fs/proc/internal.h
63431 +++ b/fs/proc/internal.h
63432 @@ -48,7 +48,7 @@ struct proc_dir_entry {
63433 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
63434 u8 namelen;
63435 char name[];
63436 -};
63437 +} __randomize_layout;
63438
63439 union proc_op {
63440 int (*proc_get_link)(struct dentry *, struct path *);
63441 @@ -67,7 +67,7 @@ struct proc_inode {
63442 struct ctl_table *sysctl_entry;
63443 struct proc_ns ns;
63444 struct inode vfs_inode;
63445 -};
63446 +} __randomize_layout;
63447
63448 /*
63449 * General functions
63450 @@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
63451 struct pid *, struct task_struct *);
63452 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
63453 struct pid *, struct task_struct *);
63454 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
63455 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
63456 +#endif
63457
63458 /*
63459 * base.c
63460 diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
63461 index 05029c0..7ea1987 100644
63462 --- a/fs/proc/interrupts.c
63463 +++ b/fs/proc/interrupts.c
63464 @@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
63465
63466 static int __init proc_interrupts_init(void)
63467 {
63468 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63469 + proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
63470 +#else
63471 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
63472 +#endif
63473 return 0;
63474 }
63475 module_init(proc_interrupts_init);
63476 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
63477 index 5ed0e52..a1c1f2e 100644
63478 --- a/fs/proc/kcore.c
63479 +++ b/fs/proc/kcore.c
63480 @@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
63481 * the addresses in the elf_phdr on our list.
63482 */
63483 start = kc_offset_to_vaddr(*fpos - elf_buflen);
63484 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
63485 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
63486 + if (tsz > buflen)
63487 tsz = buflen;
63488 -
63489 +
63490 while (buflen) {
63491 struct kcore_list *m;
63492
63493 @@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
63494 kfree(elf_buf);
63495 } else {
63496 if (kern_addr_valid(start)) {
63497 - unsigned long n;
63498 + char *elf_buf;
63499 + mm_segment_t oldfs;
63500
63501 - n = copy_to_user(buffer, (char *)start, tsz);
63502 - /*
63503 - * We cannot distinguish between fault on source
63504 - * and fault on destination. When this happens
63505 - * we clear too and hope it will trigger the
63506 - * EFAULT again.
63507 - */
63508 - if (n) {
63509 - if (clear_user(buffer + tsz - n,
63510 - n))
63511 + elf_buf = kmalloc(tsz, GFP_KERNEL);
63512 + if (!elf_buf)
63513 + return -ENOMEM;
63514 + oldfs = get_fs();
63515 + set_fs(KERNEL_DS);
63516 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
63517 + set_fs(oldfs);
63518 + if (copy_to_user(buffer, elf_buf, tsz)) {
63519 + kfree(elf_buf);
63520 return -EFAULT;
63521 + }
63522 }
63523 + set_fs(oldfs);
63524 + kfree(elf_buf);
63525 } else {
63526 if (clear_user(buffer, tsz))
63527 return -EFAULT;
63528 @@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
63529
63530 static int open_kcore(struct inode *inode, struct file *filp)
63531 {
63532 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63533 + return -EPERM;
63534 +#endif
63535 if (!capable(CAP_SYS_RAWIO))
63536 return -EPERM;
63537 if (kcore_need_update)
63538 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
63539 index a77d2b2..a9153f0 100644
63540 --- a/fs/proc/meminfo.c
63541 +++ b/fs/proc/meminfo.c
63542 @@ -150,7 +150,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
63543 vmi.used >> 10,
63544 vmi.largest_chunk >> 10
63545 #ifdef CONFIG_MEMORY_FAILURE
63546 - ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
63547 + ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
63548 #endif
63549 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
63550 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
63551 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
63552 index 5f9bc8a..5c35f08 100644
63553 --- a/fs/proc/nommu.c
63554 +++ b/fs/proc/nommu.c
63555 @@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
63556
63557 if (file) {
63558 seq_pad(m, ' ');
63559 - seq_path(m, &file->f_path, "");
63560 + seq_path(m, &file->f_path, "\n\\");
63561 }
63562
63563 seq_putc(m, '\n');
63564 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
63565 index 4677bb7..408e936 100644
63566 --- a/fs/proc/proc_net.c
63567 +++ b/fs/proc/proc_net.c
63568 @@ -23,6 +23,7 @@
63569 #include <linux/nsproxy.h>
63570 #include <net/net_namespace.h>
63571 #include <linux/seq_file.h>
63572 +#include <linux/grsecurity.h>
63573
63574 #include "internal.h"
63575
63576 @@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
63577 struct task_struct *task;
63578 struct nsproxy *ns;
63579 struct net *net = NULL;
63580 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63581 + const struct cred *cred = current_cred();
63582 +#endif
63583 +
63584 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63585 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
63586 + return net;
63587 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63588 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
63589 + return net;
63590 +#endif
63591
63592 rcu_read_lock();
63593 task = pid_task(proc_pid(dir), PIDTYPE_PID);
63594 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
63595 index 7129046..6914844 100644
63596 --- a/fs/proc/proc_sysctl.c
63597 +++ b/fs/proc/proc_sysctl.c
63598 @@ -11,13 +11,21 @@
63599 #include <linux/namei.h>
63600 #include <linux/mm.h>
63601 #include <linux/module.h>
63602 +#include <linux/nsproxy.h>
63603 +#ifdef CONFIG_GRKERNSEC
63604 +#include <net/net_namespace.h>
63605 +#endif
63606 #include "internal.h"
63607
63608 +extern int gr_handle_chroot_sysctl(const int op);
63609 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
63610 + const int op);
63611 +
63612 static const struct dentry_operations proc_sys_dentry_operations;
63613 static const struct file_operations proc_sys_file_operations;
63614 -static const struct inode_operations proc_sys_inode_operations;
63615 +const struct inode_operations proc_sys_inode_operations;
63616 static const struct file_operations proc_sys_dir_file_operations;
63617 -static const struct inode_operations proc_sys_dir_operations;
63618 +const struct inode_operations proc_sys_dir_operations;
63619
63620 void proc_sys_poll_notify(struct ctl_table_poll *poll)
63621 {
63622 @@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
63623
63624 err = NULL;
63625 d_set_d_op(dentry, &proc_sys_dentry_operations);
63626 +
63627 + gr_handle_proc_create(dentry, inode);
63628 +
63629 d_add(dentry, inode);
63630
63631 out:
63632 @@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
63633 struct inode *inode = file_inode(filp);
63634 struct ctl_table_header *head = grab_header(inode);
63635 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
63636 + int op = write ? MAY_WRITE : MAY_READ;
63637 ssize_t error;
63638 size_t res;
63639
63640 @@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
63641 * and won't be until we finish.
63642 */
63643 error = -EPERM;
63644 - if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
63645 + if (sysctl_perm(head, table, op))
63646 goto out;
63647
63648 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
63649 @@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
63650 if (!table->proc_handler)
63651 goto out;
63652
63653 +#ifdef CONFIG_GRKERNSEC
63654 + error = -EPERM;
63655 + if (gr_handle_chroot_sysctl(op))
63656 + goto out;
63657 + dget(filp->f_path.dentry);
63658 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
63659 + dput(filp->f_path.dentry);
63660 + goto out;
63661 + }
63662 + dput(filp->f_path.dentry);
63663 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
63664 + goto out;
63665 + if (write) {
63666 + if (current->nsproxy->net_ns != table->extra2) {
63667 + if (!capable(CAP_SYS_ADMIN))
63668 + goto out;
63669 + } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
63670 + goto out;
63671 + }
63672 +#endif
63673 +
63674 /* careful: calling conventions are nasty here */
63675 res = count;
63676 error = table->proc_handler(table, write, buf, &res, ppos);
63677 @@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
63678 return false;
63679 } else {
63680 d_set_d_op(child, &proc_sys_dentry_operations);
63681 +
63682 + gr_handle_proc_create(child, inode);
63683 +
63684 d_add(child, inode);
63685 }
63686 } else {
63687 @@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
63688 if ((*pos)++ < ctx->pos)
63689 return true;
63690
63691 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
63692 + return 0;
63693 +
63694 if (unlikely(S_ISLNK(table->mode)))
63695 res = proc_sys_link_fill_cache(file, ctx, head, table);
63696 else
63697 @@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
63698 if (IS_ERR(head))
63699 return PTR_ERR(head);
63700
63701 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
63702 + return -ENOENT;
63703 +
63704 generic_fillattr(inode, stat);
63705 if (table)
63706 stat->mode = (stat->mode & S_IFMT) | table->mode;
63707 @@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
63708 .llseek = generic_file_llseek,
63709 };
63710
63711 -static const struct inode_operations proc_sys_inode_operations = {
63712 +const struct inode_operations proc_sys_inode_operations = {
63713 .permission = proc_sys_permission,
63714 .setattr = proc_sys_setattr,
63715 .getattr = proc_sys_getattr,
63716 };
63717
63718 -static const struct inode_operations proc_sys_dir_operations = {
63719 +const struct inode_operations proc_sys_dir_operations = {
63720 .lookup = proc_sys_lookup,
63721 .permission = proc_sys_permission,
63722 .setattr = proc_sys_setattr,
63723 @@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
63724 static struct ctl_dir *new_dir(struct ctl_table_set *set,
63725 const char *name, int namelen)
63726 {
63727 - struct ctl_table *table;
63728 + ctl_table_no_const *table;
63729 struct ctl_dir *new;
63730 struct ctl_node *node;
63731 char *new_name;
63732 @@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
63733 return NULL;
63734
63735 node = (struct ctl_node *)(new + 1);
63736 - table = (struct ctl_table *)(node + 1);
63737 + table = (ctl_table_no_const *)(node + 1);
63738 new_name = (char *)(table + 2);
63739 memcpy(new_name, name, namelen);
63740 new_name[namelen] = '\0';
63741 @@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
63742 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
63743 struct ctl_table_root *link_root)
63744 {
63745 - struct ctl_table *link_table, *entry, *link;
63746 + ctl_table_no_const *link_table, *link;
63747 + struct ctl_table *entry;
63748 struct ctl_table_header *links;
63749 struct ctl_node *node;
63750 char *link_name;
63751 @@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
63752 return NULL;
63753
63754 node = (struct ctl_node *)(links + 1);
63755 - link_table = (struct ctl_table *)(node + nr_entries);
63756 + link_table = (ctl_table_no_const *)(node + nr_entries);
63757 link_name = (char *)&link_table[nr_entries + 1];
63758
63759 for (link = link_table, entry = table; entry->procname; link++, entry++) {
63760 @@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
63761 struct ctl_table_header ***subheader, struct ctl_table_set *set,
63762 struct ctl_table *table)
63763 {
63764 - struct ctl_table *ctl_table_arg = NULL;
63765 - struct ctl_table *entry, *files;
63766 + ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
63767 + struct ctl_table *entry;
63768 int nr_files = 0;
63769 int nr_dirs = 0;
63770 int err = -ENOMEM;
63771 @@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
63772 nr_files++;
63773 }
63774
63775 - files = table;
63776 /* If there are mixed files and directories we need a new table */
63777 if (nr_dirs && nr_files) {
63778 - struct ctl_table *new;
63779 + ctl_table_no_const *new;
63780 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
63781 GFP_KERNEL);
63782 if (!files)
63783 @@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
63784 /* Register everything except a directory full of subdirectories */
63785 if (nr_files || !nr_dirs) {
63786 struct ctl_table_header *header;
63787 - header = __register_sysctl_table(set, path, files);
63788 + header = __register_sysctl_table(set, path, files ? files : table);
63789 if (!header) {
63790 kfree(ctl_table_arg);
63791 goto out;
63792 diff --git a/fs/proc/root.c b/fs/proc/root.c
63793 index 87dbcbe..55e1b4d 100644
63794 --- a/fs/proc/root.c
63795 +++ b/fs/proc/root.c
63796 @@ -186,7 +186,15 @@ void __init proc_root_init(void)
63797 #ifdef CONFIG_PROC_DEVICETREE
63798 proc_device_tree_init();
63799 #endif
63800 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63801 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63802 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
63803 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63804 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
63805 +#endif
63806 +#else
63807 proc_mkdir("bus", NULL);
63808 +#endif
63809 proc_sys_init();
63810 }
63811
63812 diff --git a/fs/proc/stat.c b/fs/proc/stat.c
63813 index 1cf86c0..0ee1ca5 100644
63814 --- a/fs/proc/stat.c
63815 +++ b/fs/proc/stat.c
63816 @@ -11,6 +11,7 @@
63817 #include <linux/irqnr.h>
63818 #include <asm/cputime.h>
63819 #include <linux/tick.h>
63820 +#include <linux/grsecurity.h>
63821
63822 #ifndef arch_irq_stat_cpu
63823 #define arch_irq_stat_cpu(cpu) 0
63824 @@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
63825 u64 sum_softirq = 0;
63826 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
63827 struct timespec boottime;
63828 + int unrestricted = 1;
63829 +
63830 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63831 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63832 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
63833 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63834 + && !in_group_p(grsec_proc_gid)
63835 +#endif
63836 + )
63837 + unrestricted = 0;
63838 +#endif
63839 +#endif
63840
63841 user = nice = system = idle = iowait =
63842 irq = softirq = steal = 0;
63843 @@ -94,6 +107,7 @@ static int show_stat(struct seq_file *p, void *v)
63844 getboottime(&boottime);
63845 jif = boottime.tv_sec;
63846
63847 + if (unrestricted) {
63848 for_each_possible_cpu(i) {
63849 user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
63850 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
63851 @@ -116,6 +130,7 @@ static int show_stat(struct seq_file *p, void *v)
63852 }
63853 }
63854 sum += arch_irq_stat();
63855 + }
63856
63857 seq_puts(p, "cpu ");
63858 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
63859 @@ -131,6 +146,7 @@ static int show_stat(struct seq_file *p, void *v)
63860 seq_putc(p, '\n');
63861
63862 for_each_online_cpu(i) {
63863 + if (unrestricted) {
63864 /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
63865 user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
63866 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
63867 @@ -142,6 +158,7 @@ static int show_stat(struct seq_file *p, void *v)
63868 steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
63869 guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
63870 guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
63871 + }
63872 seq_printf(p, "cpu%d", i);
63873 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
63874 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
63875 @@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
63876
63877 /* sum again ? it could be updated? */
63878 for_each_irq_nr(j)
63879 - seq_put_decimal_ull(p, ' ', kstat_irqs(j));
63880 + seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
63881
63882 seq_printf(p,
63883 "\nctxt %llu\n"
63884 @@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
63885 "processes %lu\n"
63886 "procs_running %lu\n"
63887 "procs_blocked %lu\n",
63888 - nr_context_switches(),
63889 + unrestricted ? nr_context_switches() : 0ULL,
63890 (unsigned long)jif,
63891 - total_forks,
63892 - nr_running(),
63893 - nr_iowait());
63894 + unrestricted ? total_forks : 0UL,
63895 + unrestricted ? nr_running() : 0UL,
63896 + unrestricted ? nr_iowait() : 0UL);
63897
63898 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
63899
63900 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
63901 index fb52b54..5fc7c14 100644
63902 --- a/fs/proc/task_mmu.c
63903 +++ b/fs/proc/task_mmu.c
63904 @@ -12,12 +12,19 @@
63905 #include <linux/swap.h>
63906 #include <linux/swapops.h>
63907 #include <linux/mmu_notifier.h>
63908 +#include <linux/grsecurity.h>
63909
63910 #include <asm/elf.h>
63911 #include <asm/uaccess.h>
63912 #include <asm/tlbflush.h>
63913 #include "internal.h"
63914
63915 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63916 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
63917 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
63918 + _mm->pax_flags & MF_PAX_SEGMEXEC))
63919 +#endif
63920 +
63921 void task_mem(struct seq_file *m, struct mm_struct *mm)
63922 {
63923 unsigned long data, text, lib, swap;
63924 @@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63925 "VmExe:\t%8lu kB\n"
63926 "VmLib:\t%8lu kB\n"
63927 "VmPTE:\t%8lu kB\n"
63928 - "VmSwap:\t%8lu kB\n",
63929 - hiwater_vm << (PAGE_SHIFT-10),
63930 + "VmSwap:\t%8lu kB\n"
63931 +
63932 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63933 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
63934 +#endif
63935 +
63936 + ,hiwater_vm << (PAGE_SHIFT-10),
63937 total_vm << (PAGE_SHIFT-10),
63938 mm->locked_vm << (PAGE_SHIFT-10),
63939 mm->pinned_vm << (PAGE_SHIFT-10),
63940 @@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63941 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
63942 (PTRS_PER_PTE * sizeof(pte_t) *
63943 atomic_long_read(&mm->nr_ptes)) >> 10,
63944 - swap << (PAGE_SHIFT-10));
63945 + swap << (PAGE_SHIFT-10)
63946 +
63947 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63948 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63949 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
63950 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
63951 +#else
63952 + , mm->context.user_cs_base
63953 + , mm->context.user_cs_limit
63954 +#endif
63955 +#endif
63956 +
63957 + );
63958 }
63959
63960 unsigned long task_vsize(struct mm_struct *mm)
63961 @@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63962 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
63963 }
63964
63965 - /* We don't show the stack guard page in /proc/maps */
63966 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63967 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
63968 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
63969 +#else
63970 start = vma->vm_start;
63971 - if (stack_guard_page_start(vma, start))
63972 - start += PAGE_SIZE;
63973 end = vma->vm_end;
63974 - if (stack_guard_page_end(vma, end))
63975 - end -= PAGE_SIZE;
63976 +#endif
63977
63978 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
63979 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
63980 @@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63981 flags & VM_WRITE ? 'w' : '-',
63982 flags & VM_EXEC ? 'x' : '-',
63983 flags & VM_MAYSHARE ? 's' : 'p',
63984 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63985 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
63986 +#else
63987 pgoff,
63988 +#endif
63989 MAJOR(dev), MINOR(dev), ino);
63990
63991 /*
63992 @@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63993 */
63994 if (file) {
63995 seq_pad(m, ' ');
63996 - seq_path(m, &file->f_path, "\n");
63997 + seq_path(m, &file->f_path, "\n\\");
63998 goto done;
63999 }
64000
64001 @@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
64002 * Thread stack in /proc/PID/task/TID/maps or
64003 * the main process stack.
64004 */
64005 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
64006 - vma->vm_end >= mm->start_stack)) {
64007 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
64008 + (vma->vm_start <= mm->start_stack &&
64009 + vma->vm_end >= mm->start_stack)) {
64010 name = "[stack]";
64011 } else {
64012 /* Thread stack in /proc/PID/maps */
64013 @@ -346,6 +375,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
64014 struct proc_maps_private *priv = m->private;
64015 struct task_struct *task = priv->task;
64016
64017 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64018 + if (current->exec_id != m->exec_id) {
64019 + gr_log_badprocpid("maps");
64020 + return 0;
64021 + }
64022 +#endif
64023 +
64024 show_map_vma(m, vma, is_pid);
64025
64026 if (m->count < m->size) /* vma is copied successfully */
64027 @@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
64028 .private = &mss,
64029 };
64030
64031 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64032 + if (current->exec_id != m->exec_id) {
64033 + gr_log_badprocpid("smaps");
64034 + return 0;
64035 + }
64036 +#endif
64037 memset(&mss, 0, sizeof mss);
64038 - mss.vma = vma;
64039 - /* mmap_sem is held in m_start */
64040 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
64041 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
64042 -
64043 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64044 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
64045 +#endif
64046 + mss.vma = vma;
64047 + /* mmap_sem is held in m_start */
64048 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
64049 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
64050 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64051 + }
64052 +#endif
64053 show_map_vma(m, vma, is_pid);
64054
64055 seq_printf(m,
64056 @@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
64057 "KernelPageSize: %8lu kB\n"
64058 "MMUPageSize: %8lu kB\n"
64059 "Locked: %8lu kB\n",
64060 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64061 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
64062 +#else
64063 (vma->vm_end - vma->vm_start) >> 10,
64064 +#endif
64065 mss.resident >> 10,
64066 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
64067 mss.shared_clean >> 10,
64068 @@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
64069 char buffer[64];
64070 int nid;
64071
64072 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64073 + if (current->exec_id != m->exec_id) {
64074 + gr_log_badprocpid("numa_maps");
64075 + return 0;
64076 + }
64077 +#endif
64078 +
64079 if (!mm)
64080 return 0;
64081
64082 @@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
64083 mpol_to_str(buffer, sizeof(buffer), pol);
64084 mpol_cond_put(pol);
64085
64086 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64087 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
64088 +#else
64089 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
64090 +#endif
64091
64092 if (file) {
64093 seq_printf(m, " file=");
64094 - seq_path(m, &file->f_path, "\n\t= ");
64095 + seq_path(m, &file->f_path, "\n\t\\= ");
64096 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64097 seq_printf(m, " heap");
64098 } else {
64099 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
64100 index 678455d..ebd3245 100644
64101 --- a/fs/proc/task_nommu.c
64102 +++ b/fs/proc/task_nommu.c
64103 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
64104 else
64105 bytes += kobjsize(mm);
64106
64107 - if (current->fs && current->fs->users > 1)
64108 + if (current->fs && atomic_read(&current->fs->users) > 1)
64109 sbytes += kobjsize(current->fs);
64110 else
64111 bytes += kobjsize(current->fs);
64112 @@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
64113
64114 if (file) {
64115 seq_pad(m, ' ');
64116 - seq_path(m, &file->f_path, "");
64117 + seq_path(m, &file->f_path, "\n\\");
64118 } else if (mm) {
64119 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
64120
64121 diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
64122 index 9100d69..51cd925 100644
64123 --- a/fs/proc/vmcore.c
64124 +++ b/fs/proc/vmcore.c
64125 @@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
64126 nr_bytes = count;
64127
64128 /* If pfn is not ram, return zeros for sparse dump files */
64129 - if (pfn_is_ram(pfn) == 0)
64130 - memset(buf, 0, nr_bytes);
64131 - else {
64132 + if (pfn_is_ram(pfn) == 0) {
64133 + if (userbuf) {
64134 + if (clear_user((char __force_user *)buf, nr_bytes))
64135 + return -EFAULT;
64136 + } else
64137 + memset(buf, 0, nr_bytes);
64138 + } else {
64139 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
64140 offset, userbuf);
64141 if (tmp < 0)
64142 @@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
64143 static int copy_to(void *target, void *src, size_t size, int userbuf)
64144 {
64145 if (userbuf) {
64146 - if (copy_to_user((char __user *) target, src, size))
64147 + if (copy_to_user((char __force_user *) target, src, size))
64148 return -EFAULT;
64149 } else {
64150 memcpy(target, src, size);
64151 @@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
64152 if (*fpos < m->offset + m->size) {
64153 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
64154 start = m->paddr + *fpos - m->offset;
64155 - tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
64156 + tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
64157 if (tmp < 0)
64158 return tmp;
64159 buflen -= tsz;
64160 @@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
64161 static ssize_t read_vmcore(struct file *file, char __user *buffer,
64162 size_t buflen, loff_t *fpos)
64163 {
64164 - return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
64165 + return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
64166 }
64167
64168 /*
64169 diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
64170 index b00fcc9..e0c6381 100644
64171 --- a/fs/qnx6/qnx6.h
64172 +++ b/fs/qnx6/qnx6.h
64173 @@ -74,7 +74,7 @@ enum {
64174 BYTESEX_BE,
64175 };
64176
64177 -static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
64178 +static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
64179 {
64180 if (sbi->s_bytesex == BYTESEX_LE)
64181 return le64_to_cpu((__force __le64)n);
64182 @@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
64183 return (__force __fs64)cpu_to_be64(n);
64184 }
64185
64186 -static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
64187 +static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
64188 {
64189 if (sbi->s_bytesex == BYTESEX_LE)
64190 return le32_to_cpu((__force __le32)n);
64191 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
64192 index 72d2917..c917c12 100644
64193 --- a/fs/quota/netlink.c
64194 +++ b/fs/quota/netlink.c
64195 @@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
64196 void quota_send_warning(struct kqid qid, dev_t dev,
64197 const char warntype)
64198 {
64199 - static atomic_t seq;
64200 + static atomic_unchecked_t seq;
64201 struct sk_buff *skb;
64202 void *msg_head;
64203 int ret;
64204 @@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
64205 "VFS: Not enough memory to send quota warning.\n");
64206 return;
64207 }
64208 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
64209 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
64210 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
64211 if (!msg_head) {
64212 printk(KERN_ERR
64213 diff --git a/fs/read_write.c b/fs/read_write.c
64214 index cfa18df..c110979 100644
64215 --- a/fs/read_write.c
64216 +++ b/fs/read_write.c
64217 @@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
64218
64219 old_fs = get_fs();
64220 set_fs(get_ds());
64221 - p = (__force const char __user *)buf;
64222 + p = (const char __force_user *)buf;
64223 if (count > MAX_RW_COUNT)
64224 count = MAX_RW_COUNT;
64225 if (file->f_op->write)
64226 diff --git a/fs/readdir.c b/fs/readdir.c
64227 index 5b53d99..a6c3049 100644
64228 --- a/fs/readdir.c
64229 +++ b/fs/readdir.c
64230 @@ -17,6 +17,7 @@
64231 #include <linux/security.h>
64232 #include <linux/syscalls.h>
64233 #include <linux/unistd.h>
64234 +#include <linux/namei.h>
64235
64236 #include <asm/uaccess.h>
64237
64238 @@ -69,6 +70,7 @@ struct old_linux_dirent {
64239 struct readdir_callback {
64240 struct dir_context ctx;
64241 struct old_linux_dirent __user * dirent;
64242 + struct file * file;
64243 int result;
64244 };
64245
64246 @@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
64247 buf->result = -EOVERFLOW;
64248 return -EOVERFLOW;
64249 }
64250 +
64251 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
64252 + return 0;
64253 +
64254 buf->result++;
64255 dirent = buf->dirent;
64256 if (!access_ok(VERIFY_WRITE, dirent,
64257 @@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
64258 if (!f.file)
64259 return -EBADF;
64260
64261 + buf.file = f.file;
64262 error = iterate_dir(f.file, &buf.ctx);
64263 if (buf.result)
64264 error = buf.result;
64265 @@ -142,6 +149,7 @@ struct getdents_callback {
64266 struct dir_context ctx;
64267 struct linux_dirent __user * current_dir;
64268 struct linux_dirent __user * previous;
64269 + struct file * file;
64270 int count;
64271 int error;
64272 };
64273 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
64274 buf->error = -EOVERFLOW;
64275 return -EOVERFLOW;
64276 }
64277 +
64278 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
64279 + return 0;
64280 +
64281 dirent = buf->previous;
64282 if (dirent) {
64283 if (__put_user(offset, &dirent->d_off))
64284 @@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
64285 if (!f.file)
64286 return -EBADF;
64287
64288 + buf.file = f.file;
64289 error = iterate_dir(f.file, &buf.ctx);
64290 if (error >= 0)
64291 error = buf.error;
64292 @@ -226,6 +239,7 @@ struct getdents_callback64 {
64293 struct dir_context ctx;
64294 struct linux_dirent64 __user * current_dir;
64295 struct linux_dirent64 __user * previous;
64296 + struct file *file;
64297 int count;
64298 int error;
64299 };
64300 @@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
64301 buf->error = -EINVAL; /* only used if we fail.. */
64302 if (reclen > buf->count)
64303 return -EINVAL;
64304 +
64305 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
64306 + return 0;
64307 +
64308 dirent = buf->previous;
64309 if (dirent) {
64310 if (__put_user(offset, &dirent->d_off))
64311 @@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
64312 if (!f.file)
64313 return -EBADF;
64314
64315 + buf.file = f.file;
64316 error = iterate_dir(f.file, &buf.ctx);
64317 if (error >= 0)
64318 error = buf.error;
64319 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
64320 index 2b7882b..1c5ef48 100644
64321 --- a/fs/reiserfs/do_balan.c
64322 +++ b/fs/reiserfs/do_balan.c
64323 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
64324 return;
64325 }
64326
64327 - atomic_inc(&(fs_generation(tb->tb_sb)));
64328 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
64329 do_balance_starts(tb);
64330
64331 /* balance leaf returns 0 except if combining L R and S into
64332 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
64333 index ee382ef..f4eb6eb5 100644
64334 --- a/fs/reiserfs/item_ops.c
64335 +++ b/fs/reiserfs/item_ops.c
64336 @@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
64337 }
64338
64339 static struct item_operations errcatch_ops = {
64340 - errcatch_bytes_number,
64341 - errcatch_decrement_key,
64342 - errcatch_is_left_mergeable,
64343 - errcatch_print_item,
64344 - errcatch_check_item,
64345 + .bytes_number = errcatch_bytes_number,
64346 + .decrement_key = errcatch_decrement_key,
64347 + .is_left_mergeable = errcatch_is_left_mergeable,
64348 + .print_item = errcatch_print_item,
64349 + .check_item = errcatch_check_item,
64350
64351 - errcatch_create_vi,
64352 - errcatch_check_left,
64353 - errcatch_check_right,
64354 - errcatch_part_size,
64355 - errcatch_unit_num,
64356 - errcatch_print_vi
64357 + .create_vi = errcatch_create_vi,
64358 + .check_left = errcatch_check_left,
64359 + .check_right = errcatch_check_right,
64360 + .part_size = errcatch_part_size,
64361 + .unit_num = errcatch_unit_num,
64362 + .print_vi = errcatch_print_vi
64363 };
64364
64365 //////////////////////////////////////////////////////////////////////////////
64366 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
64367 index a958444..42b2323 100644
64368 --- a/fs/reiserfs/procfs.c
64369 +++ b/fs/reiserfs/procfs.c
64370 @@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
64371 "SMALL_TAILS " : "NO_TAILS ",
64372 replay_only(sb) ? "REPLAY_ONLY " : "",
64373 convert_reiserfs(sb) ? "CONV " : "",
64374 - atomic_read(&r->s_generation_counter),
64375 + atomic_read_unchecked(&r->s_generation_counter),
64376 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
64377 SF(s_do_balance), SF(s_unneeded_left_neighbor),
64378 SF(s_good_search_by_key_reada), SF(s_bmaps),
64379 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
64380 index f8adaee..0eeeeca 100644
64381 --- a/fs/reiserfs/reiserfs.h
64382 +++ b/fs/reiserfs/reiserfs.h
64383 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
64384 /* Comment? -Hans */
64385 wait_queue_head_t s_wait;
64386 /* To be obsoleted soon by per buffer seals.. -Hans */
64387 - atomic_t s_generation_counter; // increased by one every time the
64388 + atomic_unchecked_t s_generation_counter; // increased by one every time the
64389 // tree gets re-balanced
64390 unsigned long s_properties; /* File system properties. Currently holds
64391 on-disk FS format */
64392 @@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
64393 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
64394
64395 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
64396 -#define get_generation(s) atomic_read (&fs_generation(s))
64397 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
64398 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
64399 #define __fs_changed(gen,s) (gen != get_generation (s))
64400 #define fs_changed(gen,s) \
64401 diff --git a/fs/select.c b/fs/select.c
64402 index 467bb1c..cf9d65a 100644
64403 --- a/fs/select.c
64404 +++ b/fs/select.c
64405 @@ -20,6 +20,7 @@
64406 #include <linux/export.h>
64407 #include <linux/slab.h>
64408 #include <linux/poll.h>
64409 +#include <linux/security.h>
64410 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
64411 #include <linux/file.h>
64412 #include <linux/fdtable.h>
64413 @@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
64414 struct poll_list *walk = head;
64415 unsigned long todo = nfds;
64416
64417 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
64418 if (nfds > rlimit(RLIMIT_NOFILE))
64419 return -EINVAL;
64420
64421 diff --git a/fs/seq_file.c b/fs/seq_file.c
64422 index 1d641bb..e600623 100644
64423 --- a/fs/seq_file.c
64424 +++ b/fs/seq_file.c
64425 @@ -10,6 +10,7 @@
64426 #include <linux/seq_file.h>
64427 #include <linux/slab.h>
64428 #include <linux/cred.h>
64429 +#include <linux/sched.h>
64430
64431 #include <asm/uaccess.h>
64432 #include <asm/page.h>
64433 @@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
64434 #ifdef CONFIG_USER_NS
64435 p->user_ns = file->f_cred->user_ns;
64436 #endif
64437 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64438 + p->exec_id = current->exec_id;
64439 +#endif
64440
64441 /*
64442 * Wrappers around seq_open(e.g. swaps_open) need to be
64443 @@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
64444 return 0;
64445 }
64446 if (!m->buf) {
64447 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
64448 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
64449 if (!m->buf)
64450 return -ENOMEM;
64451 }
64452 @@ -137,7 +141,7 @@ Eoverflow:
64453 m->op->stop(m, p);
64454 kfree(m->buf);
64455 m->count = 0;
64456 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
64457 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
64458 return !m->buf ? -ENOMEM : -EAGAIN;
64459 }
64460
64461 @@ -153,7 +157,7 @@ Eoverflow:
64462 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
64463 {
64464 struct seq_file *m = file->private_data;
64465 - size_t copied = 0;
64466 + ssize_t copied = 0;
64467 loff_t pos;
64468 size_t n;
64469 void *p;
64470 @@ -192,7 +196,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
64471
64472 /* grab buffer if we didn't have one */
64473 if (!m->buf) {
64474 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
64475 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
64476 if (!m->buf)
64477 goto Enomem;
64478 }
64479 @@ -234,7 +238,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
64480 m->op->stop(m, p);
64481 kfree(m->buf);
64482 m->count = 0;
64483 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
64484 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
64485 if (!m->buf)
64486 goto Enomem;
64487 m->version = 0;
64488 @@ -584,7 +588,7 @@ static void single_stop(struct seq_file *p, void *v)
64489 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
64490 void *data)
64491 {
64492 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
64493 + seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
64494 int res = -ENOMEM;
64495
64496 if (op) {
64497 diff --git a/fs/splice.c b/fs/splice.c
64498 index 12028fa..a6f2619 100644
64499 --- a/fs/splice.c
64500 +++ b/fs/splice.c
64501 @@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
64502 pipe_lock(pipe);
64503
64504 for (;;) {
64505 - if (!pipe->readers) {
64506 + if (!atomic_read(&pipe->readers)) {
64507 send_sig(SIGPIPE, current, 0);
64508 if (!ret)
64509 ret = -EPIPE;
64510 @@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
64511 page_nr++;
64512 ret += buf->len;
64513
64514 - if (pipe->files)
64515 + if (atomic_read(&pipe->files))
64516 do_wakeup = 1;
64517
64518 if (!--spd->nr_pages)
64519 @@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
64520 do_wakeup = 0;
64521 }
64522
64523 - pipe->waiting_writers++;
64524 + atomic_inc(&pipe->waiting_writers);
64525 pipe_wait(pipe);
64526 - pipe->waiting_writers--;
64527 + atomic_dec(&pipe->waiting_writers);
64528 }
64529
64530 pipe_unlock(pipe);
64531 @@ -583,7 +583,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
64532 old_fs = get_fs();
64533 set_fs(get_ds());
64534 /* The cast to a user pointer is valid due to the set_fs() */
64535 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
64536 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
64537 set_fs(old_fs);
64538
64539 return res;
64540 @@ -598,7 +598,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
64541 old_fs = get_fs();
64542 set_fs(get_ds());
64543 /* The cast to a user pointer is valid due to the set_fs() */
64544 - res = vfs_write(file, (__force const char __user *)buf, count, &pos);
64545 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
64546 set_fs(old_fs);
64547
64548 return res;
64549 @@ -651,7 +651,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
64550 goto err;
64551
64552 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
64553 - vec[i].iov_base = (void __user *) page_address(page);
64554 + vec[i].iov_base = (void __force_user *) page_address(page);
64555 vec[i].iov_len = this_len;
64556 spd.pages[i] = page;
64557 spd.nr_pages++;
64558 @@ -847,7 +847,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
64559 ops->release(pipe, buf);
64560 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
64561 pipe->nrbufs--;
64562 - if (pipe->files)
64563 + if (atomic_read(&pipe->files))
64564 sd->need_wakeup = true;
64565 }
64566
64567 @@ -872,10 +872,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
64568 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
64569 {
64570 while (!pipe->nrbufs) {
64571 - if (!pipe->writers)
64572 + if (!atomic_read(&pipe->writers))
64573 return 0;
64574
64575 - if (!pipe->waiting_writers && sd->num_spliced)
64576 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
64577 return 0;
64578
64579 if (sd->flags & SPLICE_F_NONBLOCK)
64580 @@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
64581 * out of the pipe right after the splice_to_pipe(). So set
64582 * PIPE_READERS appropriately.
64583 */
64584 - pipe->readers = 1;
64585 + atomic_set(&pipe->readers, 1);
64586
64587 current->splice_pipe = pipe;
64588 }
64589 @@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
64590
64591 partial[buffers].offset = off;
64592 partial[buffers].len = plen;
64593 + partial[buffers].private = 0;
64594
64595 off = 0;
64596 len -= plen;
64597 @@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
64598 ret = -ERESTARTSYS;
64599 break;
64600 }
64601 - if (!pipe->writers)
64602 + if (!atomic_read(&pipe->writers))
64603 break;
64604 - if (!pipe->waiting_writers) {
64605 + if (!atomic_read(&pipe->waiting_writers)) {
64606 if (flags & SPLICE_F_NONBLOCK) {
64607 ret = -EAGAIN;
64608 break;
64609 @@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
64610 pipe_lock(pipe);
64611
64612 while (pipe->nrbufs >= pipe->buffers) {
64613 - if (!pipe->readers) {
64614 + if (!atomic_read(&pipe->readers)) {
64615 send_sig(SIGPIPE, current, 0);
64616 ret = -EPIPE;
64617 break;
64618 @@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
64619 ret = -ERESTARTSYS;
64620 break;
64621 }
64622 - pipe->waiting_writers++;
64623 + atomic_inc(&pipe->waiting_writers);
64624 pipe_wait(pipe);
64625 - pipe->waiting_writers--;
64626 + atomic_dec(&pipe->waiting_writers);
64627 }
64628
64629 pipe_unlock(pipe);
64630 @@ -1880,14 +1881,14 @@ retry:
64631 pipe_double_lock(ipipe, opipe);
64632
64633 do {
64634 - if (!opipe->readers) {
64635 + if (!atomic_read(&opipe->readers)) {
64636 send_sig(SIGPIPE, current, 0);
64637 if (!ret)
64638 ret = -EPIPE;
64639 break;
64640 }
64641
64642 - if (!ipipe->nrbufs && !ipipe->writers)
64643 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
64644 break;
64645
64646 /*
64647 @@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
64648 pipe_double_lock(ipipe, opipe);
64649
64650 do {
64651 - if (!opipe->readers) {
64652 + if (!atomic_read(&opipe->readers)) {
64653 send_sig(SIGPIPE, current, 0);
64654 if (!ret)
64655 ret = -EPIPE;
64656 @@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
64657 * return EAGAIN if we have the potential of some data in the
64658 * future, otherwise just return 0
64659 */
64660 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
64661 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
64662 ret = -EAGAIN;
64663
64664 pipe_unlock(ipipe);
64665 diff --git a/fs/stat.c b/fs/stat.c
64666 index ae0c3ce..9ee641c 100644
64667 --- a/fs/stat.c
64668 +++ b/fs/stat.c
64669 @@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
64670 stat->gid = inode->i_gid;
64671 stat->rdev = inode->i_rdev;
64672 stat->size = i_size_read(inode);
64673 - stat->atime = inode->i_atime;
64674 - stat->mtime = inode->i_mtime;
64675 + if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
64676 + stat->atime = inode->i_ctime;
64677 + stat->mtime = inode->i_ctime;
64678 + } else {
64679 + stat->atime = inode->i_atime;
64680 + stat->mtime = inode->i_mtime;
64681 + }
64682 stat->ctime = inode->i_ctime;
64683 stat->blksize = (1 << inode->i_blkbits);
64684 stat->blocks = inode->i_blocks;
64685 @@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
64686 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
64687 {
64688 struct inode *inode = path->dentry->d_inode;
64689 + int retval;
64690
64691 - if (inode->i_op->getattr)
64692 - return inode->i_op->getattr(path->mnt, path->dentry, stat);
64693 + if (inode->i_op->getattr) {
64694 + retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
64695 + if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
64696 + stat->atime = stat->ctime;
64697 + stat->mtime = stat->ctime;
64698 + }
64699 + return retval;
64700 + }
64701
64702 generic_fillattr(inode, stat);
64703 return 0;
64704 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
64705 index 5e73d66..4f165fd 100644
64706 --- a/fs/sysfs/dir.c
64707 +++ b/fs/sysfs/dir.c
64708 @@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
64709 *
64710 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64711 */
64712 -static unsigned int sysfs_name_hash(const char *name, const void *ns)
64713 +static unsigned int sysfs_name_hash(const unsigned char *name, const void *ns)
64714 {
64715 unsigned long hash = init_name_hash();
64716 unsigned int len = strlen(name);
64717 @@ -676,6 +676,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
64718 struct sysfs_dirent *sd;
64719 int rc;
64720
64721 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
64722 + const char *parent_name = parent_sd->s_name;
64723 +
64724 + mode = S_IFDIR | S_IRWXU;
64725 +
64726 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
64727 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
64728 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
64729 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
64730 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
64731 +#endif
64732 +
64733 /* allocate */
64734 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
64735 if (!sd)
64736 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
64737 index 35e7d08..4d6e676 100644
64738 --- a/fs/sysfs/file.c
64739 +++ b/fs/sysfs/file.c
64740 @@ -42,7 +42,7 @@ static DEFINE_MUTEX(sysfs_open_file_mutex);
64741
64742 struct sysfs_open_dirent {
64743 atomic_t refcnt;
64744 - atomic_t event;
64745 + atomic_unchecked_t event;
64746 wait_queue_head_t poll;
64747 struct list_head files; /* goes through sysfs_open_file.list */
64748 };
64749 @@ -112,7 +112,7 @@ static int sysfs_seq_show(struct seq_file *sf, void *v)
64750 return -ENODEV;
64751 }
64752
64753 - of->event = atomic_read(&of->sd->s_attr.open->event);
64754 + of->event = atomic_read_unchecked(&of->sd->s_attr.open->event);
64755
64756 /*
64757 * Lookup @ops and invoke show(). Control may reach here via seq
64758 @@ -365,12 +365,12 @@ static int sysfs_bin_page_mkwrite(struct vm_area_struct *vma,
64759 return ret;
64760 }
64761
64762 -static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
64763 - void *buf, int len, int write)
64764 +static ssize_t sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
64765 + void *buf, size_t len, int write)
64766 {
64767 struct file *file = vma->vm_file;
64768 struct sysfs_open_file *of = sysfs_of(file);
64769 - int ret;
64770 + ssize_t ret;
64771
64772 if (!of->vm_ops)
64773 return -EINVAL;
64774 @@ -564,7 +564,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
64775 return -ENOMEM;
64776
64777 atomic_set(&new_od->refcnt, 0);
64778 - atomic_set(&new_od->event, 1);
64779 + atomic_set_unchecked(&new_od->event, 1);
64780 init_waitqueue_head(&new_od->poll);
64781 INIT_LIST_HEAD(&new_od->files);
64782 goto retry;
64783 @@ -768,7 +768,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
64784
64785 sysfs_put_active(attr_sd);
64786
64787 - if (of->event != atomic_read(&od->event))
64788 + if (of->event != atomic_read_unchecked(&od->event))
64789 goto trigger;
64790
64791 return DEFAULT_POLLMASK;
64792 @@ -787,7 +787,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
64793 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
64794 od = sd->s_attr.open;
64795 if (od) {
64796 - atomic_inc(&od->event);
64797 + atomic_inc_unchecked(&od->event);
64798 wake_up_interruptible(&od->poll);
64799 }
64800 }
64801 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
64802 index 3ae3f1b..081a26c 100644
64803 --- a/fs/sysfs/symlink.c
64804 +++ b/fs/sysfs/symlink.c
64805 @@ -314,7 +314,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64806 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
64807 void *cookie)
64808 {
64809 - char *page = nd_get_link(nd);
64810 + const char *page = nd_get_link(nd);
64811 if (!IS_ERR(page))
64812 free_page((unsigned long)page);
64813 }
64814 diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
64815 index 69d4889..a810bd4 100644
64816 --- a/fs/sysv/sysv.h
64817 +++ b/fs/sysv/sysv.h
64818 @@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
64819 #endif
64820 }
64821
64822 -static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
64823 +static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
64824 {
64825 if (sbi->s_bytesex == BYTESEX_PDP)
64826 return PDP_swab((__force __u32)n);
64827 diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
64828 index e18b988..f1d4ad0f 100644
64829 --- a/fs/ubifs/io.c
64830 +++ b/fs/ubifs/io.c
64831 @@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
64832 return err;
64833 }
64834
64835 -int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
64836 +int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
64837 {
64838 int err;
64839
64840 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
64841 index c175b4d..8f36a16 100644
64842 --- a/fs/udf/misc.c
64843 +++ b/fs/udf/misc.c
64844 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
64845
64846 u8 udf_tag_checksum(const struct tag *t)
64847 {
64848 - u8 *data = (u8 *)t;
64849 + const u8 *data = (const u8 *)t;
64850 u8 checksum = 0;
64851 int i;
64852 for (i = 0; i < sizeof(struct tag); ++i)
64853 diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
64854 index 8d974c4..b82f6ec 100644
64855 --- a/fs/ufs/swab.h
64856 +++ b/fs/ufs/swab.h
64857 @@ -22,7 +22,7 @@ enum {
64858 BYTESEX_BE
64859 };
64860
64861 -static inline u64
64862 +static inline u64 __intentional_overflow(-1)
64863 fs64_to_cpu(struct super_block *sbp, __fs64 n)
64864 {
64865 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
64866 @@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
64867 return (__force __fs64)cpu_to_be64(n);
64868 }
64869
64870 -static inline u32
64871 +static inline u32 __intentional_overflow(-1)
64872 fs32_to_cpu(struct super_block *sbp, __fs32 n)
64873 {
64874 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
64875 diff --git a/fs/utimes.c b/fs/utimes.c
64876 index aa138d6..5f3a811 100644
64877 --- a/fs/utimes.c
64878 +++ b/fs/utimes.c
64879 @@ -1,6 +1,7 @@
64880 #include <linux/compiler.h>
64881 #include <linux/file.h>
64882 #include <linux/fs.h>
64883 +#include <linux/security.h>
64884 #include <linux/linkage.h>
64885 #include <linux/mount.h>
64886 #include <linux/namei.h>
64887 @@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
64888 }
64889 }
64890 retry_deleg:
64891 +
64892 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
64893 + error = -EACCES;
64894 + goto mnt_drop_write_and_out;
64895 + }
64896 +
64897 mutex_lock(&inode->i_mutex);
64898 error = notify_change(path->dentry, &newattrs, &delegated_inode);
64899 mutex_unlock(&inode->i_mutex);
64900 diff --git a/fs/xattr.c b/fs/xattr.c
64901 index 3377dff..f394815 100644
64902 --- a/fs/xattr.c
64903 +++ b/fs/xattr.c
64904 @@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
64905 return rc;
64906 }
64907
64908 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
64909 +ssize_t
64910 +pax_getxattr(struct dentry *dentry, void *value, size_t size)
64911 +{
64912 + struct inode *inode = dentry->d_inode;
64913 + ssize_t error;
64914 +
64915 + error = inode_permission(inode, MAY_EXEC);
64916 + if (error)
64917 + return error;
64918 +
64919 + if (inode->i_op->getxattr)
64920 + error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
64921 + else
64922 + error = -EOPNOTSUPP;
64923 +
64924 + return error;
64925 +}
64926 +EXPORT_SYMBOL(pax_getxattr);
64927 +#endif
64928 +
64929 ssize_t
64930 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
64931 {
64932 @@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
64933 * Extended attribute SET operations
64934 */
64935 static long
64936 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
64937 +setxattr(struct path *path, const char __user *name, const void __user *value,
64938 size_t size, int flags)
64939 {
64940 int error;
64941 @@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
64942 posix_acl_fix_xattr_from_user(kvalue, size);
64943 }
64944
64945 - error = vfs_setxattr(d, kname, kvalue, size, flags);
64946 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
64947 + error = -EACCES;
64948 + goto out;
64949 + }
64950 +
64951 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
64952 out:
64953 if (vvalue)
64954 vfree(vvalue);
64955 @@ -377,7 +403,7 @@ retry:
64956 return error;
64957 error = mnt_want_write(path.mnt);
64958 if (!error) {
64959 - error = setxattr(path.dentry, name, value, size, flags);
64960 + error = setxattr(&path, name, value, size, flags);
64961 mnt_drop_write(path.mnt);
64962 }
64963 path_put(&path);
64964 @@ -401,7 +427,7 @@ retry:
64965 return error;
64966 error = mnt_want_write(path.mnt);
64967 if (!error) {
64968 - error = setxattr(path.dentry, name, value, size, flags);
64969 + error = setxattr(&path, name, value, size, flags);
64970 mnt_drop_write(path.mnt);
64971 }
64972 path_put(&path);
64973 @@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
64974 const void __user *,value, size_t, size, int, flags)
64975 {
64976 struct fd f = fdget(fd);
64977 - struct dentry *dentry;
64978 int error = -EBADF;
64979
64980 if (!f.file)
64981 return error;
64982 - dentry = f.file->f_path.dentry;
64983 - audit_inode(NULL, dentry, 0);
64984 + audit_inode(NULL, f.file->f_path.dentry, 0);
64985 error = mnt_want_write_file(f.file);
64986 if (!error) {
64987 - error = setxattr(dentry, name, value, size, flags);
64988 + error = setxattr(&f.file->f_path, name, value, size, flags);
64989 mnt_drop_write_file(f.file);
64990 }
64991 fdput(f);
64992 @@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
64993 * Extended attribute REMOVE operations
64994 */
64995 static long
64996 -removexattr(struct dentry *d, const char __user *name)
64997 +removexattr(struct path *path, const char __user *name)
64998 {
64999 int error;
65000 char kname[XATTR_NAME_MAX + 1];
65001 @@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
65002 if (error < 0)
65003 return error;
65004
65005 - return vfs_removexattr(d, kname);
65006 + if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
65007 + return -EACCES;
65008 +
65009 + return vfs_removexattr(path->dentry, kname);
65010 }
65011
65012 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
65013 @@ -652,7 +679,7 @@ retry:
65014 return error;
65015 error = mnt_want_write(path.mnt);
65016 if (!error) {
65017 - error = removexattr(path.dentry, name);
65018 + error = removexattr(&path, name);
65019 mnt_drop_write(path.mnt);
65020 }
65021 path_put(&path);
65022 @@ -675,7 +702,7 @@ retry:
65023 return error;
65024 error = mnt_want_write(path.mnt);
65025 if (!error) {
65026 - error = removexattr(path.dentry, name);
65027 + error = removexattr(&path, name);
65028 mnt_drop_write(path.mnt);
65029 }
65030 path_put(&path);
65031 @@ -689,16 +716,16 @@ retry:
65032 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
65033 {
65034 struct fd f = fdget(fd);
65035 - struct dentry *dentry;
65036 + struct path *path;
65037 int error = -EBADF;
65038
65039 if (!f.file)
65040 return error;
65041 - dentry = f.file->f_path.dentry;
65042 - audit_inode(NULL, dentry, 0);
65043 + path = &f.file->f_path;
65044 + audit_inode(NULL, path->dentry, 0);
65045 error = mnt_want_write_file(f.file);
65046 if (!error) {
65047 - error = removexattr(dentry, name);
65048 + error = removexattr(path, name);
65049 mnt_drop_write_file(f.file);
65050 }
65051 fdput(f);
65052 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
65053 index 9fbea87..417b3c2 100644
65054 --- a/fs/xattr_acl.c
65055 +++ b/fs/xattr_acl.c
65056 @@ -10,6 +10,7 @@
65057 #include <linux/posix_acl_xattr.h>
65058 #include <linux/gfp.h>
65059 #include <linux/user_namespace.h>
65060 +#include <linux/grsecurity.h>
65061
65062 /*
65063 * Fix up the uids and gids in posix acl extended attributes in place.
65064 @@ -76,11 +77,12 @@ struct posix_acl *
65065 posix_acl_from_xattr(struct user_namespace *user_ns,
65066 const void *value, size_t size)
65067 {
65068 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65069 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65070 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65071 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65072 int count;
65073 struct posix_acl *acl;
65074 struct posix_acl_entry *acl_e;
65075 + umode_t umask = gr_acl_umask();
65076
65077 if (!value)
65078 return NULL;
65079 @@ -106,12 +108,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65080
65081 switch(acl_e->e_tag) {
65082 case ACL_USER_OBJ:
65083 + acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65084 + break;
65085 case ACL_GROUP_OBJ:
65086 case ACL_MASK:
65087 + acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65088 + break;
65089 case ACL_OTHER:
65090 + acl_e->e_perm &= ~(umask & S_IRWXO);
65091 break;
65092
65093 case ACL_USER:
65094 + acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65095 acl_e->e_uid =
65096 make_kuid(user_ns,
65097 le32_to_cpu(entry->e_id));
65098 @@ -119,6 +127,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65099 goto fail;
65100 break;
65101 case ACL_GROUP:
65102 + acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65103 acl_e->e_gid =
65104 make_kgid(user_ns,
65105 le32_to_cpu(entry->e_id));
65106 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
65107 index 3b2c14b..de031fe 100644
65108 --- a/fs/xfs/xfs_bmap.c
65109 +++ b/fs/xfs/xfs_bmap.c
65110 @@ -584,7 +584,7 @@ xfs_bmap_validate_ret(
65111
65112 #else
65113 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
65114 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
65115 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
65116 #endif /* DEBUG */
65117
65118 /*
65119 diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
65120 index c4e50c6..8ba93e3 100644
65121 --- a/fs/xfs/xfs_dir2_readdir.c
65122 +++ b/fs/xfs/xfs_dir2_readdir.c
65123 @@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
65124 ino = dp->d_ops->sf_get_ino(sfp, sfep);
65125 filetype = dp->d_ops->sf_get_ftype(sfep);
65126 ctx->pos = off & 0x7fffffff;
65127 - if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
65128 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
65129 + char name[sfep->namelen];
65130 + memcpy(name, sfep->name, sfep->namelen);
65131 + if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
65132 + return 0;
65133 + } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
65134 xfs_dir3_get_dtype(mp, filetype)))
65135 return 0;
65136 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
65137 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
65138 index 33ad9a7..82c18ba 100644
65139 --- a/fs/xfs/xfs_ioctl.c
65140 +++ b/fs/xfs/xfs_ioctl.c
65141 @@ -126,7 +126,7 @@ xfs_find_handle(
65142 }
65143
65144 error = -EFAULT;
65145 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
65146 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
65147 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
65148 goto out_put;
65149
65150 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
65151 index 104455b..764c512 100644
65152 --- a/fs/xfs/xfs_iops.c
65153 +++ b/fs/xfs/xfs_iops.c
65154 @@ -397,7 +397,7 @@ xfs_vn_put_link(
65155 struct nameidata *nd,
65156 void *p)
65157 {
65158 - char *s = nd_get_link(nd);
65159 + const char *s = nd_get_link(nd);
65160
65161 if (!IS_ERR(s))
65162 kfree(s);
65163 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
65164 new file mode 100644
65165 index 0000000..13b7885
65166 --- /dev/null
65167 +++ b/grsecurity/Kconfig
65168 @@ -0,0 +1,1155 @@
65169 +#
65170 +# grecurity configuration
65171 +#
65172 +menu "Memory Protections"
65173 +depends on GRKERNSEC
65174 +
65175 +config GRKERNSEC_KMEM
65176 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
65177 + default y if GRKERNSEC_CONFIG_AUTO
65178 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
65179 + help
65180 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
65181 + be written to or read from to modify or leak the contents of the running
65182 + kernel. /dev/port will also not be allowed to be opened, writing to
65183 + /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
65184 + If you have module support disabled, enabling this will close up several
65185 + ways that are currently used to insert malicious code into the running
65186 + kernel.
65187 +
65188 + Even with this feature enabled, we still highly recommend that
65189 + you use the RBAC system, as it is still possible for an attacker to
65190 + modify the running kernel through other more obscure methods.
65191 +
65192 + It is highly recommended that you say Y here if you meet all the
65193 + conditions above.
65194 +
65195 +config GRKERNSEC_VM86
65196 + bool "Restrict VM86 mode"
65197 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
65198 + depends on X86_32
65199 +
65200 + help
65201 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
65202 + make use of a special execution mode on 32bit x86 processors called
65203 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
65204 + video cards and will still work with this option enabled. The purpose
65205 + of the option is to prevent exploitation of emulation errors in
65206 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
65207 + Nearly all users should be able to enable this option.
65208 +
65209 +config GRKERNSEC_IO
65210 + bool "Disable privileged I/O"
65211 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
65212 + depends on X86
65213 + select RTC_CLASS
65214 + select RTC_INTF_DEV
65215 + select RTC_DRV_CMOS
65216 +
65217 + help
65218 + If you say Y here, all ioperm and iopl calls will return an error.
65219 + Ioperm and iopl can be used to modify the running kernel.
65220 + Unfortunately, some programs need this access to operate properly,
65221 + the most notable of which are XFree86 and hwclock. hwclock can be
65222 + remedied by having RTC support in the kernel, so real-time
65223 + clock support is enabled if this option is enabled, to ensure
65224 + that hwclock operates correctly.
65225 +
65226 + If you're using XFree86 or a version of Xorg from 2012 or earlier,
65227 + you may not be able to boot into a graphical environment with this
65228 + option enabled. In this case, you should use the RBAC system instead.
65229 +
65230 +config GRKERNSEC_JIT_HARDEN
65231 + bool "Harden BPF JIT against spray attacks"
65232 + default y if GRKERNSEC_CONFIG_AUTO
65233 + depends on BPF_JIT && X86
65234 + help
65235 + If you say Y here, the native code generated by the kernel's Berkeley
65236 + Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
65237 + attacks that attempt to fit attacker-beneficial instructions in
65238 + 32bit immediate fields of JIT-generated native instructions. The
65239 + attacker will generally aim to cause an unintended instruction sequence
65240 + of JIT-generated native code to execute by jumping into the middle of
65241 + a generated instruction. This feature effectively randomizes the 32bit
65242 + immediate constants present in the generated code to thwart such attacks.
65243 +
65244 + If you're using KERNEXEC, it's recommended that you enable this option
65245 + to supplement the hardening of the kernel.
65246 +
65247 +config GRKERNSEC_PERF_HARDEN
65248 + bool "Disable unprivileged PERF_EVENTS usage by default"
65249 + default y if GRKERNSEC_CONFIG_AUTO
65250 + depends on PERF_EVENTS
65251 + help
65252 + If you say Y here, the range of acceptable values for the
65253 + /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
65254 + default to a new value: 3. When the sysctl is set to this value, no
65255 + unprivileged use of the PERF_EVENTS syscall interface will be permitted.
65256 +
65257 + Though PERF_EVENTS can be used legitimately for performance monitoring
65258 + and low-level application profiling, it is forced on regardless of
65259 + configuration, has been at fault for several vulnerabilities, and
65260 + creates new opportunities for side channels and other information leaks.
65261 +
65262 + This feature puts PERF_EVENTS into a secure default state and permits
65263 + the administrator to change out of it temporarily if unprivileged
65264 + application profiling is needed.
65265 +
65266 +config GRKERNSEC_RAND_THREADSTACK
65267 + bool "Insert random gaps between thread stacks"
65268 + default y if GRKERNSEC_CONFIG_AUTO
65269 + depends on PAX_RANDMMAP && !PPC
65270 + help
65271 + If you say Y here, a random-sized gap will be enforced between allocated
65272 + thread stacks. Glibc's NPTL and other threading libraries that
65273 + pass MAP_STACK to the kernel for thread stack allocation are supported.
65274 + The implementation currently provides 8 bits of entropy for the gap.
65275 +
65276 + Many distributions do not compile threaded remote services with the
65277 + -fstack-check argument to GCC, causing the variable-sized stack-based
65278 + allocator, alloca(), to not probe the stack on allocation. This
65279 + permits an unbounded alloca() to skip over any guard page and potentially
65280 + modify another thread's stack reliably. An enforced random gap
65281 + reduces the reliability of such an attack and increases the chance
65282 + that such a read/write to another thread's stack instead lands in
65283 + an unmapped area, causing a crash and triggering grsecurity's
65284 + anti-bruteforcing logic.
65285 +
65286 +config GRKERNSEC_PROC_MEMMAP
65287 + bool "Harden ASLR against information leaks and entropy reduction"
65288 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
65289 + depends on PAX_NOEXEC || PAX_ASLR
65290 + help
65291 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
65292 + give no information about the addresses of its mappings if
65293 + PaX features that rely on random addresses are enabled on the task.
65294 + In addition to sanitizing this information and disabling other
65295 + dangerous sources of information, this option causes reads of sensitive
65296 + /proc/<pid> entries where the file descriptor was opened in a different
65297 + task than the one performing the read. Such attempts are logged.
65298 + This option also limits argv/env strings for suid/sgid binaries
65299 + to 512KB to prevent a complete exhaustion of the stack entropy provided
65300 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
65301 + binaries to prevent alternative mmap layouts from being abused.
65302 +
65303 + If you use PaX it is essential that you say Y here as it closes up
65304 + several holes that make full ASLR useless locally.
65305 +
65306 +config GRKERNSEC_BRUTE
65307 + bool "Deter exploit bruteforcing"
65308 + default y if GRKERNSEC_CONFIG_AUTO
65309 + help
65310 + If you say Y here, attempts to bruteforce exploits against forking
65311 + daemons such as apache or sshd, as well as against suid/sgid binaries
65312 + will be deterred. When a child of a forking daemon is killed by PaX
65313 + or crashes due to an illegal instruction or other suspicious signal,
65314 + the parent process will be delayed 30 seconds upon every subsequent
65315 + fork until the administrator is able to assess the situation and
65316 + restart the daemon.
65317 + In the suid/sgid case, the attempt is logged, the user has all their
65318 + existing instances of the suid/sgid binary terminated and will
65319 + be unable to execute any suid/sgid binaries for 15 minutes.
65320 +
65321 + It is recommended that you also enable signal logging in the auditing
65322 + section so that logs are generated when a process triggers a suspicious
65323 + signal.
65324 + If the sysctl option is enabled, a sysctl option with name
65325 + "deter_bruteforce" is created.
65326 +
65327 +config GRKERNSEC_MODHARDEN
65328 + bool "Harden module auto-loading"
65329 + default y if GRKERNSEC_CONFIG_AUTO
65330 + depends on MODULES
65331 + help
65332 + If you say Y here, module auto-loading in response to use of some
65333 + feature implemented by an unloaded module will be restricted to
65334 + root users. Enabling this option helps defend against attacks
65335 + by unprivileged users who abuse the auto-loading behavior to
65336 + cause a vulnerable module to load that is then exploited.
65337 +
65338 + If this option prevents a legitimate use of auto-loading for a
65339 + non-root user, the administrator can execute modprobe manually
65340 + with the exact name of the module mentioned in the alert log.
65341 + Alternatively, the administrator can add the module to the list
65342 + of modules loaded at boot by modifying init scripts.
65343 +
65344 + Modification of init scripts will most likely be needed on
65345 + Ubuntu servers with encrypted home directory support enabled,
65346 + as the first non-root user logging in will cause the ecb(aes),
65347 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
65348 +
65349 +config GRKERNSEC_HIDESYM
65350 + bool "Hide kernel symbols"
65351 + default y if GRKERNSEC_CONFIG_AUTO
65352 + select PAX_USERCOPY_SLABS
65353 + help
65354 + If you say Y here, getting information on loaded modules, and
65355 + displaying all kernel symbols through a syscall will be restricted
65356 + to users with CAP_SYS_MODULE. For software compatibility reasons,
65357 + /proc/kallsyms will be restricted to the root user. The RBAC
65358 + system can hide that entry even from root.
65359 +
65360 + This option also prevents leaking of kernel addresses through
65361 + several /proc entries.
65362 +
65363 + Note that this option is only effective provided the following
65364 + conditions are met:
65365 + 1) The kernel using grsecurity is not precompiled by some distribution
65366 + 2) You have also enabled GRKERNSEC_DMESG
65367 + 3) You are using the RBAC system and hiding other files such as your
65368 + kernel image and System.map. Alternatively, enabling this option
65369 + causes the permissions on /boot, /lib/modules, and the kernel
65370 + source directory to change at compile time to prevent
65371 + reading by non-root users.
65372 + If the above conditions are met, this option will aid in providing a
65373 + useful protection against local kernel exploitation of overflows
65374 + and arbitrary read/write vulnerabilities.
65375 +
65376 + It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
65377 + in addition to this feature.
65378 +
65379 +config GRKERNSEC_RANDSTRUCT
65380 + bool "Randomize layout of sensitive kernel structures"
65381 + default y if GRKERNSEC_CONFIG_AUTO
65382 + select GRKERNSEC_HIDESYM
65383 + select MODVERSIONS if MODULES
65384 + help
65385 + If you say Y here, the layouts of a number of sensitive kernel
65386 + structures (task, fs, cred, etc) and all structures composed entirely
65387 + of function pointers (aka "ops" structs) will be randomized at compile-time.
65388 + This can introduce the requirement of an additional infoleak
65389 + vulnerability for exploits targeting these structure types.
65390 +
65391 + Enabling this feature will introduce some performance impact, slightly
65392 + increase memory usage, and prevent the use of forensic tools like
65393 + Volatility against the system (unless the kernel source tree isn't
65394 + cleaned after kernel installation).
65395 +
65396 + The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
65397 + It remains after a make clean to allow for external modules to be compiled
65398 + with the existing seed and will be removed by a make mrproper or
65399 + make distclean.
65400 +
65401 + Note that the implementation requires gcc 4.6.4. or newer. You may need
65402 + to install the supporting headers explicitly in addition to the normal
65403 + gcc package.
65404 +
65405 +config GRKERNSEC_RANDSTRUCT_PERFORMANCE
65406 + bool "Use cacheline-aware structure randomization"
65407 + depends on GRKERNSEC_RANDSTRUCT
65408 + default y if GRKERNSEC_CONFIG_PRIORITY_PERF
65409 + help
65410 + If you say Y here, the RANDSTRUCT randomization will make a best effort
65411 + at restricting randomization to cacheline-sized groups of elements. It
65412 + will further not randomize bitfields in structures. This reduces the
65413 + performance hit of RANDSTRUCT at the cost of weakened randomization.
65414 +
65415 +config GRKERNSEC_KERN_LOCKOUT
65416 + bool "Active kernel exploit response"
65417 + default y if GRKERNSEC_CONFIG_AUTO
65418 + depends on X86 || ARM || PPC || SPARC
65419 + help
65420 + If you say Y here, when a PaX alert is triggered due to suspicious
65421 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
65422 + or an OOPS occurs due to bad memory accesses, instead of just
65423 + terminating the offending process (and potentially allowing
65424 + a subsequent exploit from the same user), we will take one of two
65425 + actions:
65426 + If the user was root, we will panic the system
65427 + If the user was non-root, we will log the attempt, terminate
65428 + all processes owned by the user, then prevent them from creating
65429 + any new processes until the system is restarted
65430 + This deters repeated kernel exploitation/bruteforcing attempts
65431 + and is useful for later forensics.
65432 +
65433 +config GRKERNSEC_OLD_ARM_USERLAND
65434 + bool "Old ARM userland compatibility"
65435 + depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
65436 + help
65437 + If you say Y here, stubs of executable code to perform such operations
65438 + as "compare-exchange" will be placed at fixed locations in the ARM vector
65439 + table. This is unfortunately needed for old ARM userland meant to run
65440 + across a wide range of processors. Without this option enabled,
65441 + the get_tls and data memory barrier stubs will be emulated by the kernel,
65442 + which is enough for Linaro userlands or other userlands designed for v6
65443 + and newer ARM CPUs. It's recommended that you try without this option enabled
65444 + first, and only enable it if your userland does not boot (it will likely fail
65445 + at init time).
65446 +
65447 +endmenu
65448 +menu "Role Based Access Control Options"
65449 +depends on GRKERNSEC
65450 +
65451 +config GRKERNSEC_RBAC_DEBUG
65452 + bool
65453 +
65454 +config GRKERNSEC_NO_RBAC
65455 + bool "Disable RBAC system"
65456 + help
65457 + If you say Y here, the /dev/grsec device will be removed from the kernel,
65458 + preventing the RBAC system from being enabled. You should only say Y
65459 + here if you have no intention of using the RBAC system, so as to prevent
65460 + an attacker with root access from misusing the RBAC system to hide files
65461 + and processes when loadable module support and /dev/[k]mem have been
65462 + locked down.
65463 +
65464 +config GRKERNSEC_ACL_HIDEKERN
65465 + bool "Hide kernel processes"
65466 + help
65467 + If you say Y here, all kernel threads will be hidden to all
65468 + processes but those whose subject has the "view hidden processes"
65469 + flag.
65470 +
65471 +config GRKERNSEC_ACL_MAXTRIES
65472 + int "Maximum tries before password lockout"
65473 + default 3
65474 + help
65475 + This option enforces the maximum number of times a user can attempt
65476 + to authorize themselves with the grsecurity RBAC system before being
65477 + denied the ability to attempt authorization again for a specified time.
65478 + The lower the number, the harder it will be to brute-force a password.
65479 +
65480 +config GRKERNSEC_ACL_TIMEOUT
65481 + int "Time to wait after max password tries, in seconds"
65482 + default 30
65483 + help
65484 + This option specifies the time the user must wait after attempting to
65485 + authorize to the RBAC system with the maximum number of invalid
65486 + passwords. The higher the number, the harder it will be to brute-force
65487 + a password.
65488 +
65489 +endmenu
65490 +menu "Filesystem Protections"
65491 +depends on GRKERNSEC
65492 +
65493 +config GRKERNSEC_PROC
65494 + bool "Proc restrictions"
65495 + default y if GRKERNSEC_CONFIG_AUTO
65496 + help
65497 + If you say Y here, the permissions of the /proc filesystem
65498 + will be altered to enhance system security and privacy. You MUST
65499 + choose either a user only restriction or a user and group restriction.
65500 + Depending upon the option you choose, you can either restrict users to
65501 + see only the processes they themselves run, or choose a group that can
65502 + view all processes and files normally restricted to root if you choose
65503 + the "restrict to user only" option. NOTE: If you're running identd or
65504 + ntpd as a non-root user, you will have to run it as the group you
65505 + specify here.
65506 +
65507 +config GRKERNSEC_PROC_USER
65508 + bool "Restrict /proc to user only"
65509 + depends on GRKERNSEC_PROC
65510 + help
65511 + If you say Y here, non-root users will only be able to view their own
65512 + processes, and restricts them from viewing network-related information,
65513 + and viewing kernel symbol and module information.
65514 +
65515 +config GRKERNSEC_PROC_USERGROUP
65516 + bool "Allow special group"
65517 + default y if GRKERNSEC_CONFIG_AUTO
65518 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
65519 + help
65520 + If you say Y here, you will be able to select a group that will be
65521 + able to view all processes and network-related information. If you've
65522 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
65523 + remain hidden. This option is useful if you want to run identd as
65524 + a non-root user. The group you select may also be chosen at boot time
65525 + via "grsec_proc_gid=" on the kernel commandline.
65526 +
65527 +config GRKERNSEC_PROC_GID
65528 + int "GID for special group"
65529 + depends on GRKERNSEC_PROC_USERGROUP
65530 + default 1001
65531 +
65532 +config GRKERNSEC_PROC_ADD
65533 + bool "Additional restrictions"
65534 + default y if GRKERNSEC_CONFIG_AUTO
65535 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
65536 + help
65537 + If you say Y here, additional restrictions will be placed on
65538 + /proc that keep normal users from viewing device information and
65539 + slabinfo information that could be useful for exploits.
65540 +
65541 +config GRKERNSEC_LINK
65542 + bool "Linking restrictions"
65543 + default y if GRKERNSEC_CONFIG_AUTO
65544 + help
65545 + If you say Y here, /tmp race exploits will be prevented, since users
65546 + will no longer be able to follow symlinks owned by other users in
65547 + world-writable +t directories (e.g. /tmp), unless the owner of the
65548 + symlink is the owner of the directory. Users will also not be
65549 + able to hardlink to files they do not own. If the sysctl option is
65550 + enabled, a sysctl option with name "linking_restrictions" is created.
65551 +
65552 +config GRKERNSEC_SYMLINKOWN
65553 + bool "Kernel-enforced SymlinksIfOwnerMatch"
65554 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
65555 + help
65556 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
65557 + that prevents it from being used as a security feature. As Apache
65558 + verifies the symlink by performing a stat() against the target of
65559 + the symlink before it is followed, an attacker can setup a symlink
65560 + to point to a same-owned file, then replace the symlink with one
65561 + that targets another user's file just after Apache "validates" the
65562 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
65563 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
65564 + will be in place for the group you specify. If the sysctl option
65565 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
65566 + created.
65567 +
65568 +config GRKERNSEC_SYMLINKOWN_GID
65569 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
65570 + depends on GRKERNSEC_SYMLINKOWN
65571 + default 1006
65572 + help
65573 + Setting this GID determines what group kernel-enforced
65574 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
65575 + is enabled, a sysctl option with name "symlinkown_gid" is created.
65576 +
65577 +config GRKERNSEC_FIFO
65578 + bool "FIFO restrictions"
65579 + default y if GRKERNSEC_CONFIG_AUTO
65580 + help
65581 + If you say Y here, users will not be able to write to FIFOs they don't
65582 + own in world-writable +t directories (e.g. /tmp), unless the owner of
65583 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
65584 + option is enabled, a sysctl option with name "fifo_restrictions" is
65585 + created.
65586 +
65587 +config GRKERNSEC_SYSFS_RESTRICT
65588 + bool "Sysfs/debugfs restriction"
65589 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
65590 + depends on SYSFS
65591 + help
65592 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
65593 + any filesystem normally mounted under it (e.g. debugfs) will be
65594 + mostly accessible only by root. These filesystems generally provide access
65595 + to hardware and debug information that isn't appropriate for unprivileged
65596 + users of the system. Sysfs and debugfs have also become a large source
65597 + of new vulnerabilities, ranging from infoleaks to local compromise.
65598 + There has been very little oversight with an eye toward security involved
65599 + in adding new exporters of information to these filesystems, so their
65600 + use is discouraged.
65601 + For reasons of compatibility, a few directories have been whitelisted
65602 + for access by non-root users:
65603 + /sys/fs/selinux
65604 + /sys/fs/fuse
65605 + /sys/devices/system/cpu
65606 +
65607 +config GRKERNSEC_ROFS
65608 + bool "Runtime read-only mount protection"
65609 + depends on SYSCTL
65610 + help
65611 + If you say Y here, a sysctl option with name "romount_protect" will
65612 + be created. By setting this option to 1 at runtime, filesystems
65613 + will be protected in the following ways:
65614 + * No new writable mounts will be allowed
65615 + * Existing read-only mounts won't be able to be remounted read/write
65616 + * Write operations will be denied on all block devices
65617 + This option acts independently of grsec_lock: once it is set to 1,
65618 + it cannot be turned off. Therefore, please be mindful of the resulting
65619 + behavior if this option is enabled in an init script on a read-only
65620 + filesystem.
65621 + Also be aware that as with other root-focused features, GRKERNSEC_KMEM
65622 + and GRKERNSEC_IO should be enabled and module loading disabled via
65623 + config or at runtime.
65624 + This feature is mainly intended for secure embedded systems.
65625 +
65626 +
65627 +config GRKERNSEC_DEVICE_SIDECHANNEL
65628 + bool "Eliminate stat/notify-based device sidechannels"
65629 + default y if GRKERNSEC_CONFIG_AUTO
65630 + help
65631 + If you say Y here, timing analyses on block or character
65632 + devices like /dev/ptmx using stat or inotify/dnotify/fanotify
65633 + will be thwarted for unprivileged users. If a process without
65634 + CAP_MKNOD stats such a device, the last access and last modify times
65635 + will match the device's create time. No access or modify events
65636 + will be triggered through inotify/dnotify/fanotify for such devices.
65637 + This feature will prevent attacks that may at a minimum
65638 + allow an attacker to determine the administrator's password length.
65639 +
65640 +config GRKERNSEC_CHROOT
65641 + bool "Chroot jail restrictions"
65642 + default y if GRKERNSEC_CONFIG_AUTO
65643 + help
65644 + If you say Y here, you will be able to choose several options that will
65645 + make breaking out of a chrooted jail much more difficult. If you
65646 + encounter no software incompatibilities with the following options, it
65647 + is recommended that you enable each one.
65648 +
65649 +config GRKERNSEC_CHROOT_MOUNT
65650 + bool "Deny mounts"
65651 + default y if GRKERNSEC_CONFIG_AUTO
65652 + depends on GRKERNSEC_CHROOT
65653 + help
65654 + If you say Y here, processes inside a chroot will not be able to
65655 + mount or remount filesystems. If the sysctl option is enabled, a
65656 + sysctl option with name "chroot_deny_mount" is created.
65657 +
65658 +config GRKERNSEC_CHROOT_DOUBLE
65659 + bool "Deny double-chroots"
65660 + default y if GRKERNSEC_CONFIG_AUTO
65661 + depends on GRKERNSEC_CHROOT
65662 + help
65663 + If you say Y here, processes inside a chroot will not be able to chroot
65664 + again outside the chroot. This is a widely used method of breaking
65665 + out of a chroot jail and should not be allowed. If the sysctl
65666 + option is enabled, a sysctl option with name
65667 + "chroot_deny_chroot" is created.
65668 +
65669 +config GRKERNSEC_CHROOT_PIVOT
65670 + bool "Deny pivot_root in chroot"
65671 + default y if GRKERNSEC_CONFIG_AUTO
65672 + depends on GRKERNSEC_CHROOT
65673 + help
65674 + If you say Y here, processes inside a chroot will not be able to use
65675 + a function called pivot_root() that was introduced in Linux 2.3.41. It
65676 + works similar to chroot in that it changes the root filesystem. This
65677 + function could be misused in a chrooted process to attempt to break out
65678 + of the chroot, and therefore should not be allowed. If the sysctl
65679 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
65680 + created.
65681 +
65682 +config GRKERNSEC_CHROOT_CHDIR
65683 + bool "Enforce chdir(\"/\") on all chroots"
65684 + default y if GRKERNSEC_CONFIG_AUTO
65685 + depends on GRKERNSEC_CHROOT
65686 + help
65687 + If you say Y here, the current working directory of all newly-chrooted
65688 + applications will be set to the root directory of the chroot.
65689 + The man page on chroot(2) states:
65690 + Note that this call does not change the current working
65691 + directory, so that `.' can be outside the tree rooted at
65692 + `/'. In particular, the super-user can escape from a
65693 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
65694 +
65695 + It is recommended that you say Y here, since it's not known to break
65696 + any software. If the sysctl option is enabled, a sysctl option with
65697 + name "chroot_enforce_chdir" is created.
65698 +
65699 +config GRKERNSEC_CHROOT_CHMOD
65700 + bool "Deny (f)chmod +s"
65701 + default y if GRKERNSEC_CONFIG_AUTO
65702 + depends on GRKERNSEC_CHROOT
65703 + help
65704 + If you say Y here, processes inside a chroot will not be able to chmod
65705 + or fchmod files to make them have suid or sgid bits. This protects
65706 + against another published method of breaking a chroot. If the sysctl
65707 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
65708 + created.
65709 +
65710 +config GRKERNSEC_CHROOT_FCHDIR
65711 + bool "Deny fchdir out of chroot"
65712 + default y if GRKERNSEC_CONFIG_AUTO
65713 + depends on GRKERNSEC_CHROOT
65714 + help
65715 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
65716 + to a file descriptor of the chrooting process that points to a directory
65717 + outside the filesystem will be stopped. If the sysctl option
65718 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
65719 +
65720 +config GRKERNSEC_CHROOT_MKNOD
65721 + bool "Deny mknod"
65722 + default y if GRKERNSEC_CONFIG_AUTO
65723 + depends on GRKERNSEC_CHROOT
65724 + help
65725 + If you say Y here, processes inside a chroot will not be allowed to
65726 + mknod. The problem with using mknod inside a chroot is that it
65727 + would allow an attacker to create a device entry that is the same
65728 + as one on the physical root of your system, which could be
65729 + anything from the console device to a device for your hard drive (which
65730 + they could then use to wipe the drive or steal data). It is recommended
65731 + that you say Y here, unless you run into software incompatibilities.
65732 + If the sysctl option is enabled, a sysctl option with name
65733 + "chroot_deny_mknod" is created.
65734 +
65735 +config GRKERNSEC_CHROOT_SHMAT
65736 + bool "Deny shmat() out of chroot"
65737 + default y if GRKERNSEC_CONFIG_AUTO
65738 + depends on GRKERNSEC_CHROOT
65739 + help
65740 + If you say Y here, processes inside a chroot will not be able to attach
65741 + to shared memory segments that were created outside of the chroot jail.
65742 + It is recommended that you say Y here. If the sysctl option is enabled,
65743 + a sysctl option with name "chroot_deny_shmat" is created.
65744 +
65745 +config GRKERNSEC_CHROOT_UNIX
65746 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
65747 + default y if GRKERNSEC_CONFIG_AUTO
65748 + depends on GRKERNSEC_CHROOT
65749 + help
65750 + If you say Y here, processes inside a chroot will not be able to
65751 + connect to abstract (meaning not belonging to a filesystem) Unix
65752 + domain sockets that were bound outside of a chroot. It is recommended
65753 + that you say Y here. If the sysctl option is enabled, a sysctl option
65754 + with name "chroot_deny_unix" is created.
65755 +
65756 +config GRKERNSEC_CHROOT_FINDTASK
65757 + bool "Protect outside processes"
65758 + default y if GRKERNSEC_CONFIG_AUTO
65759 + depends on GRKERNSEC_CHROOT
65760 + help
65761 + If you say Y here, processes inside a chroot will not be able to
65762 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
65763 + getsid, or view any process outside of the chroot. If the sysctl
65764 + option is enabled, a sysctl option with name "chroot_findtask" is
65765 + created.
65766 +
65767 +config GRKERNSEC_CHROOT_NICE
65768 + bool "Restrict priority changes"
65769 + default y if GRKERNSEC_CONFIG_AUTO
65770 + depends on GRKERNSEC_CHROOT
65771 + help
65772 + If you say Y here, processes inside a chroot will not be able to raise
65773 + the priority of processes in the chroot, or alter the priority of
65774 + processes outside the chroot. This provides more security than simply
65775 + removing CAP_SYS_NICE from the process' capability set. If the
65776 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
65777 + is created.
65778 +
65779 +config GRKERNSEC_CHROOT_SYSCTL
65780 + bool "Deny sysctl writes"
65781 + default y if GRKERNSEC_CONFIG_AUTO
65782 + depends on GRKERNSEC_CHROOT
65783 + help
65784 + If you say Y here, an attacker in a chroot will not be able to
65785 + write to sysctl entries, either by sysctl(2) or through a /proc
65786 + interface. It is strongly recommended that you say Y here. If the
65787 + sysctl option is enabled, a sysctl option with name
65788 + "chroot_deny_sysctl" is created.
65789 +
65790 +config GRKERNSEC_CHROOT_CAPS
65791 + bool "Capability restrictions"
65792 + default y if GRKERNSEC_CONFIG_AUTO
65793 + depends on GRKERNSEC_CHROOT
65794 + help
65795 + If you say Y here, the capabilities on all processes within a
65796 + chroot jail will be lowered to stop module insertion, raw i/o,
65797 + system and net admin tasks, rebooting the system, modifying immutable
65798 + files, modifying IPC owned by another, and changing the system time.
65799 + This is left an option because it can break some apps. Disable this
65800 + if your chrooted apps are having problems performing those kinds of
65801 + tasks. If the sysctl option is enabled, a sysctl option with
65802 + name "chroot_caps" is created.
65803 +
65804 +config GRKERNSEC_CHROOT_INITRD
65805 + bool "Exempt initrd tasks from restrictions"
65806 + default y if GRKERNSEC_CONFIG_AUTO
65807 + depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
65808 + help
65809 + If you say Y here, tasks started prior to init will be exempted from
65810 + grsecurity's chroot restrictions. This option is mainly meant to
65811 + resolve Plymouth's performing privileged operations unnecessarily
65812 + in a chroot.
65813 +
65814 +endmenu
65815 +menu "Kernel Auditing"
65816 +depends on GRKERNSEC
65817 +
65818 +config GRKERNSEC_AUDIT_GROUP
65819 + bool "Single group for auditing"
65820 + help
65821 + If you say Y here, the exec and chdir logging features will only operate
65822 + on a group you specify. This option is recommended if you only want to
65823 + watch certain users instead of having a large amount of logs from the
65824 + entire system. If the sysctl option is enabled, a sysctl option with
65825 + name "audit_group" is created.
65826 +
65827 +config GRKERNSEC_AUDIT_GID
65828 + int "GID for auditing"
65829 + depends on GRKERNSEC_AUDIT_GROUP
65830 + default 1007
65831 +
65832 +config GRKERNSEC_EXECLOG
65833 + bool "Exec logging"
65834 + help
65835 + If you say Y here, all execve() calls will be logged (since the
65836 + other exec*() calls are frontends to execve(), all execution
65837 + will be logged). Useful for shell-servers that like to keep track
65838 + of their users. If the sysctl option is enabled, a sysctl option with
65839 + name "exec_logging" is created.
65840 + WARNING: This option when enabled will produce a LOT of logs, especially
65841 + on an active system.
65842 +
65843 +config GRKERNSEC_RESLOG
65844 + bool "Resource logging"
65845 + default y if GRKERNSEC_CONFIG_AUTO
65846 + help
65847 + If you say Y here, all attempts to overstep resource limits will
65848 + be logged with the resource name, the requested size, and the current
65849 + limit. It is highly recommended that you say Y here. If the sysctl
65850 + option is enabled, a sysctl option with name "resource_logging" is
65851 + created. If the RBAC system is enabled, the sysctl value is ignored.
65852 +
65853 +config GRKERNSEC_CHROOT_EXECLOG
65854 + bool "Log execs within chroot"
65855 + help
65856 + If you say Y here, all executions inside a chroot jail will be logged
65857 + to syslog. This can cause a large amount of logs if certain
65858 + applications (eg. djb's daemontools) are installed on the system, and
65859 + is therefore left as an option. If the sysctl option is enabled, a
65860 + sysctl option with name "chroot_execlog" is created.
65861 +
65862 +config GRKERNSEC_AUDIT_PTRACE
65863 + bool "Ptrace logging"
65864 + help
65865 + If you say Y here, all attempts to attach to a process via ptrace
65866 + will be logged. If the sysctl option is enabled, a sysctl option
65867 + with name "audit_ptrace" is created.
65868 +
65869 +config GRKERNSEC_AUDIT_CHDIR
65870 + bool "Chdir logging"
65871 + help
65872 + If you say Y here, all chdir() calls will be logged. If the sysctl
65873 + option is enabled, a sysctl option with name "audit_chdir" is created.
65874 +
65875 +config GRKERNSEC_AUDIT_MOUNT
65876 + bool "(Un)Mount logging"
65877 + help
65878 + If you say Y here, all mounts and unmounts will be logged. If the
65879 + sysctl option is enabled, a sysctl option with name "audit_mount" is
65880 + created.
65881 +
65882 +config GRKERNSEC_SIGNAL
65883 + bool "Signal logging"
65884 + default y if GRKERNSEC_CONFIG_AUTO
65885 + help
65886 + If you say Y here, certain important signals will be logged, such as
65887 + SIGSEGV, which will as a result inform you when an error in a program
65888 + occurred, which in some cases could mean a possible exploit attempt.
65889 + If the sysctl option is enabled, a sysctl option with name
65890 + "signal_logging" is created.
65891 +
65892 +config GRKERNSEC_FORKFAIL
65893 + bool "Fork failure logging"
65894 + help
65895 + If you say Y here, all failed fork() attempts will be logged.
65896 + This could suggest a fork bomb, or someone attempting to overstep
65897 + their process limit. If the sysctl option is enabled, a sysctl option
65898 + with name "forkfail_logging" is created.
65899 +
65900 +config GRKERNSEC_TIME
65901 + bool "Time change logging"
65902 + default y if GRKERNSEC_CONFIG_AUTO
65903 + help
65904 + If you say Y here, any changes of the system clock will be logged.
65905 + If the sysctl option is enabled, a sysctl option with name
65906 + "timechange_logging" is created.
65907 +
65908 +config GRKERNSEC_PROC_IPADDR
65909 + bool "/proc/<pid>/ipaddr support"
65910 + default y if GRKERNSEC_CONFIG_AUTO
65911 + help
65912 + If you say Y here, a new entry will be added to each /proc/<pid>
65913 + directory that contains the IP address of the person using the task.
65914 + The IP is carried across local TCP and AF_UNIX stream sockets.
65915 + This information can be useful for IDS/IPSes to perform remote response
65916 + to a local attack. The entry is readable by only the owner of the
65917 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
65918 + the RBAC system), and thus does not create privacy concerns.
65919 +
65920 +config GRKERNSEC_RWXMAP_LOG
65921 + bool 'Denied RWX mmap/mprotect logging'
65922 + default y if GRKERNSEC_CONFIG_AUTO
65923 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
65924 + help
65925 + If you say Y here, calls to mmap() and mprotect() with explicit
65926 + usage of PROT_WRITE and PROT_EXEC together will be logged when
65927 + denied by the PAX_MPROTECT feature. This feature will also
65928 + log other problematic scenarios that can occur when PAX_MPROTECT
65929 + is enabled on a binary, like textrels and PT_GNU_STACK. If the
65930 + sysctl option is enabled, a sysctl option with name "rwxmap_logging"
65931 + is created.
65932 +
65933 +endmenu
65934 +
65935 +menu "Executable Protections"
65936 +depends on GRKERNSEC
65937 +
65938 +config GRKERNSEC_DMESG
65939 + bool "Dmesg(8) restriction"
65940 + default y if GRKERNSEC_CONFIG_AUTO
65941 + help
65942 + If you say Y here, non-root users will not be able to use dmesg(8)
65943 + to view the contents of the kernel's circular log buffer.
65944 + The kernel's log buffer often contains kernel addresses and other
65945 + identifying information useful to an attacker in fingerprinting a
65946 + system for a targeted exploit.
65947 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
65948 + created.
65949 +
65950 +config GRKERNSEC_HARDEN_PTRACE
65951 + bool "Deter ptrace-based process snooping"
65952 + default y if GRKERNSEC_CONFIG_AUTO
65953 + help
65954 + If you say Y here, TTY sniffers and other malicious monitoring
65955 + programs implemented through ptrace will be defeated. If you
65956 + have been using the RBAC system, this option has already been
65957 + enabled for several years for all users, with the ability to make
65958 + fine-grained exceptions.
65959 +
65960 + This option only affects the ability of non-root users to ptrace
65961 + processes that are not a descendant of the ptracing process.
65962 + This means that strace ./binary and gdb ./binary will still work,
65963 + but attaching to arbitrary processes will not. If the sysctl
65964 + option is enabled, a sysctl option with name "harden_ptrace" is
65965 + created.
65966 +
65967 +config GRKERNSEC_PTRACE_READEXEC
65968 + bool "Require read access to ptrace sensitive binaries"
65969 + default y if GRKERNSEC_CONFIG_AUTO
65970 + help
65971 + If you say Y here, unprivileged users will not be able to ptrace unreadable
65972 + binaries. This option is useful in environments that
65973 + remove the read bits (e.g. file mode 4711) from suid binaries to
65974 + prevent infoleaking of their contents. This option adds
65975 + consistency to the use of that file mode, as the binary could normally
65976 + be read out when run without privileges while ptracing.
65977 +
65978 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
65979 + is created.
65980 +
65981 +config GRKERNSEC_SETXID
65982 + bool "Enforce consistent multithreaded privileges"
65983 + default y if GRKERNSEC_CONFIG_AUTO
65984 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
65985 + help
65986 + If you say Y here, a change from a root uid to a non-root uid
65987 + in a multithreaded application will cause the resulting uids,
65988 + gids, supplementary groups, and capabilities in that thread
65989 + to be propagated to the other threads of the process. In most
65990 + cases this is unnecessary, as glibc will emulate this behavior
65991 + on behalf of the application. Other libcs do not act in the
65992 + same way, allowing the other threads of the process to continue
65993 + running with root privileges. If the sysctl option is enabled,
65994 + a sysctl option with name "consistent_setxid" is created.
65995 +
65996 +config GRKERNSEC_HARDEN_IPC
65997 + bool "Disallow access to overly-permissive IPC objects"
65998 + default y if GRKERNSEC_CONFIG_AUTO
65999 + depends on SYSVIPC
66000 + help
66001 + If you say Y here, access to overly-permissive IPC objects (shared
66002 + memory, message queues, and semaphores) will be denied for processes
66003 + given the following criteria beyond normal permission checks:
66004 + 1) If the IPC object is world-accessible and the euid doesn't match
66005 + that of the creator or current uid for the IPC object
66006 + 2) If the IPC object is group-accessible and the egid doesn't
66007 + match that of the creator or current gid for the IPC object
66008 + It's a common error to grant too much permission to these objects,
66009 + with impact ranging from denial of service and information leaking to
66010 + privilege escalation. This feature was developed in response to
66011 + research by Tim Brown:
66012 + http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
66013 + who found hundreds of such insecure usages. Processes with
66014 + CAP_IPC_OWNER are still permitted to access these IPC objects.
66015 + If the sysctl option is enabled, a sysctl option with name
66016 + "harden_ipc" is created.
66017 +
66018 +config GRKERNSEC_TPE
66019 + bool "Trusted Path Execution (TPE)"
66020 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
66021 + help
66022 + If you say Y here, you will be able to choose a gid to add to the
66023 + supplementary groups of users you want to mark as "untrusted."
66024 + These users will not be able to execute any files that are not in
66025 + root-owned directories writable only by root. If the sysctl option
66026 + is enabled, a sysctl option with name "tpe" is created.
66027 +
66028 +config GRKERNSEC_TPE_ALL
66029 + bool "Partially restrict all non-root users"
66030 + depends on GRKERNSEC_TPE
66031 + help
66032 + If you say Y here, all non-root users will be covered under
66033 + a weaker TPE restriction. This is separate from, and in addition to,
66034 + the main TPE options that you have selected elsewhere. Thus, if a
66035 + "trusted" GID is chosen, this restriction applies to even that GID.
66036 + Under this restriction, all non-root users will only be allowed to
66037 + execute files in directories they own that are not group or
66038 + world-writable, or in directories owned by root and writable only by
66039 + root. If the sysctl option is enabled, a sysctl option with name
66040 + "tpe_restrict_all" is created.
66041 +
66042 +config GRKERNSEC_TPE_INVERT
66043 + bool "Invert GID option"
66044 + depends on GRKERNSEC_TPE
66045 + help
66046 + If you say Y here, the group you specify in the TPE configuration will
66047 + decide what group TPE restrictions will be *disabled* for. This
66048 + option is useful if you want TPE restrictions to be applied to most
66049 + users on the system. If the sysctl option is enabled, a sysctl option
66050 + with name "tpe_invert" is created. Unlike other sysctl options, this
66051 + entry will default to on for backward-compatibility.
66052 +
66053 +config GRKERNSEC_TPE_GID
66054 + int
66055 + default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
66056 + default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
66057 +
66058 +config GRKERNSEC_TPE_UNTRUSTED_GID
66059 + int "GID for TPE-untrusted users"
66060 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
66061 + default 1005
66062 + help
66063 + Setting this GID determines what group TPE restrictions will be
66064 + *enabled* for. If the sysctl option is enabled, a sysctl option
66065 + with name "tpe_gid" is created.
66066 +
66067 +config GRKERNSEC_TPE_TRUSTED_GID
66068 + int "GID for TPE-trusted users"
66069 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
66070 + default 1005
66071 + help
66072 + Setting this GID determines what group TPE restrictions will be
66073 + *disabled* for. If the sysctl option is enabled, a sysctl option
66074 + with name "tpe_gid" is created.
66075 +
66076 +endmenu
66077 +menu "Network Protections"
66078 +depends on GRKERNSEC
66079 +
66080 +config GRKERNSEC_RANDNET
66081 + bool "Larger entropy pools"
66082 + default y if GRKERNSEC_CONFIG_AUTO
66083 + help
66084 + If you say Y here, the entropy pools used for many features of Linux
66085 + and grsecurity will be doubled in size. Since several grsecurity
66086 + features use additional randomness, it is recommended that you say Y
66087 + here. Saying Y here has a similar effect as modifying
66088 + /proc/sys/kernel/random/poolsize.
66089 +
66090 +config GRKERNSEC_BLACKHOLE
66091 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
66092 + default y if GRKERNSEC_CONFIG_AUTO
66093 + depends on NET
66094 + help
66095 + If you say Y here, neither TCP resets nor ICMP
66096 + destination-unreachable packets will be sent in response to packets
66097 + sent to ports for which no associated listening process exists.
66098 + This feature supports both IPV4 and IPV6 and exempts the
66099 + loopback interface from blackholing. Enabling this feature
66100 + makes a host more resilient to DoS attacks and reduces network
66101 + visibility against scanners.
66102 +
66103 + The blackhole feature as-implemented is equivalent to the FreeBSD
66104 + blackhole feature, as it prevents RST responses to all packets, not
66105 + just SYNs. Under most application behavior this causes no
66106 + problems, but applications (like haproxy) may not close certain
66107 + connections in a way that cleanly terminates them on the remote
66108 + end, leaving the remote host in LAST_ACK state. Because of this
66109 + side-effect and to prevent intentional LAST_ACK DoSes, this
66110 + feature also adds automatic mitigation against such attacks.
66111 + The mitigation drastically reduces the amount of time a socket
66112 + can spend in LAST_ACK state. If you're using haproxy and not
66113 + all servers it connects to have this option enabled, consider
66114 + disabling this feature on the haproxy host.
66115 +
66116 + If the sysctl option is enabled, two sysctl options with names
66117 + "ip_blackhole" and "lastack_retries" will be created.
66118 + While "ip_blackhole" takes the standard zero/non-zero on/off
66119 + toggle, "lastack_retries" uses the same kinds of values as
66120 + "tcp_retries1" and "tcp_retries2". The default value of 4
66121 + prevents a socket from lasting more than 45 seconds in LAST_ACK
66122 + state.
66123 +
66124 +config GRKERNSEC_NO_SIMULT_CONNECT
66125 + bool "Disable TCP Simultaneous Connect"
66126 + default y if GRKERNSEC_CONFIG_AUTO
66127 + depends on NET
66128 + help
66129 + If you say Y here, a feature by Willy Tarreau will be enabled that
66130 + removes a weakness in Linux's strict implementation of TCP that
66131 + allows two clients to connect to each other without either entering
66132 + a listening state. The weakness allows an attacker to easily prevent
66133 + a client from connecting to a known server provided the source port
66134 + for the connection is guessed correctly.
66135 +
66136 + As the weakness could be used to prevent an antivirus or IPS from
66137 + fetching updates, or prevent an SSL gateway from fetching a CRL,
66138 + it should be eliminated by enabling this option. Though Linux is
66139 + one of few operating systems supporting simultaneous connect, it
66140 + has no legitimate use in practice and is rarely supported by firewalls.
66141 +
66142 +config GRKERNSEC_SOCKET
66143 + bool "Socket restrictions"
66144 + depends on NET
66145 + help
66146 + If you say Y here, you will be able to choose from several options.
66147 + If you assign a GID on your system and add it to the supplementary
66148 + groups of users you want to restrict socket access to, this patch
66149 + will perform up to three things, based on the option(s) you choose.
66150 +
66151 +config GRKERNSEC_SOCKET_ALL
66152 + bool "Deny any sockets to group"
66153 + depends on GRKERNSEC_SOCKET
66154 + help
66155 + If you say Y here, you will be able to choose a GID of whose users will
66156 + be unable to connect to other hosts from your machine or run server
66157 + applications from your machine. If the sysctl option is enabled, a
66158 + sysctl option with name "socket_all" is created.
66159 +
66160 +config GRKERNSEC_SOCKET_ALL_GID
66161 + int "GID to deny all sockets for"
66162 + depends on GRKERNSEC_SOCKET_ALL
66163 + default 1004
66164 + help
66165 + Here you can choose the GID to disable socket access for. Remember to
66166 + add the users you want socket access disabled for to the GID
66167 + specified here. If the sysctl option is enabled, a sysctl option
66168 + with name "socket_all_gid" is created.
66169 +
66170 +config GRKERNSEC_SOCKET_CLIENT
66171 + bool "Deny client sockets to group"
66172 + depends on GRKERNSEC_SOCKET
66173 + help
66174 + If you say Y here, you will be able to choose a GID of whose users will
66175 + be unable to connect to other hosts from your machine, but will be
66176 + able to run servers. If this option is enabled, all users in the group
66177 + you specify will have to use passive mode when initiating ftp transfers
66178 + from the shell on your machine. If the sysctl option is enabled, a
66179 + sysctl option with name "socket_client" is created.
66180 +
66181 +config GRKERNSEC_SOCKET_CLIENT_GID
66182 + int "GID to deny client sockets for"
66183 + depends on GRKERNSEC_SOCKET_CLIENT
66184 + default 1003
66185 + help
66186 + Here you can choose the GID to disable client socket access for.
66187 + Remember to add the users you want client socket access disabled for to
66188 + the GID specified here. If the sysctl option is enabled, a sysctl
66189 + option with name "socket_client_gid" is created.
66190 +
66191 +config GRKERNSEC_SOCKET_SERVER
66192 + bool "Deny server sockets to group"
66193 + depends on GRKERNSEC_SOCKET
66194 + help
66195 + If you say Y here, you will be able to choose a GID of whose users will
66196 + be unable to run server applications from your machine. If the sysctl
66197 + option is enabled, a sysctl option with name "socket_server" is created.
66198 +
66199 +config GRKERNSEC_SOCKET_SERVER_GID
66200 + int "GID to deny server sockets for"
66201 + depends on GRKERNSEC_SOCKET_SERVER
66202 + default 1002
66203 + help
66204 + Here you can choose the GID to disable server socket access for.
66205 + Remember to add the users you want server socket access disabled for to
66206 + the GID specified here. If the sysctl option is enabled, a sysctl
66207 + option with name "socket_server_gid" is created.
66208 +
66209 +endmenu
66210 +
66211 +menu "Physical Protections"
66212 +depends on GRKERNSEC
66213 +
66214 +config GRKERNSEC_DENYUSB
66215 + bool "Deny new USB connections after toggle"
66216 + default y if GRKERNSEC_CONFIG_AUTO
66217 + depends on SYSCTL && USB_SUPPORT
66218 + help
66219 + If you say Y here, a new sysctl option with name "deny_new_usb"
66220 + will be created. Setting its value to 1 will prevent any new
66221 + USB devices from being recognized by the OS. Any attempted USB
66222 + device insertion will be logged. This option is intended to be
66223 + used against custom USB devices designed to exploit vulnerabilities
66224 + in various USB device drivers.
66225 +
66226 + For greatest effectiveness, this sysctl should be set after any
66227 + relevant init scripts. This option is safe to enable in distros
66228 + as each user can choose whether or not to toggle the sysctl.
66229 +
66230 +config GRKERNSEC_DENYUSB_FORCE
66231 + bool "Reject all USB devices not connected at boot"
66232 + select USB
66233 + depends on GRKERNSEC_DENYUSB
66234 + help
66235 + If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
66236 + that doesn't involve a sysctl entry. This option should only be
66237 + enabled if you're sure you want to deny all new USB connections
66238 + at runtime and don't want to modify init scripts. This should not
66239 + be enabled by distros. It forces the core USB code to be built
66240 + into the kernel image so that all devices connected at boot time
66241 + can be recognized and new USB device connections can be prevented
66242 + prior to init running.
66243 +
66244 +endmenu
66245 +
66246 +menu "Sysctl Support"
66247 +depends on GRKERNSEC && SYSCTL
66248 +
66249 +config GRKERNSEC_SYSCTL
66250 + bool "Sysctl support"
66251 + default y if GRKERNSEC_CONFIG_AUTO
66252 + help
66253 + If you say Y here, you will be able to change the options that
66254 + grsecurity runs with at bootup, without having to recompile your
66255 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
66256 + to enable (1) or disable (0) various features. All the sysctl entries
66257 + are mutable until the "grsec_lock" entry is set to a non-zero value.
66258 + All features enabled in the kernel configuration are disabled at boot
66259 + if you do not say Y to the "Turn on features by default" option.
66260 + All options should be set at startup, and the grsec_lock entry should
66261 + be set to a non-zero value after all the options are set.
66262 + *THIS IS EXTREMELY IMPORTANT*
66263 +
66264 +config GRKERNSEC_SYSCTL_DISTRO
66265 + bool "Extra sysctl support for distro makers (READ HELP)"
66266 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
66267 + help
66268 + If you say Y here, additional sysctl options will be created
66269 + for features that affect processes running as root. Therefore,
66270 + it is critical when using this option that the grsec_lock entry be
66271 + enabled after boot. Only distros with prebuilt kernel packages
66272 + with this option enabled that can ensure grsec_lock is enabled
66273 + after boot should use this option.
66274 + *Failure to set grsec_lock after boot makes all grsec features
66275 + this option covers useless*
66276 +
66277 + Currently this option creates the following sysctl entries:
66278 + "Disable Privileged I/O": "disable_priv_io"
66279 +
66280 +config GRKERNSEC_SYSCTL_ON
66281 + bool "Turn on features by default"
66282 + default y if GRKERNSEC_CONFIG_AUTO
66283 + depends on GRKERNSEC_SYSCTL
66284 + help
66285 + If you say Y here, instead of having all features enabled in the
66286 + kernel configuration disabled at boot time, the features will be
66287 + enabled at boot time. It is recommended you say Y here unless
66288 + there is some reason you would want all sysctl-tunable features to
66289 + be disabled by default. As mentioned elsewhere, it is important
66290 + to enable the grsec_lock entry once you have finished modifying
66291 + the sysctl entries.
66292 +
66293 +endmenu
66294 +menu "Logging Options"
66295 +depends on GRKERNSEC
66296 +
66297 +config GRKERNSEC_FLOODTIME
66298 + int "Seconds in between log messages (minimum)"
66299 + default 10
66300 + help
66301 + This option allows you to enforce the number of seconds between
66302 + grsecurity log messages. The default should be suitable for most
66303 + people, however, if you choose to change it, choose a value small enough
66304 + to allow informative logs to be produced, but large enough to
66305 + prevent flooding.
66306 +
66307 + Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
66308 + any rate limiting on grsecurity log messages.
66309 +
66310 +config GRKERNSEC_FLOODBURST
66311 + int "Number of messages in a burst (maximum)"
66312 + default 6
66313 + help
66314 + This option allows you to choose the maximum number of messages allowed
66315 + within the flood time interval you chose in a separate option. The
66316 + default should be suitable for most people, however if you find that
66317 + many of your logs are being interpreted as flooding, you may want to
66318 + raise this value.
66319 +
66320 + Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
66321 + any rate limiting on grsecurity log messages.
66322 +
66323 +endmenu
66324 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
66325 new file mode 100644
66326 index 0000000..5307c8a
66327 --- /dev/null
66328 +++ b/grsecurity/Makefile
66329 @@ -0,0 +1,54 @@
66330 +# grsecurity – access control and security hardening for Linux
66331 +# All code in this directory and various hooks located throughout the Linux kernel are
66332 +# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
66333 +# http://www.grsecurity.net spender@grsecurity.net
66334 +#
66335 +# This program is free software; you can redistribute it and/or
66336 +# modify it under the terms of the GNU General Public License version 2
66337 +# as published by the Free Software Foundation.
66338 +#
66339 +# This program is distributed in the hope that it will be useful,
66340 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
66341 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
66342 +# GNU General Public License for more details.
66343 +#
66344 +# You should have received a copy of the GNU General Public License
66345 +# along with this program; if not, write to the Free Software
66346 +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
66347 +
66348 +KBUILD_CFLAGS += -Werror
66349 +
66350 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
66351 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
66352 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
66353 + grsec_usb.o grsec_ipc.o
66354 +
66355 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
66356 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
66357 + gracl_learn.o grsec_log.o gracl_policy.o
66358 +ifdef CONFIG_COMPAT
66359 +obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
66360 +endif
66361 +
66362 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
66363 +
66364 +ifdef CONFIG_NET
66365 +obj-y += grsec_sock.o
66366 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
66367 +endif
66368 +
66369 +ifndef CONFIG_GRKERNSEC
66370 +obj-y += grsec_disabled.o
66371 +endif
66372 +
66373 +ifdef CONFIG_GRKERNSEC_HIDESYM
66374 +extra-y := grsec_hidesym.o
66375 +$(obj)/grsec_hidesym.o:
66376 + @-chmod -f 500 /boot
66377 + @-chmod -f 500 /lib/modules
66378 + @-chmod -f 500 /lib64/modules
66379 + @-chmod -f 500 /lib32/modules
66380 + @-chmod -f 700 .
66381 + @-chmod -f 700 $(objtree)
66382 + @echo ' grsec: protected kernel image paths'
66383 +endif
66384 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
66385 new file mode 100644
66386 index 0000000..e56396f
66387 --- /dev/null
66388 +++ b/grsecurity/gracl.c
66389 @@ -0,0 +1,2679 @@
66390 +#include <linux/kernel.h>
66391 +#include <linux/module.h>
66392 +#include <linux/sched.h>
66393 +#include <linux/mm.h>
66394 +#include <linux/file.h>
66395 +#include <linux/fs.h>
66396 +#include <linux/namei.h>
66397 +#include <linux/mount.h>
66398 +#include <linux/tty.h>
66399 +#include <linux/proc_fs.h>
66400 +#include <linux/lglock.h>
66401 +#include <linux/slab.h>
66402 +#include <linux/vmalloc.h>
66403 +#include <linux/types.h>
66404 +#include <linux/sysctl.h>
66405 +#include <linux/netdevice.h>
66406 +#include <linux/ptrace.h>
66407 +#include <linux/gracl.h>
66408 +#include <linux/gralloc.h>
66409 +#include <linux/security.h>
66410 +#include <linux/grinternal.h>
66411 +#include <linux/pid_namespace.h>
66412 +#include <linux/stop_machine.h>
66413 +#include <linux/fdtable.h>
66414 +#include <linux/percpu.h>
66415 +#include <linux/lglock.h>
66416 +#include <linux/hugetlb.h>
66417 +#include <linux/posix-timers.h>
66418 +#include <linux/prefetch.h>
66419 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66420 +#include <linux/magic.h>
66421 +#include <linux/pagemap.h>
66422 +#include "../fs/btrfs/async-thread.h"
66423 +#include "../fs/btrfs/ctree.h"
66424 +#include "../fs/btrfs/btrfs_inode.h"
66425 +#endif
66426 +#include "../fs/mount.h"
66427 +
66428 +#include <asm/uaccess.h>
66429 +#include <asm/errno.h>
66430 +#include <asm/mman.h>
66431 +
66432 +#define FOR_EACH_ROLE_START(role) \
66433 + role = running_polstate.role_list; \
66434 + while (role) {
66435 +
66436 +#define FOR_EACH_ROLE_END(role) \
66437 + role = role->prev; \
66438 + }
66439 +
66440 +extern struct path gr_real_root;
66441 +
66442 +static struct gr_policy_state running_polstate;
66443 +struct gr_policy_state *polstate = &running_polstate;
66444 +extern struct gr_alloc_state *current_alloc_state;
66445 +
66446 +extern char *gr_shared_page[4];
66447 +DEFINE_RWLOCK(gr_inode_lock);
66448 +
66449 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
66450 +
66451 +#ifdef CONFIG_NET
66452 +extern struct vfsmount *sock_mnt;
66453 +#endif
66454 +
66455 +extern struct vfsmount *pipe_mnt;
66456 +extern struct vfsmount *shm_mnt;
66457 +
66458 +#ifdef CONFIG_HUGETLBFS
66459 +extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
66460 +#endif
66461 +
66462 +extern u16 acl_sp_role_value;
66463 +extern struct acl_object_label *fakefs_obj_rw;
66464 +extern struct acl_object_label *fakefs_obj_rwx;
66465 +
66466 +int gr_acl_is_enabled(void)
66467 +{
66468 + return (gr_status & GR_READY);
66469 +}
66470 +
66471 +void gr_enable_rbac_system(void)
66472 +{
66473 + pax_open_kernel();
66474 + gr_status |= GR_READY;
66475 + pax_close_kernel();
66476 +}
66477 +
66478 +int gr_rbac_disable(void *unused)
66479 +{
66480 + pax_open_kernel();
66481 + gr_status &= ~GR_READY;
66482 + pax_close_kernel();
66483 +
66484 + return 0;
66485 +}
66486 +
66487 +static inline dev_t __get_dev(const struct dentry *dentry)
66488 +{
66489 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66490 + if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
66491 + return BTRFS_I(dentry->d_inode)->root->anon_dev;
66492 + else
66493 +#endif
66494 + return dentry->d_sb->s_dev;
66495 +}
66496 +
66497 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
66498 +{
66499 + return __get_dev(dentry);
66500 +}
66501 +
66502 +static char gr_task_roletype_to_char(struct task_struct *task)
66503 +{
66504 + switch (task->role->roletype &
66505 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
66506 + GR_ROLE_SPECIAL)) {
66507 + case GR_ROLE_DEFAULT:
66508 + return 'D';
66509 + case GR_ROLE_USER:
66510 + return 'U';
66511 + case GR_ROLE_GROUP:
66512 + return 'G';
66513 + case GR_ROLE_SPECIAL:
66514 + return 'S';
66515 + }
66516 +
66517 + return 'X';
66518 +}
66519 +
66520 +char gr_roletype_to_char(void)
66521 +{
66522 + return gr_task_roletype_to_char(current);
66523 +}
66524 +
66525 +__inline__ int
66526 +gr_acl_tpe_check(void)
66527 +{
66528 + if (unlikely(!(gr_status & GR_READY)))
66529 + return 0;
66530 + if (current->role->roletype & GR_ROLE_TPE)
66531 + return 1;
66532 + else
66533 + return 0;
66534 +}
66535 +
66536 +int
66537 +gr_handle_rawio(const struct inode *inode)
66538 +{
66539 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66540 + if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
66541 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
66542 + !capable(CAP_SYS_RAWIO))
66543 + return 1;
66544 +#endif
66545 + return 0;
66546 +}
66547 +
66548 +int
66549 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
66550 +{
66551 + if (likely(lena != lenb))
66552 + return 0;
66553 +
66554 + return !memcmp(a, b, lena);
66555 +}
66556 +
66557 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
66558 +{
66559 + *buflen -= namelen;
66560 + if (*buflen < 0)
66561 + return -ENAMETOOLONG;
66562 + *buffer -= namelen;
66563 + memcpy(*buffer, str, namelen);
66564 + return 0;
66565 +}
66566 +
66567 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
66568 +{
66569 + return prepend(buffer, buflen, name->name, name->len);
66570 +}
66571 +
66572 +static int prepend_path(const struct path *path, struct path *root,
66573 + char **buffer, int *buflen)
66574 +{
66575 + struct dentry *dentry = path->dentry;
66576 + struct vfsmount *vfsmnt = path->mnt;
66577 + struct mount *mnt = real_mount(vfsmnt);
66578 + bool slash = false;
66579 + int error = 0;
66580 +
66581 + while (dentry != root->dentry || vfsmnt != root->mnt) {
66582 + struct dentry * parent;
66583 +
66584 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
66585 + /* Global root? */
66586 + if (!mnt_has_parent(mnt)) {
66587 + goto out;
66588 + }
66589 + dentry = mnt->mnt_mountpoint;
66590 + mnt = mnt->mnt_parent;
66591 + vfsmnt = &mnt->mnt;
66592 + continue;
66593 + }
66594 + parent = dentry->d_parent;
66595 + prefetch(parent);
66596 + spin_lock(&dentry->d_lock);
66597 + error = prepend_name(buffer, buflen, &dentry->d_name);
66598 + spin_unlock(&dentry->d_lock);
66599 + if (!error)
66600 + error = prepend(buffer, buflen, "/", 1);
66601 + if (error)
66602 + break;
66603 +
66604 + slash = true;
66605 + dentry = parent;
66606 + }
66607 +
66608 +out:
66609 + if (!error && !slash)
66610 + error = prepend(buffer, buflen, "/", 1);
66611 +
66612 + return error;
66613 +}
66614 +
66615 +/* this must be called with mount_lock and rename_lock held */
66616 +
66617 +static char *__our_d_path(const struct path *path, struct path *root,
66618 + char *buf, int buflen)
66619 +{
66620 + char *res = buf + buflen;
66621 + int error;
66622 +
66623 + prepend(&res, &buflen, "\0", 1);
66624 + error = prepend_path(path, root, &res, &buflen);
66625 + if (error)
66626 + return ERR_PTR(error);
66627 +
66628 + return res;
66629 +}
66630 +
66631 +static char *
66632 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
66633 +{
66634 + char *retval;
66635 +
66636 + retval = __our_d_path(path, root, buf, buflen);
66637 + if (unlikely(IS_ERR(retval)))
66638 + retval = strcpy(buf, "<path too long>");
66639 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
66640 + retval[1] = '\0';
66641 +
66642 + return retval;
66643 +}
66644 +
66645 +static char *
66646 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
66647 + char *buf, int buflen)
66648 +{
66649 + struct path path;
66650 + char *res;
66651 +
66652 + path.dentry = (struct dentry *)dentry;
66653 + path.mnt = (struct vfsmount *)vfsmnt;
66654 +
66655 + /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
66656 + by the RBAC system */
66657 + res = gen_full_path(&path, &gr_real_root, buf, buflen);
66658 +
66659 + return res;
66660 +}
66661 +
66662 +static char *
66663 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
66664 + char *buf, int buflen)
66665 +{
66666 + char *res;
66667 + struct path path;
66668 + struct path root;
66669 + struct task_struct *reaper = init_pid_ns.child_reaper;
66670 +
66671 + path.dentry = (struct dentry *)dentry;
66672 + path.mnt = (struct vfsmount *)vfsmnt;
66673 +
66674 + /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
66675 + get_fs_root(reaper->fs, &root);
66676 +
66677 + read_seqlock_excl(&mount_lock);
66678 + write_seqlock(&rename_lock);
66679 + res = gen_full_path(&path, &root, buf, buflen);
66680 + write_sequnlock(&rename_lock);
66681 + read_sequnlock_excl(&mount_lock);
66682 +
66683 + path_put(&root);
66684 + return res;
66685 +}
66686 +
66687 +char *
66688 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
66689 +{
66690 + char *ret;
66691 + read_seqlock_excl(&mount_lock);
66692 + write_seqlock(&rename_lock);
66693 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
66694 + PAGE_SIZE);
66695 + write_sequnlock(&rename_lock);
66696 + read_sequnlock_excl(&mount_lock);
66697 + return ret;
66698 +}
66699 +
66700 +static char *
66701 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
66702 +{
66703 + char *ret;
66704 + char *buf;
66705 + int buflen;
66706 +
66707 + read_seqlock_excl(&mount_lock);
66708 + write_seqlock(&rename_lock);
66709 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
66710 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
66711 + buflen = (int)(ret - buf);
66712 + if (buflen >= 5)
66713 + prepend(&ret, &buflen, "/proc", 5);
66714 + else
66715 + ret = strcpy(buf, "<path too long>");
66716 + write_sequnlock(&rename_lock);
66717 + read_sequnlock_excl(&mount_lock);
66718 + return ret;
66719 +}
66720 +
66721 +char *
66722 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
66723 +{
66724 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
66725 + PAGE_SIZE);
66726 +}
66727 +
66728 +char *
66729 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
66730 +{
66731 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66732 + PAGE_SIZE);
66733 +}
66734 +
66735 +char *
66736 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
66737 +{
66738 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
66739 + PAGE_SIZE);
66740 +}
66741 +
66742 +char *
66743 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
66744 +{
66745 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
66746 + PAGE_SIZE);
66747 +}
66748 +
66749 +char *
66750 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
66751 +{
66752 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
66753 + PAGE_SIZE);
66754 +}
66755 +
66756 +__inline__ __u32
66757 +to_gr_audit(const __u32 reqmode)
66758 +{
66759 + /* masks off auditable permission flags, then shifts them to create
66760 + auditing flags, and adds the special case of append auditing if
66761 + we're requesting write */
66762 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
66763 +}
66764 +
66765 +struct acl_role_label *
66766 +__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
66767 + const gid_t gid)
66768 +{
66769 + unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
66770 + struct acl_role_label *match;
66771 + struct role_allowed_ip *ipp;
66772 + unsigned int x;
66773 + u32 curr_ip = task->signal->saved_ip;
66774 +
66775 + match = state->acl_role_set.r_hash[index];
66776 +
66777 + while (match) {
66778 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
66779 + for (x = 0; x < match->domain_child_num; x++) {
66780 + if (match->domain_children[x] == uid)
66781 + goto found;
66782 + }
66783 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
66784 + break;
66785 + match = match->next;
66786 + }
66787 +found:
66788 + if (match == NULL) {
66789 + try_group:
66790 + index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
66791 + match = state->acl_role_set.r_hash[index];
66792 +
66793 + while (match) {
66794 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
66795 + for (x = 0; x < match->domain_child_num; x++) {
66796 + if (match->domain_children[x] == gid)
66797 + goto found2;
66798 + }
66799 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
66800 + break;
66801 + match = match->next;
66802 + }
66803 +found2:
66804 + if (match == NULL)
66805 + match = state->default_role;
66806 + if (match->allowed_ips == NULL)
66807 + return match;
66808 + else {
66809 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
66810 + if (likely
66811 + ((ntohl(curr_ip) & ipp->netmask) ==
66812 + (ntohl(ipp->addr) & ipp->netmask)))
66813 + return match;
66814 + }
66815 + match = state->default_role;
66816 + }
66817 + } else if (match->allowed_ips == NULL) {
66818 + return match;
66819 + } else {
66820 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
66821 + if (likely
66822 + ((ntohl(curr_ip) & ipp->netmask) ==
66823 + (ntohl(ipp->addr) & ipp->netmask)))
66824 + return match;
66825 + }
66826 + goto try_group;
66827 + }
66828 +
66829 + return match;
66830 +}
66831 +
66832 +static struct acl_role_label *
66833 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
66834 + const gid_t gid)
66835 +{
66836 + return __lookup_acl_role_label(&running_polstate, task, uid, gid);
66837 +}
66838 +
66839 +struct acl_subject_label *
66840 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
66841 + const struct acl_role_label *role)
66842 +{
66843 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
66844 + struct acl_subject_label *match;
66845 +
66846 + match = role->subj_hash[index];
66847 +
66848 + while (match && (match->inode != ino || match->device != dev ||
66849 + (match->mode & GR_DELETED))) {
66850 + match = match->next;
66851 + }
66852 +
66853 + if (match && !(match->mode & GR_DELETED))
66854 + return match;
66855 + else
66856 + return NULL;
66857 +}
66858 +
66859 +struct acl_subject_label *
66860 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
66861 + const struct acl_role_label *role)
66862 +{
66863 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
66864 + struct acl_subject_label *match;
66865 +
66866 + match = role->subj_hash[index];
66867 +
66868 + while (match && (match->inode != ino || match->device != dev ||
66869 + !(match->mode & GR_DELETED))) {
66870 + match = match->next;
66871 + }
66872 +
66873 + if (match && (match->mode & GR_DELETED))
66874 + return match;
66875 + else
66876 + return NULL;
66877 +}
66878 +
66879 +static struct acl_object_label *
66880 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
66881 + const struct acl_subject_label *subj)
66882 +{
66883 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
66884 + struct acl_object_label *match;
66885 +
66886 + match = subj->obj_hash[index];
66887 +
66888 + while (match && (match->inode != ino || match->device != dev ||
66889 + (match->mode & GR_DELETED))) {
66890 + match = match->next;
66891 + }
66892 +
66893 + if (match && !(match->mode & GR_DELETED))
66894 + return match;
66895 + else
66896 + return NULL;
66897 +}
66898 +
66899 +static struct acl_object_label *
66900 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
66901 + const struct acl_subject_label *subj)
66902 +{
66903 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
66904 + struct acl_object_label *match;
66905 +
66906 + match = subj->obj_hash[index];
66907 +
66908 + while (match && (match->inode != ino || match->device != dev ||
66909 + !(match->mode & GR_DELETED))) {
66910 + match = match->next;
66911 + }
66912 +
66913 + if (match && (match->mode & GR_DELETED))
66914 + return match;
66915 +
66916 + match = subj->obj_hash[index];
66917 +
66918 + while (match && (match->inode != ino || match->device != dev ||
66919 + (match->mode & GR_DELETED))) {
66920 + match = match->next;
66921 + }
66922 +
66923 + if (match && !(match->mode & GR_DELETED))
66924 + return match;
66925 + else
66926 + return NULL;
66927 +}
66928 +
66929 +struct name_entry *
66930 +__lookup_name_entry(const struct gr_policy_state *state, const char *name)
66931 +{
66932 + unsigned int len = strlen(name);
66933 + unsigned int key = full_name_hash(name, len);
66934 + unsigned int index = key % state->name_set.n_size;
66935 + struct name_entry *match;
66936 +
66937 + match = state->name_set.n_hash[index];
66938 +
66939 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
66940 + match = match->next;
66941 +
66942 + return match;
66943 +}
66944 +
66945 +static struct name_entry *
66946 +lookup_name_entry(const char *name)
66947 +{
66948 + return __lookup_name_entry(&running_polstate, name);
66949 +}
66950 +
66951 +static struct name_entry *
66952 +lookup_name_entry_create(const char *name)
66953 +{
66954 + unsigned int len = strlen(name);
66955 + unsigned int key = full_name_hash(name, len);
66956 + unsigned int index = key % running_polstate.name_set.n_size;
66957 + struct name_entry *match;
66958 +
66959 + match = running_polstate.name_set.n_hash[index];
66960 +
66961 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66962 + !match->deleted))
66963 + match = match->next;
66964 +
66965 + if (match && match->deleted)
66966 + return match;
66967 +
66968 + match = running_polstate.name_set.n_hash[index];
66969 +
66970 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66971 + match->deleted))
66972 + match = match->next;
66973 +
66974 + if (match && !match->deleted)
66975 + return match;
66976 + else
66977 + return NULL;
66978 +}
66979 +
66980 +static struct inodev_entry *
66981 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
66982 +{
66983 + unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
66984 + struct inodev_entry *match;
66985 +
66986 + match = running_polstate.inodev_set.i_hash[index];
66987 +
66988 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
66989 + match = match->next;
66990 +
66991 + return match;
66992 +}
66993 +
66994 +void
66995 +__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
66996 +{
66997 + unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
66998 + state->inodev_set.i_size);
66999 + struct inodev_entry **curr;
67000 +
67001 + entry->prev = NULL;
67002 +
67003 + curr = &state->inodev_set.i_hash[index];
67004 + if (*curr != NULL)
67005 + (*curr)->prev = entry;
67006 +
67007 + entry->next = *curr;
67008 + *curr = entry;
67009 +
67010 + return;
67011 +}
67012 +
67013 +static void
67014 +insert_inodev_entry(struct inodev_entry *entry)
67015 +{
67016 + __insert_inodev_entry(&running_polstate, entry);
67017 +}
67018 +
67019 +void
67020 +insert_acl_obj_label(struct acl_object_label *obj,
67021 + struct acl_subject_label *subj)
67022 +{
67023 + unsigned int index =
67024 + gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
67025 + struct acl_object_label **curr;
67026 +
67027 + obj->prev = NULL;
67028 +
67029 + curr = &subj->obj_hash[index];
67030 + if (*curr != NULL)
67031 + (*curr)->prev = obj;
67032 +
67033 + obj->next = *curr;
67034 + *curr = obj;
67035 +
67036 + return;
67037 +}
67038 +
67039 +void
67040 +insert_acl_subj_label(struct acl_subject_label *obj,
67041 + struct acl_role_label *role)
67042 +{
67043 + unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
67044 + struct acl_subject_label **curr;
67045 +
67046 + obj->prev = NULL;
67047 +
67048 + curr = &role->subj_hash[index];
67049 + if (*curr != NULL)
67050 + (*curr)->prev = obj;
67051 +
67052 + obj->next = *curr;
67053 + *curr = obj;
67054 +
67055 + return;
67056 +}
67057 +
67058 +/* derived from glibc fnmatch() 0: match, 1: no match*/
67059 +
67060 +static int
67061 +glob_match(const char *p, const char *n)
67062 +{
67063 + char c;
67064 +
67065 + while ((c = *p++) != '\0') {
67066 + switch (c) {
67067 + case '?':
67068 + if (*n == '\0')
67069 + return 1;
67070 + else if (*n == '/')
67071 + return 1;
67072 + break;
67073 + case '\\':
67074 + if (*n != c)
67075 + return 1;
67076 + break;
67077 + case '*':
67078 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
67079 + if (*n == '/')
67080 + return 1;
67081 + else if (c == '?') {
67082 + if (*n == '\0')
67083 + return 1;
67084 + else
67085 + ++n;
67086 + }
67087 + }
67088 + if (c == '\0') {
67089 + return 0;
67090 + } else {
67091 + const char *endp;
67092 +
67093 + if ((endp = strchr(n, '/')) == NULL)
67094 + endp = n + strlen(n);
67095 +
67096 + if (c == '[') {
67097 + for (--p; n < endp; ++n)
67098 + if (!glob_match(p, n))
67099 + return 0;
67100 + } else if (c == '/') {
67101 + while (*n != '\0' && *n != '/')
67102 + ++n;
67103 + if (*n == '/' && !glob_match(p, n + 1))
67104 + return 0;
67105 + } else {
67106 + for (--p; n < endp; ++n)
67107 + if (*n == c && !glob_match(p, n))
67108 + return 0;
67109 + }
67110 +
67111 + return 1;
67112 + }
67113 + case '[':
67114 + {
67115 + int not;
67116 + char cold;
67117 +
67118 + if (*n == '\0' || *n == '/')
67119 + return 1;
67120 +
67121 + not = (*p == '!' || *p == '^');
67122 + if (not)
67123 + ++p;
67124 +
67125 + c = *p++;
67126 + for (;;) {
67127 + unsigned char fn = (unsigned char)*n;
67128 +
67129 + if (c == '\0')
67130 + return 1;
67131 + else {
67132 + if (c == fn)
67133 + goto matched;
67134 + cold = c;
67135 + c = *p++;
67136 +
67137 + if (c == '-' && *p != ']') {
67138 + unsigned char cend = *p++;
67139 +
67140 + if (cend == '\0')
67141 + return 1;
67142 +
67143 + if (cold <= fn && fn <= cend)
67144 + goto matched;
67145 +
67146 + c = *p++;
67147 + }
67148 + }
67149 +
67150 + if (c == ']')
67151 + break;
67152 + }
67153 + if (!not)
67154 + return 1;
67155 + break;
67156 + matched:
67157 + while (c != ']') {
67158 + if (c == '\0')
67159 + return 1;
67160 +
67161 + c = *p++;
67162 + }
67163 + if (not)
67164 + return 1;
67165 + }
67166 + break;
67167 + default:
67168 + if (c != *n)
67169 + return 1;
67170 + }
67171 +
67172 + ++n;
67173 + }
67174 +
67175 + if (*n == '\0')
67176 + return 0;
67177 +
67178 + if (*n == '/')
67179 + return 0;
67180 +
67181 + return 1;
67182 +}
67183 +
67184 +static struct acl_object_label *
67185 +chk_glob_label(struct acl_object_label *globbed,
67186 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
67187 +{
67188 + struct acl_object_label *tmp;
67189 +
67190 + if (*path == NULL)
67191 + *path = gr_to_filename_nolock(dentry, mnt);
67192 +
67193 + tmp = globbed;
67194 +
67195 + while (tmp) {
67196 + if (!glob_match(tmp->filename, *path))
67197 + return tmp;
67198 + tmp = tmp->next;
67199 + }
67200 +
67201 + return NULL;
67202 +}
67203 +
67204 +static struct acl_object_label *
67205 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
67206 + const ino_t curr_ino, const dev_t curr_dev,
67207 + const struct acl_subject_label *subj, char **path, const int checkglob)
67208 +{
67209 + struct acl_subject_label *tmpsubj;
67210 + struct acl_object_label *retval;
67211 + struct acl_object_label *retval2;
67212 +
67213 + tmpsubj = (struct acl_subject_label *) subj;
67214 + read_lock(&gr_inode_lock);
67215 + do {
67216 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
67217 + if (retval) {
67218 + if (checkglob && retval->globbed) {
67219 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
67220 + if (retval2)
67221 + retval = retval2;
67222 + }
67223 + break;
67224 + }
67225 + } while ((tmpsubj = tmpsubj->parent_subject));
67226 + read_unlock(&gr_inode_lock);
67227 +
67228 + return retval;
67229 +}
67230 +
67231 +static __inline__ struct acl_object_label *
67232 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
67233 + struct dentry *curr_dentry,
67234 + const struct acl_subject_label *subj, char **path, const int checkglob)
67235 +{
67236 + int newglob = checkglob;
67237 + ino_t inode;
67238 + dev_t device;
67239 +
67240 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
67241 + as we don't want a / * rule to match instead of the / object
67242 + don't do this for create lookups that call this function though, since they're looking up
67243 + on the parent and thus need globbing checks on all paths
67244 + */
67245 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
67246 + newglob = GR_NO_GLOB;
67247 +
67248 + spin_lock(&curr_dentry->d_lock);
67249 + inode = curr_dentry->d_inode->i_ino;
67250 + device = __get_dev(curr_dentry);
67251 + spin_unlock(&curr_dentry->d_lock);
67252 +
67253 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
67254 +}
67255 +
67256 +#ifdef CONFIG_HUGETLBFS
67257 +static inline bool
67258 +is_hugetlbfs_mnt(const struct vfsmount *mnt)
67259 +{
67260 + int i;
67261 + for (i = 0; i < HUGE_MAX_HSTATE; i++) {
67262 + if (unlikely(hugetlbfs_vfsmount[i] == mnt))
67263 + return true;
67264 + }
67265 +
67266 + return false;
67267 +}
67268 +#endif
67269 +
67270 +static struct acl_object_label *
67271 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67272 + const struct acl_subject_label *subj, char *path, const int checkglob)
67273 +{
67274 + struct dentry *dentry = (struct dentry *) l_dentry;
67275 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
67276 + struct mount *real_mnt = real_mount(mnt);
67277 + struct acl_object_label *retval;
67278 + struct dentry *parent;
67279 +
67280 + read_seqlock_excl(&mount_lock);
67281 + write_seqlock(&rename_lock);
67282 +
67283 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
67284 +#ifdef CONFIG_NET
67285 + mnt == sock_mnt ||
67286 +#endif
67287 +#ifdef CONFIG_HUGETLBFS
67288 + (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
67289 +#endif
67290 + /* ignore Eric Biederman */
67291 + IS_PRIVATE(l_dentry->d_inode))) {
67292 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
67293 + goto out;
67294 + }
67295 +
67296 + for (;;) {
67297 + if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
67298 + break;
67299 +
67300 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
67301 + if (!mnt_has_parent(real_mnt))
67302 + break;
67303 +
67304 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
67305 + if (retval != NULL)
67306 + goto out;
67307 +
67308 + dentry = real_mnt->mnt_mountpoint;
67309 + real_mnt = real_mnt->mnt_parent;
67310 + mnt = &real_mnt->mnt;
67311 + continue;
67312 + }
67313 +
67314 + parent = dentry->d_parent;
67315 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
67316 + if (retval != NULL)
67317 + goto out;
67318 +
67319 + dentry = parent;
67320 + }
67321 +
67322 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
67323 +
67324 + /* gr_real_root is pinned so we don't have to hold a reference */
67325 + if (retval == NULL)
67326 + retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
67327 +out:
67328 + write_sequnlock(&rename_lock);
67329 + read_sequnlock_excl(&mount_lock);
67330 +
67331 + BUG_ON(retval == NULL);
67332 +
67333 + return retval;
67334 +}
67335 +
67336 +static __inline__ struct acl_object_label *
67337 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67338 + const struct acl_subject_label *subj)
67339 +{
67340 + char *path = NULL;
67341 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
67342 +}
67343 +
67344 +static __inline__ struct acl_object_label *
67345 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67346 + const struct acl_subject_label *subj)
67347 +{
67348 + char *path = NULL;
67349 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
67350 +}
67351 +
67352 +static __inline__ struct acl_object_label *
67353 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67354 + const struct acl_subject_label *subj, char *path)
67355 +{
67356 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
67357 +}
67358 +
67359 +struct acl_subject_label *
67360 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67361 + const struct acl_role_label *role)
67362 +{
67363 + struct dentry *dentry = (struct dentry *) l_dentry;
67364 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
67365 + struct mount *real_mnt = real_mount(mnt);
67366 + struct acl_subject_label *retval;
67367 + struct dentry *parent;
67368 +
67369 + read_seqlock_excl(&mount_lock);
67370 + write_seqlock(&rename_lock);
67371 +
67372 + for (;;) {
67373 + if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
67374 + break;
67375 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
67376 + if (!mnt_has_parent(real_mnt))
67377 + break;
67378 +
67379 + spin_lock(&dentry->d_lock);
67380 + read_lock(&gr_inode_lock);
67381 + retval =
67382 + lookup_acl_subj_label(dentry->d_inode->i_ino,
67383 + __get_dev(dentry), role);
67384 + read_unlock(&gr_inode_lock);
67385 + spin_unlock(&dentry->d_lock);
67386 + if (retval != NULL)
67387 + goto out;
67388 +
67389 + dentry = real_mnt->mnt_mountpoint;
67390 + real_mnt = real_mnt->mnt_parent;
67391 + mnt = &real_mnt->mnt;
67392 + continue;
67393 + }
67394 +
67395 + spin_lock(&dentry->d_lock);
67396 + read_lock(&gr_inode_lock);
67397 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
67398 + __get_dev(dentry), role);
67399 + read_unlock(&gr_inode_lock);
67400 + parent = dentry->d_parent;
67401 + spin_unlock(&dentry->d_lock);
67402 +
67403 + if (retval != NULL)
67404 + goto out;
67405 +
67406 + dentry = parent;
67407 + }
67408 +
67409 + spin_lock(&dentry->d_lock);
67410 + read_lock(&gr_inode_lock);
67411 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
67412 + __get_dev(dentry), role);
67413 + read_unlock(&gr_inode_lock);
67414 + spin_unlock(&dentry->d_lock);
67415 +
67416 + if (unlikely(retval == NULL)) {
67417 + /* gr_real_root is pinned, we don't need to hold a reference */
67418 + read_lock(&gr_inode_lock);
67419 + retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
67420 + __get_dev(gr_real_root.dentry), role);
67421 + read_unlock(&gr_inode_lock);
67422 + }
67423 +out:
67424 + write_sequnlock(&rename_lock);
67425 + read_sequnlock_excl(&mount_lock);
67426 +
67427 + BUG_ON(retval == NULL);
67428 +
67429 + return retval;
67430 +}
67431 +
67432 +void
67433 +assign_special_role(const char *rolename)
67434 +{
67435 + struct acl_object_label *obj;
67436 + struct acl_role_label *r;
67437 + struct acl_role_label *assigned = NULL;
67438 + struct task_struct *tsk;
67439 + struct file *filp;
67440 +
67441 + FOR_EACH_ROLE_START(r)
67442 + if (!strcmp(rolename, r->rolename) &&
67443 + (r->roletype & GR_ROLE_SPECIAL)) {
67444 + assigned = r;
67445 + break;
67446 + }
67447 + FOR_EACH_ROLE_END(r)
67448 +
67449 + if (!assigned)
67450 + return;
67451 +
67452 + read_lock(&tasklist_lock);
67453 + read_lock(&grsec_exec_file_lock);
67454 +
67455 + tsk = current->real_parent;
67456 + if (tsk == NULL)
67457 + goto out_unlock;
67458 +
67459 + filp = tsk->exec_file;
67460 + if (filp == NULL)
67461 + goto out_unlock;
67462 +
67463 + tsk->is_writable = 0;
67464 + tsk->inherited = 0;
67465 +
67466 + tsk->acl_sp_role = 1;
67467 + tsk->acl_role_id = ++acl_sp_role_value;
67468 + tsk->role = assigned;
67469 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
67470 +
67471 + /* ignore additional mmap checks for processes that are writable
67472 + by the default ACL */
67473 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67474 + if (unlikely(obj->mode & GR_WRITE))
67475 + tsk->is_writable = 1;
67476 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
67477 + if (unlikely(obj->mode & GR_WRITE))
67478 + tsk->is_writable = 1;
67479 +
67480 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67481 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
67482 + tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
67483 +#endif
67484 +
67485 +out_unlock:
67486 + read_unlock(&grsec_exec_file_lock);
67487 + read_unlock(&tasklist_lock);
67488 + return;
67489 +}
67490 +
67491 +
67492 +static void
67493 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
67494 +{
67495 + struct task_struct *task = current;
67496 + const struct cred *cred = current_cred();
67497 +
67498 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
67499 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
67500 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
67501 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
67502 +
67503 + return;
67504 +}
67505 +
67506 +static void
67507 +gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
67508 +{
67509 + struct task_struct *task = current;
67510 + const struct cred *cred = current_cred();
67511 +
67512 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
67513 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
67514 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
67515 + 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
67516 +
67517 + return;
67518 +}
67519 +
67520 +static void
67521 +gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
67522 +{
67523 + struct task_struct *task = current;
67524 + const struct cred *cred = current_cred();
67525 +
67526 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
67527 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
67528 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
67529 + 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
67530 +
67531 + return;
67532 +}
67533 +
67534 +static void
67535 +gr_set_proc_res(struct task_struct *task)
67536 +{
67537 + struct acl_subject_label *proc;
67538 + unsigned short i;
67539 +
67540 + proc = task->acl;
67541 +
67542 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
67543 + return;
67544 +
67545 + for (i = 0; i < RLIM_NLIMITS; i++) {
67546 + if (!(proc->resmask & (1U << i)))
67547 + continue;
67548 +
67549 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
67550 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
67551 +
67552 + if (i == RLIMIT_CPU)
67553 + update_rlimit_cpu(task, proc->res[i].rlim_cur);
67554 + }
67555 +
67556 + return;
67557 +}
67558 +
67559 +/* both of the below must be called with
67560 + rcu_read_lock();
67561 + read_lock(&tasklist_lock);
67562 + read_lock(&grsec_exec_file_lock);
67563 +*/
67564 +
67565 +struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
67566 +{
67567 + char *tmpname;
67568 + struct acl_subject_label *tmpsubj;
67569 + struct file *filp;
67570 + struct name_entry *nmatch;
67571 +
67572 + filp = task->exec_file;
67573 + if (filp == NULL)
67574 + return NULL;
67575 +
67576 + /* the following is to apply the correct subject
67577 + on binaries running when the RBAC system
67578 + is enabled, when the binaries have been
67579 + replaced or deleted since their execution
67580 + -----
67581 + when the RBAC system starts, the inode/dev
67582 + from exec_file will be one the RBAC system
67583 + is unaware of. It only knows the inode/dev
67584 + of the present file on disk, or the absence
67585 + of it.
67586 + */
67587 +
67588 + if (filename)
67589 + nmatch = __lookup_name_entry(state, filename);
67590 + else {
67591 + preempt_disable();
67592 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
67593 +
67594 + nmatch = __lookup_name_entry(state, tmpname);
67595 + preempt_enable();
67596 + }
67597 + tmpsubj = NULL;
67598 + if (nmatch) {
67599 + if (nmatch->deleted)
67600 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
67601 + else
67602 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
67603 + }
67604 + /* this also works for the reload case -- if we don't match a potentially inherited subject
67605 + then we fall back to a normal lookup based on the binary's ino/dev
67606 + */
67607 + if (tmpsubj == NULL)
67608 + tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
67609 +
67610 + return tmpsubj;
67611 +}
67612 +
67613 +static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
67614 +{
67615 + return __gr_get_subject_for_task(&running_polstate, task, filename);
67616 +}
67617 +
67618 +void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
67619 +{
67620 + struct acl_object_label *obj;
67621 + struct file *filp;
67622 +
67623 + filp = task->exec_file;
67624 +
67625 + task->acl = subj;
67626 + task->is_writable = 0;
67627 + /* ignore additional mmap checks for processes that are writable
67628 + by the default ACL */
67629 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
67630 + if (unlikely(obj->mode & GR_WRITE))
67631 + task->is_writable = 1;
67632 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
67633 + if (unlikely(obj->mode & GR_WRITE))
67634 + task->is_writable = 1;
67635 +
67636 + gr_set_proc_res(task);
67637 +
67638 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67639 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67640 +#endif
67641 +}
67642 +
67643 +static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
67644 +{
67645 + __gr_apply_subject_to_task(&running_polstate, task, subj);
67646 +}
67647 +
67648 +__u32
67649 +gr_search_file(const struct dentry * dentry, const __u32 mode,
67650 + const struct vfsmount * mnt)
67651 +{
67652 + __u32 retval = mode;
67653 + struct acl_subject_label *curracl;
67654 + struct acl_object_label *currobj;
67655 +
67656 + if (unlikely(!(gr_status & GR_READY)))
67657 + return (mode & ~GR_AUDITS);
67658 +
67659 + curracl = current->acl;
67660 +
67661 + currobj = chk_obj_label(dentry, mnt, curracl);
67662 + retval = currobj->mode & mode;
67663 +
67664 + /* if we're opening a specified transfer file for writing
67665 + (e.g. /dev/initctl), then transfer our role to init
67666 + */
67667 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
67668 + current->role->roletype & GR_ROLE_PERSIST)) {
67669 + struct task_struct *task = init_pid_ns.child_reaper;
67670 +
67671 + if (task->role != current->role) {
67672 + struct acl_subject_label *subj;
67673 +
67674 + task->acl_sp_role = 0;
67675 + task->acl_role_id = current->acl_role_id;
67676 + task->role = current->role;
67677 + rcu_read_lock();
67678 + read_lock(&grsec_exec_file_lock);
67679 + subj = gr_get_subject_for_task(task, NULL);
67680 + gr_apply_subject_to_task(task, subj);
67681 + read_unlock(&grsec_exec_file_lock);
67682 + rcu_read_unlock();
67683 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
67684 + }
67685 + }
67686 +
67687 + if (unlikely
67688 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
67689 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
67690 + __u32 new_mode = mode;
67691 +
67692 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
67693 +
67694 + retval = new_mode;
67695 +
67696 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
67697 + new_mode |= GR_INHERIT;
67698 +
67699 + if (!(mode & GR_NOLEARN))
67700 + gr_log_learn(dentry, mnt, new_mode);
67701 + }
67702 +
67703 + return retval;
67704 +}
67705 +
67706 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
67707 + const struct dentry *parent,
67708 + const struct vfsmount *mnt)
67709 +{
67710 + struct name_entry *match;
67711 + struct acl_object_label *matchpo;
67712 + struct acl_subject_label *curracl;
67713 + char *path;
67714 +
67715 + if (unlikely(!(gr_status & GR_READY)))
67716 + return NULL;
67717 +
67718 + preempt_disable();
67719 + path = gr_to_filename_rbac(new_dentry, mnt);
67720 + match = lookup_name_entry_create(path);
67721 +
67722 + curracl = current->acl;
67723 +
67724 + if (match) {
67725 + read_lock(&gr_inode_lock);
67726 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
67727 + read_unlock(&gr_inode_lock);
67728 +
67729 + if (matchpo) {
67730 + preempt_enable();
67731 + return matchpo;
67732 + }
67733 + }
67734 +
67735 + // lookup parent
67736 +
67737 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
67738 +
67739 + preempt_enable();
67740 + return matchpo;
67741 +}
67742 +
67743 +__u32
67744 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
67745 + const struct vfsmount * mnt, const __u32 mode)
67746 +{
67747 + struct acl_object_label *matchpo;
67748 + __u32 retval;
67749 +
67750 + if (unlikely(!(gr_status & GR_READY)))
67751 + return (mode & ~GR_AUDITS);
67752 +
67753 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
67754 +
67755 + retval = matchpo->mode & mode;
67756 +
67757 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
67758 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
67759 + __u32 new_mode = mode;
67760 +
67761 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
67762 +
67763 + gr_log_learn(new_dentry, mnt, new_mode);
67764 + return new_mode;
67765 + }
67766 +
67767 + return retval;
67768 +}
67769 +
67770 +__u32
67771 +gr_check_link(const struct dentry * new_dentry,
67772 + const struct dentry * parent_dentry,
67773 + const struct vfsmount * parent_mnt,
67774 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
67775 +{
67776 + struct acl_object_label *obj;
67777 + __u32 oldmode, newmode;
67778 + __u32 needmode;
67779 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
67780 + GR_DELETE | GR_INHERIT;
67781 +
67782 + if (unlikely(!(gr_status & GR_READY)))
67783 + return (GR_CREATE | GR_LINK);
67784 +
67785 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
67786 + oldmode = obj->mode;
67787 +
67788 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
67789 + newmode = obj->mode;
67790 +
67791 + needmode = newmode & checkmodes;
67792 +
67793 + // old name for hardlink must have at least the permissions of the new name
67794 + if ((oldmode & needmode) != needmode)
67795 + goto bad;
67796 +
67797 + // if old name had restrictions/auditing, make sure the new name does as well
67798 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
67799 +
67800 + // don't allow hardlinking of suid/sgid/fcapped files without permission
67801 + if (is_privileged_binary(old_dentry))
67802 + needmode |= GR_SETID;
67803 +
67804 + if ((newmode & needmode) != needmode)
67805 + goto bad;
67806 +
67807 + // enforce minimum permissions
67808 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
67809 + return newmode;
67810 +bad:
67811 + needmode = oldmode;
67812 + if (is_privileged_binary(old_dentry))
67813 + needmode |= GR_SETID;
67814 +
67815 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
67816 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
67817 + return (GR_CREATE | GR_LINK);
67818 + } else if (newmode & GR_SUPPRESS)
67819 + return GR_SUPPRESS;
67820 + else
67821 + return 0;
67822 +}
67823 +
67824 +int
67825 +gr_check_hidden_task(const struct task_struct *task)
67826 +{
67827 + if (unlikely(!(gr_status & GR_READY)))
67828 + return 0;
67829 +
67830 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
67831 + return 1;
67832 +
67833 + return 0;
67834 +}
67835 +
67836 +int
67837 +gr_check_protected_task(const struct task_struct *task)
67838 +{
67839 + if (unlikely(!(gr_status & GR_READY) || !task))
67840 + return 0;
67841 +
67842 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
67843 + task->acl != current->acl)
67844 + return 1;
67845 +
67846 + return 0;
67847 +}
67848 +
67849 +int
67850 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
67851 +{
67852 + struct task_struct *p;
67853 + int ret = 0;
67854 +
67855 + if (unlikely(!(gr_status & GR_READY) || !pid))
67856 + return ret;
67857 +
67858 + read_lock(&tasklist_lock);
67859 + do_each_pid_task(pid, type, p) {
67860 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
67861 + p->acl != current->acl) {
67862 + ret = 1;
67863 + goto out;
67864 + }
67865 + } while_each_pid_task(pid, type, p);
67866 +out:
67867 + read_unlock(&tasklist_lock);
67868 +
67869 + return ret;
67870 +}
67871 +
67872 +void
67873 +gr_copy_label(struct task_struct *tsk)
67874 +{
67875 + struct task_struct *p = current;
67876 +
67877 + tsk->inherited = p->inherited;
67878 + tsk->acl_sp_role = 0;
67879 + tsk->acl_role_id = p->acl_role_id;
67880 + tsk->acl = p->acl;
67881 + tsk->role = p->role;
67882 + tsk->signal->used_accept = 0;
67883 + tsk->signal->curr_ip = p->signal->curr_ip;
67884 + tsk->signal->saved_ip = p->signal->saved_ip;
67885 + if (p->exec_file)
67886 + get_file(p->exec_file);
67887 + tsk->exec_file = p->exec_file;
67888 + tsk->is_writable = p->is_writable;
67889 + if (unlikely(p->signal->used_accept)) {
67890 + p->signal->curr_ip = 0;
67891 + p->signal->saved_ip = 0;
67892 + }
67893 +
67894 + return;
67895 +}
67896 +
67897 +extern int gr_process_kernel_setuid_ban(struct user_struct *user);
67898 +
67899 +int
67900 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
67901 +{
67902 + unsigned int i;
67903 + __u16 num;
67904 + uid_t *uidlist;
67905 + uid_t curuid;
67906 + int realok = 0;
67907 + int effectiveok = 0;
67908 + int fsok = 0;
67909 + uid_t globalreal, globaleffective, globalfs;
67910 +
67911 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
67912 + struct user_struct *user;
67913 +
67914 + if (!uid_valid(real))
67915 + goto skipit;
67916 +
67917 + /* find user based on global namespace */
67918 +
67919 + globalreal = GR_GLOBAL_UID(real);
67920 +
67921 + user = find_user(make_kuid(&init_user_ns, globalreal));
67922 + if (user == NULL)
67923 + goto skipit;
67924 +
67925 + if (gr_process_kernel_setuid_ban(user)) {
67926 + /* for find_user */
67927 + free_uid(user);
67928 + return 1;
67929 + }
67930 +
67931 + /* for find_user */
67932 + free_uid(user);
67933 +
67934 +skipit:
67935 +#endif
67936 +
67937 + if (unlikely(!(gr_status & GR_READY)))
67938 + return 0;
67939 +
67940 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
67941 + gr_log_learn_uid_change(real, effective, fs);
67942 +
67943 + num = current->acl->user_trans_num;
67944 + uidlist = current->acl->user_transitions;
67945 +
67946 + if (uidlist == NULL)
67947 + return 0;
67948 +
67949 + if (!uid_valid(real)) {
67950 + realok = 1;
67951 + globalreal = (uid_t)-1;
67952 + } else {
67953 + globalreal = GR_GLOBAL_UID(real);
67954 + }
67955 + if (!uid_valid(effective)) {
67956 + effectiveok = 1;
67957 + globaleffective = (uid_t)-1;
67958 + } else {
67959 + globaleffective = GR_GLOBAL_UID(effective);
67960 + }
67961 + if (!uid_valid(fs)) {
67962 + fsok = 1;
67963 + globalfs = (uid_t)-1;
67964 + } else {
67965 + globalfs = GR_GLOBAL_UID(fs);
67966 + }
67967 +
67968 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
67969 + for (i = 0; i < num; i++) {
67970 + curuid = uidlist[i];
67971 + if (globalreal == curuid)
67972 + realok = 1;
67973 + if (globaleffective == curuid)
67974 + effectiveok = 1;
67975 + if (globalfs == curuid)
67976 + fsok = 1;
67977 + }
67978 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
67979 + for (i = 0; i < num; i++) {
67980 + curuid = uidlist[i];
67981 + if (globalreal == curuid)
67982 + break;
67983 + if (globaleffective == curuid)
67984 + break;
67985 + if (globalfs == curuid)
67986 + break;
67987 + }
67988 + /* not in deny list */
67989 + if (i == num) {
67990 + realok = 1;
67991 + effectiveok = 1;
67992 + fsok = 1;
67993 + }
67994 + }
67995 +
67996 + if (realok && effectiveok && fsok)
67997 + return 0;
67998 + else {
67999 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
68000 + return 1;
68001 + }
68002 +}
68003 +
68004 +int
68005 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
68006 +{
68007 + unsigned int i;
68008 + __u16 num;
68009 + gid_t *gidlist;
68010 + gid_t curgid;
68011 + int realok = 0;
68012 + int effectiveok = 0;
68013 + int fsok = 0;
68014 + gid_t globalreal, globaleffective, globalfs;
68015 +
68016 + if (unlikely(!(gr_status & GR_READY)))
68017 + return 0;
68018 +
68019 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68020 + gr_log_learn_gid_change(real, effective, fs);
68021 +
68022 + num = current->acl->group_trans_num;
68023 + gidlist = current->acl->group_transitions;
68024 +
68025 + if (gidlist == NULL)
68026 + return 0;
68027 +
68028 + if (!gid_valid(real)) {
68029 + realok = 1;
68030 + globalreal = (gid_t)-1;
68031 + } else {
68032 + globalreal = GR_GLOBAL_GID(real);
68033 + }
68034 + if (!gid_valid(effective)) {
68035 + effectiveok = 1;
68036 + globaleffective = (gid_t)-1;
68037 + } else {
68038 + globaleffective = GR_GLOBAL_GID(effective);
68039 + }
68040 + if (!gid_valid(fs)) {
68041 + fsok = 1;
68042 + globalfs = (gid_t)-1;
68043 + } else {
68044 + globalfs = GR_GLOBAL_GID(fs);
68045 + }
68046 +
68047 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
68048 + for (i = 0; i < num; i++) {
68049 + curgid = gidlist[i];
68050 + if (globalreal == curgid)
68051 + realok = 1;
68052 + if (globaleffective == curgid)
68053 + effectiveok = 1;
68054 + if (globalfs == curgid)
68055 + fsok = 1;
68056 + }
68057 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
68058 + for (i = 0; i < num; i++) {
68059 + curgid = gidlist[i];
68060 + if (globalreal == curgid)
68061 + break;
68062 + if (globaleffective == curgid)
68063 + break;
68064 + if (globalfs == curgid)
68065 + break;
68066 + }
68067 + /* not in deny list */
68068 + if (i == num) {
68069 + realok = 1;
68070 + effectiveok = 1;
68071 + fsok = 1;
68072 + }
68073 + }
68074 +
68075 + if (realok && effectiveok && fsok)
68076 + return 0;
68077 + else {
68078 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
68079 + return 1;
68080 + }
68081 +}
68082 +
68083 +extern int gr_acl_is_capable(const int cap);
68084 +
68085 +void
68086 +gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
68087 +{
68088 + struct acl_role_label *role = task->role;
68089 + struct acl_subject_label *subj = NULL;
68090 + struct acl_object_label *obj;
68091 + struct file *filp;
68092 + uid_t uid;
68093 + gid_t gid;
68094 +
68095 + if (unlikely(!(gr_status & GR_READY)))
68096 + return;
68097 +
68098 + uid = GR_GLOBAL_UID(kuid);
68099 + gid = GR_GLOBAL_GID(kgid);
68100 +
68101 + filp = task->exec_file;
68102 +
68103 + /* kernel process, we'll give them the kernel role */
68104 + if (unlikely(!filp)) {
68105 + task->role = running_polstate.kernel_role;
68106 + task->acl = running_polstate.kernel_role->root_label;
68107 + return;
68108 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
68109 + /* save the current ip at time of role lookup so that the proper
68110 + IP will be learned for role_allowed_ip */
68111 + task->signal->saved_ip = task->signal->curr_ip;
68112 + role = lookup_acl_role_label(task, uid, gid);
68113 + }
68114 +
68115 + /* don't change the role if we're not a privileged process */
68116 + if (role && task->role != role &&
68117 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
68118 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
68119 + return;
68120 +
68121 + /* perform subject lookup in possibly new role
68122 + we can use this result below in the case where role == task->role
68123 + */
68124 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
68125 +
68126 + /* if we changed uid/gid, but result in the same role
68127 + and are using inheritance, don't lose the inherited subject
68128 + if current subject is other than what normal lookup
68129 + would result in, we arrived via inheritance, don't
68130 + lose subject
68131 + */
68132 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
68133 + (subj == task->acl)))
68134 + task->acl = subj;
68135 +
68136 + /* leave task->inherited unaffected */
68137 +
68138 + task->role = role;
68139 +
68140 + task->is_writable = 0;
68141 +
68142 + /* ignore additional mmap checks for processes that are writable
68143 + by the default ACL */
68144 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
68145 + if (unlikely(obj->mode & GR_WRITE))
68146 + task->is_writable = 1;
68147 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
68148 + if (unlikely(obj->mode & GR_WRITE))
68149 + task->is_writable = 1;
68150 +
68151 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68152 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
68153 +#endif
68154 +
68155 + gr_set_proc_res(task);
68156 +
68157 + return;
68158 +}
68159 +
68160 +int
68161 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
68162 + const int unsafe_flags)
68163 +{
68164 + struct task_struct *task = current;
68165 + struct acl_subject_label *newacl;
68166 + struct acl_object_label *obj;
68167 + __u32 retmode;
68168 +
68169 + if (unlikely(!(gr_status & GR_READY)))
68170 + return 0;
68171 +
68172 + newacl = chk_subj_label(dentry, mnt, task->role);
68173 +
68174 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
68175 + did an exec
68176 + */
68177 + rcu_read_lock();
68178 + read_lock(&tasklist_lock);
68179 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
68180 + (task->parent->acl->mode & GR_POVERRIDE))) {
68181 + read_unlock(&tasklist_lock);
68182 + rcu_read_unlock();
68183 + goto skip_check;
68184 + }
68185 + read_unlock(&tasklist_lock);
68186 + rcu_read_unlock();
68187 +
68188 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
68189 + !(task->role->roletype & GR_ROLE_GOD) &&
68190 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
68191 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
68192 + if (unsafe_flags & LSM_UNSAFE_SHARE)
68193 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
68194 + else
68195 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
68196 + return -EACCES;
68197 + }
68198 +
68199 +skip_check:
68200 +
68201 + obj = chk_obj_label(dentry, mnt, task->acl);
68202 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
68203 +
68204 + if (!(task->acl->mode & GR_INHERITLEARN) &&
68205 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
68206 + if (obj->nested)
68207 + task->acl = obj->nested;
68208 + else
68209 + task->acl = newacl;
68210 + task->inherited = 0;
68211 + } else {
68212 + task->inherited = 1;
68213 + if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
68214 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
68215 + }
68216 +
68217 + task->is_writable = 0;
68218 +
68219 + /* ignore additional mmap checks for processes that are writable
68220 + by the default ACL */
68221 + obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
68222 + if (unlikely(obj->mode & GR_WRITE))
68223 + task->is_writable = 1;
68224 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
68225 + if (unlikely(obj->mode & GR_WRITE))
68226 + task->is_writable = 1;
68227 +
68228 + gr_set_proc_res(task);
68229 +
68230 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68231 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
68232 +#endif
68233 + return 0;
68234 +}
68235 +
68236 +/* always called with valid inodev ptr */
68237 +static void
68238 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
68239 +{
68240 + struct acl_object_label *matchpo;
68241 + struct acl_subject_label *matchps;
68242 + struct acl_subject_label *subj;
68243 + struct acl_role_label *role;
68244 + unsigned int x;
68245 +
68246 + FOR_EACH_ROLE_START(role)
68247 + FOR_EACH_SUBJECT_START(role, subj, x)
68248 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
68249 + matchpo->mode |= GR_DELETED;
68250 + FOR_EACH_SUBJECT_END(subj,x)
68251 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
68252 + /* nested subjects aren't in the role's subj_hash table */
68253 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
68254 + matchpo->mode |= GR_DELETED;
68255 + FOR_EACH_NESTED_SUBJECT_END(subj)
68256 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
68257 + matchps->mode |= GR_DELETED;
68258 + FOR_EACH_ROLE_END(role)
68259 +
68260 + inodev->nentry->deleted = 1;
68261 +
68262 + return;
68263 +}
68264 +
68265 +void
68266 +gr_handle_delete(const ino_t ino, const dev_t dev)
68267 +{
68268 + struct inodev_entry *inodev;
68269 +
68270 + if (unlikely(!(gr_status & GR_READY)))
68271 + return;
68272 +
68273 + write_lock(&gr_inode_lock);
68274 + inodev = lookup_inodev_entry(ino, dev);
68275 + if (inodev != NULL)
68276 + do_handle_delete(inodev, ino, dev);
68277 + write_unlock(&gr_inode_lock);
68278 +
68279 + return;
68280 +}
68281 +
68282 +static void
68283 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
68284 + const ino_t newinode, const dev_t newdevice,
68285 + struct acl_subject_label *subj)
68286 +{
68287 + unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
68288 + struct acl_object_label *match;
68289 +
68290 + match = subj->obj_hash[index];
68291 +
68292 + while (match && (match->inode != oldinode ||
68293 + match->device != olddevice ||
68294 + !(match->mode & GR_DELETED)))
68295 + match = match->next;
68296 +
68297 + if (match && (match->inode == oldinode)
68298 + && (match->device == olddevice)
68299 + && (match->mode & GR_DELETED)) {
68300 + if (match->prev == NULL) {
68301 + subj->obj_hash[index] = match->next;
68302 + if (match->next != NULL)
68303 + match->next->prev = NULL;
68304 + } else {
68305 + match->prev->next = match->next;
68306 + if (match->next != NULL)
68307 + match->next->prev = match->prev;
68308 + }
68309 + match->prev = NULL;
68310 + match->next = NULL;
68311 + match->inode = newinode;
68312 + match->device = newdevice;
68313 + match->mode &= ~GR_DELETED;
68314 +
68315 + insert_acl_obj_label(match, subj);
68316 + }
68317 +
68318 + return;
68319 +}
68320 +
68321 +static void
68322 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
68323 + const ino_t newinode, const dev_t newdevice,
68324 + struct acl_role_label *role)
68325 +{
68326 + unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
68327 + struct acl_subject_label *match;
68328 +
68329 + match = role->subj_hash[index];
68330 +
68331 + while (match && (match->inode != oldinode ||
68332 + match->device != olddevice ||
68333 + !(match->mode & GR_DELETED)))
68334 + match = match->next;
68335 +
68336 + if (match && (match->inode == oldinode)
68337 + && (match->device == olddevice)
68338 + && (match->mode & GR_DELETED)) {
68339 + if (match->prev == NULL) {
68340 + role->subj_hash[index] = match->next;
68341 + if (match->next != NULL)
68342 + match->next->prev = NULL;
68343 + } else {
68344 + match->prev->next = match->next;
68345 + if (match->next != NULL)
68346 + match->next->prev = match->prev;
68347 + }
68348 + match->prev = NULL;
68349 + match->next = NULL;
68350 + match->inode = newinode;
68351 + match->device = newdevice;
68352 + match->mode &= ~GR_DELETED;
68353 +
68354 + insert_acl_subj_label(match, role);
68355 + }
68356 +
68357 + return;
68358 +}
68359 +
68360 +static void
68361 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
68362 + const ino_t newinode, const dev_t newdevice)
68363 +{
68364 + unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
68365 + struct inodev_entry *match;
68366 +
68367 + match = running_polstate.inodev_set.i_hash[index];
68368 +
68369 + while (match && (match->nentry->inode != oldinode ||
68370 + match->nentry->device != olddevice || !match->nentry->deleted))
68371 + match = match->next;
68372 +
68373 + if (match && (match->nentry->inode == oldinode)
68374 + && (match->nentry->device == olddevice) &&
68375 + match->nentry->deleted) {
68376 + if (match->prev == NULL) {
68377 + running_polstate.inodev_set.i_hash[index] = match->next;
68378 + if (match->next != NULL)
68379 + match->next->prev = NULL;
68380 + } else {
68381 + match->prev->next = match->next;
68382 + if (match->next != NULL)
68383 + match->next->prev = match->prev;
68384 + }
68385 + match->prev = NULL;
68386 + match->next = NULL;
68387 + match->nentry->inode = newinode;
68388 + match->nentry->device = newdevice;
68389 + match->nentry->deleted = 0;
68390 +
68391 + insert_inodev_entry(match);
68392 + }
68393 +
68394 + return;
68395 +}
68396 +
68397 +static void
68398 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
68399 +{
68400 + struct acl_subject_label *subj;
68401 + struct acl_role_label *role;
68402 + unsigned int x;
68403 +
68404 + FOR_EACH_ROLE_START(role)
68405 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
68406 +
68407 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
68408 + if ((subj->inode == ino) && (subj->device == dev)) {
68409 + subj->inode = ino;
68410 + subj->device = dev;
68411 + }
68412 + /* nested subjects aren't in the role's subj_hash table */
68413 + update_acl_obj_label(matchn->inode, matchn->device,
68414 + ino, dev, subj);
68415 + FOR_EACH_NESTED_SUBJECT_END(subj)
68416 + FOR_EACH_SUBJECT_START(role, subj, x)
68417 + update_acl_obj_label(matchn->inode, matchn->device,
68418 + ino, dev, subj);
68419 + FOR_EACH_SUBJECT_END(subj,x)
68420 + FOR_EACH_ROLE_END(role)
68421 +
68422 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
68423 +
68424 + return;
68425 +}
68426 +
68427 +static void
68428 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
68429 + const struct vfsmount *mnt)
68430 +{
68431 + ino_t ino = dentry->d_inode->i_ino;
68432 + dev_t dev = __get_dev(dentry);
68433 +
68434 + __do_handle_create(matchn, ino, dev);
68435 +
68436 + return;
68437 +}
68438 +
68439 +void
68440 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
68441 +{
68442 + struct name_entry *matchn;
68443 +
68444 + if (unlikely(!(gr_status & GR_READY)))
68445 + return;
68446 +
68447 + preempt_disable();
68448 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
68449 +
68450 + if (unlikely((unsigned long)matchn)) {
68451 + write_lock(&gr_inode_lock);
68452 + do_handle_create(matchn, dentry, mnt);
68453 + write_unlock(&gr_inode_lock);
68454 + }
68455 + preempt_enable();
68456 +
68457 + return;
68458 +}
68459 +
68460 +void
68461 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
68462 +{
68463 + struct name_entry *matchn;
68464 +
68465 + if (unlikely(!(gr_status & GR_READY)))
68466 + return;
68467 +
68468 + preempt_disable();
68469 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
68470 +
68471 + if (unlikely((unsigned long)matchn)) {
68472 + write_lock(&gr_inode_lock);
68473 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
68474 + write_unlock(&gr_inode_lock);
68475 + }
68476 + preempt_enable();
68477 +
68478 + return;
68479 +}
68480 +
68481 +void
68482 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68483 + struct dentry *old_dentry,
68484 + struct dentry *new_dentry,
68485 + struct vfsmount *mnt, const __u8 replace)
68486 +{
68487 + struct name_entry *matchn;
68488 + struct inodev_entry *inodev;
68489 + struct inode *inode = new_dentry->d_inode;
68490 + ino_t old_ino = old_dentry->d_inode->i_ino;
68491 + dev_t old_dev = __get_dev(old_dentry);
68492 +
68493 + /* vfs_rename swaps the name and parent link for old_dentry and
68494 + new_dentry
68495 + at this point, old_dentry has the new name, parent link, and inode
68496 + for the renamed file
68497 + if a file is being replaced by a rename, new_dentry has the inode
68498 + and name for the replaced file
68499 + */
68500 +
68501 + if (unlikely(!(gr_status & GR_READY)))
68502 + return;
68503 +
68504 + preempt_disable();
68505 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
68506 +
68507 + /* we wouldn't have to check d_inode if it weren't for
68508 + NFS silly-renaming
68509 + */
68510 +
68511 + write_lock(&gr_inode_lock);
68512 + if (unlikely(replace && inode)) {
68513 + ino_t new_ino = inode->i_ino;
68514 + dev_t new_dev = __get_dev(new_dentry);
68515 +
68516 + inodev = lookup_inodev_entry(new_ino, new_dev);
68517 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
68518 + do_handle_delete(inodev, new_ino, new_dev);
68519 + }
68520 +
68521 + inodev = lookup_inodev_entry(old_ino, old_dev);
68522 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
68523 + do_handle_delete(inodev, old_ino, old_dev);
68524 +
68525 + if (unlikely((unsigned long)matchn))
68526 + do_handle_create(matchn, old_dentry, mnt);
68527 +
68528 + write_unlock(&gr_inode_lock);
68529 + preempt_enable();
68530 +
68531 + return;
68532 +}
68533 +
68534 +#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
68535 +static const unsigned long res_learn_bumps[GR_NLIMITS] = {
68536 + [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
68537 + [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
68538 + [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
68539 + [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
68540 + [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
68541 + [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
68542 + [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
68543 + [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
68544 + [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
68545 + [RLIMIT_AS] = GR_RLIM_AS_BUMP,
68546 + [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
68547 + [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
68548 + [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
68549 + [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
68550 + [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
68551 + [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
68552 +};
68553 +
68554 +void
68555 +gr_learn_resource(const struct task_struct *task,
68556 + const int res, const unsigned long wanted, const int gt)
68557 +{
68558 + struct acl_subject_label *acl;
68559 + const struct cred *cred;
68560 +
68561 + if (unlikely((gr_status & GR_READY) &&
68562 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
68563 + goto skip_reslog;
68564 +
68565 + gr_log_resource(task, res, wanted, gt);
68566 +skip_reslog:
68567 +
68568 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
68569 + return;
68570 +
68571 + acl = task->acl;
68572 +
68573 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
68574 + !(acl->resmask & (1U << (unsigned short) res))))
68575 + return;
68576 +
68577 + if (wanted >= acl->res[res].rlim_cur) {
68578 + unsigned long res_add;
68579 +
68580 + res_add = wanted + res_learn_bumps[res];
68581 +
68582 + acl->res[res].rlim_cur = res_add;
68583 +
68584 + if (wanted > acl->res[res].rlim_max)
68585 + acl->res[res].rlim_max = res_add;
68586 +
68587 + /* only log the subject filename, since resource logging is supported for
68588 + single-subject learning only */
68589 + rcu_read_lock();
68590 + cred = __task_cred(task);
68591 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
68592 + task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
68593 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
68594 + "", (unsigned long) res, &task->signal->saved_ip);
68595 + rcu_read_unlock();
68596 + }
68597 +
68598 + return;
68599 +}
68600 +EXPORT_SYMBOL_GPL(gr_learn_resource);
68601 +#endif
68602 +
68603 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
68604 +void
68605 +pax_set_initial_flags(struct linux_binprm *bprm)
68606 +{
68607 + struct task_struct *task = current;
68608 + struct acl_subject_label *proc;
68609 + unsigned long flags;
68610 +
68611 + if (unlikely(!(gr_status & GR_READY)))
68612 + return;
68613 +
68614 + flags = pax_get_flags(task);
68615 +
68616 + proc = task->acl;
68617 +
68618 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
68619 + flags &= ~MF_PAX_PAGEEXEC;
68620 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
68621 + flags &= ~MF_PAX_SEGMEXEC;
68622 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
68623 + flags &= ~MF_PAX_RANDMMAP;
68624 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
68625 + flags &= ~MF_PAX_EMUTRAMP;
68626 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
68627 + flags &= ~MF_PAX_MPROTECT;
68628 +
68629 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
68630 + flags |= MF_PAX_PAGEEXEC;
68631 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
68632 + flags |= MF_PAX_SEGMEXEC;
68633 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
68634 + flags |= MF_PAX_RANDMMAP;
68635 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
68636 + flags |= MF_PAX_EMUTRAMP;
68637 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
68638 + flags |= MF_PAX_MPROTECT;
68639 +
68640 + pax_set_flags(task, flags);
68641 +
68642 + return;
68643 +}
68644 +#endif
68645 +
68646 +int
68647 +gr_handle_proc_ptrace(struct task_struct *task)
68648 +{
68649 + struct file *filp;
68650 + struct task_struct *tmp = task;
68651 + struct task_struct *curtemp = current;
68652 + __u32 retmode;
68653 +
68654 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
68655 + if (unlikely(!(gr_status & GR_READY)))
68656 + return 0;
68657 +#endif
68658 +
68659 + read_lock(&tasklist_lock);
68660 + read_lock(&grsec_exec_file_lock);
68661 + filp = task->exec_file;
68662 +
68663 + while (task_pid_nr(tmp) > 0) {
68664 + if (tmp == curtemp)
68665 + break;
68666 + tmp = tmp->real_parent;
68667 + }
68668 +
68669 + if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
68670 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
68671 + read_unlock(&grsec_exec_file_lock);
68672 + read_unlock(&tasklist_lock);
68673 + return 1;
68674 + }
68675 +
68676 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
68677 + if (!(gr_status & GR_READY)) {
68678 + read_unlock(&grsec_exec_file_lock);
68679 + read_unlock(&tasklist_lock);
68680 + return 0;
68681 + }
68682 +#endif
68683 +
68684 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
68685 + read_unlock(&grsec_exec_file_lock);
68686 + read_unlock(&tasklist_lock);
68687 +
68688 + if (retmode & GR_NOPTRACE)
68689 + return 1;
68690 +
68691 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
68692 + && (current->acl != task->acl || (current->acl != current->role->root_label
68693 + && task_pid_nr(current) != task_pid_nr(task))))
68694 + return 1;
68695 +
68696 + return 0;
68697 +}
68698 +
68699 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
68700 +{
68701 + if (unlikely(!(gr_status & GR_READY)))
68702 + return;
68703 +
68704 + if (!(current->role->roletype & GR_ROLE_GOD))
68705 + return;
68706 +
68707 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
68708 + p->role->rolename, gr_task_roletype_to_char(p),
68709 + p->acl->filename);
68710 +}
68711 +
68712 +int
68713 +gr_handle_ptrace(struct task_struct *task, const long request)
68714 +{
68715 + struct task_struct *tmp = task;
68716 + struct task_struct *curtemp = current;
68717 + __u32 retmode;
68718 +
68719 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
68720 + if (unlikely(!(gr_status & GR_READY)))
68721 + return 0;
68722 +#endif
68723 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68724 + read_lock(&tasklist_lock);
68725 + while (task_pid_nr(tmp) > 0) {
68726 + if (tmp == curtemp)
68727 + break;
68728 + tmp = tmp->real_parent;
68729 + }
68730 +
68731 + if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
68732 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
68733 + read_unlock(&tasklist_lock);
68734 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
68735 + return 1;
68736 + }
68737 + read_unlock(&tasklist_lock);
68738 + }
68739 +
68740 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
68741 + if (!(gr_status & GR_READY))
68742 + return 0;
68743 +#endif
68744 +
68745 + read_lock(&grsec_exec_file_lock);
68746 + if (unlikely(!task->exec_file)) {
68747 + read_unlock(&grsec_exec_file_lock);
68748 + return 0;
68749 + }
68750 +
68751 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
68752 + read_unlock(&grsec_exec_file_lock);
68753 +
68754 + if (retmode & GR_NOPTRACE) {
68755 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
68756 + return 1;
68757 + }
68758 +
68759 + if (retmode & GR_PTRACERD) {
68760 + switch (request) {
68761 + case PTRACE_SEIZE:
68762 + case PTRACE_POKETEXT:
68763 + case PTRACE_POKEDATA:
68764 + case PTRACE_POKEUSR:
68765 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
68766 + case PTRACE_SETREGS:
68767 + case PTRACE_SETFPREGS:
68768 +#endif
68769 +#ifdef CONFIG_X86
68770 + case PTRACE_SETFPXREGS:
68771 +#endif
68772 +#ifdef CONFIG_ALTIVEC
68773 + case PTRACE_SETVRREGS:
68774 +#endif
68775 + return 1;
68776 + default:
68777 + return 0;
68778 + }
68779 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
68780 + !(current->role->roletype & GR_ROLE_GOD) &&
68781 + (current->acl != task->acl)) {
68782 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
68783 + return 1;
68784 + }
68785 +
68786 + return 0;
68787 +}
68788 +
68789 +static int is_writable_mmap(const struct file *filp)
68790 +{
68791 + struct task_struct *task = current;
68792 + struct acl_object_label *obj, *obj2;
68793 +
68794 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
68795 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
68796 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
68797 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
68798 + task->role->root_label);
68799 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
68800 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
68801 + return 1;
68802 + }
68803 + }
68804 + return 0;
68805 +}
68806 +
68807 +int
68808 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
68809 +{
68810 + __u32 mode;
68811 +
68812 + if (unlikely(!file || !(prot & PROT_EXEC)))
68813 + return 1;
68814 +
68815 + if (is_writable_mmap(file))
68816 + return 0;
68817 +
68818 + mode =
68819 + gr_search_file(file->f_path.dentry,
68820 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
68821 + file->f_path.mnt);
68822 +
68823 + if (!gr_tpe_allow(file))
68824 + return 0;
68825 +
68826 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
68827 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68828 + return 0;
68829 + } else if (unlikely(!(mode & GR_EXEC))) {
68830 + return 0;
68831 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
68832 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68833 + return 1;
68834 + }
68835 +
68836 + return 1;
68837 +}
68838 +
68839 +int
68840 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
68841 +{
68842 + __u32 mode;
68843 +
68844 + if (unlikely(!file || !(prot & PROT_EXEC)))
68845 + return 1;
68846 +
68847 + if (is_writable_mmap(file))
68848 + return 0;
68849 +
68850 + mode =
68851 + gr_search_file(file->f_path.dentry,
68852 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
68853 + file->f_path.mnt);
68854 +
68855 + if (!gr_tpe_allow(file))
68856 + return 0;
68857 +
68858 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
68859 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68860 + return 0;
68861 + } else if (unlikely(!(mode & GR_EXEC))) {
68862 + return 0;
68863 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
68864 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68865 + return 1;
68866 + }
68867 +
68868 + return 1;
68869 +}
68870 +
68871 +void
68872 +gr_acl_handle_psacct(struct task_struct *task, const long code)
68873 +{
68874 + unsigned long runtime, cputime;
68875 + cputime_t utime, stime;
68876 + unsigned int wday, cday;
68877 + __u8 whr, chr;
68878 + __u8 wmin, cmin;
68879 + __u8 wsec, csec;
68880 + struct timespec timeval;
68881 +
68882 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
68883 + !(task->acl->mode & GR_PROCACCT)))
68884 + return;
68885 +
68886 + do_posix_clock_monotonic_gettime(&timeval);
68887 + runtime = timeval.tv_sec - task->start_time.tv_sec;
68888 + wday = runtime / (60 * 60 * 24);
68889 + runtime -= wday * (60 * 60 * 24);
68890 + whr = runtime / (60 * 60);
68891 + runtime -= whr * (60 * 60);
68892 + wmin = runtime / 60;
68893 + runtime -= wmin * 60;
68894 + wsec = runtime;
68895 +
68896 + task_cputime(task, &utime, &stime);
68897 + cputime = cputime_to_secs(utime + stime);
68898 + cday = cputime / (60 * 60 * 24);
68899 + cputime -= cday * (60 * 60 * 24);
68900 + chr = cputime / (60 * 60);
68901 + cputime -= chr * (60 * 60);
68902 + cmin = cputime / 60;
68903 + cputime -= cmin * 60;
68904 + csec = cputime;
68905 +
68906 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
68907 +
68908 + return;
68909 +}
68910 +
68911 +#ifdef CONFIG_TASKSTATS
68912 +int gr_is_taskstats_denied(int pid)
68913 +{
68914 + struct task_struct *task;
68915 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68916 + const struct cred *cred;
68917 +#endif
68918 + int ret = 0;
68919 +
68920 + /* restrict taskstats viewing to un-chrooted root users
68921 + who have the 'view' subject flag if the RBAC system is enabled
68922 + */
68923 +
68924 + rcu_read_lock();
68925 + read_lock(&tasklist_lock);
68926 + task = find_task_by_vpid(pid);
68927 + if (task) {
68928 +#ifdef CONFIG_GRKERNSEC_CHROOT
68929 + if (proc_is_chrooted(task))
68930 + ret = -EACCES;
68931 +#endif
68932 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68933 + cred = __task_cred(task);
68934 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68935 + if (gr_is_global_nonroot(cred->uid))
68936 + ret = -EACCES;
68937 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68938 + if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
68939 + ret = -EACCES;
68940 +#endif
68941 +#endif
68942 + if (gr_status & GR_READY) {
68943 + if (!(task->acl->mode & GR_VIEW))
68944 + ret = -EACCES;
68945 + }
68946 + } else
68947 + ret = -ENOENT;
68948 +
68949 + read_unlock(&tasklist_lock);
68950 + rcu_read_unlock();
68951 +
68952 + return ret;
68953 +}
68954 +#endif
68955 +
68956 +/* AUXV entries are filled via a descendant of search_binary_handler
68957 + after we've already applied the subject for the target
68958 +*/
68959 +int gr_acl_enable_at_secure(void)
68960 +{
68961 + if (unlikely(!(gr_status & GR_READY)))
68962 + return 0;
68963 +
68964 + if (current->acl->mode & GR_ATSECURE)
68965 + return 1;
68966 +
68967 + return 0;
68968 +}
68969 +
68970 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
68971 +{
68972 + struct task_struct *task = current;
68973 + struct dentry *dentry = file->f_path.dentry;
68974 + struct vfsmount *mnt = file->f_path.mnt;
68975 + struct acl_object_label *obj, *tmp;
68976 + struct acl_subject_label *subj;
68977 + unsigned int bufsize;
68978 + int is_not_root;
68979 + char *path;
68980 + dev_t dev = __get_dev(dentry);
68981 +
68982 + if (unlikely(!(gr_status & GR_READY)))
68983 + return 1;
68984 +
68985 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68986 + return 1;
68987 +
68988 + /* ignore Eric Biederman */
68989 + if (IS_PRIVATE(dentry->d_inode))
68990 + return 1;
68991 +
68992 + subj = task->acl;
68993 + read_lock(&gr_inode_lock);
68994 + do {
68995 + obj = lookup_acl_obj_label(ino, dev, subj);
68996 + if (obj != NULL) {
68997 + read_unlock(&gr_inode_lock);
68998 + return (obj->mode & GR_FIND) ? 1 : 0;
68999 + }
69000 + } while ((subj = subj->parent_subject));
69001 + read_unlock(&gr_inode_lock);
69002 +
69003 + /* this is purely an optimization since we're looking for an object
69004 + for the directory we're doing a readdir on
69005 + if it's possible for any globbed object to match the entry we're
69006 + filling into the directory, then the object we find here will be
69007 + an anchor point with attached globbed objects
69008 + */
69009 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
69010 + if (obj->globbed == NULL)
69011 + return (obj->mode & GR_FIND) ? 1 : 0;
69012 +
69013 + is_not_root = ((obj->filename[0] == '/') &&
69014 + (obj->filename[1] == '\0')) ? 0 : 1;
69015 + bufsize = PAGE_SIZE - namelen - is_not_root;
69016 +
69017 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
69018 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
69019 + return 1;
69020 +
69021 + preempt_disable();
69022 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69023 + bufsize);
69024 +
69025 + bufsize = strlen(path);
69026 +
69027 + /* if base is "/", don't append an additional slash */
69028 + if (is_not_root)
69029 + *(path + bufsize) = '/';
69030 + memcpy(path + bufsize + is_not_root, name, namelen);
69031 + *(path + bufsize + namelen + is_not_root) = '\0';
69032 +
69033 + tmp = obj->globbed;
69034 + while (tmp) {
69035 + if (!glob_match(tmp->filename, path)) {
69036 + preempt_enable();
69037 + return (tmp->mode & GR_FIND) ? 1 : 0;
69038 + }
69039 + tmp = tmp->next;
69040 + }
69041 + preempt_enable();
69042 + return (obj->mode & GR_FIND) ? 1 : 0;
69043 +}
69044 +
69045 +void gr_put_exec_file(struct task_struct *task)
69046 +{
69047 + struct file *filp;
69048 +
69049 + write_lock(&grsec_exec_file_lock);
69050 + filp = task->exec_file;
69051 + task->exec_file = NULL;
69052 + write_unlock(&grsec_exec_file_lock);
69053 +
69054 + if (filp)
69055 + fput(filp);
69056 +
69057 + return;
69058 +}
69059 +
69060 +
69061 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
69062 +EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
69063 +#endif
69064 +#ifdef CONFIG_SECURITY
69065 +EXPORT_SYMBOL_GPL(gr_check_user_change);
69066 +EXPORT_SYMBOL_GPL(gr_check_group_change);
69067 +#endif
69068 +
69069 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
69070 new file mode 100644
69071 index 0000000..18ffbbd
69072 --- /dev/null
69073 +++ b/grsecurity/gracl_alloc.c
69074 @@ -0,0 +1,105 @@
69075 +#include <linux/kernel.h>
69076 +#include <linux/mm.h>
69077 +#include <linux/slab.h>
69078 +#include <linux/vmalloc.h>
69079 +#include <linux/gracl.h>
69080 +#include <linux/grsecurity.h>
69081 +
69082 +static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
69083 +struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
69084 +
69085 +static __inline__ int
69086 +alloc_pop(void)
69087 +{
69088 + if (current_alloc_state->alloc_stack_next == 1)
69089 + return 0;
69090 +
69091 + kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
69092 +
69093 + current_alloc_state->alloc_stack_next--;
69094 +
69095 + return 1;
69096 +}
69097 +
69098 +static __inline__ int
69099 +alloc_push(void *buf)
69100 +{
69101 + if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
69102 + return 1;
69103 +
69104 + current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
69105 +
69106 + current_alloc_state->alloc_stack_next++;
69107 +
69108 + return 0;
69109 +}
69110 +
69111 +void *
69112 +acl_alloc(unsigned long len)
69113 +{
69114 + void *ret = NULL;
69115 +
69116 + if (!len || len > PAGE_SIZE)
69117 + goto out;
69118 +
69119 + ret = kmalloc(len, GFP_KERNEL);
69120 +
69121 + if (ret) {
69122 + if (alloc_push(ret)) {
69123 + kfree(ret);
69124 + ret = NULL;
69125 + }
69126 + }
69127 +
69128 +out:
69129 + return ret;
69130 +}
69131 +
69132 +void *
69133 +acl_alloc_num(unsigned long num, unsigned long len)
69134 +{
69135 + if (!len || (num > (PAGE_SIZE / len)))
69136 + return NULL;
69137 +
69138 + return acl_alloc(num * len);
69139 +}
69140 +
69141 +void
69142 +acl_free_all(void)
69143 +{
69144 + if (!current_alloc_state->alloc_stack)
69145 + return;
69146 +
69147 + while (alloc_pop()) ;
69148 +
69149 + if (current_alloc_state->alloc_stack) {
69150 + if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
69151 + kfree(current_alloc_state->alloc_stack);
69152 + else
69153 + vfree(current_alloc_state->alloc_stack);
69154 + }
69155 +
69156 + current_alloc_state->alloc_stack = NULL;
69157 + current_alloc_state->alloc_stack_size = 1;
69158 + current_alloc_state->alloc_stack_next = 1;
69159 +
69160 + return;
69161 +}
69162 +
69163 +int
69164 +acl_alloc_stack_init(unsigned long size)
69165 +{
69166 + if ((size * sizeof (void *)) <= PAGE_SIZE)
69167 + current_alloc_state->alloc_stack =
69168 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
69169 + else
69170 + current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
69171 +
69172 + current_alloc_state->alloc_stack_size = size;
69173 + current_alloc_state->alloc_stack_next = 1;
69174 +
69175 + if (!current_alloc_state->alloc_stack)
69176 + return 0;
69177 + else
69178 + return 1;
69179 +}
69180 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
69181 new file mode 100644
69182 index 0000000..bdd51ea
69183 --- /dev/null
69184 +++ b/grsecurity/gracl_cap.c
69185 @@ -0,0 +1,110 @@
69186 +#include <linux/kernel.h>
69187 +#include <linux/module.h>
69188 +#include <linux/sched.h>
69189 +#include <linux/gracl.h>
69190 +#include <linux/grsecurity.h>
69191 +#include <linux/grinternal.h>
69192 +
69193 +extern const char *captab_log[];
69194 +extern int captab_log_entries;
69195 +
69196 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
69197 +{
69198 + struct acl_subject_label *curracl;
69199 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
69200 + kernel_cap_t cap_audit = __cap_empty_set;
69201 +
69202 + if (!gr_acl_is_enabled())
69203 + return 1;
69204 +
69205 + curracl = task->acl;
69206 +
69207 + cap_drop = curracl->cap_lower;
69208 + cap_mask = curracl->cap_mask;
69209 + cap_audit = curracl->cap_invert_audit;
69210 +
69211 + while ((curracl = curracl->parent_subject)) {
69212 + /* if the cap isn't specified in the current computed mask but is specified in the
69213 + current level subject, and is lowered in the current level subject, then add
69214 + it to the set of dropped capabilities
69215 + otherwise, add the current level subject's mask to the current computed mask
69216 + */
69217 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
69218 + cap_raise(cap_mask, cap);
69219 + if (cap_raised(curracl->cap_lower, cap))
69220 + cap_raise(cap_drop, cap);
69221 + if (cap_raised(curracl->cap_invert_audit, cap))
69222 + cap_raise(cap_audit, cap);
69223 + }
69224 + }
69225 +
69226 + if (!cap_raised(cap_drop, cap)) {
69227 + if (cap_raised(cap_audit, cap))
69228 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
69229 + return 1;
69230 + }
69231 +
69232 + curracl = task->acl;
69233 +
69234 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
69235 + && cap_raised(cred->cap_effective, cap)) {
69236 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
69237 + task->role->roletype, GR_GLOBAL_UID(cred->uid),
69238 + GR_GLOBAL_GID(cred->gid), task->exec_file ?
69239 + gr_to_filename(task->exec_file->f_path.dentry,
69240 + task->exec_file->f_path.mnt) : curracl->filename,
69241 + curracl->filename, 0UL,
69242 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
69243 + return 1;
69244 + }
69245 +
69246 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
69247 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
69248 +
69249 + return 0;
69250 +}
69251 +
69252 +int
69253 +gr_acl_is_capable(const int cap)
69254 +{
69255 + return gr_task_acl_is_capable(current, current_cred(), cap);
69256 +}
69257 +
69258 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
69259 +{
69260 + struct acl_subject_label *curracl;
69261 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
69262 +
69263 + if (!gr_acl_is_enabled())
69264 + return 1;
69265 +
69266 + curracl = task->acl;
69267 +
69268 + cap_drop = curracl->cap_lower;
69269 + cap_mask = curracl->cap_mask;
69270 +
69271 + while ((curracl = curracl->parent_subject)) {
69272 + /* if the cap isn't specified in the current computed mask but is specified in the
69273 + current level subject, and is lowered in the current level subject, then add
69274 + it to the set of dropped capabilities
69275 + otherwise, add the current level subject's mask to the current computed mask
69276 + */
69277 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
69278 + cap_raise(cap_mask, cap);
69279 + if (cap_raised(curracl->cap_lower, cap))
69280 + cap_raise(cap_drop, cap);
69281 + }
69282 + }
69283 +
69284 + if (!cap_raised(cap_drop, cap))
69285 + return 1;
69286 +
69287 + return 0;
69288 +}
69289 +
69290 +int
69291 +gr_acl_is_capable_nolog(const int cap)
69292 +{
69293 + return gr_task_acl_is_capable_nolog(current, cap);
69294 +}
69295 +
69296 diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
69297 new file mode 100644
69298 index 0000000..ca25605
69299 --- /dev/null
69300 +++ b/grsecurity/gracl_compat.c
69301 @@ -0,0 +1,270 @@
69302 +#include <linux/kernel.h>
69303 +#include <linux/gracl.h>
69304 +#include <linux/compat.h>
69305 +#include <linux/gracl_compat.h>
69306 +
69307 +#include <asm/uaccess.h>
69308 +
69309 +int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
69310 +{
69311 + struct gr_arg_wrapper_compat uwrapcompat;
69312 +
69313 + if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
69314 + return -EFAULT;
69315 +
69316 + if (((uwrapcompat.version != GRSECURITY_VERSION) &&
69317 + (uwrapcompat.version != 0x2901)) ||
69318 + (uwrapcompat.size != sizeof(struct gr_arg_compat)))
69319 + return -EINVAL;
69320 +
69321 + uwrap->arg = compat_ptr(uwrapcompat.arg);
69322 + uwrap->version = uwrapcompat.version;
69323 + uwrap->size = sizeof(struct gr_arg);
69324 +
69325 + return 0;
69326 +}
69327 +
69328 +int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
69329 +{
69330 + struct gr_arg_compat argcompat;
69331 +
69332 + if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
69333 + return -EFAULT;
69334 +
69335 + arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
69336 + arg->role_db.num_pointers = argcompat.role_db.num_pointers;
69337 + arg->role_db.num_roles = argcompat.role_db.num_roles;
69338 + arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
69339 + arg->role_db.num_subjects = argcompat.role_db.num_subjects;
69340 + arg->role_db.num_objects = argcompat.role_db.num_objects;
69341 +
69342 + memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
69343 + memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
69344 + memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
69345 + memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
69346 + arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
69347 + arg->segv_device = argcompat.segv_device;
69348 + arg->segv_inode = argcompat.segv_inode;
69349 + arg->segv_uid = argcompat.segv_uid;
69350 + arg->num_sprole_pws = argcompat.num_sprole_pws;
69351 + arg->mode = argcompat.mode;
69352 +
69353 + return 0;
69354 +}
69355 +
69356 +int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
69357 +{
69358 + struct acl_object_label_compat objcompat;
69359 +
69360 + if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
69361 + return -EFAULT;
69362 +
69363 + obj->filename = compat_ptr(objcompat.filename);
69364 + obj->inode = objcompat.inode;
69365 + obj->device = objcompat.device;
69366 + obj->mode = objcompat.mode;
69367 +
69368 + obj->nested = compat_ptr(objcompat.nested);
69369 + obj->globbed = compat_ptr(objcompat.globbed);
69370 +
69371 + obj->prev = compat_ptr(objcompat.prev);
69372 + obj->next = compat_ptr(objcompat.next);
69373 +
69374 + return 0;
69375 +}
69376 +
69377 +int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
69378 +{
69379 + unsigned int i;
69380 + struct acl_subject_label_compat subjcompat;
69381 +
69382 + if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
69383 + return -EFAULT;
69384 +
69385 + subj->filename = compat_ptr(subjcompat.filename);
69386 + subj->inode = subjcompat.inode;
69387 + subj->device = subjcompat.device;
69388 + subj->mode = subjcompat.mode;
69389 + subj->cap_mask = subjcompat.cap_mask;
69390 + subj->cap_lower = subjcompat.cap_lower;
69391 + subj->cap_invert_audit = subjcompat.cap_invert_audit;
69392 +
69393 + for (i = 0; i < GR_NLIMITS; i++) {
69394 + if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
69395 + subj->res[i].rlim_cur = RLIM_INFINITY;
69396 + else
69397 + subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
69398 + if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
69399 + subj->res[i].rlim_max = RLIM_INFINITY;
69400 + else
69401 + subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
69402 + }
69403 + subj->resmask = subjcompat.resmask;
69404 +
69405 + subj->user_trans_type = subjcompat.user_trans_type;
69406 + subj->group_trans_type = subjcompat.group_trans_type;
69407 + subj->user_transitions = compat_ptr(subjcompat.user_transitions);
69408 + subj->group_transitions = compat_ptr(subjcompat.group_transitions);
69409 + subj->user_trans_num = subjcompat.user_trans_num;
69410 + subj->group_trans_num = subjcompat.group_trans_num;
69411 +
69412 + memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
69413 + memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
69414 + subj->ip_type = subjcompat.ip_type;
69415 + subj->ips = compat_ptr(subjcompat.ips);
69416 + subj->ip_num = subjcompat.ip_num;
69417 + subj->inaddr_any_override = subjcompat.inaddr_any_override;
69418 +
69419 + subj->crashes = subjcompat.crashes;
69420 + subj->expires = subjcompat.expires;
69421 +
69422 + subj->parent_subject = compat_ptr(subjcompat.parent_subject);
69423 + subj->hash = compat_ptr(subjcompat.hash);
69424 + subj->prev = compat_ptr(subjcompat.prev);
69425 + subj->next = compat_ptr(subjcompat.next);
69426 +
69427 + subj->obj_hash = compat_ptr(subjcompat.obj_hash);
69428 + subj->obj_hash_size = subjcompat.obj_hash_size;
69429 + subj->pax_flags = subjcompat.pax_flags;
69430 +
69431 + return 0;
69432 +}
69433 +
69434 +int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
69435 +{
69436 + struct acl_role_label_compat rolecompat;
69437 +
69438 + if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
69439 + return -EFAULT;
69440 +
69441 + role->rolename = compat_ptr(rolecompat.rolename);
69442 + role->uidgid = rolecompat.uidgid;
69443 + role->roletype = rolecompat.roletype;
69444 +
69445 + role->auth_attempts = rolecompat.auth_attempts;
69446 + role->expires = rolecompat.expires;
69447 +
69448 + role->root_label = compat_ptr(rolecompat.root_label);
69449 + role->hash = compat_ptr(rolecompat.hash);
69450 +
69451 + role->prev = compat_ptr(rolecompat.prev);
69452 + role->next = compat_ptr(rolecompat.next);
69453 +
69454 + role->transitions = compat_ptr(rolecompat.transitions);
69455 + role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
69456 + role->domain_children = compat_ptr(rolecompat.domain_children);
69457 + role->domain_child_num = rolecompat.domain_child_num;
69458 +
69459 + role->umask = rolecompat.umask;
69460 +
69461 + role->subj_hash = compat_ptr(rolecompat.subj_hash);
69462 + role->subj_hash_size = rolecompat.subj_hash_size;
69463 +
69464 + return 0;
69465 +}
69466 +
69467 +int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
69468 +{
69469 + struct role_allowed_ip_compat roleip_compat;
69470 +
69471 + if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
69472 + return -EFAULT;
69473 +
69474 + roleip->addr = roleip_compat.addr;
69475 + roleip->netmask = roleip_compat.netmask;
69476 +
69477 + roleip->prev = compat_ptr(roleip_compat.prev);
69478 + roleip->next = compat_ptr(roleip_compat.next);
69479 +
69480 + return 0;
69481 +}
69482 +
69483 +int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
69484 +{
69485 + struct role_transition_compat trans_compat;
69486 +
69487 + if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
69488 + return -EFAULT;
69489 +
69490 + trans->rolename = compat_ptr(trans_compat.rolename);
69491 +
69492 + trans->prev = compat_ptr(trans_compat.prev);
69493 + trans->next = compat_ptr(trans_compat.next);
69494 +
69495 + return 0;
69496 +
69497 +}
69498 +
69499 +int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
69500 +{
69501 + struct gr_hash_struct_compat hash_compat;
69502 +
69503 + if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
69504 + return -EFAULT;
69505 +
69506 + hash->table = compat_ptr(hash_compat.table);
69507 + hash->nametable = compat_ptr(hash_compat.nametable);
69508 + hash->first = compat_ptr(hash_compat.first);
69509 +
69510 + hash->table_size = hash_compat.table_size;
69511 + hash->used_size = hash_compat.used_size;
69512 +
69513 + hash->type = hash_compat.type;
69514 +
69515 + return 0;
69516 +}
69517 +
69518 +int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
69519 +{
69520 + compat_uptr_t ptrcompat;
69521 +
69522 + if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
69523 + return -EFAULT;
69524 +
69525 + *(void **)ptr = compat_ptr(ptrcompat);
69526 +
69527 + return 0;
69528 +}
69529 +
69530 +int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
69531 +{
69532 + struct acl_ip_label_compat ip_compat;
69533 +
69534 + if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
69535 + return -EFAULT;
69536 +
69537 + ip->iface = compat_ptr(ip_compat.iface);
69538 + ip->addr = ip_compat.addr;
69539 + ip->netmask = ip_compat.netmask;
69540 + ip->low = ip_compat.low;
69541 + ip->high = ip_compat.high;
69542 + ip->mode = ip_compat.mode;
69543 + ip->type = ip_compat.type;
69544 +
69545 + memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
69546 +
69547 + ip->prev = compat_ptr(ip_compat.prev);
69548 + ip->next = compat_ptr(ip_compat.next);
69549 +
69550 + return 0;
69551 +}
69552 +
69553 +int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
69554 +{
69555 + struct sprole_pw_compat pw_compat;
69556 +
69557 + if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
69558 + return -EFAULT;
69559 +
69560 + pw->rolename = compat_ptr(pw_compat.rolename);
69561 + memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
69562 + memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
69563 +
69564 + return 0;
69565 +}
69566 +
69567 +size_t get_gr_arg_wrapper_size_compat(void)
69568 +{
69569 + return sizeof(struct gr_arg_wrapper_compat);
69570 +}
69571 +
69572 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
69573 new file mode 100644
69574 index 0000000..a89b1f4
69575 --- /dev/null
69576 +++ b/grsecurity/gracl_fs.c
69577 @@ -0,0 +1,437 @@
69578 +#include <linux/kernel.h>
69579 +#include <linux/sched.h>
69580 +#include <linux/types.h>
69581 +#include <linux/fs.h>
69582 +#include <linux/file.h>
69583 +#include <linux/stat.h>
69584 +#include <linux/grsecurity.h>
69585 +#include <linux/grinternal.h>
69586 +#include <linux/gracl.h>
69587 +
69588 +umode_t
69589 +gr_acl_umask(void)
69590 +{
69591 + if (unlikely(!gr_acl_is_enabled()))
69592 + return 0;
69593 +
69594 + return current->role->umask;
69595 +}
69596 +
69597 +__u32
69598 +gr_acl_handle_hidden_file(const struct dentry * dentry,
69599 + const struct vfsmount * mnt)
69600 +{
69601 + __u32 mode;
69602 +
69603 + if (unlikely(d_is_negative(dentry)))
69604 + return GR_FIND;
69605 +
69606 + mode =
69607 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
69608 +
69609 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
69610 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
69611 + return mode;
69612 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
69613 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
69614 + return 0;
69615 + } else if (unlikely(!(mode & GR_FIND)))
69616 + return 0;
69617 +
69618 + return GR_FIND;
69619 +}
69620 +
69621 +__u32
69622 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
69623 + int acc_mode)
69624 +{
69625 + __u32 reqmode = GR_FIND;
69626 + __u32 mode;
69627 +
69628 + if (unlikely(d_is_negative(dentry)))
69629 + return reqmode;
69630 +
69631 + if (acc_mode & MAY_APPEND)
69632 + reqmode |= GR_APPEND;
69633 + else if (acc_mode & MAY_WRITE)
69634 + reqmode |= GR_WRITE;
69635 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
69636 + reqmode |= GR_READ;
69637 +
69638 + mode =
69639 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
69640 + mnt);
69641 +
69642 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
69643 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
69644 + reqmode & GR_READ ? " reading" : "",
69645 + reqmode & GR_WRITE ? " writing" : reqmode &
69646 + GR_APPEND ? " appending" : "");
69647 + return reqmode;
69648 + } else
69649 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
69650 + {
69651 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
69652 + reqmode & GR_READ ? " reading" : "",
69653 + reqmode & GR_WRITE ? " writing" : reqmode &
69654 + GR_APPEND ? " appending" : "");
69655 + return 0;
69656 + } else if (unlikely((mode & reqmode) != reqmode))
69657 + return 0;
69658 +
69659 + return reqmode;
69660 +}
69661 +
69662 +__u32
69663 +gr_acl_handle_creat(const struct dentry * dentry,
69664 + const struct dentry * p_dentry,
69665 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
69666 + const int imode)
69667 +{
69668 + __u32 reqmode = GR_WRITE | GR_CREATE;
69669 + __u32 mode;
69670 +
69671 + if (acc_mode & MAY_APPEND)
69672 + reqmode |= GR_APPEND;
69673 + // if a directory was required or the directory already exists, then
69674 + // don't count this open as a read
69675 + if ((acc_mode & MAY_READ) &&
69676 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
69677 + reqmode |= GR_READ;
69678 + if ((open_flags & O_CREAT) &&
69679 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
69680 + reqmode |= GR_SETID;
69681 +
69682 + mode =
69683 + gr_check_create(dentry, p_dentry, p_mnt,
69684 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
69685 +
69686 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
69687 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
69688 + reqmode & GR_READ ? " reading" : "",
69689 + reqmode & GR_WRITE ? " writing" : reqmode &
69690 + GR_APPEND ? " appending" : "");
69691 + return reqmode;
69692 + } else
69693 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
69694 + {
69695 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
69696 + reqmode & GR_READ ? " reading" : "",
69697 + reqmode & GR_WRITE ? " writing" : reqmode &
69698 + GR_APPEND ? " appending" : "");
69699 + return 0;
69700 + } else if (unlikely((mode & reqmode) != reqmode))
69701 + return 0;
69702 +
69703 + return reqmode;
69704 +}
69705 +
69706 +__u32
69707 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
69708 + const int fmode)
69709 +{
69710 + __u32 mode, reqmode = GR_FIND;
69711 +
69712 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
69713 + reqmode |= GR_EXEC;
69714 + if (fmode & S_IWOTH)
69715 + reqmode |= GR_WRITE;
69716 + if (fmode & S_IROTH)
69717 + reqmode |= GR_READ;
69718 +
69719 + mode =
69720 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
69721 + mnt);
69722 +
69723 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
69724 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
69725 + reqmode & GR_READ ? " reading" : "",
69726 + reqmode & GR_WRITE ? " writing" : "",
69727 + reqmode & GR_EXEC ? " executing" : "");
69728 + return reqmode;
69729 + } else
69730 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
69731 + {
69732 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
69733 + reqmode & GR_READ ? " reading" : "",
69734 + reqmode & GR_WRITE ? " writing" : "",
69735 + reqmode & GR_EXEC ? " executing" : "");
69736 + return 0;
69737 + } else if (unlikely((mode & reqmode) != reqmode))
69738 + return 0;
69739 +
69740 + return reqmode;
69741 +}
69742 +
69743 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
69744 +{
69745 + __u32 mode;
69746 +
69747 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
69748 +
69749 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
69750 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
69751 + return mode;
69752 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
69753 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
69754 + return 0;
69755 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
69756 + return 0;
69757 +
69758 + return (reqmode);
69759 +}
69760 +
69761 +__u32
69762 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
69763 +{
69764 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
69765 +}
69766 +
69767 +__u32
69768 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
69769 +{
69770 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
69771 +}
69772 +
69773 +__u32
69774 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
69775 +{
69776 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
69777 +}
69778 +
69779 +__u32
69780 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
69781 +{
69782 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
69783 +}
69784 +
69785 +__u32
69786 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
69787 + umode_t *modeptr)
69788 +{
69789 + umode_t mode;
69790 +
69791 + *modeptr &= ~gr_acl_umask();
69792 + mode = *modeptr;
69793 +
69794 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
69795 + return 1;
69796 +
69797 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
69798 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
69799 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
69800 + GR_CHMOD_ACL_MSG);
69801 + } else {
69802 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
69803 + }
69804 +}
69805 +
69806 +__u32
69807 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
69808 +{
69809 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
69810 +}
69811 +
69812 +__u32
69813 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
69814 +{
69815 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
69816 +}
69817 +
69818 +__u32
69819 +gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
69820 +{
69821 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
69822 +}
69823 +
69824 +__u32
69825 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
69826 +{
69827 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
69828 +}
69829 +
69830 +__u32
69831 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
69832 +{
69833 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
69834 + GR_UNIXCONNECT_ACL_MSG);
69835 +}
69836 +
69837 +/* hardlinks require at minimum create and link permission,
69838 + any additional privilege required is based on the
69839 + privilege of the file being linked to
69840 +*/
69841 +__u32
69842 +gr_acl_handle_link(const struct dentry * new_dentry,
69843 + const struct dentry * parent_dentry,
69844 + const struct vfsmount * parent_mnt,
69845 + const struct dentry * old_dentry,
69846 + const struct vfsmount * old_mnt, const struct filename *to)
69847 +{
69848 + __u32 mode;
69849 + __u32 needmode = GR_CREATE | GR_LINK;
69850 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
69851 +
69852 + mode =
69853 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
69854 + old_mnt);
69855 +
69856 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
69857 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
69858 + return mode;
69859 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
69860 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
69861 + return 0;
69862 + } else if (unlikely((mode & needmode) != needmode))
69863 + return 0;
69864 +
69865 + return 1;
69866 +}
69867 +
69868 +__u32
69869 +gr_acl_handle_symlink(const struct dentry * new_dentry,
69870 + const struct dentry * parent_dentry,
69871 + const struct vfsmount * parent_mnt, const struct filename *from)
69872 +{
69873 + __u32 needmode = GR_WRITE | GR_CREATE;
69874 + __u32 mode;
69875 +
69876 + mode =
69877 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
69878 + GR_CREATE | GR_AUDIT_CREATE |
69879 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
69880 +
69881 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
69882 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
69883 + return mode;
69884 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
69885 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
69886 + return 0;
69887 + } else if (unlikely((mode & needmode) != needmode))
69888 + return 0;
69889 +
69890 + return (GR_WRITE | GR_CREATE);
69891 +}
69892 +
69893 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
69894 +{
69895 + __u32 mode;
69896 +
69897 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
69898 +
69899 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
69900 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
69901 + return mode;
69902 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
69903 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
69904 + return 0;
69905 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
69906 + return 0;
69907 +
69908 + return (reqmode);
69909 +}
69910 +
69911 +__u32
69912 +gr_acl_handle_mknod(const struct dentry * new_dentry,
69913 + const struct dentry * parent_dentry,
69914 + const struct vfsmount * parent_mnt,
69915 + const int mode)
69916 +{
69917 + __u32 reqmode = GR_WRITE | GR_CREATE;
69918 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
69919 + reqmode |= GR_SETID;
69920 +
69921 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
69922 + reqmode, GR_MKNOD_ACL_MSG);
69923 +}
69924 +
69925 +__u32
69926 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
69927 + const struct dentry *parent_dentry,
69928 + const struct vfsmount *parent_mnt)
69929 +{
69930 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
69931 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
69932 +}
69933 +
69934 +#define RENAME_CHECK_SUCCESS(old, new) \
69935 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
69936 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
69937 +
69938 +int
69939 +gr_acl_handle_rename(struct dentry *new_dentry,
69940 + struct dentry *parent_dentry,
69941 + const struct vfsmount *parent_mnt,
69942 + struct dentry *old_dentry,
69943 + struct inode *old_parent_inode,
69944 + struct vfsmount *old_mnt, const struct filename *newname)
69945 +{
69946 + __u32 comp1, comp2;
69947 + int error = 0;
69948 +
69949 + if (unlikely(!gr_acl_is_enabled()))
69950 + return 0;
69951 +
69952 + if (d_is_negative(new_dentry)) {
69953 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
69954 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
69955 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
69956 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
69957 + GR_DELETE | GR_AUDIT_DELETE |
69958 + GR_AUDIT_READ | GR_AUDIT_WRITE |
69959 + GR_SUPPRESS, old_mnt);
69960 + } else {
69961 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
69962 + GR_CREATE | GR_DELETE |
69963 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
69964 + GR_AUDIT_READ | GR_AUDIT_WRITE |
69965 + GR_SUPPRESS, parent_mnt);
69966 + comp2 =
69967 + gr_search_file(old_dentry,
69968 + GR_READ | GR_WRITE | GR_AUDIT_READ |
69969 + GR_DELETE | GR_AUDIT_DELETE |
69970 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
69971 + }
69972 +
69973 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
69974 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
69975 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69976 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
69977 + && !(comp2 & GR_SUPPRESS)) {
69978 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69979 + error = -EACCES;
69980 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
69981 + error = -EACCES;
69982 +
69983 + return error;
69984 +}
69985 +
69986 +void
69987 +gr_acl_handle_exit(void)
69988 +{
69989 + u16 id;
69990 + char *rolename;
69991 +
69992 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
69993 + !(current->role->roletype & GR_ROLE_PERSIST))) {
69994 + id = current->acl_role_id;
69995 + rolename = current->role->rolename;
69996 + gr_set_acls(1);
69997 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
69998 + }
69999 +
70000 + gr_put_exec_file(current);
70001 + return;
70002 +}
70003 +
70004 +int
70005 +gr_acl_handle_procpidmem(const struct task_struct *task)
70006 +{
70007 + if (unlikely(!gr_acl_is_enabled()))
70008 + return 0;
70009 +
70010 + if (task != current && task->acl->mode & GR_PROTPROCFD)
70011 + return -EACCES;
70012 +
70013 + return 0;
70014 +}
70015 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
70016 new file mode 100644
70017 index 0000000..f056b81
70018 --- /dev/null
70019 +++ b/grsecurity/gracl_ip.c
70020 @@ -0,0 +1,386 @@
70021 +#include <linux/kernel.h>
70022 +#include <asm/uaccess.h>
70023 +#include <asm/errno.h>
70024 +#include <net/sock.h>
70025 +#include <linux/file.h>
70026 +#include <linux/fs.h>
70027 +#include <linux/net.h>
70028 +#include <linux/in.h>
70029 +#include <linux/skbuff.h>
70030 +#include <linux/ip.h>
70031 +#include <linux/udp.h>
70032 +#include <linux/types.h>
70033 +#include <linux/sched.h>
70034 +#include <linux/netdevice.h>
70035 +#include <linux/inetdevice.h>
70036 +#include <linux/gracl.h>
70037 +#include <linux/grsecurity.h>
70038 +#include <linux/grinternal.h>
70039 +
70040 +#define GR_BIND 0x01
70041 +#define GR_CONNECT 0x02
70042 +#define GR_INVERT 0x04
70043 +#define GR_BINDOVERRIDE 0x08
70044 +#define GR_CONNECTOVERRIDE 0x10
70045 +#define GR_SOCK_FAMILY 0x20
70046 +
70047 +static const char * gr_protocols[IPPROTO_MAX] = {
70048 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
70049 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
70050 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
70051 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
70052 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
70053 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
70054 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
70055 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
70056 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
70057 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
70058 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
70059 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
70060 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
70061 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
70062 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
70063 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
70064 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
70065 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
70066 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
70067 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
70068 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
70069 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
70070 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
70071 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
70072 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
70073 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
70074 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
70075 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
70076 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
70077 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
70078 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
70079 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
70080 + };
70081 +
70082 +static const char * gr_socktypes[SOCK_MAX] = {
70083 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
70084 + "unknown:7", "unknown:8", "unknown:9", "packet"
70085 + };
70086 +
70087 +static const char * gr_sockfamilies[AF_MAX+1] = {
70088 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
70089 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
70090 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
70091 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
70092 + };
70093 +
70094 +const char *
70095 +gr_proto_to_name(unsigned char proto)
70096 +{
70097 + return gr_protocols[proto];
70098 +}
70099 +
70100 +const char *
70101 +gr_socktype_to_name(unsigned char type)
70102 +{
70103 + return gr_socktypes[type];
70104 +}
70105 +
70106 +const char *
70107 +gr_sockfamily_to_name(unsigned char family)
70108 +{
70109 + return gr_sockfamilies[family];
70110 +}
70111 +
70112 +extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
70113 +
70114 +int
70115 +gr_search_socket(const int domain, const int type, const int protocol)
70116 +{
70117 + struct acl_subject_label *curr;
70118 + const struct cred *cred = current_cred();
70119 +
70120 + if (unlikely(!gr_acl_is_enabled()))
70121 + goto exit;
70122 +
70123 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
70124 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
70125 + goto exit; // let the kernel handle it
70126 +
70127 + curr = current->acl;
70128 +
70129 + if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
70130 + /* the family is allowed, if this is PF_INET allow it only if
70131 + the extra sock type/protocol checks pass */
70132 + if (domain == PF_INET)
70133 + goto inet_check;
70134 + goto exit;
70135 + } else {
70136 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
70137 + __u32 fakeip = 0;
70138 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
70139 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
70140 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
70141 + gr_to_filename(current->exec_file->f_path.dentry,
70142 + current->exec_file->f_path.mnt) :
70143 + curr->filename, curr->filename,
70144 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
70145 + &current->signal->saved_ip);
70146 + goto exit;
70147 + }
70148 + goto exit_fail;
70149 + }
70150 +
70151 +inet_check:
70152 + /* the rest of this checking is for IPv4 only */
70153 + if (!curr->ips)
70154 + goto exit;
70155 +
70156 + if ((curr->ip_type & (1U << type)) &&
70157 + (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
70158 + goto exit;
70159 +
70160 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
70161 + /* we don't place acls on raw sockets , and sometimes
70162 + dgram/ip sockets are opened for ioctl and not
70163 + bind/connect, so we'll fake a bind learn log */
70164 + if (type == SOCK_RAW || type == SOCK_PACKET) {
70165 + __u32 fakeip = 0;
70166 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
70167 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
70168 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
70169 + gr_to_filename(current->exec_file->f_path.dentry,
70170 + current->exec_file->f_path.mnt) :
70171 + curr->filename, curr->filename,
70172 + &fakeip, 0, type,
70173 + protocol, GR_CONNECT, &current->signal->saved_ip);
70174 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
70175 + __u32 fakeip = 0;
70176 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
70177 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
70178 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
70179 + gr_to_filename(current->exec_file->f_path.dentry,
70180 + current->exec_file->f_path.mnt) :
70181 + curr->filename, curr->filename,
70182 + &fakeip, 0, type,
70183 + protocol, GR_BIND, &current->signal->saved_ip);
70184 + }
70185 + /* we'll log when they use connect or bind */
70186 + goto exit;
70187 + }
70188 +
70189 +exit_fail:
70190 + if (domain == PF_INET)
70191 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
70192 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
70193 + else if (rcu_access_pointer(net_families[domain]) != NULL)
70194 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
70195 + gr_socktype_to_name(type), protocol);
70196 +
70197 + return 0;
70198 +exit:
70199 + return 1;
70200 +}
70201 +
70202 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
70203 +{
70204 + if ((ip->mode & mode) &&
70205 + (ip_port >= ip->low) &&
70206 + (ip_port <= ip->high) &&
70207 + ((ntohl(ip_addr) & our_netmask) ==
70208 + (ntohl(our_addr) & our_netmask))
70209 + && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
70210 + && (ip->type & (1U << type))) {
70211 + if (ip->mode & GR_INVERT)
70212 + return 2; // specifically denied
70213 + else
70214 + return 1; // allowed
70215 + }
70216 +
70217 + return 0; // not specifically allowed, may continue parsing
70218 +}
70219 +
70220 +static int
70221 +gr_search_connectbind(const int full_mode, struct sock *sk,
70222 + struct sockaddr_in *addr, const int type)
70223 +{
70224 + char iface[IFNAMSIZ] = {0};
70225 + struct acl_subject_label *curr;
70226 + struct acl_ip_label *ip;
70227 + struct inet_sock *isk;
70228 + struct net_device *dev;
70229 + struct in_device *idev;
70230 + unsigned long i;
70231 + int ret;
70232 + int mode = full_mode & (GR_BIND | GR_CONNECT);
70233 + __u32 ip_addr = 0;
70234 + __u32 our_addr;
70235 + __u32 our_netmask;
70236 + char *p;
70237 + __u16 ip_port = 0;
70238 + const struct cred *cred = current_cred();
70239 +
70240 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
70241 + return 0;
70242 +
70243 + curr = current->acl;
70244 + isk = inet_sk(sk);
70245 +
70246 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
70247 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
70248 + addr->sin_addr.s_addr = curr->inaddr_any_override;
70249 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
70250 + struct sockaddr_in saddr;
70251 + int err;
70252 +
70253 + saddr.sin_family = AF_INET;
70254 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
70255 + saddr.sin_port = isk->inet_sport;
70256 +
70257 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
70258 + if (err)
70259 + return err;
70260 +
70261 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
70262 + if (err)
70263 + return err;
70264 + }
70265 +
70266 + if (!curr->ips)
70267 + return 0;
70268 +
70269 + ip_addr = addr->sin_addr.s_addr;
70270 + ip_port = ntohs(addr->sin_port);
70271 +
70272 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
70273 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
70274 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
70275 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
70276 + gr_to_filename(current->exec_file->f_path.dentry,
70277 + current->exec_file->f_path.mnt) :
70278 + curr->filename, curr->filename,
70279 + &ip_addr, ip_port, type,
70280 + sk->sk_protocol, mode, &current->signal->saved_ip);
70281 + return 0;
70282 + }
70283 +
70284 + for (i = 0; i < curr->ip_num; i++) {
70285 + ip = *(curr->ips + i);
70286 + if (ip->iface != NULL) {
70287 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
70288 + p = strchr(iface, ':');
70289 + if (p != NULL)
70290 + *p = '\0';
70291 + dev = dev_get_by_name(sock_net(sk), iface);
70292 + if (dev == NULL)
70293 + continue;
70294 + idev = in_dev_get(dev);
70295 + if (idev == NULL) {
70296 + dev_put(dev);
70297 + continue;
70298 + }
70299 + rcu_read_lock();
70300 + for_ifa(idev) {
70301 + if (!strcmp(ip->iface, ifa->ifa_label)) {
70302 + our_addr = ifa->ifa_address;
70303 + our_netmask = 0xffffffff;
70304 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
70305 + if (ret == 1) {
70306 + rcu_read_unlock();
70307 + in_dev_put(idev);
70308 + dev_put(dev);
70309 + return 0;
70310 + } else if (ret == 2) {
70311 + rcu_read_unlock();
70312 + in_dev_put(idev);
70313 + dev_put(dev);
70314 + goto denied;
70315 + }
70316 + }
70317 + } endfor_ifa(idev);
70318 + rcu_read_unlock();
70319 + in_dev_put(idev);
70320 + dev_put(dev);
70321 + } else {
70322 + our_addr = ip->addr;
70323 + our_netmask = ip->netmask;
70324 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
70325 + if (ret == 1)
70326 + return 0;
70327 + else if (ret == 2)
70328 + goto denied;
70329 + }
70330 + }
70331 +
70332 +denied:
70333 + if (mode == GR_BIND)
70334 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
70335 + else if (mode == GR_CONNECT)
70336 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
70337 +
70338 + return -EACCES;
70339 +}
70340 +
70341 +int
70342 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
70343 +{
70344 + /* always allow disconnection of dgram sockets with connect */
70345 + if (addr->sin_family == AF_UNSPEC)
70346 + return 0;
70347 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
70348 +}
70349 +
70350 +int
70351 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
70352 +{
70353 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
70354 +}
70355 +
70356 +int gr_search_listen(struct socket *sock)
70357 +{
70358 + struct sock *sk = sock->sk;
70359 + struct sockaddr_in addr;
70360 +
70361 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
70362 + addr.sin_port = inet_sk(sk)->inet_sport;
70363 +
70364 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
70365 +}
70366 +
70367 +int gr_search_accept(struct socket *sock)
70368 +{
70369 + struct sock *sk = sock->sk;
70370 + struct sockaddr_in addr;
70371 +
70372 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
70373 + addr.sin_port = inet_sk(sk)->inet_sport;
70374 +
70375 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
70376 +}
70377 +
70378 +int
70379 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
70380 +{
70381 + if (addr)
70382 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
70383 + else {
70384 + struct sockaddr_in sin;
70385 + const struct inet_sock *inet = inet_sk(sk);
70386 +
70387 + sin.sin_addr.s_addr = inet->inet_daddr;
70388 + sin.sin_port = inet->inet_dport;
70389 +
70390 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
70391 + }
70392 +}
70393 +
70394 +int
70395 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
70396 +{
70397 + struct sockaddr_in sin;
70398 +
70399 + if (unlikely(skb->len < sizeof (struct udphdr)))
70400 + return 0; // skip this packet
70401 +
70402 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
70403 + sin.sin_port = udp_hdr(skb)->source;
70404 +
70405 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
70406 +}
70407 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
70408 new file mode 100644
70409 index 0000000..25f54ef
70410 --- /dev/null
70411 +++ b/grsecurity/gracl_learn.c
70412 @@ -0,0 +1,207 @@
70413 +#include <linux/kernel.h>
70414 +#include <linux/mm.h>
70415 +#include <linux/sched.h>
70416 +#include <linux/poll.h>
70417 +#include <linux/string.h>
70418 +#include <linux/file.h>
70419 +#include <linux/types.h>
70420 +#include <linux/vmalloc.h>
70421 +#include <linux/grinternal.h>
70422 +
70423 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
70424 + size_t count, loff_t *ppos);
70425 +extern int gr_acl_is_enabled(void);
70426 +
70427 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
70428 +static int gr_learn_attached;
70429 +
70430 +/* use a 512k buffer */
70431 +#define LEARN_BUFFER_SIZE (512 * 1024)
70432 +
70433 +static DEFINE_SPINLOCK(gr_learn_lock);
70434 +static DEFINE_MUTEX(gr_learn_user_mutex);
70435 +
70436 +/* we need to maintain two buffers, so that the kernel context of grlearn
70437 + uses a semaphore around the userspace copying, and the other kernel contexts
70438 + use a spinlock when copying into the buffer, since they cannot sleep
70439 +*/
70440 +static char *learn_buffer;
70441 +static char *learn_buffer_user;
70442 +static int learn_buffer_len;
70443 +static int learn_buffer_user_len;
70444 +
70445 +static ssize_t
70446 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
70447 +{
70448 + DECLARE_WAITQUEUE(wait, current);
70449 + ssize_t retval = 0;
70450 +
70451 + add_wait_queue(&learn_wait, &wait);
70452 + set_current_state(TASK_INTERRUPTIBLE);
70453 + do {
70454 + mutex_lock(&gr_learn_user_mutex);
70455 + spin_lock(&gr_learn_lock);
70456 + if (learn_buffer_len)
70457 + break;
70458 + spin_unlock(&gr_learn_lock);
70459 + mutex_unlock(&gr_learn_user_mutex);
70460 + if (file->f_flags & O_NONBLOCK) {
70461 + retval = -EAGAIN;
70462 + goto out;
70463 + }
70464 + if (signal_pending(current)) {
70465 + retval = -ERESTARTSYS;
70466 + goto out;
70467 + }
70468 +
70469 + schedule();
70470 + } while (1);
70471 +
70472 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
70473 + learn_buffer_user_len = learn_buffer_len;
70474 + retval = learn_buffer_len;
70475 + learn_buffer_len = 0;
70476 +
70477 + spin_unlock(&gr_learn_lock);
70478 +
70479 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
70480 + retval = -EFAULT;
70481 +
70482 + mutex_unlock(&gr_learn_user_mutex);
70483 +out:
70484 + set_current_state(TASK_RUNNING);
70485 + remove_wait_queue(&learn_wait, &wait);
70486 + return retval;
70487 +}
70488 +
70489 +static unsigned int
70490 +poll_learn(struct file * file, poll_table * wait)
70491 +{
70492 + poll_wait(file, &learn_wait, wait);
70493 +
70494 + if (learn_buffer_len)
70495 + return (POLLIN | POLLRDNORM);
70496 +
70497 + return 0;
70498 +}
70499 +
70500 +void
70501 +gr_clear_learn_entries(void)
70502 +{
70503 + char *tmp;
70504 +
70505 + mutex_lock(&gr_learn_user_mutex);
70506 + spin_lock(&gr_learn_lock);
70507 + tmp = learn_buffer;
70508 + learn_buffer = NULL;
70509 + spin_unlock(&gr_learn_lock);
70510 + if (tmp)
70511 + vfree(tmp);
70512 + if (learn_buffer_user != NULL) {
70513 + vfree(learn_buffer_user);
70514 + learn_buffer_user = NULL;
70515 + }
70516 + learn_buffer_len = 0;
70517 + mutex_unlock(&gr_learn_user_mutex);
70518 +
70519 + return;
70520 +}
70521 +
70522 +void
70523 +gr_add_learn_entry(const char *fmt, ...)
70524 +{
70525 + va_list args;
70526 + unsigned int len;
70527 +
70528 + if (!gr_learn_attached)
70529 + return;
70530 +
70531 + spin_lock(&gr_learn_lock);
70532 +
70533 + /* leave a gap at the end so we know when it's "full" but don't have to
70534 + compute the exact length of the string we're trying to append
70535 + */
70536 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
70537 + spin_unlock(&gr_learn_lock);
70538 + wake_up_interruptible(&learn_wait);
70539 + return;
70540 + }
70541 + if (learn_buffer == NULL) {
70542 + spin_unlock(&gr_learn_lock);
70543 + return;
70544 + }
70545 +
70546 + va_start(args, fmt);
70547 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
70548 + va_end(args);
70549 +
70550 + learn_buffer_len += len + 1;
70551 +
70552 + spin_unlock(&gr_learn_lock);
70553 + wake_up_interruptible(&learn_wait);
70554 +
70555 + return;
70556 +}
70557 +
70558 +static int
70559 +open_learn(struct inode *inode, struct file *file)
70560 +{
70561 + if (file->f_mode & FMODE_READ && gr_learn_attached)
70562 + return -EBUSY;
70563 + if (file->f_mode & FMODE_READ) {
70564 + int retval = 0;
70565 + mutex_lock(&gr_learn_user_mutex);
70566 + if (learn_buffer == NULL)
70567 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
70568 + if (learn_buffer_user == NULL)
70569 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
70570 + if (learn_buffer == NULL) {
70571 + retval = -ENOMEM;
70572 + goto out_error;
70573 + }
70574 + if (learn_buffer_user == NULL) {
70575 + retval = -ENOMEM;
70576 + goto out_error;
70577 + }
70578 + learn_buffer_len = 0;
70579 + learn_buffer_user_len = 0;
70580 + gr_learn_attached = 1;
70581 +out_error:
70582 + mutex_unlock(&gr_learn_user_mutex);
70583 + return retval;
70584 + }
70585 + return 0;
70586 +}
70587 +
70588 +static int
70589 +close_learn(struct inode *inode, struct file *file)
70590 +{
70591 + if (file->f_mode & FMODE_READ) {
70592 + char *tmp = NULL;
70593 + mutex_lock(&gr_learn_user_mutex);
70594 + spin_lock(&gr_learn_lock);
70595 + tmp = learn_buffer;
70596 + learn_buffer = NULL;
70597 + spin_unlock(&gr_learn_lock);
70598 + if (tmp)
70599 + vfree(tmp);
70600 + if (learn_buffer_user != NULL) {
70601 + vfree(learn_buffer_user);
70602 + learn_buffer_user = NULL;
70603 + }
70604 + learn_buffer_len = 0;
70605 + learn_buffer_user_len = 0;
70606 + gr_learn_attached = 0;
70607 + mutex_unlock(&gr_learn_user_mutex);
70608 + }
70609 +
70610 + return 0;
70611 +}
70612 +
70613 +const struct file_operations grsec_fops = {
70614 + .read = read_learn,
70615 + .write = write_grsec_handler,
70616 + .open = open_learn,
70617 + .release = close_learn,
70618 + .poll = poll_learn,
70619 +};
70620 diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
70621 new file mode 100644
70622 index 0000000..361a099
70623 --- /dev/null
70624 +++ b/grsecurity/gracl_policy.c
70625 @@ -0,0 +1,1782 @@
70626 +#include <linux/kernel.h>
70627 +#include <linux/module.h>
70628 +#include <linux/sched.h>
70629 +#include <linux/mm.h>
70630 +#include <linux/file.h>
70631 +#include <linux/fs.h>
70632 +#include <linux/namei.h>
70633 +#include <linux/mount.h>
70634 +#include <linux/tty.h>
70635 +#include <linux/proc_fs.h>
70636 +#include <linux/lglock.h>
70637 +#include <linux/slab.h>
70638 +#include <linux/vmalloc.h>
70639 +#include <linux/types.h>
70640 +#include <linux/sysctl.h>
70641 +#include <linux/netdevice.h>
70642 +#include <linux/ptrace.h>
70643 +#include <linux/gracl.h>
70644 +#include <linux/gralloc.h>
70645 +#include <linux/security.h>
70646 +#include <linux/grinternal.h>
70647 +#include <linux/pid_namespace.h>
70648 +#include <linux/stop_machine.h>
70649 +#include <linux/fdtable.h>
70650 +#include <linux/percpu.h>
70651 +#include <linux/lglock.h>
70652 +#include <linux/hugetlb.h>
70653 +#include <linux/posix-timers.h>
70654 +#include "../fs/mount.h"
70655 +
70656 +#include <asm/uaccess.h>
70657 +#include <asm/errno.h>
70658 +#include <asm/mman.h>
70659 +
70660 +extern struct gr_policy_state *polstate;
70661 +
70662 +#define FOR_EACH_ROLE_START(role) \
70663 + role = polstate->role_list; \
70664 + while (role) {
70665 +
70666 +#define FOR_EACH_ROLE_END(role) \
70667 + role = role->prev; \
70668 + }
70669 +
70670 +struct path gr_real_root;
70671 +
70672 +extern struct gr_alloc_state *current_alloc_state;
70673 +
70674 +u16 acl_sp_role_value;
70675 +
70676 +static DEFINE_MUTEX(gr_dev_mutex);
70677 +
70678 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
70679 +extern void gr_clear_learn_entries(void);
70680 +
70681 +static struct gr_arg gr_usermode;
70682 +static unsigned char gr_system_salt[GR_SALT_LEN];
70683 +static unsigned char gr_system_sum[GR_SHA_LEN];
70684 +
70685 +static unsigned int gr_auth_attempts = 0;
70686 +static unsigned long gr_auth_expires = 0UL;
70687 +
70688 +struct acl_object_label *fakefs_obj_rw;
70689 +struct acl_object_label *fakefs_obj_rwx;
70690 +
70691 +extern int gr_init_uidset(void);
70692 +extern void gr_free_uidset(void);
70693 +extern void gr_remove_uid(uid_t uid);
70694 +extern int gr_find_uid(uid_t uid);
70695 +
70696 +extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
70697 +extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
70698 +extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
70699 +extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
70700 +extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
70701 +extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
70702 +extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
70703 +extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
70704 +extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
70705 +extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
70706 +extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
70707 +extern void assign_special_role(const char *rolename);
70708 +extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
70709 +extern int gr_rbac_disable(void *unused);
70710 +extern void gr_enable_rbac_system(void);
70711 +
70712 +static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
70713 +{
70714 + if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
70715 + return -EFAULT;
70716 +
70717 + return 0;
70718 +}
70719 +
70720 +static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
70721 +{
70722 + if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
70723 + return -EFAULT;
70724 +
70725 + return 0;
70726 +}
70727 +
70728 +static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
70729 +{
70730 + if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
70731 + return -EFAULT;
70732 +
70733 + return 0;
70734 +}
70735 +
70736 +static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
70737 +{
70738 + if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
70739 + return -EFAULT;
70740 +
70741 + return 0;
70742 +}
70743 +
70744 +static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
70745 +{
70746 + if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
70747 + return -EFAULT;
70748 +
70749 + return 0;
70750 +}
70751 +
70752 +static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
70753 +{
70754 + if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
70755 + return -EFAULT;
70756 +
70757 + return 0;
70758 +}
70759 +
70760 +static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
70761 +{
70762 + if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
70763 + return -EFAULT;
70764 +
70765 + return 0;
70766 +}
70767 +
70768 +static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
70769 +{
70770 + if (copy_from_user(trans, userp, sizeof(struct role_transition)))
70771 + return -EFAULT;
70772 +
70773 + return 0;
70774 +}
70775 +
70776 +int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
70777 +{
70778 + if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
70779 + return -EFAULT;
70780 +
70781 + return 0;
70782 +}
70783 +
70784 +static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
70785 +{
70786 + if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
70787 + return -EFAULT;
70788 +
70789 + if (((uwrap->version != GRSECURITY_VERSION) &&
70790 + (uwrap->version != 0x2901)) ||
70791 + (uwrap->size != sizeof(struct gr_arg)))
70792 + return -EINVAL;
70793 +
70794 + return 0;
70795 +}
70796 +
70797 +static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
70798 +{
70799 + if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
70800 + return -EFAULT;
70801 +
70802 + return 0;
70803 +}
70804 +
70805 +static size_t get_gr_arg_wrapper_size_normal(void)
70806 +{
70807 + return sizeof(struct gr_arg_wrapper);
70808 +}
70809 +
70810 +#ifdef CONFIG_COMPAT
70811 +extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
70812 +extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
70813 +extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
70814 +extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
70815 +extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
70816 +extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
70817 +extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
70818 +extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
70819 +extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
70820 +extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
70821 +extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
70822 +extern size_t get_gr_arg_wrapper_size_compat(void);
70823 +
70824 +int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
70825 +int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
70826 +int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
70827 +int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
70828 +int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
70829 +int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
70830 +int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
70831 +int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
70832 +int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
70833 +int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
70834 +int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
70835 +size_t (* get_gr_arg_wrapper_size)(void) __read_only;
70836 +
70837 +#else
70838 +#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
70839 +#define copy_gr_arg copy_gr_arg_normal
70840 +#define copy_gr_hash_struct copy_gr_hash_struct_normal
70841 +#define copy_acl_object_label copy_acl_object_label_normal
70842 +#define copy_acl_subject_label copy_acl_subject_label_normal
70843 +#define copy_acl_role_label copy_acl_role_label_normal
70844 +#define copy_acl_ip_label copy_acl_ip_label_normal
70845 +#define copy_pointer_from_array copy_pointer_from_array_normal
70846 +#define copy_sprole_pw copy_sprole_pw_normal
70847 +#define copy_role_transition copy_role_transition_normal
70848 +#define copy_role_allowed_ip copy_role_allowed_ip_normal
70849 +#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
70850 +#endif
70851 +
70852 +static struct acl_subject_label *
70853 +lookup_subject_map(const struct acl_subject_label *userp)
70854 +{
70855 + unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
70856 + struct subject_map *match;
70857 +
70858 + match = polstate->subj_map_set.s_hash[index];
70859 +
70860 + while (match && match->user != userp)
70861 + match = match->next;
70862 +
70863 + if (match != NULL)
70864 + return match->kernel;
70865 + else
70866 + return NULL;
70867 +}
70868 +
70869 +static void
70870 +insert_subj_map_entry(struct subject_map *subjmap)
70871 +{
70872 + unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
70873 + struct subject_map **curr;
70874 +
70875 + subjmap->prev = NULL;
70876 +
70877 + curr = &polstate->subj_map_set.s_hash[index];
70878 + if (*curr != NULL)
70879 + (*curr)->prev = subjmap;
70880 +
70881 + subjmap->next = *curr;
70882 + *curr = subjmap;
70883 +
70884 + return;
70885 +}
70886 +
70887 +static void
70888 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
70889 +{
70890 + unsigned int index =
70891 + gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
70892 + struct acl_role_label **curr;
70893 + struct acl_role_label *tmp, *tmp2;
70894 +
70895 + curr = &polstate->acl_role_set.r_hash[index];
70896 +
70897 + /* simple case, slot is empty, just set it to our role */
70898 + if (*curr == NULL) {
70899 + *curr = role;
70900 + } else {
70901 + /* example:
70902 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
70903 + 2 -> 3
70904 + */
70905 + /* first check to see if we can already be reached via this slot */
70906 + tmp = *curr;
70907 + while (tmp && tmp != role)
70908 + tmp = tmp->next;
70909 + if (tmp == role) {
70910 + /* we don't need to add ourselves to this slot's chain */
70911 + return;
70912 + }
70913 + /* we need to add ourselves to this chain, two cases */
70914 + if (role->next == NULL) {
70915 + /* simple case, append the current chain to our role */
70916 + role->next = *curr;
70917 + *curr = role;
70918 + } else {
70919 + /* 1 -> 2 -> 3 -> 4
70920 + 2 -> 3 -> 4
70921 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
70922 + */
70923 + /* trickier case: walk our role's chain until we find
70924 + the role for the start of the current slot's chain */
70925 + tmp = role;
70926 + tmp2 = *curr;
70927 + while (tmp->next && tmp->next != tmp2)
70928 + tmp = tmp->next;
70929 + if (tmp->next == tmp2) {
70930 + /* from example above, we found 3, so just
70931 + replace this slot's chain with ours */
70932 + *curr = role;
70933 + } else {
70934 + /* we didn't find a subset of our role's chain
70935 + in the current slot's chain, so append their
70936 + chain to ours, and set us as the first role in
70937 + the slot's chain
70938 +
70939 + we could fold this case with the case above,
70940 + but making it explicit for clarity
70941 + */
70942 + tmp->next = tmp2;
70943 + *curr = role;
70944 + }
70945 + }
70946 + }
70947 +
70948 + return;
70949 +}
70950 +
70951 +static void
70952 +insert_acl_role_label(struct acl_role_label *role)
70953 +{
70954 + int i;
70955 +
70956 + if (polstate->role_list == NULL) {
70957 + polstate->role_list = role;
70958 + role->prev = NULL;
70959 + } else {
70960 + role->prev = polstate->role_list;
70961 + polstate->role_list = role;
70962 + }
70963 +
70964 + /* used for hash chains */
70965 + role->next = NULL;
70966 +
70967 + if (role->roletype & GR_ROLE_DOMAIN) {
70968 + for (i = 0; i < role->domain_child_num; i++)
70969 + __insert_acl_role_label(role, role->domain_children[i]);
70970 + } else
70971 + __insert_acl_role_label(role, role->uidgid);
70972 +}
70973 +
70974 +static int
70975 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
70976 +{
70977 + struct name_entry **curr, *nentry;
70978 + struct inodev_entry *ientry;
70979 + unsigned int len = strlen(name);
70980 + unsigned int key = full_name_hash(name, len);
70981 + unsigned int index = key % polstate->name_set.n_size;
70982 +
70983 + curr = &polstate->name_set.n_hash[index];
70984 +
70985 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
70986 + curr = &((*curr)->next);
70987 +
70988 + if (*curr != NULL)
70989 + return 1;
70990 +
70991 + nentry = acl_alloc(sizeof (struct name_entry));
70992 + if (nentry == NULL)
70993 + return 0;
70994 + ientry = acl_alloc(sizeof (struct inodev_entry));
70995 + if (ientry == NULL)
70996 + return 0;
70997 + ientry->nentry = nentry;
70998 +
70999 + nentry->key = key;
71000 + nentry->name = name;
71001 + nentry->inode = inode;
71002 + nentry->device = device;
71003 + nentry->len = len;
71004 + nentry->deleted = deleted;
71005 +
71006 + nentry->prev = NULL;
71007 + curr = &polstate->name_set.n_hash[index];
71008 + if (*curr != NULL)
71009 + (*curr)->prev = nentry;
71010 + nentry->next = *curr;
71011 + *curr = nentry;
71012 +
71013 + /* insert us into the table searchable by inode/dev */
71014 + __insert_inodev_entry(polstate, ientry);
71015 +
71016 + return 1;
71017 +}
71018 +
71019 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
71020 +
71021 +static void *
71022 +create_table(__u32 * len, int elementsize)
71023 +{
71024 + unsigned int table_sizes[] = {
71025 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
71026 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
71027 + 4194301, 8388593, 16777213, 33554393, 67108859
71028 + };
71029 + void *newtable = NULL;
71030 + unsigned int pwr = 0;
71031 +
71032 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
71033 + table_sizes[pwr] <= *len)
71034 + pwr++;
71035 +
71036 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
71037 + return newtable;
71038 +
71039 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
71040 + newtable =
71041 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
71042 + else
71043 + newtable = vmalloc(table_sizes[pwr] * elementsize);
71044 +
71045 + *len = table_sizes[pwr];
71046 +
71047 + return newtable;
71048 +}
71049 +
71050 +static int
71051 +init_variables(const struct gr_arg *arg, bool reload)
71052 +{
71053 + struct task_struct *reaper = init_pid_ns.child_reaper;
71054 + unsigned int stacksize;
71055 +
71056 + polstate->subj_map_set.s_size = arg->role_db.num_subjects;
71057 + polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
71058 + polstate->name_set.n_size = arg->role_db.num_objects;
71059 + polstate->inodev_set.i_size = arg->role_db.num_objects;
71060 +
71061 + if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
71062 + !polstate->name_set.n_size || !polstate->inodev_set.i_size)
71063 + return 1;
71064 +
71065 + if (!reload) {
71066 + if (!gr_init_uidset())
71067 + return 1;
71068 + }
71069 +
71070 + /* set up the stack that holds allocation info */
71071 +
71072 + stacksize = arg->role_db.num_pointers + 5;
71073 +
71074 + if (!acl_alloc_stack_init(stacksize))
71075 + return 1;
71076 +
71077 + if (!reload) {
71078 + /* grab reference for the real root dentry and vfsmount */
71079 + get_fs_root(reaper->fs, &gr_real_root);
71080 +
71081 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71082 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
71083 +#endif
71084 +
71085 + fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
71086 + if (fakefs_obj_rw == NULL)
71087 + return 1;
71088 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
71089 +
71090 + fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
71091 + if (fakefs_obj_rwx == NULL)
71092 + return 1;
71093 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
71094 + }
71095 +
71096 + polstate->subj_map_set.s_hash =
71097 + (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
71098 + polstate->acl_role_set.r_hash =
71099 + (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
71100 + polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
71101 + polstate->inodev_set.i_hash =
71102 + (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
71103 +
71104 + if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
71105 + !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
71106 + return 1;
71107 +
71108 + memset(polstate->subj_map_set.s_hash, 0,
71109 + sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
71110 + memset(polstate->acl_role_set.r_hash, 0,
71111 + sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
71112 + memset(polstate->name_set.n_hash, 0,
71113 + sizeof (struct name_entry *) * polstate->name_set.n_size);
71114 + memset(polstate->inodev_set.i_hash, 0,
71115 + sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
71116 +
71117 + return 0;
71118 +}
71119 +
71120 +/* free information not needed after startup
71121 + currently contains user->kernel pointer mappings for subjects
71122 +*/
71123 +
71124 +static void
71125 +free_init_variables(void)
71126 +{
71127 + __u32 i;
71128 +
71129 + if (polstate->subj_map_set.s_hash) {
71130 + for (i = 0; i < polstate->subj_map_set.s_size; i++) {
71131 + if (polstate->subj_map_set.s_hash[i]) {
71132 + kfree(polstate->subj_map_set.s_hash[i]);
71133 + polstate->subj_map_set.s_hash[i] = NULL;
71134 + }
71135 + }
71136 +
71137 + if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
71138 + PAGE_SIZE)
71139 + kfree(polstate->subj_map_set.s_hash);
71140 + else
71141 + vfree(polstate->subj_map_set.s_hash);
71142 + }
71143 +
71144 + return;
71145 +}
71146 +
71147 +static void
71148 +free_variables(bool reload)
71149 +{
71150 + struct acl_subject_label *s;
71151 + struct acl_role_label *r;
71152 + struct task_struct *task, *task2;
71153 + unsigned int x;
71154 +
71155 + if (!reload) {
71156 + gr_clear_learn_entries();
71157 +
71158 + read_lock(&tasklist_lock);
71159 + do_each_thread(task2, task) {
71160 + task->acl_sp_role = 0;
71161 + task->acl_role_id = 0;
71162 + task->inherited = 0;
71163 + task->acl = NULL;
71164 + task->role = NULL;
71165 + } while_each_thread(task2, task);
71166 + read_unlock(&tasklist_lock);
71167 +
71168 + kfree(fakefs_obj_rw);
71169 + fakefs_obj_rw = NULL;
71170 + kfree(fakefs_obj_rwx);
71171 + fakefs_obj_rwx = NULL;
71172 +
71173 + /* release the reference to the real root dentry and vfsmount */
71174 + path_put(&gr_real_root);
71175 + memset(&gr_real_root, 0, sizeof(gr_real_root));
71176 + }
71177 +
71178 + /* free all object hash tables */
71179 +
71180 + FOR_EACH_ROLE_START(r)
71181 + if (r->subj_hash == NULL)
71182 + goto next_role;
71183 + FOR_EACH_SUBJECT_START(r, s, x)
71184 + if (s->obj_hash == NULL)
71185 + break;
71186 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
71187 + kfree(s->obj_hash);
71188 + else
71189 + vfree(s->obj_hash);
71190 + FOR_EACH_SUBJECT_END(s, x)
71191 + FOR_EACH_NESTED_SUBJECT_START(r, s)
71192 + if (s->obj_hash == NULL)
71193 + break;
71194 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
71195 + kfree(s->obj_hash);
71196 + else
71197 + vfree(s->obj_hash);
71198 + FOR_EACH_NESTED_SUBJECT_END(s)
71199 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
71200 + kfree(r->subj_hash);
71201 + else
71202 + vfree(r->subj_hash);
71203 + r->subj_hash = NULL;
71204 +next_role:
71205 + FOR_EACH_ROLE_END(r)
71206 +
71207 + acl_free_all();
71208 +
71209 + if (polstate->acl_role_set.r_hash) {
71210 + if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
71211 + PAGE_SIZE)
71212 + kfree(polstate->acl_role_set.r_hash);
71213 + else
71214 + vfree(polstate->acl_role_set.r_hash);
71215 + }
71216 + if (polstate->name_set.n_hash) {
71217 + if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
71218 + PAGE_SIZE)
71219 + kfree(polstate->name_set.n_hash);
71220 + else
71221 + vfree(polstate->name_set.n_hash);
71222 + }
71223 +
71224 + if (polstate->inodev_set.i_hash) {
71225 + if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
71226 + PAGE_SIZE)
71227 + kfree(polstate->inodev_set.i_hash);
71228 + else
71229 + vfree(polstate->inodev_set.i_hash);
71230 + }
71231 +
71232 + if (!reload)
71233 + gr_free_uidset();
71234 +
71235 + memset(&polstate->name_set, 0, sizeof (struct name_db));
71236 + memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
71237 + memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
71238 + memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
71239 +
71240 + polstate->default_role = NULL;
71241 + polstate->kernel_role = NULL;
71242 + polstate->role_list = NULL;
71243 +
71244 + return;
71245 +}
71246 +
71247 +static struct acl_subject_label *
71248 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
71249 +
71250 +static int alloc_and_copy_string(char **name, unsigned int maxlen)
71251 +{
71252 + unsigned int len = strnlen_user(*name, maxlen);
71253 + char *tmp;
71254 +
71255 + if (!len || len >= maxlen)
71256 + return -EINVAL;
71257 +
71258 + if ((tmp = (char *) acl_alloc(len)) == NULL)
71259 + return -ENOMEM;
71260 +
71261 + if (copy_from_user(tmp, *name, len))
71262 + return -EFAULT;
71263 +
71264 + tmp[len-1] = '\0';
71265 + *name = tmp;
71266 +
71267 + return 0;
71268 +}
71269 +
71270 +static int
71271 +copy_user_glob(struct acl_object_label *obj)
71272 +{
71273 + struct acl_object_label *g_tmp, **guser;
71274 + int error;
71275 +
71276 + if (obj->globbed == NULL)
71277 + return 0;
71278 +
71279 + guser = &obj->globbed;
71280 + while (*guser) {
71281 + g_tmp = (struct acl_object_label *)
71282 + acl_alloc(sizeof (struct acl_object_label));
71283 + if (g_tmp == NULL)
71284 + return -ENOMEM;
71285 +
71286 + if (copy_acl_object_label(g_tmp, *guser))
71287 + return -EFAULT;
71288 +
71289 + error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
71290 + if (error)
71291 + return error;
71292 +
71293 + *guser = g_tmp;
71294 + guser = &(g_tmp->next);
71295 + }
71296 +
71297 + return 0;
71298 +}
71299 +
71300 +static int
71301 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
71302 + struct acl_role_label *role)
71303 +{
71304 + struct acl_object_label *o_tmp;
71305 + int ret;
71306 +
71307 + while (userp) {
71308 + if ((o_tmp = (struct acl_object_label *)
71309 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
71310 + return -ENOMEM;
71311 +
71312 + if (copy_acl_object_label(o_tmp, userp))
71313 + return -EFAULT;
71314 +
71315 + userp = o_tmp->prev;
71316 +
71317 + ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
71318 + if (ret)
71319 + return ret;
71320 +
71321 + insert_acl_obj_label(o_tmp, subj);
71322 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
71323 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
71324 + return -ENOMEM;
71325 +
71326 + ret = copy_user_glob(o_tmp);
71327 + if (ret)
71328 + return ret;
71329 +
71330 + if (o_tmp->nested) {
71331 + int already_copied;
71332 +
71333 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
71334 + if (IS_ERR(o_tmp->nested))
71335 + return PTR_ERR(o_tmp->nested);
71336 +
71337 + /* insert into nested subject list if we haven't copied this one yet
71338 + to prevent duplicate entries */
71339 + if (!already_copied) {
71340 + o_tmp->nested->next = role->hash->first;
71341 + role->hash->first = o_tmp->nested;
71342 + }
71343 + }
71344 + }
71345 +
71346 + return 0;
71347 +}
71348 +
71349 +static __u32
71350 +count_user_subjs(struct acl_subject_label *userp)
71351 +{
71352 + struct acl_subject_label s_tmp;
71353 + __u32 num = 0;
71354 +
71355 + while (userp) {
71356 + if (copy_acl_subject_label(&s_tmp, userp))
71357 + break;
71358 +
71359 + userp = s_tmp.prev;
71360 + }
71361 +
71362 + return num;
71363 +}
71364 +
71365 +static int
71366 +copy_user_allowedips(struct acl_role_label *rolep)
71367 +{
71368 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
71369 +
71370 + ruserip = rolep->allowed_ips;
71371 +
71372 + while (ruserip) {
71373 + rlast = rtmp;
71374 +
71375 + if ((rtmp = (struct role_allowed_ip *)
71376 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
71377 + return -ENOMEM;
71378 +
71379 + if (copy_role_allowed_ip(rtmp, ruserip))
71380 + return -EFAULT;
71381 +
71382 + ruserip = rtmp->prev;
71383 +
71384 + if (!rlast) {
71385 + rtmp->prev = NULL;
71386 + rolep->allowed_ips = rtmp;
71387 + } else {
71388 + rlast->next = rtmp;
71389 + rtmp->prev = rlast;
71390 + }
71391 +
71392 + if (!ruserip)
71393 + rtmp->next = NULL;
71394 + }
71395 +
71396 + return 0;
71397 +}
71398 +
71399 +static int
71400 +copy_user_transitions(struct acl_role_label *rolep)
71401 +{
71402 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
71403 + int error;
71404 +
71405 + rusertp = rolep->transitions;
71406 +
71407 + while (rusertp) {
71408 + rlast = rtmp;
71409 +
71410 + if ((rtmp = (struct role_transition *)
71411 + acl_alloc(sizeof (struct role_transition))) == NULL)
71412 + return -ENOMEM;
71413 +
71414 + if (copy_role_transition(rtmp, rusertp))
71415 + return -EFAULT;
71416 +
71417 + rusertp = rtmp->prev;
71418 +
71419 + error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
71420 + if (error)
71421 + return error;
71422 +
71423 + if (!rlast) {
71424 + rtmp->prev = NULL;
71425 + rolep->transitions = rtmp;
71426 + } else {
71427 + rlast->next = rtmp;
71428 + rtmp->prev = rlast;
71429 + }
71430 +
71431 + if (!rusertp)
71432 + rtmp->next = NULL;
71433 + }
71434 +
71435 + return 0;
71436 +}
71437 +
71438 +static __u32 count_user_objs(const struct acl_object_label __user *userp)
71439 +{
71440 + struct acl_object_label o_tmp;
71441 + __u32 num = 0;
71442 +
71443 + while (userp) {
71444 + if (copy_acl_object_label(&o_tmp, userp))
71445 + break;
71446 +
71447 + userp = o_tmp.prev;
71448 + num++;
71449 + }
71450 +
71451 + return num;
71452 +}
71453 +
71454 +static struct acl_subject_label *
71455 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
71456 +{
71457 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
71458 + __u32 num_objs;
71459 + struct acl_ip_label **i_tmp, *i_utmp2;
71460 + struct gr_hash_struct ghash;
71461 + struct subject_map *subjmap;
71462 + unsigned int i_num;
71463 + int err;
71464 +
71465 + if (already_copied != NULL)
71466 + *already_copied = 0;
71467 +
71468 + s_tmp = lookup_subject_map(userp);
71469 +
71470 + /* we've already copied this subject into the kernel, just return
71471 + the reference to it, and don't copy it over again
71472 + */
71473 + if (s_tmp) {
71474 + if (already_copied != NULL)
71475 + *already_copied = 1;
71476 + return(s_tmp);
71477 + }
71478 +
71479 + if ((s_tmp = (struct acl_subject_label *)
71480 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
71481 + return ERR_PTR(-ENOMEM);
71482 +
71483 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
71484 + if (subjmap == NULL)
71485 + return ERR_PTR(-ENOMEM);
71486 +
71487 + subjmap->user = userp;
71488 + subjmap->kernel = s_tmp;
71489 + insert_subj_map_entry(subjmap);
71490 +
71491 + if (copy_acl_subject_label(s_tmp, userp))
71492 + return ERR_PTR(-EFAULT);
71493 +
71494 + err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
71495 + if (err)
71496 + return ERR_PTR(err);
71497 +
71498 + if (!strcmp(s_tmp->filename, "/"))
71499 + role->root_label = s_tmp;
71500 +
71501 + if (copy_gr_hash_struct(&ghash, s_tmp->hash))
71502 + return ERR_PTR(-EFAULT);
71503 +
71504 + /* copy user and group transition tables */
71505 +
71506 + if (s_tmp->user_trans_num) {
71507 + uid_t *uidlist;
71508 +
71509 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
71510 + if (uidlist == NULL)
71511 + return ERR_PTR(-ENOMEM);
71512 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
71513 + return ERR_PTR(-EFAULT);
71514 +
71515 + s_tmp->user_transitions = uidlist;
71516 + }
71517 +
71518 + if (s_tmp->group_trans_num) {
71519 + gid_t *gidlist;
71520 +
71521 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
71522 + if (gidlist == NULL)
71523 + return ERR_PTR(-ENOMEM);
71524 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
71525 + return ERR_PTR(-EFAULT);
71526 +
71527 + s_tmp->group_transitions = gidlist;
71528 + }
71529 +
71530 + /* set up object hash table */
71531 + num_objs = count_user_objs(ghash.first);
71532 +
71533 + s_tmp->obj_hash_size = num_objs;
71534 + s_tmp->obj_hash =
71535 + (struct acl_object_label **)
71536 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
71537 +
71538 + if (!s_tmp->obj_hash)
71539 + return ERR_PTR(-ENOMEM);
71540 +
71541 + memset(s_tmp->obj_hash, 0,
71542 + s_tmp->obj_hash_size *
71543 + sizeof (struct acl_object_label *));
71544 +
71545 + /* add in objects */
71546 + err = copy_user_objs(ghash.first, s_tmp, role);
71547 +
71548 + if (err)
71549 + return ERR_PTR(err);
71550 +
71551 + /* set pointer for parent subject */
71552 + if (s_tmp->parent_subject) {
71553 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
71554 +
71555 + if (IS_ERR(s_tmp2))
71556 + return s_tmp2;
71557 +
71558 + s_tmp->parent_subject = s_tmp2;
71559 + }
71560 +
71561 + /* add in ip acls */
71562 +
71563 + if (!s_tmp->ip_num) {
71564 + s_tmp->ips = NULL;
71565 + goto insert;
71566 + }
71567 +
71568 + i_tmp =
71569 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
71570 + sizeof (struct acl_ip_label *));
71571 +
71572 + if (!i_tmp)
71573 + return ERR_PTR(-ENOMEM);
71574 +
71575 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
71576 + *(i_tmp + i_num) =
71577 + (struct acl_ip_label *)
71578 + acl_alloc(sizeof (struct acl_ip_label));
71579 + if (!*(i_tmp + i_num))
71580 + return ERR_PTR(-ENOMEM);
71581 +
71582 + if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
71583 + return ERR_PTR(-EFAULT);
71584 +
71585 + if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
71586 + return ERR_PTR(-EFAULT);
71587 +
71588 + if ((*(i_tmp + i_num))->iface == NULL)
71589 + continue;
71590 +
71591 + err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
71592 + if (err)
71593 + return ERR_PTR(err);
71594 + }
71595 +
71596 + s_tmp->ips = i_tmp;
71597 +
71598 +insert:
71599 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
71600 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
71601 + return ERR_PTR(-ENOMEM);
71602 +
71603 + return s_tmp;
71604 +}
71605 +
71606 +static int
71607 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
71608 +{
71609 + struct acl_subject_label s_pre;
71610 + struct acl_subject_label * ret;
71611 + int err;
71612 +
71613 + while (userp) {
71614 + if (copy_acl_subject_label(&s_pre, userp))
71615 + return -EFAULT;
71616 +
71617 + ret = do_copy_user_subj(userp, role, NULL);
71618 +
71619 + err = PTR_ERR(ret);
71620 + if (IS_ERR(ret))
71621 + return err;
71622 +
71623 + insert_acl_subj_label(ret, role);
71624 +
71625 + userp = s_pre.prev;
71626 + }
71627 +
71628 + return 0;
71629 +}
71630 +
71631 +static int
71632 +copy_user_acl(struct gr_arg *arg)
71633 +{
71634 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
71635 + struct acl_subject_label *subj_list;
71636 + struct sprole_pw *sptmp;
71637 + struct gr_hash_struct *ghash;
71638 + uid_t *domainlist;
71639 + unsigned int r_num;
71640 + int err = 0;
71641 + __u16 i;
71642 + __u32 num_subjs;
71643 +
71644 + /* we need a default and kernel role */
71645 + if (arg->role_db.num_roles < 2)
71646 + return -EINVAL;
71647 +
71648 + /* copy special role authentication info from userspace */
71649 +
71650 + polstate->num_sprole_pws = arg->num_sprole_pws;
71651 + polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
71652 +
71653 + if (!polstate->acl_special_roles && polstate->num_sprole_pws)
71654 + return -ENOMEM;
71655 +
71656 + for (i = 0; i < polstate->num_sprole_pws; i++) {
71657 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
71658 + if (!sptmp)
71659 + return -ENOMEM;
71660 + if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
71661 + return -EFAULT;
71662 +
71663 + err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
71664 + if (err)
71665 + return err;
71666 +
71667 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71668 + printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
71669 +#endif
71670 +
71671 + polstate->acl_special_roles[i] = sptmp;
71672 + }
71673 +
71674 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
71675 +
71676 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
71677 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
71678 +
71679 + if (!r_tmp)
71680 + return -ENOMEM;
71681 +
71682 + if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
71683 + return -EFAULT;
71684 +
71685 + if (copy_acl_role_label(r_tmp, r_utmp2))
71686 + return -EFAULT;
71687 +
71688 + err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
71689 + if (err)
71690 + return err;
71691 +
71692 + if (!strcmp(r_tmp->rolename, "default")
71693 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
71694 + polstate->default_role = r_tmp;
71695 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
71696 + polstate->kernel_role = r_tmp;
71697 + }
71698 +
71699 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
71700 + return -ENOMEM;
71701 +
71702 + if (copy_gr_hash_struct(ghash, r_tmp->hash))
71703 + return -EFAULT;
71704 +
71705 + r_tmp->hash = ghash;
71706 +
71707 + num_subjs = count_user_subjs(r_tmp->hash->first);
71708 +
71709 + r_tmp->subj_hash_size = num_subjs;
71710 + r_tmp->subj_hash =
71711 + (struct acl_subject_label **)
71712 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
71713 +
71714 + if (!r_tmp->subj_hash)
71715 + return -ENOMEM;
71716 +
71717 + err = copy_user_allowedips(r_tmp);
71718 + if (err)
71719 + return err;
71720 +
71721 + /* copy domain info */
71722 + if (r_tmp->domain_children != NULL) {
71723 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
71724 + if (domainlist == NULL)
71725 + return -ENOMEM;
71726 +
71727 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
71728 + return -EFAULT;
71729 +
71730 + r_tmp->domain_children = domainlist;
71731 + }
71732 +
71733 + err = copy_user_transitions(r_tmp);
71734 + if (err)
71735 + return err;
71736 +
71737 + memset(r_tmp->subj_hash, 0,
71738 + r_tmp->subj_hash_size *
71739 + sizeof (struct acl_subject_label *));
71740 +
71741 + /* acquire the list of subjects, then NULL out
71742 + the list prior to parsing the subjects for this role,
71743 + as during this parsing the list is replaced with a list
71744 + of *nested* subjects for the role
71745 + */
71746 + subj_list = r_tmp->hash->first;
71747 +
71748 + /* set nested subject list to null */
71749 + r_tmp->hash->first = NULL;
71750 +
71751 + err = copy_user_subjs(subj_list, r_tmp);
71752 +
71753 + if (err)
71754 + return err;
71755 +
71756 + insert_acl_role_label(r_tmp);
71757 + }
71758 +
71759 + if (polstate->default_role == NULL || polstate->kernel_role == NULL)
71760 + return -EINVAL;
71761 +
71762 + return err;
71763 +}
71764 +
71765 +static int gracl_reload_apply_policies(void *reload)
71766 +{
71767 + struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
71768 + struct task_struct *task, *task2;
71769 + struct acl_role_label *role, *rtmp;
71770 + struct acl_subject_label *subj;
71771 + const struct cred *cred;
71772 + int role_applied;
71773 + int ret = 0;
71774 +
71775 + memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
71776 + memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
71777 +
71778 + /* first make sure we'll be able to apply the new policy cleanly */
71779 + do_each_thread(task2, task) {
71780 + if (task->exec_file == NULL)
71781 + continue;
71782 + role_applied = 0;
71783 + if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
71784 + /* preserve special roles */
71785 + FOR_EACH_ROLE_START(role)
71786 + if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
71787 + rtmp = task->role;
71788 + task->role = role;
71789 + role_applied = 1;
71790 + break;
71791 + }
71792 + FOR_EACH_ROLE_END(role)
71793 + }
71794 + if (!role_applied) {
71795 + cred = __task_cred(task);
71796 + rtmp = task->role;
71797 + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
71798 + }
71799 + /* this handles non-nested inherited subjects, nested subjects will still
71800 + be dropped currently */
71801 + subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
71802 + task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
71803 + /* change the role back so that we've made no modifications to the policy */
71804 + task->role = rtmp;
71805 +
71806 + if (subj == NULL || task->tmpacl == NULL) {
71807 + ret = -EINVAL;
71808 + goto out;
71809 + }
71810 + } while_each_thread(task2, task);
71811 +
71812 + /* now actually apply the policy */
71813 +
71814 + do_each_thread(task2, task) {
71815 + if (task->exec_file) {
71816 + role_applied = 0;
71817 + if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
71818 + /* preserve special roles */
71819 + FOR_EACH_ROLE_START(role)
71820 + if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
71821 + task->role = role;
71822 + role_applied = 1;
71823 + break;
71824 + }
71825 + FOR_EACH_ROLE_END(role)
71826 + }
71827 + if (!role_applied) {
71828 + cred = __task_cred(task);
71829 + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
71830 + }
71831 + /* this handles non-nested inherited subjects, nested subjects will still
71832 + be dropped currently */
71833 + if (!reload_state->oldmode && task->inherited)
71834 + subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
71835 + else {
71836 + /* looked up and tagged to the task previously */
71837 + subj = task->tmpacl;
71838 + }
71839 + /* subj will be non-null */
71840 + __gr_apply_subject_to_task(polstate, task, subj);
71841 + if (reload_state->oldmode) {
71842 + task->acl_role_id = 0;
71843 + task->acl_sp_role = 0;
71844 + task->inherited = 0;
71845 + }
71846 + } else {
71847 + // it's a kernel process
71848 + task->role = polstate->kernel_role;
71849 + task->acl = polstate->kernel_role->root_label;
71850 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
71851 + task->acl->mode &= ~GR_PROCFIND;
71852 +#endif
71853 + }
71854 + } while_each_thread(task2, task);
71855 +
71856 + memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
71857 + memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
71858 +
71859 +out:
71860 +
71861 + return ret;
71862 +}
71863 +
71864 +static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
71865 +{
71866 + struct gr_reload_state new_reload_state = { };
71867 + int err;
71868 +
71869 + new_reload_state.oldpolicy_ptr = polstate;
71870 + new_reload_state.oldalloc_ptr = current_alloc_state;
71871 + new_reload_state.oldmode = oldmode;
71872 +
71873 + current_alloc_state = &new_reload_state.newalloc;
71874 + polstate = &new_reload_state.newpolicy;
71875 +
71876 + /* everything relevant is now saved off, copy in the new policy */
71877 + if (init_variables(args, true)) {
71878 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
71879 + err = -ENOMEM;
71880 + goto error;
71881 + }
71882 +
71883 + err = copy_user_acl(args);
71884 + free_init_variables();
71885 + if (err)
71886 + goto error;
71887 + /* the new policy is copied in, with the old policy available via saved_state
71888 + first go through applying roles, making sure to preserve special roles
71889 + then apply new subjects, making sure to preserve inherited and nested subjects,
71890 + though currently only inherited subjects will be preserved
71891 + */
71892 + err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
71893 + if (err)
71894 + goto error;
71895 +
71896 + /* we've now applied the new policy, so restore the old policy state to free it */
71897 + polstate = &new_reload_state.oldpolicy;
71898 + current_alloc_state = &new_reload_state.oldalloc;
71899 + free_variables(true);
71900 +
71901 + /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
71902 + to running_polstate/current_alloc_state inside stop_machine
71903 + */
71904 + err = 0;
71905 + goto out;
71906 +error:
71907 + /* on error of loading the new policy, we'll just keep the previous
71908 + policy set around
71909 + */
71910 + free_variables(true);
71911 +
71912 + /* doesn't affect runtime, but maintains consistent state */
71913 +out:
71914 + polstate = new_reload_state.oldpolicy_ptr;
71915 + current_alloc_state = new_reload_state.oldalloc_ptr;
71916 +
71917 + return err;
71918 +}
71919 +
71920 +static int
71921 +gracl_init(struct gr_arg *args)
71922 +{
71923 + int error = 0;
71924 +
71925 + memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
71926 + memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
71927 +
71928 + if (init_variables(args, false)) {
71929 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
71930 + error = -ENOMEM;
71931 + goto out;
71932 + }
71933 +
71934 + error = copy_user_acl(args);
71935 + free_init_variables();
71936 + if (error)
71937 + goto out;
71938 +
71939 + error = gr_set_acls(0);
71940 + if (error)
71941 + goto out;
71942 +
71943 + gr_enable_rbac_system();
71944 +
71945 + return 0;
71946 +
71947 +out:
71948 + free_variables(false);
71949 + return error;
71950 +}
71951 +
71952 +static int
71953 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
71954 + unsigned char **sum)
71955 +{
71956 + struct acl_role_label *r;
71957 + struct role_allowed_ip *ipp;
71958 + struct role_transition *trans;
71959 + unsigned int i;
71960 + int found = 0;
71961 + u32 curr_ip = current->signal->curr_ip;
71962 +
71963 + current->signal->saved_ip = curr_ip;
71964 +
71965 + /* check transition table */
71966 +
71967 + for (trans = current->role->transitions; trans; trans = trans->next) {
71968 + if (!strcmp(rolename, trans->rolename)) {
71969 + found = 1;
71970 + break;
71971 + }
71972 + }
71973 +
71974 + if (!found)
71975 + return 0;
71976 +
71977 + /* handle special roles that do not require authentication
71978 + and check ip */
71979 +
71980 + FOR_EACH_ROLE_START(r)
71981 + if (!strcmp(rolename, r->rolename) &&
71982 + (r->roletype & GR_ROLE_SPECIAL)) {
71983 + found = 0;
71984 + if (r->allowed_ips != NULL) {
71985 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
71986 + if ((ntohl(curr_ip) & ipp->netmask) ==
71987 + (ntohl(ipp->addr) & ipp->netmask))
71988 + found = 1;
71989 + }
71990 + } else
71991 + found = 2;
71992 + if (!found)
71993 + return 0;
71994 +
71995 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
71996 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
71997 + *salt = NULL;
71998 + *sum = NULL;
71999 + return 1;
72000 + }
72001 + }
72002 + FOR_EACH_ROLE_END(r)
72003 +
72004 + for (i = 0; i < polstate->num_sprole_pws; i++) {
72005 + if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
72006 + *salt = polstate->acl_special_roles[i]->salt;
72007 + *sum = polstate->acl_special_roles[i]->sum;
72008 + return 1;
72009 + }
72010 + }
72011 +
72012 + return 0;
72013 +}
72014 +
72015 +int gr_check_secure_terminal(struct task_struct *task)
72016 +{
72017 + struct task_struct *p, *p2, *p3;
72018 + struct files_struct *files;
72019 + struct fdtable *fdt;
72020 + struct file *our_file = NULL, *file;
72021 + int i;
72022 +
72023 + if (task->signal->tty == NULL)
72024 + return 1;
72025 +
72026 + files = get_files_struct(task);
72027 + if (files != NULL) {
72028 + rcu_read_lock();
72029 + fdt = files_fdtable(files);
72030 + for (i=0; i < fdt->max_fds; i++) {
72031 + file = fcheck_files(files, i);
72032 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
72033 + get_file(file);
72034 + our_file = file;
72035 + }
72036 + }
72037 + rcu_read_unlock();
72038 + put_files_struct(files);
72039 + }
72040 +
72041 + if (our_file == NULL)
72042 + return 1;
72043 +
72044 + read_lock(&tasklist_lock);
72045 + do_each_thread(p2, p) {
72046 + files = get_files_struct(p);
72047 + if (files == NULL ||
72048 + (p->signal && p->signal->tty == task->signal->tty)) {
72049 + if (files != NULL)
72050 + put_files_struct(files);
72051 + continue;
72052 + }
72053 + rcu_read_lock();
72054 + fdt = files_fdtable(files);
72055 + for (i=0; i < fdt->max_fds; i++) {
72056 + file = fcheck_files(files, i);
72057 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
72058 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
72059 + p3 = task;
72060 + while (task_pid_nr(p3) > 0) {
72061 + if (p3 == p)
72062 + break;
72063 + p3 = p3->real_parent;
72064 + }
72065 + if (p3 == p)
72066 + break;
72067 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
72068 + gr_handle_alertkill(p);
72069 + rcu_read_unlock();
72070 + put_files_struct(files);
72071 + read_unlock(&tasklist_lock);
72072 + fput(our_file);
72073 + return 0;
72074 + }
72075 + }
72076 + rcu_read_unlock();
72077 + put_files_struct(files);
72078 + } while_each_thread(p2, p);
72079 + read_unlock(&tasklist_lock);
72080 +
72081 + fput(our_file);
72082 + return 1;
72083 +}
72084 +
72085 +ssize_t
72086 +write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
72087 +{
72088 + struct gr_arg_wrapper uwrap;
72089 + unsigned char *sprole_salt = NULL;
72090 + unsigned char *sprole_sum = NULL;
72091 + int error = 0;
72092 + int error2 = 0;
72093 + size_t req_count = 0;
72094 + unsigned char oldmode = 0;
72095 +
72096 + mutex_lock(&gr_dev_mutex);
72097 +
72098 + if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
72099 + error = -EPERM;
72100 + goto out;
72101 + }
72102 +
72103 +#ifdef CONFIG_COMPAT
72104 + pax_open_kernel();
72105 + if (is_compat_task()) {
72106 + copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
72107 + copy_gr_arg = &copy_gr_arg_compat;
72108 + copy_acl_object_label = &copy_acl_object_label_compat;
72109 + copy_acl_subject_label = &copy_acl_subject_label_compat;
72110 + copy_acl_role_label = &copy_acl_role_label_compat;
72111 + copy_acl_ip_label = &copy_acl_ip_label_compat;
72112 + copy_role_allowed_ip = &copy_role_allowed_ip_compat;
72113 + copy_role_transition = &copy_role_transition_compat;
72114 + copy_sprole_pw = &copy_sprole_pw_compat;
72115 + copy_gr_hash_struct = &copy_gr_hash_struct_compat;
72116 + copy_pointer_from_array = &copy_pointer_from_array_compat;
72117 + get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
72118 + } else {
72119 + copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
72120 + copy_gr_arg = &copy_gr_arg_normal;
72121 + copy_acl_object_label = &copy_acl_object_label_normal;
72122 + copy_acl_subject_label = &copy_acl_subject_label_normal;
72123 + copy_acl_role_label = &copy_acl_role_label_normal;
72124 + copy_acl_ip_label = &copy_acl_ip_label_normal;
72125 + copy_role_allowed_ip = &copy_role_allowed_ip_normal;
72126 + copy_role_transition = &copy_role_transition_normal;
72127 + copy_sprole_pw = &copy_sprole_pw_normal;
72128 + copy_gr_hash_struct = &copy_gr_hash_struct_normal;
72129 + copy_pointer_from_array = &copy_pointer_from_array_normal;
72130 + get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
72131 + }
72132 + pax_close_kernel();
72133 +#endif
72134 +
72135 + req_count = get_gr_arg_wrapper_size();
72136 +
72137 + if (count != req_count) {
72138 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
72139 + error = -EINVAL;
72140 + goto out;
72141 + }
72142 +
72143 +
72144 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
72145 + gr_auth_expires = 0;
72146 + gr_auth_attempts = 0;
72147 + }
72148 +
72149 + error = copy_gr_arg_wrapper(buf, &uwrap);
72150 + if (error)
72151 + goto out;
72152 +
72153 + error = copy_gr_arg(uwrap.arg, &gr_usermode);
72154 + if (error)
72155 + goto out;
72156 +
72157 + if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
72158 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
72159 + time_after(gr_auth_expires, get_seconds())) {
72160 + error = -EBUSY;
72161 + goto out;
72162 + }
72163 +
72164 + /* if non-root trying to do anything other than use a special role,
72165 + do not attempt authentication, do not count towards authentication
72166 + locking
72167 + */
72168 +
72169 + if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
72170 + gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
72171 + gr_is_global_nonroot(current_uid())) {
72172 + error = -EPERM;
72173 + goto out;
72174 + }
72175 +
72176 + /* ensure pw and special role name are null terminated */
72177 +
72178 + gr_usermode.pw[GR_PW_LEN - 1] = '\0';
72179 + gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
72180 +
72181 + /* Okay.
72182 + * We have our enough of the argument structure..(we have yet
72183 + * to copy_from_user the tables themselves) . Copy the tables
72184 + * only if we need them, i.e. for loading operations. */
72185 +
72186 + switch (gr_usermode.mode) {
72187 + case GR_STATUS:
72188 + if (gr_acl_is_enabled()) {
72189 + error = 1;
72190 + if (!gr_check_secure_terminal(current))
72191 + error = 3;
72192 + } else
72193 + error = 2;
72194 + goto out;
72195 + case GR_SHUTDOWN:
72196 + if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
72197 + stop_machine(gr_rbac_disable, NULL, NULL);
72198 + free_variables(false);
72199 + memset(&gr_usermode, 0, sizeof(gr_usermode));
72200 + memset(&gr_system_salt, 0, sizeof(gr_system_salt));
72201 + memset(&gr_system_sum, 0, sizeof(gr_system_sum));
72202 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
72203 + } else if (gr_acl_is_enabled()) {
72204 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
72205 + error = -EPERM;
72206 + } else {
72207 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
72208 + error = -EAGAIN;
72209 + }
72210 + break;
72211 + case GR_ENABLE:
72212 + if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
72213 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
72214 + else {
72215 + if (gr_acl_is_enabled())
72216 + error = -EAGAIN;
72217 + else
72218 + error = error2;
72219 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
72220 + }
72221 + break;
72222 + case GR_OLDRELOAD:
72223 + oldmode = 1;
72224 + case GR_RELOAD:
72225 + if (!gr_acl_is_enabled()) {
72226 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
72227 + error = -EAGAIN;
72228 + } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
72229 + error2 = gracl_reload(&gr_usermode, oldmode);
72230 + if (!error2)
72231 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
72232 + else {
72233 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
72234 + error = error2;
72235 + }
72236 + } else {
72237 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
72238 + error = -EPERM;
72239 + }
72240 + break;
72241 + case GR_SEGVMOD:
72242 + if (unlikely(!gr_acl_is_enabled())) {
72243 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
72244 + error = -EAGAIN;
72245 + break;
72246 + }
72247 +
72248 + if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
72249 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
72250 + if (gr_usermode.segv_device && gr_usermode.segv_inode) {
72251 + struct acl_subject_label *segvacl;
72252 + segvacl =
72253 + lookup_acl_subj_label(gr_usermode.segv_inode,
72254 + gr_usermode.segv_device,
72255 + current->role);
72256 + if (segvacl) {
72257 + segvacl->crashes = 0;
72258 + segvacl->expires = 0;
72259 + }
72260 + } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
72261 + gr_remove_uid(gr_usermode.segv_uid);
72262 + }
72263 + } else {
72264 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
72265 + error = -EPERM;
72266 + }
72267 + break;
72268 + case GR_SPROLE:
72269 + case GR_SPROLEPAM:
72270 + if (unlikely(!gr_acl_is_enabled())) {
72271 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
72272 + error = -EAGAIN;
72273 + break;
72274 + }
72275 +
72276 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
72277 + current->role->expires = 0;
72278 + current->role->auth_attempts = 0;
72279 + }
72280 +
72281 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
72282 + time_after(current->role->expires, get_seconds())) {
72283 + error = -EBUSY;
72284 + goto out;
72285 + }
72286 +
72287 + if (lookup_special_role_auth
72288 + (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
72289 + && ((!sprole_salt && !sprole_sum)
72290 + || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
72291 + char *p = "";
72292 + assign_special_role(gr_usermode.sp_role);
72293 + read_lock(&tasklist_lock);
72294 + if (current->real_parent)
72295 + p = current->real_parent->role->rolename;
72296 + read_unlock(&tasklist_lock);
72297 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
72298 + p, acl_sp_role_value);
72299 + } else {
72300 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
72301 + error = -EPERM;
72302 + if(!(current->role->auth_attempts++))
72303 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
72304 +
72305 + goto out;
72306 + }
72307 + break;
72308 + case GR_UNSPROLE:
72309 + if (unlikely(!gr_acl_is_enabled())) {
72310 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
72311 + error = -EAGAIN;
72312 + break;
72313 + }
72314 +
72315 + if (current->role->roletype & GR_ROLE_SPECIAL) {
72316 + char *p = "";
72317 + int i = 0;
72318 +
72319 + read_lock(&tasklist_lock);
72320 + if (current->real_parent) {
72321 + p = current->real_parent->role->rolename;
72322 + i = current->real_parent->acl_role_id;
72323 + }
72324 + read_unlock(&tasklist_lock);
72325 +
72326 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
72327 + gr_set_acls(1);
72328 + } else {
72329 + error = -EPERM;
72330 + goto out;
72331 + }
72332 + break;
72333 + default:
72334 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
72335 + error = -EINVAL;
72336 + break;
72337 + }
72338 +
72339 + if (error != -EPERM)
72340 + goto out;
72341 +
72342 + if(!(gr_auth_attempts++))
72343 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
72344 +
72345 + out:
72346 + mutex_unlock(&gr_dev_mutex);
72347 +
72348 + if (!error)
72349 + error = req_count;
72350 +
72351 + return error;
72352 +}
72353 +
72354 +int
72355 +gr_set_acls(const int type)
72356 +{
72357 + struct task_struct *task, *task2;
72358 + struct acl_role_label *role = current->role;
72359 + struct acl_subject_label *subj;
72360 + __u16 acl_role_id = current->acl_role_id;
72361 + const struct cred *cred;
72362 + int ret;
72363 +
72364 + rcu_read_lock();
72365 + read_lock(&tasklist_lock);
72366 + read_lock(&grsec_exec_file_lock);
72367 + do_each_thread(task2, task) {
72368 + /* check to see if we're called from the exit handler,
72369 + if so, only replace ACLs that have inherited the admin
72370 + ACL */
72371 +
72372 + if (type && (task->role != role ||
72373 + task->acl_role_id != acl_role_id))
72374 + continue;
72375 +
72376 + task->acl_role_id = 0;
72377 + task->acl_sp_role = 0;
72378 + task->inherited = 0;
72379 +
72380 + if (task->exec_file) {
72381 + cred = __task_cred(task);
72382 + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
72383 + subj = __gr_get_subject_for_task(polstate, task, NULL);
72384 + if (subj == NULL) {
72385 + ret = -EINVAL;
72386 + read_unlock(&grsec_exec_file_lock);
72387 + read_unlock(&tasklist_lock);
72388 + rcu_read_unlock();
72389 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
72390 + return ret;
72391 + }
72392 + __gr_apply_subject_to_task(polstate, task, subj);
72393 + } else {
72394 + // it's a kernel process
72395 + task->role = polstate->kernel_role;
72396 + task->acl = polstate->kernel_role->root_label;
72397 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
72398 + task->acl->mode &= ~GR_PROCFIND;
72399 +#endif
72400 + }
72401 + } while_each_thread(task2, task);
72402 + read_unlock(&grsec_exec_file_lock);
72403 + read_unlock(&tasklist_lock);
72404 + rcu_read_unlock();
72405 +
72406 + return 0;
72407 +}
72408 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
72409 new file mode 100644
72410 index 0000000..39645c9
72411 --- /dev/null
72412 +++ b/grsecurity/gracl_res.c
72413 @@ -0,0 +1,68 @@
72414 +#include <linux/kernel.h>
72415 +#include <linux/sched.h>
72416 +#include <linux/gracl.h>
72417 +#include <linux/grinternal.h>
72418 +
72419 +static const char *restab_log[] = {
72420 + [RLIMIT_CPU] = "RLIMIT_CPU",
72421 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
72422 + [RLIMIT_DATA] = "RLIMIT_DATA",
72423 + [RLIMIT_STACK] = "RLIMIT_STACK",
72424 + [RLIMIT_CORE] = "RLIMIT_CORE",
72425 + [RLIMIT_RSS] = "RLIMIT_RSS",
72426 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
72427 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
72428 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
72429 + [RLIMIT_AS] = "RLIMIT_AS",
72430 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
72431 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
72432 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
72433 + [RLIMIT_NICE] = "RLIMIT_NICE",
72434 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
72435 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
72436 + [GR_CRASH_RES] = "RLIMIT_CRASH"
72437 +};
72438 +
72439 +void
72440 +gr_log_resource(const struct task_struct *task,
72441 + const int res, const unsigned long wanted, const int gt)
72442 +{
72443 + const struct cred *cred;
72444 + unsigned long rlim;
72445 +
72446 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
72447 + return;
72448 +
72449 + // not yet supported resource
72450 + if (unlikely(!restab_log[res]))
72451 + return;
72452 +
72453 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
72454 + rlim = task_rlimit_max(task, res);
72455 + else
72456 + rlim = task_rlimit(task, res);
72457 +
72458 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
72459 + return;
72460 +
72461 + rcu_read_lock();
72462 + cred = __task_cred(task);
72463 +
72464 + if (res == RLIMIT_NPROC &&
72465 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
72466 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
72467 + goto out_rcu_unlock;
72468 + else if (res == RLIMIT_MEMLOCK &&
72469 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
72470 + goto out_rcu_unlock;
72471 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
72472 + goto out_rcu_unlock;
72473 + rcu_read_unlock();
72474 +
72475 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
72476 +
72477 + return;
72478 +out_rcu_unlock:
72479 + rcu_read_unlock();
72480 + return;
72481 +}
72482 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
72483 new file mode 100644
72484 index 0000000..2040e61
72485 --- /dev/null
72486 +++ b/grsecurity/gracl_segv.c
72487 @@ -0,0 +1,313 @@
72488 +#include <linux/kernel.h>
72489 +#include <linux/mm.h>
72490 +#include <asm/uaccess.h>
72491 +#include <asm/errno.h>
72492 +#include <asm/mman.h>
72493 +#include <net/sock.h>
72494 +#include <linux/file.h>
72495 +#include <linux/fs.h>
72496 +#include <linux/net.h>
72497 +#include <linux/in.h>
72498 +#include <linux/slab.h>
72499 +#include <linux/types.h>
72500 +#include <linux/sched.h>
72501 +#include <linux/timer.h>
72502 +#include <linux/gracl.h>
72503 +#include <linux/grsecurity.h>
72504 +#include <linux/grinternal.h>
72505 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
72506 +#include <linux/magic.h>
72507 +#include <linux/pagemap.h>
72508 +#include "../fs/btrfs/async-thread.h"
72509 +#include "../fs/btrfs/ctree.h"
72510 +#include "../fs/btrfs/btrfs_inode.h"
72511 +#endif
72512 +
72513 +static struct crash_uid *uid_set;
72514 +static unsigned short uid_used;
72515 +static DEFINE_SPINLOCK(gr_uid_lock);
72516 +extern rwlock_t gr_inode_lock;
72517 +extern struct acl_subject_label *
72518 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
72519 + struct acl_role_label *role);
72520 +
72521 +static inline dev_t __get_dev(const struct dentry *dentry)
72522 +{
72523 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
72524 + if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
72525 + return BTRFS_I(dentry->d_inode)->root->anon_dev;
72526 + else
72527 +#endif
72528 + return dentry->d_sb->s_dev;
72529 +}
72530 +
72531 +int
72532 +gr_init_uidset(void)
72533 +{
72534 + uid_set =
72535 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
72536 + uid_used = 0;
72537 +
72538 + return uid_set ? 1 : 0;
72539 +}
72540 +
72541 +void
72542 +gr_free_uidset(void)
72543 +{
72544 + if (uid_set) {
72545 + struct crash_uid *tmpset;
72546 + spin_lock(&gr_uid_lock);
72547 + tmpset = uid_set;
72548 + uid_set = NULL;
72549 + uid_used = 0;
72550 + spin_unlock(&gr_uid_lock);
72551 + if (tmpset)
72552 + kfree(tmpset);
72553 + }
72554 +
72555 + return;
72556 +}
72557 +
72558 +int
72559 +gr_find_uid(const uid_t uid)
72560 +{
72561 + struct crash_uid *tmp = uid_set;
72562 + uid_t buid;
72563 + int low = 0, high = uid_used - 1, mid;
72564 +
72565 + while (high >= low) {
72566 + mid = (low + high) >> 1;
72567 + buid = tmp[mid].uid;
72568 + if (buid == uid)
72569 + return mid;
72570 + if (buid > uid)
72571 + high = mid - 1;
72572 + if (buid < uid)
72573 + low = mid + 1;
72574 + }
72575 +
72576 + return -1;
72577 +}
72578 +
72579 +static __inline__ void
72580 +gr_insertsort(void)
72581 +{
72582 + unsigned short i, j;
72583 + struct crash_uid index;
72584 +
72585 + for (i = 1; i < uid_used; i++) {
72586 + index = uid_set[i];
72587 + j = i;
72588 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
72589 + uid_set[j] = uid_set[j - 1];
72590 + j--;
72591 + }
72592 + uid_set[j] = index;
72593 + }
72594 +
72595 + return;
72596 +}
72597 +
72598 +static __inline__ void
72599 +gr_insert_uid(const kuid_t kuid, const unsigned long expires)
72600 +{
72601 + int loc;
72602 + uid_t uid = GR_GLOBAL_UID(kuid);
72603 +
72604 + if (uid_used == GR_UIDTABLE_MAX)
72605 + return;
72606 +
72607 + loc = gr_find_uid(uid);
72608 +
72609 + if (loc >= 0) {
72610 + uid_set[loc].expires = expires;
72611 + return;
72612 + }
72613 +
72614 + uid_set[uid_used].uid = uid;
72615 + uid_set[uid_used].expires = expires;
72616 + uid_used++;
72617 +
72618 + gr_insertsort();
72619 +
72620 + return;
72621 +}
72622 +
72623 +void
72624 +gr_remove_uid(const unsigned short loc)
72625 +{
72626 + unsigned short i;
72627 +
72628 + for (i = loc + 1; i < uid_used; i++)
72629 + uid_set[i - 1] = uid_set[i];
72630 +
72631 + uid_used--;
72632 +
72633 + return;
72634 +}
72635 +
72636 +int
72637 +gr_check_crash_uid(const kuid_t kuid)
72638 +{
72639 + int loc;
72640 + int ret = 0;
72641 + uid_t uid;
72642 +
72643 + if (unlikely(!gr_acl_is_enabled()))
72644 + return 0;
72645 +
72646 + uid = GR_GLOBAL_UID(kuid);
72647 +
72648 + spin_lock(&gr_uid_lock);
72649 + loc = gr_find_uid(uid);
72650 +
72651 + if (loc < 0)
72652 + goto out_unlock;
72653 +
72654 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
72655 + gr_remove_uid(loc);
72656 + else
72657 + ret = 1;
72658 +
72659 +out_unlock:
72660 + spin_unlock(&gr_uid_lock);
72661 + return ret;
72662 +}
72663 +
72664 +static __inline__ int
72665 +proc_is_setxid(const struct cred *cred)
72666 +{
72667 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
72668 + !uid_eq(cred->uid, cred->fsuid))
72669 + return 1;
72670 + if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
72671 + !gid_eq(cred->gid, cred->fsgid))
72672 + return 1;
72673 +
72674 + return 0;
72675 +}
72676 +
72677 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
72678 +
72679 +void
72680 +gr_handle_crash(struct task_struct *task, const int sig)
72681 +{
72682 + struct acl_subject_label *curr;
72683 + struct task_struct *tsk, *tsk2;
72684 + const struct cred *cred;
72685 + const struct cred *cred2;
72686 +
72687 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
72688 + return;
72689 +
72690 + if (unlikely(!gr_acl_is_enabled()))
72691 + return;
72692 +
72693 + curr = task->acl;
72694 +
72695 + if (!(curr->resmask & (1U << GR_CRASH_RES)))
72696 + return;
72697 +
72698 + if (time_before_eq(curr->expires, get_seconds())) {
72699 + curr->expires = 0;
72700 + curr->crashes = 0;
72701 + }
72702 +
72703 + curr->crashes++;
72704 +
72705 + if (!curr->expires)
72706 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
72707 +
72708 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
72709 + time_after(curr->expires, get_seconds())) {
72710 + rcu_read_lock();
72711 + cred = __task_cred(task);
72712 + if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
72713 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
72714 + spin_lock(&gr_uid_lock);
72715 + gr_insert_uid(cred->uid, curr->expires);
72716 + spin_unlock(&gr_uid_lock);
72717 + curr->expires = 0;
72718 + curr->crashes = 0;
72719 + read_lock(&tasklist_lock);
72720 + do_each_thread(tsk2, tsk) {
72721 + cred2 = __task_cred(tsk);
72722 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
72723 + gr_fake_force_sig(SIGKILL, tsk);
72724 + } while_each_thread(tsk2, tsk);
72725 + read_unlock(&tasklist_lock);
72726 + } else {
72727 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
72728 + read_lock(&tasklist_lock);
72729 + read_lock(&grsec_exec_file_lock);
72730 + do_each_thread(tsk2, tsk) {
72731 + if (likely(tsk != task)) {
72732 + // if this thread has the same subject as the one that triggered
72733 + // RES_CRASH and it's the same binary, kill it
72734 + if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
72735 + gr_fake_force_sig(SIGKILL, tsk);
72736 + }
72737 + } while_each_thread(tsk2, tsk);
72738 + read_unlock(&grsec_exec_file_lock);
72739 + read_unlock(&tasklist_lock);
72740 + }
72741 + rcu_read_unlock();
72742 + }
72743 +
72744 + return;
72745 +}
72746 +
72747 +int
72748 +gr_check_crash_exec(const struct file *filp)
72749 +{
72750 + struct acl_subject_label *curr;
72751 +
72752 + if (unlikely(!gr_acl_is_enabled()))
72753 + return 0;
72754 +
72755 + read_lock(&gr_inode_lock);
72756 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
72757 + __get_dev(filp->f_path.dentry),
72758 + current->role);
72759 + read_unlock(&gr_inode_lock);
72760 +
72761 + if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
72762 + (!curr->crashes && !curr->expires))
72763 + return 0;
72764 +
72765 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
72766 + time_after(curr->expires, get_seconds()))
72767 + return 1;
72768 + else if (time_before_eq(curr->expires, get_seconds())) {
72769 + curr->crashes = 0;
72770 + curr->expires = 0;
72771 + }
72772 +
72773 + return 0;
72774 +}
72775 +
72776 +void
72777 +gr_handle_alertkill(struct task_struct *task)
72778 +{
72779 + struct acl_subject_label *curracl;
72780 + __u32 curr_ip;
72781 + struct task_struct *p, *p2;
72782 +
72783 + if (unlikely(!gr_acl_is_enabled()))
72784 + return;
72785 +
72786 + curracl = task->acl;
72787 + curr_ip = task->signal->curr_ip;
72788 +
72789 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
72790 + read_lock(&tasklist_lock);
72791 + do_each_thread(p2, p) {
72792 + if (p->signal->curr_ip == curr_ip)
72793 + gr_fake_force_sig(SIGKILL, p);
72794 + } while_each_thread(p2, p);
72795 + read_unlock(&tasklist_lock);
72796 + } else if (curracl->mode & GR_KILLPROC)
72797 + gr_fake_force_sig(SIGKILL, task);
72798 +
72799 + return;
72800 +}
72801 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
72802 new file mode 100644
72803 index 0000000..98011b0
72804 --- /dev/null
72805 +++ b/grsecurity/gracl_shm.c
72806 @@ -0,0 +1,40 @@
72807 +#include <linux/kernel.h>
72808 +#include <linux/mm.h>
72809 +#include <linux/sched.h>
72810 +#include <linux/file.h>
72811 +#include <linux/ipc.h>
72812 +#include <linux/gracl.h>
72813 +#include <linux/grsecurity.h>
72814 +#include <linux/grinternal.h>
72815 +
72816 +int
72817 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72818 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
72819 +{
72820 + struct task_struct *task;
72821 +
72822 + if (!gr_acl_is_enabled())
72823 + return 1;
72824 +
72825 + rcu_read_lock();
72826 + read_lock(&tasklist_lock);
72827 +
72828 + task = find_task_by_vpid(shm_cprid);
72829 +
72830 + if (unlikely(!task))
72831 + task = find_task_by_vpid(shm_lapid);
72832 +
72833 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
72834 + (task_pid_nr(task) == shm_lapid)) &&
72835 + (task->acl->mode & GR_PROTSHM) &&
72836 + (task->acl != current->acl))) {
72837 + read_unlock(&tasklist_lock);
72838 + rcu_read_unlock();
72839 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
72840 + return 0;
72841 + }
72842 + read_unlock(&tasklist_lock);
72843 + rcu_read_unlock();
72844 +
72845 + return 1;
72846 +}
72847 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
72848 new file mode 100644
72849 index 0000000..bc0be01
72850 --- /dev/null
72851 +++ b/grsecurity/grsec_chdir.c
72852 @@ -0,0 +1,19 @@
72853 +#include <linux/kernel.h>
72854 +#include <linux/sched.h>
72855 +#include <linux/fs.h>
72856 +#include <linux/file.h>
72857 +#include <linux/grsecurity.h>
72858 +#include <linux/grinternal.h>
72859 +
72860 +void
72861 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
72862 +{
72863 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
72864 + if ((grsec_enable_chdir && grsec_enable_group &&
72865 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
72866 + !grsec_enable_group)) {
72867 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
72868 + }
72869 +#endif
72870 + return;
72871 +}
72872 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
72873 new file mode 100644
72874 index 0000000..651d6c2
72875 --- /dev/null
72876 +++ b/grsecurity/grsec_chroot.c
72877 @@ -0,0 +1,370 @@
72878 +#include <linux/kernel.h>
72879 +#include <linux/module.h>
72880 +#include <linux/sched.h>
72881 +#include <linux/file.h>
72882 +#include <linux/fs.h>
72883 +#include <linux/mount.h>
72884 +#include <linux/types.h>
72885 +#include "../fs/mount.h"
72886 +#include <linux/grsecurity.h>
72887 +#include <linux/grinternal.h>
72888 +
72889 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
72890 +int gr_init_ran;
72891 +#endif
72892 +
72893 +void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
72894 +{
72895 +#ifdef CONFIG_GRKERNSEC
72896 + if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
72897 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
72898 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
72899 + && gr_init_ran
72900 +#endif
72901 + )
72902 + task->gr_is_chrooted = 1;
72903 + else {
72904 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
72905 + if (task_pid_nr(task) == 1 && !gr_init_ran)
72906 + gr_init_ran = 1;
72907 +#endif
72908 + task->gr_is_chrooted = 0;
72909 + }
72910 +
72911 + task->gr_chroot_dentry = path->dentry;
72912 +#endif
72913 + return;
72914 +}
72915 +
72916 +void gr_clear_chroot_entries(struct task_struct *task)
72917 +{
72918 +#ifdef CONFIG_GRKERNSEC
72919 + task->gr_is_chrooted = 0;
72920 + task->gr_chroot_dentry = NULL;
72921 +#endif
72922 + return;
72923 +}
72924 +
72925 +int
72926 +gr_handle_chroot_unix(const pid_t pid)
72927 +{
72928 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
72929 + struct task_struct *p;
72930 +
72931 + if (unlikely(!grsec_enable_chroot_unix))
72932 + return 1;
72933 +
72934 + if (likely(!proc_is_chrooted(current)))
72935 + return 1;
72936 +
72937 + rcu_read_lock();
72938 + read_lock(&tasklist_lock);
72939 + p = find_task_by_vpid_unrestricted(pid);
72940 + if (unlikely(p && !have_same_root(current, p))) {
72941 + read_unlock(&tasklist_lock);
72942 + rcu_read_unlock();
72943 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
72944 + return 0;
72945 + }
72946 + read_unlock(&tasklist_lock);
72947 + rcu_read_unlock();
72948 +#endif
72949 + return 1;
72950 +}
72951 +
72952 +int
72953 +gr_handle_chroot_nice(void)
72954 +{
72955 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72956 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
72957 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
72958 + return -EPERM;
72959 + }
72960 +#endif
72961 + return 0;
72962 +}
72963 +
72964 +int
72965 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
72966 +{
72967 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72968 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
72969 + && proc_is_chrooted(current)) {
72970 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
72971 + return -EACCES;
72972 + }
72973 +#endif
72974 + return 0;
72975 +}
72976 +
72977 +int
72978 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
72979 +{
72980 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72981 + struct task_struct *p;
72982 + int ret = 0;
72983 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
72984 + return ret;
72985 +
72986 + read_lock(&tasklist_lock);
72987 + do_each_pid_task(pid, type, p) {
72988 + if (!have_same_root(current, p)) {
72989 + ret = 1;
72990 + goto out;
72991 + }
72992 + } while_each_pid_task(pid, type, p);
72993 +out:
72994 + read_unlock(&tasklist_lock);
72995 + return ret;
72996 +#endif
72997 + return 0;
72998 +}
72999 +
73000 +int
73001 +gr_pid_is_chrooted(struct task_struct *p)
73002 +{
73003 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73004 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
73005 + return 0;
73006 +
73007 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
73008 + !have_same_root(current, p)) {
73009 + return 1;
73010 + }
73011 +#endif
73012 + return 0;
73013 +}
73014 +
73015 +EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
73016 +
73017 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
73018 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
73019 +{
73020 + struct path path, currentroot;
73021 + int ret = 0;
73022 +
73023 + path.dentry = (struct dentry *)u_dentry;
73024 + path.mnt = (struct vfsmount *)u_mnt;
73025 + get_fs_root(current->fs, &currentroot);
73026 + if (path_is_under(&path, &currentroot))
73027 + ret = 1;
73028 + path_put(&currentroot);
73029 +
73030 + return ret;
73031 +}
73032 +#endif
73033 +
73034 +int
73035 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
73036 +{
73037 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
73038 + if (!grsec_enable_chroot_fchdir)
73039 + return 1;
73040 +
73041 + if (!proc_is_chrooted(current))
73042 + return 1;
73043 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
73044 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
73045 + return 0;
73046 + }
73047 +#endif
73048 + return 1;
73049 +}
73050 +
73051 +int
73052 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73053 + const time_t shm_createtime)
73054 +{
73055 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
73056 + struct task_struct *p;
73057 + time_t starttime;
73058 +
73059 + if (unlikely(!grsec_enable_chroot_shmat))
73060 + return 1;
73061 +
73062 + if (likely(!proc_is_chrooted(current)))
73063 + return 1;
73064 +
73065 + rcu_read_lock();
73066 + read_lock(&tasklist_lock);
73067 +
73068 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
73069 + starttime = p->start_time.tv_sec;
73070 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
73071 + if (have_same_root(current, p)) {
73072 + goto allow;
73073 + } else {
73074 + read_unlock(&tasklist_lock);
73075 + rcu_read_unlock();
73076 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
73077 + return 0;
73078 + }
73079 + }
73080 + /* creator exited, pid reuse, fall through to next check */
73081 + }
73082 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
73083 + if (unlikely(!have_same_root(current, p))) {
73084 + read_unlock(&tasklist_lock);
73085 + rcu_read_unlock();
73086 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
73087 + return 0;
73088 + }
73089 + }
73090 +
73091 +allow:
73092 + read_unlock(&tasklist_lock);
73093 + rcu_read_unlock();
73094 +#endif
73095 + return 1;
73096 +}
73097 +
73098 +void
73099 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
73100 +{
73101 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
73102 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
73103 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
73104 +#endif
73105 + return;
73106 +}
73107 +
73108 +int
73109 +gr_handle_chroot_mknod(const struct dentry *dentry,
73110 + const struct vfsmount *mnt, const int mode)
73111 +{
73112 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
73113 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
73114 + proc_is_chrooted(current)) {
73115 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
73116 + return -EPERM;
73117 + }
73118 +#endif
73119 + return 0;
73120 +}
73121 +
73122 +int
73123 +gr_handle_chroot_mount(const struct dentry *dentry,
73124 + const struct vfsmount *mnt, const char *dev_name)
73125 +{
73126 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
73127 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
73128 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
73129 + return -EPERM;
73130 + }
73131 +#endif
73132 + return 0;
73133 +}
73134 +
73135 +int
73136 +gr_handle_chroot_pivot(void)
73137 +{
73138 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
73139 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
73140 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
73141 + return -EPERM;
73142 + }
73143 +#endif
73144 + return 0;
73145 +}
73146 +
73147 +int
73148 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
73149 +{
73150 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
73151 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
73152 + !gr_is_outside_chroot(dentry, mnt)) {
73153 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
73154 + return -EPERM;
73155 + }
73156 +#endif
73157 + return 0;
73158 +}
73159 +
73160 +extern const char *captab_log[];
73161 +extern int captab_log_entries;
73162 +
73163 +int
73164 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73165 +{
73166 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73167 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
73168 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
73169 + if (cap_raised(chroot_caps, cap)) {
73170 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
73171 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
73172 + }
73173 + return 0;
73174 + }
73175 + }
73176 +#endif
73177 + return 1;
73178 +}
73179 +
73180 +int
73181 +gr_chroot_is_capable(const int cap)
73182 +{
73183 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73184 + return gr_task_chroot_is_capable(current, current_cred(), cap);
73185 +#endif
73186 + return 1;
73187 +}
73188 +
73189 +int
73190 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
73191 +{
73192 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73193 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
73194 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
73195 + if (cap_raised(chroot_caps, cap)) {
73196 + return 0;
73197 + }
73198 + }
73199 +#endif
73200 + return 1;
73201 +}
73202 +
73203 +int
73204 +gr_chroot_is_capable_nolog(const int cap)
73205 +{
73206 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73207 + return gr_task_chroot_is_capable_nolog(current, cap);
73208 +#endif
73209 + return 1;
73210 +}
73211 +
73212 +int
73213 +gr_handle_chroot_sysctl(const int op)
73214 +{
73215 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
73216 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
73217 + proc_is_chrooted(current))
73218 + return -EACCES;
73219 +#endif
73220 + return 0;
73221 +}
73222 +
73223 +void
73224 +gr_handle_chroot_chdir(const struct path *path)
73225 +{
73226 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
73227 + if (grsec_enable_chroot_chdir)
73228 + set_fs_pwd(current->fs, path);
73229 +#endif
73230 + return;
73231 +}
73232 +
73233 +int
73234 +gr_handle_chroot_chmod(const struct dentry *dentry,
73235 + const struct vfsmount *mnt, const int mode)
73236 +{
73237 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
73238 + /* allow chmod +s on directories, but not files */
73239 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
73240 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
73241 + proc_is_chrooted(current)) {
73242 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
73243 + return -EPERM;
73244 + }
73245 +#endif
73246 + return 0;
73247 +}
73248 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
73249 new file mode 100644
73250 index 0000000..4d6fce8
73251 --- /dev/null
73252 +++ b/grsecurity/grsec_disabled.c
73253 @@ -0,0 +1,433 @@
73254 +#include <linux/kernel.h>
73255 +#include <linux/module.h>
73256 +#include <linux/sched.h>
73257 +#include <linux/file.h>
73258 +#include <linux/fs.h>
73259 +#include <linux/kdev_t.h>
73260 +#include <linux/net.h>
73261 +#include <linux/in.h>
73262 +#include <linux/ip.h>
73263 +#include <linux/skbuff.h>
73264 +#include <linux/sysctl.h>
73265 +
73266 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
73267 +void
73268 +pax_set_initial_flags(struct linux_binprm *bprm)
73269 +{
73270 + return;
73271 +}
73272 +#endif
73273 +
73274 +#ifdef CONFIG_SYSCTL
73275 +__u32
73276 +gr_handle_sysctl(const struct ctl_table * table, const int op)
73277 +{
73278 + return 0;
73279 +}
73280 +#endif
73281 +
73282 +#ifdef CONFIG_TASKSTATS
73283 +int gr_is_taskstats_denied(int pid)
73284 +{
73285 + return 0;
73286 +}
73287 +#endif
73288 +
73289 +int
73290 +gr_acl_is_enabled(void)
73291 +{
73292 + return 0;
73293 +}
73294 +
73295 +void
73296 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
73297 +{
73298 + return;
73299 +}
73300 +
73301 +int
73302 +gr_handle_rawio(const struct inode *inode)
73303 +{
73304 + return 0;
73305 +}
73306 +
73307 +void
73308 +gr_acl_handle_psacct(struct task_struct *task, const long code)
73309 +{
73310 + return;
73311 +}
73312 +
73313 +int
73314 +gr_handle_ptrace(struct task_struct *task, const long request)
73315 +{
73316 + return 0;
73317 +}
73318 +
73319 +int
73320 +gr_handle_proc_ptrace(struct task_struct *task)
73321 +{
73322 + return 0;
73323 +}
73324 +
73325 +int
73326 +gr_set_acls(const int type)
73327 +{
73328 + return 0;
73329 +}
73330 +
73331 +int
73332 +gr_check_hidden_task(const struct task_struct *tsk)
73333 +{
73334 + return 0;
73335 +}
73336 +
73337 +int
73338 +gr_check_protected_task(const struct task_struct *task)
73339 +{
73340 + return 0;
73341 +}
73342 +
73343 +int
73344 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
73345 +{
73346 + return 0;
73347 +}
73348 +
73349 +void
73350 +gr_copy_label(struct task_struct *tsk)
73351 +{
73352 + return;
73353 +}
73354 +
73355 +void
73356 +gr_set_pax_flags(struct task_struct *task)
73357 +{
73358 + return;
73359 +}
73360 +
73361 +int
73362 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
73363 + const int unsafe_share)
73364 +{
73365 + return 0;
73366 +}
73367 +
73368 +void
73369 +gr_handle_delete(const ino_t ino, const dev_t dev)
73370 +{
73371 + return;
73372 +}
73373 +
73374 +void
73375 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
73376 +{
73377 + return;
73378 +}
73379 +
73380 +void
73381 +gr_handle_crash(struct task_struct *task, const int sig)
73382 +{
73383 + return;
73384 +}
73385 +
73386 +int
73387 +gr_check_crash_exec(const struct file *filp)
73388 +{
73389 + return 0;
73390 +}
73391 +
73392 +int
73393 +gr_check_crash_uid(const kuid_t uid)
73394 +{
73395 + return 0;
73396 +}
73397 +
73398 +void
73399 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
73400 + struct dentry *old_dentry,
73401 + struct dentry *new_dentry,
73402 + struct vfsmount *mnt, const __u8 replace)
73403 +{
73404 + return;
73405 +}
73406 +
73407 +int
73408 +gr_search_socket(const int family, const int type, const int protocol)
73409 +{
73410 + return 1;
73411 +}
73412 +
73413 +int
73414 +gr_search_connectbind(const int mode, const struct socket *sock,
73415 + const struct sockaddr_in *addr)
73416 +{
73417 + return 0;
73418 +}
73419 +
73420 +void
73421 +gr_handle_alertkill(struct task_struct *task)
73422 +{
73423 + return;
73424 +}
73425 +
73426 +__u32
73427 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
73428 +{
73429 + return 1;
73430 +}
73431 +
73432 +__u32
73433 +gr_acl_handle_hidden_file(const struct dentry * dentry,
73434 + const struct vfsmount * mnt)
73435 +{
73436 + return 1;
73437 +}
73438 +
73439 +__u32
73440 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73441 + int acc_mode)
73442 +{
73443 + return 1;
73444 +}
73445 +
73446 +__u32
73447 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73448 +{
73449 + return 1;
73450 +}
73451 +
73452 +__u32
73453 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
73454 +{
73455 + return 1;
73456 +}
73457 +
73458 +int
73459 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
73460 + unsigned int *vm_flags)
73461 +{
73462 + return 1;
73463 +}
73464 +
73465 +__u32
73466 +gr_acl_handle_truncate(const struct dentry * dentry,
73467 + const struct vfsmount * mnt)
73468 +{
73469 + return 1;
73470 +}
73471 +
73472 +__u32
73473 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
73474 +{
73475 + return 1;
73476 +}
73477 +
73478 +__u32
73479 +gr_acl_handle_access(const struct dentry * dentry,
73480 + const struct vfsmount * mnt, const int fmode)
73481 +{
73482 + return 1;
73483 +}
73484 +
73485 +__u32
73486 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
73487 + umode_t *mode)
73488 +{
73489 + return 1;
73490 +}
73491 +
73492 +__u32
73493 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
73494 +{
73495 + return 1;
73496 +}
73497 +
73498 +__u32
73499 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
73500 +{
73501 + return 1;
73502 +}
73503 +
73504 +__u32
73505 +gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
73506 +{
73507 + return 1;
73508 +}
73509 +
73510 +void
73511 +grsecurity_init(void)
73512 +{
73513 + return;
73514 +}
73515 +
73516 +umode_t gr_acl_umask(void)
73517 +{
73518 + return 0;
73519 +}
73520 +
73521 +__u32
73522 +gr_acl_handle_mknod(const struct dentry * new_dentry,
73523 + const struct dentry * parent_dentry,
73524 + const struct vfsmount * parent_mnt,
73525 + const int mode)
73526 +{
73527 + return 1;
73528 +}
73529 +
73530 +__u32
73531 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
73532 + const struct dentry * parent_dentry,
73533 + const struct vfsmount * parent_mnt)
73534 +{
73535 + return 1;
73536 +}
73537 +
73538 +__u32
73539 +gr_acl_handle_symlink(const struct dentry * new_dentry,
73540 + const struct dentry * parent_dentry,
73541 + const struct vfsmount * parent_mnt, const struct filename *from)
73542 +{
73543 + return 1;
73544 +}
73545 +
73546 +__u32
73547 +gr_acl_handle_link(const struct dentry * new_dentry,
73548 + const struct dentry * parent_dentry,
73549 + const struct vfsmount * parent_mnt,
73550 + const struct dentry * old_dentry,
73551 + const struct vfsmount * old_mnt, const struct filename *to)
73552 +{
73553 + return 1;
73554 +}
73555 +
73556 +int
73557 +gr_acl_handle_rename(const struct dentry *new_dentry,
73558 + const struct dentry *parent_dentry,
73559 + const struct vfsmount *parent_mnt,
73560 + const struct dentry *old_dentry,
73561 + const struct inode *old_parent_inode,
73562 + const struct vfsmount *old_mnt, const struct filename *newname)
73563 +{
73564 + return 0;
73565 +}
73566 +
73567 +int
73568 +gr_acl_handle_filldir(const struct file *file, const char *name,
73569 + const int namelen, const ino_t ino)
73570 +{
73571 + return 1;
73572 +}
73573 +
73574 +int
73575 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73576 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
73577 +{
73578 + return 1;
73579 +}
73580 +
73581 +int
73582 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
73583 +{
73584 + return 0;
73585 +}
73586 +
73587 +int
73588 +gr_search_accept(const struct socket *sock)
73589 +{
73590 + return 0;
73591 +}
73592 +
73593 +int
73594 +gr_search_listen(const struct socket *sock)
73595 +{
73596 + return 0;
73597 +}
73598 +
73599 +int
73600 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
73601 +{
73602 + return 0;
73603 +}
73604 +
73605 +__u32
73606 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
73607 +{
73608 + return 1;
73609 +}
73610 +
73611 +__u32
73612 +gr_acl_handle_creat(const struct dentry * dentry,
73613 + const struct dentry * p_dentry,
73614 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73615 + const int imode)
73616 +{
73617 + return 1;
73618 +}
73619 +
73620 +void
73621 +gr_acl_handle_exit(void)
73622 +{
73623 + return;
73624 +}
73625 +
73626 +int
73627 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73628 +{
73629 + return 1;
73630 +}
73631 +
73632 +void
73633 +gr_set_role_label(const kuid_t uid, const kgid_t gid)
73634 +{
73635 + return;
73636 +}
73637 +
73638 +int
73639 +gr_acl_handle_procpidmem(const struct task_struct *task)
73640 +{
73641 + return 0;
73642 +}
73643 +
73644 +int
73645 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
73646 +{
73647 + return 0;
73648 +}
73649 +
73650 +int
73651 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
73652 +{
73653 + return 0;
73654 +}
73655 +
73656 +int
73657 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
73658 +{
73659 + return 0;
73660 +}
73661 +
73662 +int
73663 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
73664 +{
73665 + return 0;
73666 +}
73667 +
73668 +int gr_acl_enable_at_secure(void)
73669 +{
73670 + return 0;
73671 +}
73672 +
73673 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
73674 +{
73675 + return dentry->d_sb->s_dev;
73676 +}
73677 +
73678 +void gr_put_exec_file(struct task_struct *task)
73679 +{
73680 + return;
73681 +}
73682 +
73683 +#ifdef CONFIG_SECURITY
73684 +EXPORT_SYMBOL_GPL(gr_check_user_change);
73685 +EXPORT_SYMBOL_GPL(gr_check_group_change);
73686 +#endif
73687 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
73688 new file mode 100644
73689 index 0000000..f35f454
73690 --- /dev/null
73691 +++ b/grsecurity/grsec_exec.c
73692 @@ -0,0 +1,187 @@
73693 +#include <linux/kernel.h>
73694 +#include <linux/sched.h>
73695 +#include <linux/file.h>
73696 +#include <linux/binfmts.h>
73697 +#include <linux/fs.h>
73698 +#include <linux/types.h>
73699 +#include <linux/grdefs.h>
73700 +#include <linux/grsecurity.h>
73701 +#include <linux/grinternal.h>
73702 +#include <linux/capability.h>
73703 +#include <linux/module.h>
73704 +#include <linux/compat.h>
73705 +
73706 +#include <asm/uaccess.h>
73707 +
73708 +#ifdef CONFIG_GRKERNSEC_EXECLOG
73709 +static char gr_exec_arg_buf[132];
73710 +static DEFINE_MUTEX(gr_exec_arg_mutex);
73711 +#endif
73712 +
73713 +struct user_arg_ptr {
73714 +#ifdef CONFIG_COMPAT
73715 + bool is_compat;
73716 +#endif
73717 + union {
73718 + const char __user *const __user *native;
73719 +#ifdef CONFIG_COMPAT
73720 + const compat_uptr_t __user *compat;
73721 +#endif
73722 + } ptr;
73723 +};
73724 +
73725 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
73726 +
73727 +void
73728 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
73729 +{
73730 +#ifdef CONFIG_GRKERNSEC_EXECLOG
73731 + char *grarg = gr_exec_arg_buf;
73732 + unsigned int i, x, execlen = 0;
73733 + char c;
73734 +
73735 + if (!((grsec_enable_execlog && grsec_enable_group &&
73736 + in_group_p(grsec_audit_gid))
73737 + || (grsec_enable_execlog && !grsec_enable_group)))
73738 + return;
73739 +
73740 + mutex_lock(&gr_exec_arg_mutex);
73741 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
73742 +
73743 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
73744 + const char __user *p;
73745 + unsigned int len;
73746 +
73747 + p = get_user_arg_ptr(argv, i);
73748 + if (IS_ERR(p))
73749 + goto log;
73750 +
73751 + len = strnlen_user(p, 128 - execlen);
73752 + if (len > 128 - execlen)
73753 + len = 128 - execlen;
73754 + else if (len > 0)
73755 + len--;
73756 + if (copy_from_user(grarg + execlen, p, len))
73757 + goto log;
73758 +
73759 + /* rewrite unprintable characters */
73760 + for (x = 0; x < len; x++) {
73761 + c = *(grarg + execlen + x);
73762 + if (c < 32 || c > 126)
73763 + *(grarg + execlen + x) = ' ';
73764 + }
73765 +
73766 + execlen += len;
73767 + *(grarg + execlen) = ' ';
73768 + *(grarg + execlen + 1) = '\0';
73769 + execlen++;
73770 + }
73771 +
73772 + log:
73773 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
73774 + bprm->file->f_path.mnt, grarg);
73775 + mutex_unlock(&gr_exec_arg_mutex);
73776 +#endif
73777 + return;
73778 +}
73779 +
73780 +#ifdef CONFIG_GRKERNSEC
73781 +extern int gr_acl_is_capable(const int cap);
73782 +extern int gr_acl_is_capable_nolog(const int cap);
73783 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
73784 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
73785 +extern int gr_chroot_is_capable(const int cap);
73786 +extern int gr_chroot_is_capable_nolog(const int cap);
73787 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
73788 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
73789 +#endif
73790 +
73791 +const char *captab_log[] = {
73792 + "CAP_CHOWN",
73793 + "CAP_DAC_OVERRIDE",
73794 + "CAP_DAC_READ_SEARCH",
73795 + "CAP_FOWNER",
73796 + "CAP_FSETID",
73797 + "CAP_KILL",
73798 + "CAP_SETGID",
73799 + "CAP_SETUID",
73800 + "CAP_SETPCAP",
73801 + "CAP_LINUX_IMMUTABLE",
73802 + "CAP_NET_BIND_SERVICE",
73803 + "CAP_NET_BROADCAST",
73804 + "CAP_NET_ADMIN",
73805 + "CAP_NET_RAW",
73806 + "CAP_IPC_LOCK",
73807 + "CAP_IPC_OWNER",
73808 + "CAP_SYS_MODULE",
73809 + "CAP_SYS_RAWIO",
73810 + "CAP_SYS_CHROOT",
73811 + "CAP_SYS_PTRACE",
73812 + "CAP_SYS_PACCT",
73813 + "CAP_SYS_ADMIN",
73814 + "CAP_SYS_BOOT",
73815 + "CAP_SYS_NICE",
73816 + "CAP_SYS_RESOURCE",
73817 + "CAP_SYS_TIME",
73818 + "CAP_SYS_TTY_CONFIG",
73819 + "CAP_MKNOD",
73820 + "CAP_LEASE",
73821 + "CAP_AUDIT_WRITE",
73822 + "CAP_AUDIT_CONTROL",
73823 + "CAP_SETFCAP",
73824 + "CAP_MAC_OVERRIDE",
73825 + "CAP_MAC_ADMIN",
73826 + "CAP_SYSLOG",
73827 + "CAP_WAKE_ALARM"
73828 +};
73829 +
73830 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
73831 +
73832 +int gr_is_capable(const int cap)
73833 +{
73834 +#ifdef CONFIG_GRKERNSEC
73835 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
73836 + return 1;
73837 + return 0;
73838 +#else
73839 + return 1;
73840 +#endif
73841 +}
73842 +
73843 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73844 +{
73845 +#ifdef CONFIG_GRKERNSEC
73846 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
73847 + return 1;
73848 + return 0;
73849 +#else
73850 + return 1;
73851 +#endif
73852 +}
73853 +
73854 +int gr_is_capable_nolog(const int cap)
73855 +{
73856 +#ifdef CONFIG_GRKERNSEC
73857 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
73858 + return 1;
73859 + return 0;
73860 +#else
73861 + return 1;
73862 +#endif
73863 +}
73864 +
73865 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
73866 +{
73867 +#ifdef CONFIG_GRKERNSEC
73868 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
73869 + return 1;
73870 + return 0;
73871 +#else
73872 + return 1;
73873 +#endif
73874 +}
73875 +
73876 +EXPORT_SYMBOL_GPL(gr_is_capable);
73877 +EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
73878 +EXPORT_SYMBOL_GPL(gr_task_is_capable);
73879 +EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
73880 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
73881 new file mode 100644
73882 index 0000000..06cc6ea
73883 --- /dev/null
73884 +++ b/grsecurity/grsec_fifo.c
73885 @@ -0,0 +1,24 @@
73886 +#include <linux/kernel.h>
73887 +#include <linux/sched.h>
73888 +#include <linux/fs.h>
73889 +#include <linux/file.h>
73890 +#include <linux/grinternal.h>
73891 +
73892 +int
73893 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
73894 + const struct dentry *dir, const int flag, const int acc_mode)
73895 +{
73896 +#ifdef CONFIG_GRKERNSEC_FIFO
73897 + const struct cred *cred = current_cred();
73898 +
73899 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
73900 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
73901 + !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
73902 + !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
73903 + if (!inode_permission(dentry->d_inode, acc_mode))
73904 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
73905 + return -EACCES;
73906 + }
73907 +#endif
73908 + return 0;
73909 +}
73910 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
73911 new file mode 100644
73912 index 0000000..8ca18bf
73913 --- /dev/null
73914 +++ b/grsecurity/grsec_fork.c
73915 @@ -0,0 +1,23 @@
73916 +#include <linux/kernel.h>
73917 +#include <linux/sched.h>
73918 +#include <linux/grsecurity.h>
73919 +#include <linux/grinternal.h>
73920 +#include <linux/errno.h>
73921 +
73922 +void
73923 +gr_log_forkfail(const int retval)
73924 +{
73925 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
73926 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
73927 + switch (retval) {
73928 + case -EAGAIN:
73929 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
73930 + break;
73931 + case -ENOMEM:
73932 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
73933 + break;
73934 + }
73935 + }
73936 +#endif
73937 + return;
73938 +}
73939 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
73940 new file mode 100644
73941 index 0000000..ae6c028
73942 --- /dev/null
73943 +++ b/grsecurity/grsec_init.c
73944 @@ -0,0 +1,272 @@
73945 +#include <linux/kernel.h>
73946 +#include <linux/sched.h>
73947 +#include <linux/mm.h>
73948 +#include <linux/gracl.h>
73949 +#include <linux/slab.h>
73950 +#include <linux/vmalloc.h>
73951 +#include <linux/percpu.h>
73952 +#include <linux/module.h>
73953 +
73954 +int grsec_enable_ptrace_readexec;
73955 +int grsec_enable_setxid;
73956 +int grsec_enable_symlinkown;
73957 +kgid_t grsec_symlinkown_gid;
73958 +int grsec_enable_brute;
73959 +int grsec_enable_link;
73960 +int grsec_enable_dmesg;
73961 +int grsec_enable_harden_ptrace;
73962 +int grsec_enable_harden_ipc;
73963 +int grsec_enable_fifo;
73964 +int grsec_enable_execlog;
73965 +int grsec_enable_signal;
73966 +int grsec_enable_forkfail;
73967 +int grsec_enable_audit_ptrace;
73968 +int grsec_enable_time;
73969 +int grsec_enable_group;
73970 +kgid_t grsec_audit_gid;
73971 +int grsec_enable_chdir;
73972 +int grsec_enable_mount;
73973 +int grsec_enable_rofs;
73974 +int grsec_deny_new_usb;
73975 +int grsec_enable_chroot_findtask;
73976 +int grsec_enable_chroot_mount;
73977 +int grsec_enable_chroot_shmat;
73978 +int grsec_enable_chroot_fchdir;
73979 +int grsec_enable_chroot_double;
73980 +int grsec_enable_chroot_pivot;
73981 +int grsec_enable_chroot_chdir;
73982 +int grsec_enable_chroot_chmod;
73983 +int grsec_enable_chroot_mknod;
73984 +int grsec_enable_chroot_nice;
73985 +int grsec_enable_chroot_execlog;
73986 +int grsec_enable_chroot_caps;
73987 +int grsec_enable_chroot_sysctl;
73988 +int grsec_enable_chroot_unix;
73989 +int grsec_enable_tpe;
73990 +kgid_t grsec_tpe_gid;
73991 +int grsec_enable_blackhole;
73992 +#ifdef CONFIG_IPV6_MODULE
73993 +EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
73994 +#endif
73995 +int grsec_lastack_retries;
73996 +int grsec_enable_tpe_all;
73997 +int grsec_enable_tpe_invert;
73998 +int grsec_enable_socket_all;
73999 +kgid_t grsec_socket_all_gid;
74000 +int grsec_enable_socket_client;
74001 +kgid_t grsec_socket_client_gid;
74002 +int grsec_enable_socket_server;
74003 +kgid_t grsec_socket_server_gid;
74004 +int grsec_resource_logging;
74005 +int grsec_disable_privio;
74006 +int grsec_enable_log_rwxmaps;
74007 +int grsec_lock;
74008 +
74009 +DEFINE_SPINLOCK(grsec_alert_lock);
74010 +unsigned long grsec_alert_wtime = 0;
74011 +unsigned long grsec_alert_fyet = 0;
74012 +
74013 +DEFINE_SPINLOCK(grsec_audit_lock);
74014 +
74015 +DEFINE_RWLOCK(grsec_exec_file_lock);
74016 +
74017 +char *gr_shared_page[4];
74018 +
74019 +char *gr_alert_log_fmt;
74020 +char *gr_audit_log_fmt;
74021 +char *gr_alert_log_buf;
74022 +char *gr_audit_log_buf;
74023 +
74024 +void __init
74025 +grsecurity_init(void)
74026 +{
74027 + int j;
74028 + /* create the per-cpu shared pages */
74029 +
74030 +#ifdef CONFIG_X86
74031 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
74032 +#endif
74033 +
74034 + for (j = 0; j < 4; j++) {
74035 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
74036 + if (gr_shared_page[j] == NULL) {
74037 + panic("Unable to allocate grsecurity shared page");
74038 + return;
74039 + }
74040 + }
74041 +
74042 + /* allocate log buffers */
74043 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
74044 + if (!gr_alert_log_fmt) {
74045 + panic("Unable to allocate grsecurity alert log format buffer");
74046 + return;
74047 + }
74048 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
74049 + if (!gr_audit_log_fmt) {
74050 + panic("Unable to allocate grsecurity audit log format buffer");
74051 + return;
74052 + }
74053 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
74054 + if (!gr_alert_log_buf) {
74055 + panic("Unable to allocate grsecurity alert log buffer");
74056 + return;
74057 + }
74058 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
74059 + if (!gr_audit_log_buf) {
74060 + panic("Unable to allocate grsecurity audit log buffer");
74061 + return;
74062 + }
74063 +
74064 +#ifdef CONFIG_GRKERNSEC_IO
74065 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
74066 + grsec_disable_privio = 1;
74067 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
74068 + grsec_disable_privio = 1;
74069 +#else
74070 + grsec_disable_privio = 0;
74071 +#endif
74072 +#endif
74073 +
74074 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
74075 + /* for backward compatibility, tpe_invert always defaults to on if
74076 + enabled in the kernel
74077 + */
74078 + grsec_enable_tpe_invert = 1;
74079 +#endif
74080 +
74081 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
74082 +#ifndef CONFIG_GRKERNSEC_SYSCTL
74083 + grsec_lock = 1;
74084 +#endif
74085 +
74086 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74087 + grsec_enable_log_rwxmaps = 1;
74088 +#endif
74089 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
74090 + grsec_enable_group = 1;
74091 + grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
74092 +#endif
74093 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
74094 + grsec_enable_ptrace_readexec = 1;
74095 +#endif
74096 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
74097 + grsec_enable_chdir = 1;
74098 +#endif
74099 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
74100 + grsec_enable_harden_ptrace = 1;
74101 +#endif
74102 +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
74103 + grsec_enable_harden_ipc = 1;
74104 +#endif
74105 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74106 + grsec_enable_mount = 1;
74107 +#endif
74108 +#ifdef CONFIG_GRKERNSEC_LINK
74109 + grsec_enable_link = 1;
74110 +#endif
74111 +#ifdef CONFIG_GRKERNSEC_BRUTE
74112 + grsec_enable_brute = 1;
74113 +#endif
74114 +#ifdef CONFIG_GRKERNSEC_DMESG
74115 + grsec_enable_dmesg = 1;
74116 +#endif
74117 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74118 + grsec_enable_blackhole = 1;
74119 + grsec_lastack_retries = 4;
74120 +#endif
74121 +#ifdef CONFIG_GRKERNSEC_FIFO
74122 + grsec_enable_fifo = 1;
74123 +#endif
74124 +#ifdef CONFIG_GRKERNSEC_EXECLOG
74125 + grsec_enable_execlog = 1;
74126 +#endif
74127 +#ifdef CONFIG_GRKERNSEC_SETXID
74128 + grsec_enable_setxid = 1;
74129 +#endif
74130 +#ifdef CONFIG_GRKERNSEC_SIGNAL
74131 + grsec_enable_signal = 1;
74132 +#endif
74133 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
74134 + grsec_enable_forkfail = 1;
74135 +#endif
74136 +#ifdef CONFIG_GRKERNSEC_TIME
74137 + grsec_enable_time = 1;
74138 +#endif
74139 +#ifdef CONFIG_GRKERNSEC_RESLOG
74140 + grsec_resource_logging = 1;
74141 +#endif
74142 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74143 + grsec_enable_chroot_findtask = 1;
74144 +#endif
74145 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
74146 + grsec_enable_chroot_unix = 1;
74147 +#endif
74148 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
74149 + grsec_enable_chroot_mount = 1;
74150 +#endif
74151 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
74152 + grsec_enable_chroot_fchdir = 1;
74153 +#endif
74154 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
74155 + grsec_enable_chroot_shmat = 1;
74156 +#endif
74157 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
74158 + grsec_enable_audit_ptrace = 1;
74159 +#endif
74160 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
74161 + grsec_enable_chroot_double = 1;
74162 +#endif
74163 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
74164 + grsec_enable_chroot_pivot = 1;
74165 +#endif
74166 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
74167 + grsec_enable_chroot_chdir = 1;
74168 +#endif
74169 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
74170 + grsec_enable_chroot_chmod = 1;
74171 +#endif
74172 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
74173 + grsec_enable_chroot_mknod = 1;
74174 +#endif
74175 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
74176 + grsec_enable_chroot_nice = 1;
74177 +#endif
74178 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
74179 + grsec_enable_chroot_execlog = 1;
74180 +#endif
74181 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74182 + grsec_enable_chroot_caps = 1;
74183 +#endif
74184 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
74185 + grsec_enable_chroot_sysctl = 1;
74186 +#endif
74187 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
74188 + grsec_enable_symlinkown = 1;
74189 + grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
74190 +#endif
74191 +#ifdef CONFIG_GRKERNSEC_TPE
74192 + grsec_enable_tpe = 1;
74193 + grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
74194 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
74195 + grsec_enable_tpe_all = 1;
74196 +#endif
74197 +#endif
74198 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
74199 + grsec_enable_socket_all = 1;
74200 + grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
74201 +#endif
74202 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
74203 + grsec_enable_socket_client = 1;
74204 + grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
74205 +#endif
74206 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74207 + grsec_enable_socket_server = 1;
74208 + grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
74209 +#endif
74210 +#endif
74211 +#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
74212 + grsec_deny_new_usb = 1;
74213 +#endif
74214 +
74215 + return;
74216 +}
74217 diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
74218 new file mode 100644
74219 index 0000000..1773300
74220 --- /dev/null
74221 +++ b/grsecurity/grsec_ipc.c
74222 @@ -0,0 +1,48 @@
74223 +#include <linux/kernel.h>
74224 +#include <linux/mm.h>
74225 +#include <linux/sched.h>
74226 +#include <linux/file.h>
74227 +#include <linux/ipc.h>
74228 +#include <linux/ipc_namespace.h>
74229 +#include <linux/grsecurity.h>
74230 +#include <linux/grinternal.h>
74231 +
74232 +int
74233 +gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
74234 +{
74235 +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
74236 + int write;
74237 + int orig_granted_mode;
74238 + kuid_t euid;
74239 + kgid_t egid;
74240 +
74241 + if (!grsec_enable_harden_ipc)
74242 + return 1;
74243 +
74244 + euid = current_euid();
74245 + egid = current_egid();
74246 +
74247 + write = requested_mode & 00002;
74248 + orig_granted_mode = ipcp->mode;
74249 +
74250 + if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
74251 + orig_granted_mode >>= 6;
74252 + else {
74253 + /* if likely wrong permissions, lock to user */
74254 + if (orig_granted_mode & 0007)
74255 + orig_granted_mode = 0;
74256 + /* otherwise do a egid-only check */
74257 + else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
74258 + orig_granted_mode >>= 3;
74259 + /* otherwise, no access */
74260 + else
74261 + orig_granted_mode = 0;
74262 + }
74263 + if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
74264 + !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
74265 + gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
74266 + return 0;
74267 + }
74268 +#endif
74269 + return 1;
74270 +}
74271 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
74272 new file mode 100644
74273 index 0000000..5e05e20
74274 --- /dev/null
74275 +++ b/grsecurity/grsec_link.c
74276 @@ -0,0 +1,58 @@
74277 +#include <linux/kernel.h>
74278 +#include <linux/sched.h>
74279 +#include <linux/fs.h>
74280 +#include <linux/file.h>
74281 +#include <linux/grinternal.h>
74282 +
74283 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
74284 +{
74285 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
74286 + const struct inode *link_inode = link->dentry->d_inode;
74287 +
74288 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
74289 + /* ignore root-owned links, e.g. /proc/self */
74290 + gr_is_global_nonroot(link_inode->i_uid) && target &&
74291 + !uid_eq(link_inode->i_uid, target->i_uid)) {
74292 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
74293 + return 1;
74294 + }
74295 +#endif
74296 + return 0;
74297 +}
74298 +
74299 +int
74300 +gr_handle_follow_link(const struct inode *parent,
74301 + const struct inode *inode,
74302 + const struct dentry *dentry, const struct vfsmount *mnt)
74303 +{
74304 +#ifdef CONFIG_GRKERNSEC_LINK
74305 + const struct cred *cred = current_cred();
74306 +
74307 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
74308 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
74309 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
74310 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
74311 + return -EACCES;
74312 + }
74313 +#endif
74314 + return 0;
74315 +}
74316 +
74317 +int
74318 +gr_handle_hardlink(const struct dentry *dentry,
74319 + const struct vfsmount *mnt,
74320 + struct inode *inode, const int mode, const struct filename *to)
74321 +{
74322 +#ifdef CONFIG_GRKERNSEC_LINK
74323 + const struct cred *cred = current_cred();
74324 +
74325 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
74326 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
74327 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
74328 + !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
74329 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
74330 + return -EPERM;
74331 + }
74332 +#endif
74333 + return 0;
74334 +}
74335 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
74336 new file mode 100644
74337 index 0000000..dbe0a6b
74338 --- /dev/null
74339 +++ b/grsecurity/grsec_log.c
74340 @@ -0,0 +1,341 @@
74341 +#include <linux/kernel.h>
74342 +#include <linux/sched.h>
74343 +#include <linux/file.h>
74344 +#include <linux/tty.h>
74345 +#include <linux/fs.h>
74346 +#include <linux/mm.h>
74347 +#include <linux/grinternal.h>
74348 +
74349 +#ifdef CONFIG_TREE_PREEMPT_RCU
74350 +#define DISABLE_PREEMPT() preempt_disable()
74351 +#define ENABLE_PREEMPT() preempt_enable()
74352 +#else
74353 +#define DISABLE_PREEMPT()
74354 +#define ENABLE_PREEMPT()
74355 +#endif
74356 +
74357 +#define BEGIN_LOCKS(x) \
74358 + DISABLE_PREEMPT(); \
74359 + rcu_read_lock(); \
74360 + read_lock(&tasklist_lock); \
74361 + read_lock(&grsec_exec_file_lock); \
74362 + if (x != GR_DO_AUDIT) \
74363 + spin_lock(&grsec_alert_lock); \
74364 + else \
74365 + spin_lock(&grsec_audit_lock)
74366 +
74367 +#define END_LOCKS(x) \
74368 + if (x != GR_DO_AUDIT) \
74369 + spin_unlock(&grsec_alert_lock); \
74370 + else \
74371 + spin_unlock(&grsec_audit_lock); \
74372 + read_unlock(&grsec_exec_file_lock); \
74373 + read_unlock(&tasklist_lock); \
74374 + rcu_read_unlock(); \
74375 + ENABLE_PREEMPT(); \
74376 + if (x == GR_DONT_AUDIT) \
74377 + gr_handle_alertkill(current)
74378 +
74379 +enum {
74380 + FLOODING,
74381 + NO_FLOODING
74382 +};
74383 +
74384 +extern char *gr_alert_log_fmt;
74385 +extern char *gr_audit_log_fmt;
74386 +extern char *gr_alert_log_buf;
74387 +extern char *gr_audit_log_buf;
74388 +
74389 +static int gr_log_start(int audit)
74390 +{
74391 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
74392 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
74393 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74394 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
74395 + unsigned long curr_secs = get_seconds();
74396 +
74397 + if (audit == GR_DO_AUDIT)
74398 + goto set_fmt;
74399 +
74400 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
74401 + grsec_alert_wtime = curr_secs;
74402 + grsec_alert_fyet = 0;
74403 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
74404 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
74405 + grsec_alert_fyet++;
74406 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
74407 + grsec_alert_wtime = curr_secs;
74408 + grsec_alert_fyet++;
74409 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
74410 + return FLOODING;
74411 + }
74412 + else return FLOODING;
74413 +
74414 +set_fmt:
74415 +#endif
74416 + memset(buf, 0, PAGE_SIZE);
74417 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
74418 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
74419 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
74420 + } else if (current->signal->curr_ip) {
74421 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
74422 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
74423 + } else if (gr_acl_is_enabled()) {
74424 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
74425 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
74426 + } else {
74427 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
74428 + strcpy(buf, fmt);
74429 + }
74430 +
74431 + return NO_FLOODING;
74432 +}
74433 +
74434 +static void gr_log_middle(int audit, const char *msg, va_list ap)
74435 + __attribute__ ((format (printf, 2, 0)));
74436 +
74437 +static void gr_log_middle(int audit, const char *msg, va_list ap)
74438 +{
74439 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74440 + unsigned int len = strlen(buf);
74441 +
74442 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
74443 +
74444 + return;
74445 +}
74446 +
74447 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
74448 + __attribute__ ((format (printf, 2, 3)));
74449 +
74450 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
74451 +{
74452 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74453 + unsigned int len = strlen(buf);
74454 + va_list ap;
74455 +
74456 + va_start(ap, msg);
74457 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
74458 + va_end(ap);
74459 +
74460 + return;
74461 +}
74462 +
74463 +static void gr_log_end(int audit, int append_default)
74464 +{
74465 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74466 + if (append_default) {
74467 + struct task_struct *task = current;
74468 + struct task_struct *parent = task->real_parent;
74469 + const struct cred *cred = __task_cred(task);
74470 + const struct cred *pcred = __task_cred(parent);
74471 + unsigned int len = strlen(buf);
74472 +
74473 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74474 + }
74475 +
74476 + printk("%s\n", buf);
74477 +
74478 + return;
74479 +}
74480 +
74481 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
74482 +{
74483 + int logtype;
74484 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
74485 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
74486 + void *voidptr = NULL;
74487 + int num1 = 0, num2 = 0;
74488 + unsigned long ulong1 = 0, ulong2 = 0;
74489 + struct dentry *dentry = NULL;
74490 + struct vfsmount *mnt = NULL;
74491 + struct file *file = NULL;
74492 + struct task_struct *task = NULL;
74493 + struct vm_area_struct *vma = NULL;
74494 + const struct cred *cred, *pcred;
74495 + va_list ap;
74496 +
74497 + BEGIN_LOCKS(audit);
74498 + logtype = gr_log_start(audit);
74499 + if (logtype == FLOODING) {
74500 + END_LOCKS(audit);
74501 + return;
74502 + }
74503 + va_start(ap, argtypes);
74504 + switch (argtypes) {
74505 + case GR_TTYSNIFF:
74506 + task = va_arg(ap, struct task_struct *);
74507 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
74508 + break;
74509 + case GR_SYSCTL_HIDDEN:
74510 + str1 = va_arg(ap, char *);
74511 + gr_log_middle_varargs(audit, msg, result, str1);
74512 + break;
74513 + case GR_RBAC:
74514 + dentry = va_arg(ap, struct dentry *);
74515 + mnt = va_arg(ap, struct vfsmount *);
74516 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
74517 + break;
74518 + case GR_RBAC_STR:
74519 + dentry = va_arg(ap, struct dentry *);
74520 + mnt = va_arg(ap, struct vfsmount *);
74521 + str1 = va_arg(ap, char *);
74522 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
74523 + break;
74524 + case GR_STR_RBAC:
74525 + str1 = va_arg(ap, char *);
74526 + dentry = va_arg(ap, struct dentry *);
74527 + mnt = va_arg(ap, struct vfsmount *);
74528 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
74529 + break;
74530 + case GR_RBAC_MODE2:
74531 + dentry = va_arg(ap, struct dentry *);
74532 + mnt = va_arg(ap, struct vfsmount *);
74533 + str1 = va_arg(ap, char *);
74534 + str2 = va_arg(ap, char *);
74535 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
74536 + break;
74537 + case GR_RBAC_MODE3:
74538 + dentry = va_arg(ap, struct dentry *);
74539 + mnt = va_arg(ap, struct vfsmount *);
74540 + str1 = va_arg(ap, char *);
74541 + str2 = va_arg(ap, char *);
74542 + str3 = va_arg(ap, char *);
74543 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
74544 + break;
74545 + case GR_FILENAME:
74546 + dentry = va_arg(ap, struct dentry *);
74547 + mnt = va_arg(ap, struct vfsmount *);
74548 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
74549 + break;
74550 + case GR_STR_FILENAME:
74551 + str1 = va_arg(ap, char *);
74552 + dentry = va_arg(ap, struct dentry *);
74553 + mnt = va_arg(ap, struct vfsmount *);
74554 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
74555 + break;
74556 + case GR_FILENAME_STR:
74557 + dentry = va_arg(ap, struct dentry *);
74558 + mnt = va_arg(ap, struct vfsmount *);
74559 + str1 = va_arg(ap, char *);
74560 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
74561 + break;
74562 + case GR_FILENAME_TWO_INT:
74563 + dentry = va_arg(ap, struct dentry *);
74564 + mnt = va_arg(ap, struct vfsmount *);
74565 + num1 = va_arg(ap, int);
74566 + num2 = va_arg(ap, int);
74567 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
74568 + break;
74569 + case GR_FILENAME_TWO_INT_STR:
74570 + dentry = va_arg(ap, struct dentry *);
74571 + mnt = va_arg(ap, struct vfsmount *);
74572 + num1 = va_arg(ap, int);
74573 + num2 = va_arg(ap, int);
74574 + str1 = va_arg(ap, char *);
74575 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
74576 + break;
74577 + case GR_TEXTREL:
74578 + file = va_arg(ap, struct file *);
74579 + ulong1 = va_arg(ap, unsigned long);
74580 + ulong2 = va_arg(ap, unsigned long);
74581 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
74582 + break;
74583 + case GR_PTRACE:
74584 + task = va_arg(ap, struct task_struct *);
74585 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
74586 + break;
74587 + case GR_RESOURCE:
74588 + task = va_arg(ap, struct task_struct *);
74589 + cred = __task_cred(task);
74590 + pcred = __task_cred(task->real_parent);
74591 + ulong1 = va_arg(ap, unsigned long);
74592 + str1 = va_arg(ap, char *);
74593 + ulong2 = va_arg(ap, unsigned long);
74594 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74595 + break;
74596 + case GR_CAP:
74597 + task = va_arg(ap, struct task_struct *);
74598 + cred = __task_cred(task);
74599 + pcred = __task_cred(task->real_parent);
74600 + str1 = va_arg(ap, char *);
74601 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74602 + break;
74603 + case GR_SIG:
74604 + str1 = va_arg(ap, char *);
74605 + voidptr = va_arg(ap, void *);
74606 + gr_log_middle_varargs(audit, msg, str1, voidptr);
74607 + break;
74608 + case GR_SIG2:
74609 + task = va_arg(ap, struct task_struct *);
74610 + cred = __task_cred(task);
74611 + pcred = __task_cred(task->real_parent);
74612 + num1 = va_arg(ap, int);
74613 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74614 + break;
74615 + case GR_CRASH1:
74616 + task = va_arg(ap, struct task_struct *);
74617 + cred = __task_cred(task);
74618 + pcred = __task_cred(task->real_parent);
74619 + ulong1 = va_arg(ap, unsigned long);
74620 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
74621 + break;
74622 + case GR_CRASH2:
74623 + task = va_arg(ap, struct task_struct *);
74624 + cred = __task_cred(task);
74625 + pcred = __task_cred(task->real_parent);
74626 + ulong1 = va_arg(ap, unsigned long);
74627 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
74628 + break;
74629 + case GR_RWXMAP:
74630 + file = va_arg(ap, struct file *);
74631 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
74632 + break;
74633 + case GR_RWXMAPVMA:
74634 + vma = va_arg(ap, struct vm_area_struct *);
74635 + if (vma->vm_file)
74636 + str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
74637 + else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
74638 + str1 = "<stack>";
74639 + else if (vma->vm_start <= current->mm->brk &&
74640 + vma->vm_end >= current->mm->start_brk)
74641 + str1 = "<heap>";
74642 + else
74643 + str1 = "<anonymous mapping>";
74644 + gr_log_middle_varargs(audit, msg, str1);
74645 + break;
74646 + case GR_PSACCT:
74647 + {
74648 + unsigned int wday, cday;
74649 + __u8 whr, chr;
74650 + __u8 wmin, cmin;
74651 + __u8 wsec, csec;
74652 + char cur_tty[64] = { 0 };
74653 + char parent_tty[64] = { 0 };
74654 +
74655 + task = va_arg(ap, struct task_struct *);
74656 + wday = va_arg(ap, unsigned int);
74657 + cday = va_arg(ap, unsigned int);
74658 + whr = va_arg(ap, int);
74659 + chr = va_arg(ap, int);
74660 + wmin = va_arg(ap, int);
74661 + cmin = va_arg(ap, int);
74662 + wsec = va_arg(ap, int);
74663 + csec = va_arg(ap, int);
74664 + ulong1 = va_arg(ap, unsigned long);
74665 + cred = __task_cred(task);
74666 + pcred = __task_cred(task->real_parent);
74667 +
74668 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74669 + }
74670 + break;
74671 + default:
74672 + gr_log_middle(audit, msg, ap);
74673 + }
74674 + va_end(ap);
74675 + // these don't need DEFAULTSECARGS printed on the end
74676 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
74677 + gr_log_end(audit, 0);
74678 + else
74679 + gr_log_end(audit, 1);
74680 + END_LOCKS(audit);
74681 +}
74682 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
74683 new file mode 100644
74684 index 0000000..0e39d8c
74685 --- /dev/null
74686 +++ b/grsecurity/grsec_mem.c
74687 @@ -0,0 +1,48 @@
74688 +#include <linux/kernel.h>
74689 +#include <linux/sched.h>
74690 +#include <linux/mm.h>
74691 +#include <linux/mman.h>
74692 +#include <linux/module.h>
74693 +#include <linux/grinternal.h>
74694 +
74695 +void gr_handle_msr_write(void)
74696 +{
74697 + gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
74698 + return;
74699 +}
74700 +EXPORT_SYMBOL_GPL(gr_handle_msr_write);
74701 +
74702 +void
74703 +gr_handle_ioperm(void)
74704 +{
74705 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
74706 + return;
74707 +}
74708 +
74709 +void
74710 +gr_handle_iopl(void)
74711 +{
74712 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
74713 + return;
74714 +}
74715 +
74716 +void
74717 +gr_handle_mem_readwrite(u64 from, u64 to)
74718 +{
74719 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
74720 + return;
74721 +}
74722 +
74723 +void
74724 +gr_handle_vm86(void)
74725 +{
74726 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
74727 + return;
74728 +}
74729 +
74730 +void
74731 +gr_log_badprocpid(const char *entry)
74732 +{
74733 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
74734 + return;
74735 +}
74736 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
74737 new file mode 100644
74738 index 0000000..cd9e124
74739 --- /dev/null
74740 +++ b/grsecurity/grsec_mount.c
74741 @@ -0,0 +1,65 @@
74742 +#include <linux/kernel.h>
74743 +#include <linux/sched.h>
74744 +#include <linux/mount.h>
74745 +#include <linux/major.h>
74746 +#include <linux/grsecurity.h>
74747 +#include <linux/grinternal.h>
74748 +
74749 +void
74750 +gr_log_remount(const char *devname, const int retval)
74751 +{
74752 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74753 + if (grsec_enable_mount && (retval >= 0))
74754 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
74755 +#endif
74756 + return;
74757 +}
74758 +
74759 +void
74760 +gr_log_unmount(const char *devname, const int retval)
74761 +{
74762 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74763 + if (grsec_enable_mount && (retval >= 0))
74764 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
74765 +#endif
74766 + return;
74767 +}
74768 +
74769 +void
74770 +gr_log_mount(const char *from, const char *to, const int retval)
74771 +{
74772 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74773 + if (grsec_enable_mount && (retval >= 0))
74774 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
74775 +#endif
74776 + return;
74777 +}
74778 +
74779 +int
74780 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
74781 +{
74782 +#ifdef CONFIG_GRKERNSEC_ROFS
74783 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
74784 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
74785 + return -EPERM;
74786 + } else
74787 + return 0;
74788 +#endif
74789 + return 0;
74790 +}
74791 +
74792 +int
74793 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
74794 +{
74795 +#ifdef CONFIG_GRKERNSEC_ROFS
74796 + struct inode *inode = dentry->d_inode;
74797 +
74798 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
74799 + inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
74800 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
74801 + return -EPERM;
74802 + } else
74803 + return 0;
74804 +#endif
74805 + return 0;
74806 +}
74807 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
74808 new file mode 100644
74809 index 0000000..6ee9d50
74810 --- /dev/null
74811 +++ b/grsecurity/grsec_pax.c
74812 @@ -0,0 +1,45 @@
74813 +#include <linux/kernel.h>
74814 +#include <linux/sched.h>
74815 +#include <linux/mm.h>
74816 +#include <linux/file.h>
74817 +#include <linux/grinternal.h>
74818 +#include <linux/grsecurity.h>
74819 +
74820 +void
74821 +gr_log_textrel(struct vm_area_struct * vma)
74822 +{
74823 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74824 + if (grsec_enable_log_rwxmaps)
74825 + gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
74826 +#endif
74827 + return;
74828 +}
74829 +
74830 +void gr_log_ptgnustack(struct file *file)
74831 +{
74832 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74833 + if (grsec_enable_log_rwxmaps)
74834 + gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
74835 +#endif
74836 + return;
74837 +}
74838 +
74839 +void
74840 +gr_log_rwxmmap(struct file *file)
74841 +{
74842 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74843 + if (grsec_enable_log_rwxmaps)
74844 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
74845 +#endif
74846 + return;
74847 +}
74848 +
74849 +void
74850 +gr_log_rwxmprotect(struct vm_area_struct *vma)
74851 +{
74852 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74853 + if (grsec_enable_log_rwxmaps)
74854 + gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
74855 +#endif
74856 + return;
74857 +}
74858 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
74859 new file mode 100644
74860 index 0000000..f7f29aa
74861 --- /dev/null
74862 +++ b/grsecurity/grsec_ptrace.c
74863 @@ -0,0 +1,30 @@
74864 +#include <linux/kernel.h>
74865 +#include <linux/sched.h>
74866 +#include <linux/grinternal.h>
74867 +#include <linux/security.h>
74868 +
74869 +void
74870 +gr_audit_ptrace(struct task_struct *task)
74871 +{
74872 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
74873 + if (grsec_enable_audit_ptrace)
74874 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
74875 +#endif
74876 + return;
74877 +}
74878 +
74879 +int
74880 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
74881 +{
74882 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
74883 + const struct dentry *dentry = file->f_path.dentry;
74884 + const struct vfsmount *mnt = file->f_path.mnt;
74885 +
74886 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
74887 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
74888 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
74889 + return -EACCES;
74890 + }
74891 +#endif
74892 + return 0;
74893 +}
74894 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
74895 new file mode 100644
74896 index 0000000..3860c7e
74897 --- /dev/null
74898 +++ b/grsecurity/grsec_sig.c
74899 @@ -0,0 +1,236 @@
74900 +#include <linux/kernel.h>
74901 +#include <linux/sched.h>
74902 +#include <linux/fs.h>
74903 +#include <linux/delay.h>
74904 +#include <linux/grsecurity.h>
74905 +#include <linux/grinternal.h>
74906 +#include <linux/hardirq.h>
74907 +
74908 +char *signames[] = {
74909 + [SIGSEGV] = "Segmentation fault",
74910 + [SIGILL] = "Illegal instruction",
74911 + [SIGABRT] = "Abort",
74912 + [SIGBUS] = "Invalid alignment/Bus error"
74913 +};
74914 +
74915 +void
74916 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
74917 +{
74918 +#ifdef CONFIG_GRKERNSEC_SIGNAL
74919 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
74920 + (sig == SIGABRT) || (sig == SIGBUS))) {
74921 + if (task_pid_nr(t) == task_pid_nr(current)) {
74922 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
74923 + } else {
74924 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
74925 + }
74926 + }
74927 +#endif
74928 + return;
74929 +}
74930 +
74931 +int
74932 +gr_handle_signal(const struct task_struct *p, const int sig)
74933 +{
74934 +#ifdef CONFIG_GRKERNSEC
74935 + /* ignore the 0 signal for protected task checks */
74936 + if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
74937 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
74938 + return -EPERM;
74939 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
74940 + return -EPERM;
74941 + }
74942 +#endif
74943 + return 0;
74944 +}
74945 +
74946 +#ifdef CONFIG_GRKERNSEC
74947 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
74948 +
74949 +int gr_fake_force_sig(int sig, struct task_struct *t)
74950 +{
74951 + unsigned long int flags;
74952 + int ret, blocked, ignored;
74953 + struct k_sigaction *action;
74954 +
74955 + spin_lock_irqsave(&t->sighand->siglock, flags);
74956 + action = &t->sighand->action[sig-1];
74957 + ignored = action->sa.sa_handler == SIG_IGN;
74958 + blocked = sigismember(&t->blocked, sig);
74959 + if (blocked || ignored) {
74960 + action->sa.sa_handler = SIG_DFL;
74961 + if (blocked) {
74962 + sigdelset(&t->blocked, sig);
74963 + recalc_sigpending_and_wake(t);
74964 + }
74965 + }
74966 + if (action->sa.sa_handler == SIG_DFL)
74967 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
74968 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
74969 +
74970 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
74971 +
74972 + return ret;
74973 +}
74974 +#endif
74975 +
74976 +#define GR_USER_BAN_TIME (15 * 60)
74977 +#define GR_DAEMON_BRUTE_TIME (30 * 60)
74978 +
74979 +void gr_handle_brute_attach(int dumpable)
74980 +{
74981 +#ifdef CONFIG_GRKERNSEC_BRUTE
74982 + struct task_struct *p = current;
74983 + kuid_t uid = GLOBAL_ROOT_UID;
74984 + int daemon = 0;
74985 +
74986 + if (!grsec_enable_brute)
74987 + return;
74988 +
74989 + rcu_read_lock();
74990 + read_lock(&tasklist_lock);
74991 + read_lock(&grsec_exec_file_lock);
74992 + if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
74993 + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
74994 + p->real_parent->brute = 1;
74995 + daemon = 1;
74996 + } else {
74997 + const struct cred *cred = __task_cred(p), *cred2;
74998 + struct task_struct *tsk, *tsk2;
74999 +
75000 + if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
75001 + struct user_struct *user;
75002 +
75003 + uid = cred->uid;
75004 +
75005 + /* this is put upon execution past expiration */
75006 + user = find_user(uid);
75007 + if (user == NULL)
75008 + goto unlock;
75009 + user->suid_banned = 1;
75010 + user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
75011 + if (user->suid_ban_expires == ~0UL)
75012 + user->suid_ban_expires--;
75013 +
75014 + /* only kill other threads of the same binary, from the same user */
75015 + do_each_thread(tsk2, tsk) {
75016 + cred2 = __task_cred(tsk);
75017 + if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
75018 + gr_fake_force_sig(SIGKILL, tsk);
75019 + } while_each_thread(tsk2, tsk);
75020 + }
75021 + }
75022 +unlock:
75023 + read_unlock(&grsec_exec_file_lock);
75024 + read_unlock(&tasklist_lock);
75025 + rcu_read_unlock();
75026 +
75027 + if (gr_is_global_nonroot(uid))
75028 + gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
75029 + else if (daemon)
75030 + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
75031 +
75032 +#endif
75033 + return;
75034 +}
75035 +
75036 +void gr_handle_brute_check(void)
75037 +{
75038 +#ifdef CONFIG_GRKERNSEC_BRUTE
75039 + struct task_struct *p = current;
75040 +
75041 + if (unlikely(p->brute)) {
75042 + if (!grsec_enable_brute)
75043 + p->brute = 0;
75044 + else if (time_before(get_seconds(), p->brute_expires))
75045 + msleep(30 * 1000);
75046 + }
75047 +#endif
75048 + return;
75049 +}
75050 +
75051 +void gr_handle_kernel_exploit(void)
75052 +{
75053 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
75054 + const struct cred *cred;
75055 + struct task_struct *tsk, *tsk2;
75056 + struct user_struct *user;
75057 + kuid_t uid;
75058 +
75059 + if (in_irq() || in_serving_softirq() || in_nmi())
75060 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
75061 +
75062 + uid = current_uid();
75063 +
75064 + if (gr_is_global_root(uid))
75065 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
75066 + else {
75067 + /* kill all the processes of this user, hold a reference
75068 + to their creds struct, and prevent them from creating
75069 + another process until system reset
75070 + */
75071 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
75072 + GR_GLOBAL_UID(uid));
75073 + /* we intentionally leak this ref */
75074 + user = get_uid(current->cred->user);
75075 + if (user)
75076 + user->kernel_banned = 1;
75077 +
75078 + /* kill all processes of this user */
75079 + read_lock(&tasklist_lock);
75080 + do_each_thread(tsk2, tsk) {
75081 + cred = __task_cred(tsk);
75082 + if (uid_eq(cred->uid, uid))
75083 + gr_fake_force_sig(SIGKILL, tsk);
75084 + } while_each_thread(tsk2, tsk);
75085 + read_unlock(&tasklist_lock);
75086 + }
75087 +#endif
75088 +}
75089 +
75090 +#ifdef CONFIG_GRKERNSEC_BRUTE
75091 +static bool suid_ban_expired(struct user_struct *user)
75092 +{
75093 + if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
75094 + user->suid_banned = 0;
75095 + user->suid_ban_expires = 0;
75096 + free_uid(user);
75097 + return true;
75098 + }
75099 +
75100 + return false;
75101 +}
75102 +#endif
75103 +
75104 +int gr_process_kernel_exec_ban(void)
75105 +{
75106 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
75107 + if (unlikely(current->cred->user->kernel_banned))
75108 + return -EPERM;
75109 +#endif
75110 + return 0;
75111 +}
75112 +
75113 +int gr_process_kernel_setuid_ban(struct user_struct *user)
75114 +{
75115 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
75116 + if (unlikely(user->kernel_banned))
75117 + gr_fake_force_sig(SIGKILL, current);
75118 +#endif
75119 + return 0;
75120 +}
75121 +
75122 +int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
75123 +{
75124 +#ifdef CONFIG_GRKERNSEC_BRUTE
75125 + struct user_struct *user = current->cred->user;
75126 + if (unlikely(user->suid_banned)) {
75127 + if (suid_ban_expired(user))
75128 + return 0;
75129 + /* disallow execution of suid binaries only */
75130 + else if (!uid_eq(bprm->cred->euid, current->cred->uid))
75131 + return -EPERM;
75132 + }
75133 +#endif
75134 + return 0;
75135 +}
75136 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
75137 new file mode 100644
75138 index 0000000..c0aef3a
75139 --- /dev/null
75140 +++ b/grsecurity/grsec_sock.c
75141 @@ -0,0 +1,244 @@
75142 +#include <linux/kernel.h>
75143 +#include <linux/module.h>
75144 +#include <linux/sched.h>
75145 +#include <linux/file.h>
75146 +#include <linux/net.h>
75147 +#include <linux/in.h>
75148 +#include <linux/ip.h>
75149 +#include <net/sock.h>
75150 +#include <net/inet_sock.h>
75151 +#include <linux/grsecurity.h>
75152 +#include <linux/grinternal.h>
75153 +#include <linux/gracl.h>
75154 +
75155 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
75156 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
75157 +
75158 +EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
75159 +EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
75160 +
75161 +#ifdef CONFIG_UNIX_MODULE
75162 +EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
75163 +EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
75164 +EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
75165 +EXPORT_SYMBOL_GPL(gr_handle_create);
75166 +#endif
75167 +
75168 +#ifdef CONFIG_GRKERNSEC
75169 +#define gr_conn_table_size 32749
75170 +struct conn_table_entry {
75171 + struct conn_table_entry *next;
75172 + struct signal_struct *sig;
75173 +};
75174 +
75175 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
75176 +DEFINE_SPINLOCK(gr_conn_table_lock);
75177 +
75178 +extern const char * gr_socktype_to_name(unsigned char type);
75179 +extern const char * gr_proto_to_name(unsigned char proto);
75180 +extern const char * gr_sockfamily_to_name(unsigned char family);
75181 +
75182 +static __inline__ int
75183 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
75184 +{
75185 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
75186 +}
75187 +
75188 +static __inline__ int
75189 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
75190 + __u16 sport, __u16 dport)
75191 +{
75192 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
75193 + sig->gr_sport == sport && sig->gr_dport == dport))
75194 + return 1;
75195 + else
75196 + return 0;
75197 +}
75198 +
75199 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
75200 +{
75201 + struct conn_table_entry **match;
75202 + unsigned int index;
75203 +
75204 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
75205 + sig->gr_sport, sig->gr_dport,
75206 + gr_conn_table_size);
75207 +
75208 + newent->sig = sig;
75209 +
75210 + match = &gr_conn_table[index];
75211 + newent->next = *match;
75212 + *match = newent;
75213 +
75214 + return;
75215 +}
75216 +
75217 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
75218 +{
75219 + struct conn_table_entry *match, *last = NULL;
75220 + unsigned int index;
75221 +
75222 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
75223 + sig->gr_sport, sig->gr_dport,
75224 + gr_conn_table_size);
75225 +
75226 + match = gr_conn_table[index];
75227 + while (match && !conn_match(match->sig,
75228 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
75229 + sig->gr_dport)) {
75230 + last = match;
75231 + match = match->next;
75232 + }
75233 +
75234 + if (match) {
75235 + if (last)
75236 + last->next = match->next;
75237 + else
75238 + gr_conn_table[index] = NULL;
75239 + kfree(match);
75240 + }
75241 +
75242 + return;
75243 +}
75244 +
75245 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
75246 + __u16 sport, __u16 dport)
75247 +{
75248 + struct conn_table_entry *match;
75249 + unsigned int index;
75250 +
75251 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
75252 +
75253 + match = gr_conn_table[index];
75254 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
75255 + match = match->next;
75256 +
75257 + if (match)
75258 + return match->sig;
75259 + else
75260 + return NULL;
75261 +}
75262 +
75263 +#endif
75264 +
75265 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
75266 +{
75267 +#ifdef CONFIG_GRKERNSEC
75268 + struct signal_struct *sig = task->signal;
75269 + struct conn_table_entry *newent;
75270 +
75271 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
75272 + if (newent == NULL)
75273 + return;
75274 + /* no bh lock needed since we are called with bh disabled */
75275 + spin_lock(&gr_conn_table_lock);
75276 + gr_del_task_from_ip_table_nolock(sig);
75277 + sig->gr_saddr = inet->inet_rcv_saddr;
75278 + sig->gr_daddr = inet->inet_daddr;
75279 + sig->gr_sport = inet->inet_sport;
75280 + sig->gr_dport = inet->inet_dport;
75281 + gr_add_to_task_ip_table_nolock(sig, newent);
75282 + spin_unlock(&gr_conn_table_lock);
75283 +#endif
75284 + return;
75285 +}
75286 +
75287 +void gr_del_task_from_ip_table(struct task_struct *task)
75288 +{
75289 +#ifdef CONFIG_GRKERNSEC
75290 + spin_lock_bh(&gr_conn_table_lock);
75291 + gr_del_task_from_ip_table_nolock(task->signal);
75292 + spin_unlock_bh(&gr_conn_table_lock);
75293 +#endif
75294 + return;
75295 +}
75296 +
75297 +void
75298 +gr_attach_curr_ip(const struct sock *sk)
75299 +{
75300 +#ifdef CONFIG_GRKERNSEC
75301 + struct signal_struct *p, *set;
75302 + const struct inet_sock *inet = inet_sk(sk);
75303 +
75304 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
75305 + return;
75306 +
75307 + set = current->signal;
75308 +
75309 + spin_lock_bh(&gr_conn_table_lock);
75310 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
75311 + inet->inet_dport, inet->inet_sport);
75312 + if (unlikely(p != NULL)) {
75313 + set->curr_ip = p->curr_ip;
75314 + set->used_accept = 1;
75315 + gr_del_task_from_ip_table_nolock(p);
75316 + spin_unlock_bh(&gr_conn_table_lock);
75317 + return;
75318 + }
75319 + spin_unlock_bh(&gr_conn_table_lock);
75320 +
75321 + set->curr_ip = inet->inet_daddr;
75322 + set->used_accept = 1;
75323 +#endif
75324 + return;
75325 +}
75326 +
75327 +int
75328 +gr_handle_sock_all(const int family, const int type, const int protocol)
75329 +{
75330 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
75331 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
75332 + (family != AF_UNIX)) {
75333 + if (family == AF_INET)
75334 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
75335 + else
75336 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
75337 + return -EACCES;
75338 + }
75339 +#endif
75340 + return 0;
75341 +}
75342 +
75343 +int
75344 +gr_handle_sock_server(const struct sockaddr *sck)
75345 +{
75346 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75347 + if (grsec_enable_socket_server &&
75348 + in_group_p(grsec_socket_server_gid) &&
75349 + sck && (sck->sa_family != AF_UNIX) &&
75350 + (sck->sa_family != AF_LOCAL)) {
75351 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
75352 + return -EACCES;
75353 + }
75354 +#endif
75355 + return 0;
75356 +}
75357 +
75358 +int
75359 +gr_handle_sock_server_other(const struct sock *sck)
75360 +{
75361 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75362 + if (grsec_enable_socket_server &&
75363 + in_group_p(grsec_socket_server_gid) &&
75364 + sck && (sck->sk_family != AF_UNIX) &&
75365 + (sck->sk_family != AF_LOCAL)) {
75366 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
75367 + return -EACCES;
75368 + }
75369 +#endif
75370 + return 0;
75371 +}
75372 +
75373 +int
75374 +gr_handle_sock_client(const struct sockaddr *sck)
75375 +{
75376 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
75377 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
75378 + sck && (sck->sa_family != AF_UNIX) &&
75379 + (sck->sa_family != AF_LOCAL)) {
75380 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
75381 + return -EACCES;
75382 + }
75383 +#endif
75384 + return 0;
75385 +}
75386 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
75387 new file mode 100644
75388 index 0000000..8159888
75389 --- /dev/null
75390 +++ b/grsecurity/grsec_sysctl.c
75391 @@ -0,0 +1,479 @@
75392 +#include <linux/kernel.h>
75393 +#include <linux/sched.h>
75394 +#include <linux/sysctl.h>
75395 +#include <linux/grsecurity.h>
75396 +#include <linux/grinternal.h>
75397 +
75398 +int
75399 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
75400 +{
75401 +#ifdef CONFIG_GRKERNSEC_SYSCTL
75402 + if (dirname == NULL || name == NULL)
75403 + return 0;
75404 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
75405 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
75406 + return -EACCES;
75407 + }
75408 +#endif
75409 + return 0;
75410 +}
75411 +
75412 +#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
75413 +static int __maybe_unused __read_only one = 1;
75414 +#endif
75415 +
75416 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
75417 + defined(CONFIG_GRKERNSEC_DENYUSB)
75418 +struct ctl_table grsecurity_table[] = {
75419 +#ifdef CONFIG_GRKERNSEC_SYSCTL
75420 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
75421 +#ifdef CONFIG_GRKERNSEC_IO
75422 + {
75423 + .procname = "disable_priv_io",
75424 + .data = &grsec_disable_privio,
75425 + .maxlen = sizeof(int),
75426 + .mode = 0600,
75427 + .proc_handler = &proc_dointvec,
75428 + },
75429 +#endif
75430 +#endif
75431 +#ifdef CONFIG_GRKERNSEC_LINK
75432 + {
75433 + .procname = "linking_restrictions",
75434 + .data = &grsec_enable_link,
75435 + .maxlen = sizeof(int),
75436 + .mode = 0600,
75437 + .proc_handler = &proc_dointvec,
75438 + },
75439 +#endif
75440 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
75441 + {
75442 + .procname = "enforce_symlinksifowner",
75443 + .data = &grsec_enable_symlinkown,
75444 + .maxlen = sizeof(int),
75445 + .mode = 0600,
75446 + .proc_handler = &proc_dointvec,
75447 + },
75448 + {
75449 + .procname = "symlinkown_gid",
75450 + .data = &grsec_symlinkown_gid,
75451 + .maxlen = sizeof(int),
75452 + .mode = 0600,
75453 + .proc_handler = &proc_dointvec,
75454 + },
75455 +#endif
75456 +#ifdef CONFIG_GRKERNSEC_BRUTE
75457 + {
75458 + .procname = "deter_bruteforce",
75459 + .data = &grsec_enable_brute,
75460 + .maxlen = sizeof(int),
75461 + .mode = 0600,
75462 + .proc_handler = &proc_dointvec,
75463 + },
75464 +#endif
75465 +#ifdef CONFIG_GRKERNSEC_FIFO
75466 + {
75467 + .procname = "fifo_restrictions",
75468 + .data = &grsec_enable_fifo,
75469 + .maxlen = sizeof(int),
75470 + .mode = 0600,
75471 + .proc_handler = &proc_dointvec,
75472 + },
75473 +#endif
75474 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
75475 + {
75476 + .procname = "ptrace_readexec",
75477 + .data = &grsec_enable_ptrace_readexec,
75478 + .maxlen = sizeof(int),
75479 + .mode = 0600,
75480 + .proc_handler = &proc_dointvec,
75481 + },
75482 +#endif
75483 +#ifdef CONFIG_GRKERNSEC_SETXID
75484 + {
75485 + .procname = "consistent_setxid",
75486 + .data = &grsec_enable_setxid,
75487 + .maxlen = sizeof(int),
75488 + .mode = 0600,
75489 + .proc_handler = &proc_dointvec,
75490 + },
75491 +#endif
75492 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75493 + {
75494 + .procname = "ip_blackhole",
75495 + .data = &grsec_enable_blackhole,
75496 + .maxlen = sizeof(int),
75497 + .mode = 0600,
75498 + .proc_handler = &proc_dointvec,
75499 + },
75500 + {
75501 + .procname = "lastack_retries",
75502 + .data = &grsec_lastack_retries,
75503 + .maxlen = sizeof(int),
75504 + .mode = 0600,
75505 + .proc_handler = &proc_dointvec,
75506 + },
75507 +#endif
75508 +#ifdef CONFIG_GRKERNSEC_EXECLOG
75509 + {
75510 + .procname = "exec_logging",
75511 + .data = &grsec_enable_execlog,
75512 + .maxlen = sizeof(int),
75513 + .mode = 0600,
75514 + .proc_handler = &proc_dointvec,
75515 + },
75516 +#endif
75517 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
75518 + {
75519 + .procname = "rwxmap_logging",
75520 + .data = &grsec_enable_log_rwxmaps,
75521 + .maxlen = sizeof(int),
75522 + .mode = 0600,
75523 + .proc_handler = &proc_dointvec,
75524 + },
75525 +#endif
75526 +#ifdef CONFIG_GRKERNSEC_SIGNAL
75527 + {
75528 + .procname = "signal_logging",
75529 + .data = &grsec_enable_signal,
75530 + .maxlen = sizeof(int),
75531 + .mode = 0600,
75532 + .proc_handler = &proc_dointvec,
75533 + },
75534 +#endif
75535 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
75536 + {
75537 + .procname = "forkfail_logging",
75538 + .data = &grsec_enable_forkfail,
75539 + .maxlen = sizeof(int),
75540 + .mode = 0600,
75541 + .proc_handler = &proc_dointvec,
75542 + },
75543 +#endif
75544 +#ifdef CONFIG_GRKERNSEC_TIME
75545 + {
75546 + .procname = "timechange_logging",
75547 + .data = &grsec_enable_time,
75548 + .maxlen = sizeof(int),
75549 + .mode = 0600,
75550 + .proc_handler = &proc_dointvec,
75551 + },
75552 +#endif
75553 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
75554 + {
75555 + .procname = "chroot_deny_shmat",
75556 + .data = &grsec_enable_chroot_shmat,
75557 + .maxlen = sizeof(int),
75558 + .mode = 0600,
75559 + .proc_handler = &proc_dointvec,
75560 + },
75561 +#endif
75562 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75563 + {
75564 + .procname = "chroot_deny_unix",
75565 + .data = &grsec_enable_chroot_unix,
75566 + .maxlen = sizeof(int),
75567 + .mode = 0600,
75568 + .proc_handler = &proc_dointvec,
75569 + },
75570 +#endif
75571 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
75572 + {
75573 + .procname = "chroot_deny_mount",
75574 + .data = &grsec_enable_chroot_mount,
75575 + .maxlen = sizeof(int),
75576 + .mode = 0600,
75577 + .proc_handler = &proc_dointvec,
75578 + },
75579 +#endif
75580 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
75581 + {
75582 + .procname = "chroot_deny_fchdir",
75583 + .data = &grsec_enable_chroot_fchdir,
75584 + .maxlen = sizeof(int),
75585 + .mode = 0600,
75586 + .proc_handler = &proc_dointvec,
75587 + },
75588 +#endif
75589 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
75590 + {
75591 + .procname = "chroot_deny_chroot",
75592 + .data = &grsec_enable_chroot_double,
75593 + .maxlen = sizeof(int),
75594 + .mode = 0600,
75595 + .proc_handler = &proc_dointvec,
75596 + },
75597 +#endif
75598 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
75599 + {
75600 + .procname = "chroot_deny_pivot",
75601 + .data = &grsec_enable_chroot_pivot,
75602 + .maxlen = sizeof(int),
75603 + .mode = 0600,
75604 + .proc_handler = &proc_dointvec,
75605 + },
75606 +#endif
75607 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
75608 + {
75609 + .procname = "chroot_enforce_chdir",
75610 + .data = &grsec_enable_chroot_chdir,
75611 + .maxlen = sizeof(int),
75612 + .mode = 0600,
75613 + .proc_handler = &proc_dointvec,
75614 + },
75615 +#endif
75616 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
75617 + {
75618 + .procname = "chroot_deny_chmod",
75619 + .data = &grsec_enable_chroot_chmod,
75620 + .maxlen = sizeof(int),
75621 + .mode = 0600,
75622 + .proc_handler = &proc_dointvec,
75623 + },
75624 +#endif
75625 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
75626 + {
75627 + .procname = "chroot_deny_mknod",
75628 + .data = &grsec_enable_chroot_mknod,
75629 + .maxlen = sizeof(int),
75630 + .mode = 0600,
75631 + .proc_handler = &proc_dointvec,
75632 + },
75633 +#endif
75634 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75635 + {
75636 + .procname = "chroot_restrict_nice",
75637 + .data = &grsec_enable_chroot_nice,
75638 + .maxlen = sizeof(int),
75639 + .mode = 0600,
75640 + .proc_handler = &proc_dointvec,
75641 + },
75642 +#endif
75643 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
75644 + {
75645 + .procname = "chroot_execlog",
75646 + .data = &grsec_enable_chroot_execlog,
75647 + .maxlen = sizeof(int),
75648 + .mode = 0600,
75649 + .proc_handler = &proc_dointvec,
75650 + },
75651 +#endif
75652 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
75653 + {
75654 + .procname = "chroot_caps",
75655 + .data = &grsec_enable_chroot_caps,
75656 + .maxlen = sizeof(int),
75657 + .mode = 0600,
75658 + .proc_handler = &proc_dointvec,
75659 + },
75660 +#endif
75661 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
75662 + {
75663 + .procname = "chroot_deny_sysctl",
75664 + .data = &grsec_enable_chroot_sysctl,
75665 + .maxlen = sizeof(int),
75666 + .mode = 0600,
75667 + .proc_handler = &proc_dointvec,
75668 + },
75669 +#endif
75670 +#ifdef CONFIG_GRKERNSEC_TPE
75671 + {
75672 + .procname = "tpe",
75673 + .data = &grsec_enable_tpe,
75674 + .maxlen = sizeof(int),
75675 + .mode = 0600,
75676 + .proc_handler = &proc_dointvec,
75677 + },
75678 + {
75679 + .procname = "tpe_gid",
75680 + .data = &grsec_tpe_gid,
75681 + .maxlen = sizeof(int),
75682 + .mode = 0600,
75683 + .proc_handler = &proc_dointvec,
75684 + },
75685 +#endif
75686 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
75687 + {
75688 + .procname = "tpe_invert",
75689 + .data = &grsec_enable_tpe_invert,
75690 + .maxlen = sizeof(int),
75691 + .mode = 0600,
75692 + .proc_handler = &proc_dointvec,
75693 + },
75694 +#endif
75695 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
75696 + {
75697 + .procname = "tpe_restrict_all",
75698 + .data = &grsec_enable_tpe_all,
75699 + .maxlen = sizeof(int),
75700 + .mode = 0600,
75701 + .proc_handler = &proc_dointvec,
75702 + },
75703 +#endif
75704 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
75705 + {
75706 + .procname = "socket_all",
75707 + .data = &grsec_enable_socket_all,
75708 + .maxlen = sizeof(int),
75709 + .mode = 0600,
75710 + .proc_handler = &proc_dointvec,
75711 + },
75712 + {
75713 + .procname = "socket_all_gid",
75714 + .data = &grsec_socket_all_gid,
75715 + .maxlen = sizeof(int),
75716 + .mode = 0600,
75717 + .proc_handler = &proc_dointvec,
75718 + },
75719 +#endif
75720 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
75721 + {
75722 + .procname = "socket_client",
75723 + .data = &grsec_enable_socket_client,
75724 + .maxlen = sizeof(int),
75725 + .mode = 0600,
75726 + .proc_handler = &proc_dointvec,
75727 + },
75728 + {
75729 + .procname = "socket_client_gid",
75730 + .data = &grsec_socket_client_gid,
75731 + .maxlen = sizeof(int),
75732 + .mode = 0600,
75733 + .proc_handler = &proc_dointvec,
75734 + },
75735 +#endif
75736 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75737 + {
75738 + .procname = "socket_server",
75739 + .data = &grsec_enable_socket_server,
75740 + .maxlen = sizeof(int),
75741 + .mode = 0600,
75742 + .proc_handler = &proc_dointvec,
75743 + },
75744 + {
75745 + .procname = "socket_server_gid",
75746 + .data = &grsec_socket_server_gid,
75747 + .maxlen = sizeof(int),
75748 + .mode = 0600,
75749 + .proc_handler = &proc_dointvec,
75750 + },
75751 +#endif
75752 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
75753 + {
75754 + .procname = "audit_group",
75755 + .data = &grsec_enable_group,
75756 + .maxlen = sizeof(int),
75757 + .mode = 0600,
75758 + .proc_handler = &proc_dointvec,
75759 + },
75760 + {
75761 + .procname = "audit_gid",
75762 + .data = &grsec_audit_gid,
75763 + .maxlen = sizeof(int),
75764 + .mode = 0600,
75765 + .proc_handler = &proc_dointvec,
75766 + },
75767 +#endif
75768 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75769 + {
75770 + .procname = "audit_chdir",
75771 + .data = &grsec_enable_chdir,
75772 + .maxlen = sizeof(int),
75773 + .mode = 0600,
75774 + .proc_handler = &proc_dointvec,
75775 + },
75776 +#endif
75777 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
75778 + {
75779 + .procname = "audit_mount",
75780 + .data = &grsec_enable_mount,
75781 + .maxlen = sizeof(int),
75782 + .mode = 0600,
75783 + .proc_handler = &proc_dointvec,
75784 + },
75785 +#endif
75786 +#ifdef CONFIG_GRKERNSEC_DMESG
75787 + {
75788 + .procname = "dmesg",
75789 + .data = &grsec_enable_dmesg,
75790 + .maxlen = sizeof(int),
75791 + .mode = 0600,
75792 + .proc_handler = &proc_dointvec,
75793 + },
75794 +#endif
75795 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75796 + {
75797 + .procname = "chroot_findtask",
75798 + .data = &grsec_enable_chroot_findtask,
75799 + .maxlen = sizeof(int),
75800 + .mode = 0600,
75801 + .proc_handler = &proc_dointvec,
75802 + },
75803 +#endif
75804 +#ifdef CONFIG_GRKERNSEC_RESLOG
75805 + {
75806 + .procname = "resource_logging",
75807 + .data = &grsec_resource_logging,
75808 + .maxlen = sizeof(int),
75809 + .mode = 0600,
75810 + .proc_handler = &proc_dointvec,
75811 + },
75812 +#endif
75813 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
75814 + {
75815 + .procname = "audit_ptrace",
75816 + .data = &grsec_enable_audit_ptrace,
75817 + .maxlen = sizeof(int),
75818 + .mode = 0600,
75819 + .proc_handler = &proc_dointvec,
75820 + },
75821 +#endif
75822 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
75823 + {
75824 + .procname = "harden_ptrace",
75825 + .data = &grsec_enable_harden_ptrace,
75826 + .maxlen = sizeof(int),
75827 + .mode = 0600,
75828 + .proc_handler = &proc_dointvec,
75829 + },
75830 +#endif
75831 +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
75832 + {
75833 + .procname = "harden_ipc",
75834 + .data = &grsec_enable_harden_ipc,
75835 + .maxlen = sizeof(int),
75836 + .mode = 0600,
75837 + .proc_handler = &proc_dointvec,
75838 + },
75839 +#endif
75840 + {
75841 + .procname = "grsec_lock",
75842 + .data = &grsec_lock,
75843 + .maxlen = sizeof(int),
75844 + .mode = 0600,
75845 + .proc_handler = &proc_dointvec,
75846 + },
75847 +#endif
75848 +#ifdef CONFIG_GRKERNSEC_ROFS
75849 + {
75850 + .procname = "romount_protect",
75851 + .data = &grsec_enable_rofs,
75852 + .maxlen = sizeof(int),
75853 + .mode = 0600,
75854 + .proc_handler = &proc_dointvec_minmax,
75855 + .extra1 = &one,
75856 + .extra2 = &one,
75857 + },
75858 +#endif
75859 +#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
75860 + {
75861 + .procname = "deny_new_usb",
75862 + .data = &grsec_deny_new_usb,
75863 + .maxlen = sizeof(int),
75864 + .mode = 0600,
75865 + .proc_handler = &proc_dointvec,
75866 + },
75867 +#endif
75868 + { }
75869 +};
75870 +#endif
75871 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
75872 new file mode 100644
75873 index 0000000..61b514e
75874 --- /dev/null
75875 +++ b/grsecurity/grsec_time.c
75876 @@ -0,0 +1,16 @@
75877 +#include <linux/kernel.h>
75878 +#include <linux/sched.h>
75879 +#include <linux/grinternal.h>
75880 +#include <linux/module.h>
75881 +
75882 +void
75883 +gr_log_timechange(void)
75884 +{
75885 +#ifdef CONFIG_GRKERNSEC_TIME
75886 + if (grsec_enable_time)
75887 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
75888 +#endif
75889 + return;
75890 +}
75891 +
75892 +EXPORT_SYMBOL_GPL(gr_log_timechange);
75893 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
75894 new file mode 100644
75895 index 0000000..ee57dcf
75896 --- /dev/null
75897 +++ b/grsecurity/grsec_tpe.c
75898 @@ -0,0 +1,73 @@
75899 +#include <linux/kernel.h>
75900 +#include <linux/sched.h>
75901 +#include <linux/file.h>
75902 +#include <linux/fs.h>
75903 +#include <linux/grinternal.h>
75904 +
75905 +extern int gr_acl_tpe_check(void);
75906 +
75907 +int
75908 +gr_tpe_allow(const struct file *file)
75909 +{
75910 +#ifdef CONFIG_GRKERNSEC
75911 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
75912 + const struct cred *cred = current_cred();
75913 + char *msg = NULL;
75914 + char *msg2 = NULL;
75915 +
75916 + // never restrict root
75917 + if (gr_is_global_root(cred->uid))
75918 + return 1;
75919 +
75920 + if (grsec_enable_tpe) {
75921 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
75922 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
75923 + msg = "not being in trusted group";
75924 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
75925 + msg = "being in untrusted group";
75926 +#else
75927 + if (in_group_p(grsec_tpe_gid))
75928 + msg = "being in untrusted group";
75929 +#endif
75930 + }
75931 + if (!msg && gr_acl_tpe_check())
75932 + msg = "being in untrusted role";
75933 +
75934 + // not in any affected group/role
75935 + if (!msg)
75936 + goto next_check;
75937 +
75938 + if (gr_is_global_nonroot(inode->i_uid))
75939 + msg2 = "file in non-root-owned directory";
75940 + else if (inode->i_mode & S_IWOTH)
75941 + msg2 = "file in world-writable directory";
75942 + else if (inode->i_mode & S_IWGRP)
75943 + msg2 = "file in group-writable directory";
75944 +
75945 + if (msg && msg2) {
75946 + char fullmsg[70] = {0};
75947 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
75948 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
75949 + return 0;
75950 + }
75951 + msg = NULL;
75952 +next_check:
75953 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
75954 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
75955 + return 1;
75956 +
75957 + if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
75958 + msg = "directory not owned by user";
75959 + else if (inode->i_mode & S_IWOTH)
75960 + msg = "file in world-writable directory";
75961 + else if (inode->i_mode & S_IWGRP)
75962 + msg = "file in group-writable directory";
75963 +
75964 + if (msg) {
75965 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
75966 + return 0;
75967 + }
75968 +#endif
75969 +#endif
75970 + return 1;
75971 +}
75972 diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
75973 new file mode 100644
75974 index 0000000..ae02d8e
75975 --- /dev/null
75976 +++ b/grsecurity/grsec_usb.c
75977 @@ -0,0 +1,15 @@
75978 +#include <linux/kernel.h>
75979 +#include <linux/grinternal.h>
75980 +#include <linux/module.h>
75981 +
75982 +int gr_handle_new_usb(void)
75983 +{
75984 +#ifdef CONFIG_GRKERNSEC_DENYUSB
75985 + if (grsec_deny_new_usb) {
75986 + printk(KERN_ALERT "grsec: denied insert of new USB device\n");
75987 + return 1;
75988 + }
75989 +#endif
75990 + return 0;
75991 +}
75992 +EXPORT_SYMBOL_GPL(gr_handle_new_usb);
75993 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
75994 new file mode 100644
75995 index 0000000..9f7b1ac
75996 --- /dev/null
75997 +++ b/grsecurity/grsum.c
75998 @@ -0,0 +1,61 @@
75999 +#include <linux/err.h>
76000 +#include <linux/kernel.h>
76001 +#include <linux/sched.h>
76002 +#include <linux/mm.h>
76003 +#include <linux/scatterlist.h>
76004 +#include <linux/crypto.h>
76005 +#include <linux/gracl.h>
76006 +
76007 +
76008 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
76009 +#error "crypto and sha256 must be built into the kernel"
76010 +#endif
76011 +
76012 +int
76013 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
76014 +{
76015 + char *p;
76016 + struct crypto_hash *tfm;
76017 + struct hash_desc desc;
76018 + struct scatterlist sg;
76019 + unsigned char temp_sum[GR_SHA_LEN];
76020 + volatile int retval = 0;
76021 + volatile int dummy = 0;
76022 + unsigned int i;
76023 +
76024 + sg_init_table(&sg, 1);
76025 +
76026 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
76027 + if (IS_ERR(tfm)) {
76028 + /* should never happen, since sha256 should be built in */
76029 + return 1;
76030 + }
76031 +
76032 + desc.tfm = tfm;
76033 + desc.flags = 0;
76034 +
76035 + crypto_hash_init(&desc);
76036 +
76037 + p = salt;
76038 + sg_set_buf(&sg, p, GR_SALT_LEN);
76039 + crypto_hash_update(&desc, &sg, sg.length);
76040 +
76041 + p = entry->pw;
76042 + sg_set_buf(&sg, p, strlen(p));
76043 +
76044 + crypto_hash_update(&desc, &sg, sg.length);
76045 +
76046 + crypto_hash_final(&desc, temp_sum);
76047 +
76048 + memset(entry->pw, 0, GR_PW_LEN);
76049 +
76050 + for (i = 0; i < GR_SHA_LEN; i++)
76051 + if (sum[i] != temp_sum[i])
76052 + retval = 1;
76053 + else
76054 + dummy = 1; // waste a cycle
76055 +
76056 + crypto_free_hash(tfm);
76057 +
76058 + return retval;
76059 +}
76060 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
76061 index 77ff547..181834f 100644
76062 --- a/include/asm-generic/4level-fixup.h
76063 +++ b/include/asm-generic/4level-fixup.h
76064 @@ -13,8 +13,10 @@
76065 #define pmd_alloc(mm, pud, address) \
76066 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
76067 NULL: pmd_offset(pud, address))
76068 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
76069
76070 #define pud_alloc(mm, pgd, address) (pgd)
76071 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
76072 #define pud_offset(pgd, start) (pgd)
76073 #define pud_none(pud) 0
76074 #define pud_bad(pud) 0
76075 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
76076 index b7babf0..97f4c4f 100644
76077 --- a/include/asm-generic/atomic-long.h
76078 +++ b/include/asm-generic/atomic-long.h
76079 @@ -22,6 +22,12 @@
76080
76081 typedef atomic64_t atomic_long_t;
76082
76083 +#ifdef CONFIG_PAX_REFCOUNT
76084 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
76085 +#else
76086 +typedef atomic64_t atomic_long_unchecked_t;
76087 +#endif
76088 +
76089 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
76090
76091 static inline long atomic_long_read(atomic_long_t *l)
76092 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
76093 return (long)atomic64_read(v);
76094 }
76095
76096 +#ifdef CONFIG_PAX_REFCOUNT
76097 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
76098 +{
76099 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76100 +
76101 + return (long)atomic64_read_unchecked(v);
76102 +}
76103 +#endif
76104 +
76105 static inline void atomic_long_set(atomic_long_t *l, long i)
76106 {
76107 atomic64_t *v = (atomic64_t *)l;
76108 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
76109 atomic64_set(v, i);
76110 }
76111
76112 +#ifdef CONFIG_PAX_REFCOUNT
76113 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
76114 +{
76115 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76116 +
76117 + atomic64_set_unchecked(v, i);
76118 +}
76119 +#endif
76120 +
76121 static inline void atomic_long_inc(atomic_long_t *l)
76122 {
76123 atomic64_t *v = (atomic64_t *)l;
76124 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
76125 atomic64_inc(v);
76126 }
76127
76128 +#ifdef CONFIG_PAX_REFCOUNT
76129 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
76130 +{
76131 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76132 +
76133 + atomic64_inc_unchecked(v);
76134 +}
76135 +#endif
76136 +
76137 static inline void atomic_long_dec(atomic_long_t *l)
76138 {
76139 atomic64_t *v = (atomic64_t *)l;
76140 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
76141 atomic64_dec(v);
76142 }
76143
76144 +#ifdef CONFIG_PAX_REFCOUNT
76145 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
76146 +{
76147 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76148 +
76149 + atomic64_dec_unchecked(v);
76150 +}
76151 +#endif
76152 +
76153 static inline void atomic_long_add(long i, atomic_long_t *l)
76154 {
76155 atomic64_t *v = (atomic64_t *)l;
76156 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
76157 atomic64_add(i, v);
76158 }
76159
76160 +#ifdef CONFIG_PAX_REFCOUNT
76161 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
76162 +{
76163 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76164 +
76165 + atomic64_add_unchecked(i, v);
76166 +}
76167 +#endif
76168 +
76169 static inline void atomic_long_sub(long i, atomic_long_t *l)
76170 {
76171 atomic64_t *v = (atomic64_t *)l;
76172 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
76173 atomic64_sub(i, v);
76174 }
76175
76176 +#ifdef CONFIG_PAX_REFCOUNT
76177 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
76178 +{
76179 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76180 +
76181 + atomic64_sub_unchecked(i, v);
76182 +}
76183 +#endif
76184 +
76185 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
76186 {
76187 atomic64_t *v = (atomic64_t *)l;
76188 @@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
76189 return atomic64_add_negative(i, v);
76190 }
76191
76192 -static inline long atomic_long_add_return(long i, atomic_long_t *l)
76193 +static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
76194 {
76195 atomic64_t *v = (atomic64_t *)l;
76196
76197 return (long)atomic64_add_return(i, v);
76198 }
76199
76200 +#ifdef CONFIG_PAX_REFCOUNT
76201 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
76202 +{
76203 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76204 +
76205 + return (long)atomic64_add_return_unchecked(i, v);
76206 +}
76207 +#endif
76208 +
76209 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
76210 {
76211 atomic64_t *v = (atomic64_t *)l;
76212 @@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
76213 return (long)atomic64_inc_return(v);
76214 }
76215
76216 +#ifdef CONFIG_PAX_REFCOUNT
76217 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
76218 +{
76219 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76220 +
76221 + return (long)atomic64_inc_return_unchecked(v);
76222 +}
76223 +#endif
76224 +
76225 static inline long atomic_long_dec_return(atomic_long_t *l)
76226 {
76227 atomic64_t *v = (atomic64_t *)l;
76228 @@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
76229
76230 typedef atomic_t atomic_long_t;
76231
76232 +#ifdef CONFIG_PAX_REFCOUNT
76233 +typedef atomic_unchecked_t atomic_long_unchecked_t;
76234 +#else
76235 +typedef atomic_t atomic_long_unchecked_t;
76236 +#endif
76237 +
76238 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
76239 static inline long atomic_long_read(atomic_long_t *l)
76240 {
76241 @@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
76242 return (long)atomic_read(v);
76243 }
76244
76245 +#ifdef CONFIG_PAX_REFCOUNT
76246 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
76247 +{
76248 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76249 +
76250 + return (long)atomic_read_unchecked(v);
76251 +}
76252 +#endif
76253 +
76254 static inline void atomic_long_set(atomic_long_t *l, long i)
76255 {
76256 atomic_t *v = (atomic_t *)l;
76257 @@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
76258 atomic_set(v, i);
76259 }
76260
76261 +#ifdef CONFIG_PAX_REFCOUNT
76262 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
76263 +{
76264 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76265 +
76266 + atomic_set_unchecked(v, i);
76267 +}
76268 +#endif
76269 +
76270 static inline void atomic_long_inc(atomic_long_t *l)
76271 {
76272 atomic_t *v = (atomic_t *)l;
76273 @@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
76274 atomic_inc(v);
76275 }
76276
76277 +#ifdef CONFIG_PAX_REFCOUNT
76278 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
76279 +{
76280 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76281 +
76282 + atomic_inc_unchecked(v);
76283 +}
76284 +#endif
76285 +
76286 static inline void atomic_long_dec(atomic_long_t *l)
76287 {
76288 atomic_t *v = (atomic_t *)l;
76289 @@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
76290 atomic_dec(v);
76291 }
76292
76293 +#ifdef CONFIG_PAX_REFCOUNT
76294 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
76295 +{
76296 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76297 +
76298 + atomic_dec_unchecked(v);
76299 +}
76300 +#endif
76301 +
76302 static inline void atomic_long_add(long i, atomic_long_t *l)
76303 {
76304 atomic_t *v = (atomic_t *)l;
76305 @@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
76306 atomic_add(i, v);
76307 }
76308
76309 +#ifdef CONFIG_PAX_REFCOUNT
76310 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
76311 +{
76312 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76313 +
76314 + atomic_add_unchecked(i, v);
76315 +}
76316 +#endif
76317 +
76318 static inline void atomic_long_sub(long i, atomic_long_t *l)
76319 {
76320 atomic_t *v = (atomic_t *)l;
76321 @@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
76322 atomic_sub(i, v);
76323 }
76324
76325 +#ifdef CONFIG_PAX_REFCOUNT
76326 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
76327 +{
76328 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76329 +
76330 + atomic_sub_unchecked(i, v);
76331 +}
76332 +#endif
76333 +
76334 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
76335 {
76336 atomic_t *v = (atomic_t *)l;
76337 @@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
76338 return (long)atomic_add_return(i, v);
76339 }
76340
76341 +#ifdef CONFIG_PAX_REFCOUNT
76342 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
76343 +{
76344 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76345 +
76346 + return (long)atomic_add_return_unchecked(i, v);
76347 +}
76348 +
76349 +#endif
76350 +
76351 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
76352 {
76353 atomic_t *v = (atomic_t *)l;
76354 @@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
76355 return (long)atomic_inc_return(v);
76356 }
76357
76358 +#ifdef CONFIG_PAX_REFCOUNT
76359 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
76360 +{
76361 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76362 +
76363 + return (long)atomic_inc_return_unchecked(v);
76364 +}
76365 +#endif
76366 +
76367 static inline long atomic_long_dec_return(atomic_long_t *l)
76368 {
76369 atomic_t *v = (atomic_t *)l;
76370 @@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
76371
76372 #endif /* BITS_PER_LONG == 64 */
76373
76374 +#ifdef CONFIG_PAX_REFCOUNT
76375 +static inline void pax_refcount_needs_these_functions(void)
76376 +{
76377 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
76378 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
76379 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
76380 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
76381 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
76382 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
76383 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
76384 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
76385 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
76386 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
76387 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
76388 +#ifdef CONFIG_X86
76389 + atomic_clear_mask_unchecked(0, NULL);
76390 + atomic_set_mask_unchecked(0, NULL);
76391 +#endif
76392 +
76393 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
76394 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
76395 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
76396 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
76397 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
76398 + atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
76399 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
76400 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
76401 +}
76402 +#else
76403 +#define atomic_read_unchecked(v) atomic_read(v)
76404 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
76405 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
76406 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
76407 +#define atomic_inc_unchecked(v) atomic_inc(v)
76408 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
76409 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
76410 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
76411 +#define atomic_dec_unchecked(v) atomic_dec(v)
76412 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
76413 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
76414 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
76415 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
76416 +
76417 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
76418 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
76419 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
76420 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
76421 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
76422 +#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
76423 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
76424 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
76425 +#endif
76426 +
76427 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
76428 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
76429 index 33bd2de..f31bff97 100644
76430 --- a/include/asm-generic/atomic.h
76431 +++ b/include/asm-generic/atomic.h
76432 @@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
76433 * Atomically clears the bits set in @mask from @v
76434 */
76435 #ifndef atomic_clear_mask
76436 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
76437 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
76438 {
76439 unsigned long flags;
76440
76441 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
76442 index b18ce4f..2ee2843 100644
76443 --- a/include/asm-generic/atomic64.h
76444 +++ b/include/asm-generic/atomic64.h
76445 @@ -16,6 +16,8 @@ typedef struct {
76446 long long counter;
76447 } atomic64_t;
76448
76449 +typedef atomic64_t atomic64_unchecked_t;
76450 +
76451 #define ATOMIC64_INIT(i) { (i) }
76452
76453 extern long long atomic64_read(const atomic64_t *v);
76454 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
76455 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
76456 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
76457
76458 +#define atomic64_read_unchecked(v) atomic64_read(v)
76459 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
76460 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
76461 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
76462 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
76463 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
76464 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
76465 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
76466 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
76467 +
76468 #endif /* _ASM_GENERIC_ATOMIC64_H */
76469 diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
76470 index a60a7cc..0fe12f2 100644
76471 --- a/include/asm-generic/bitops/__fls.h
76472 +++ b/include/asm-generic/bitops/__fls.h
76473 @@ -9,7 +9,7 @@
76474 *
76475 * Undefined if no set bit exists, so code should check against 0 first.
76476 */
76477 -static __always_inline unsigned long __fls(unsigned long word)
76478 +static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
76479 {
76480 int num = BITS_PER_LONG - 1;
76481
76482 diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
76483 index 0576d1f..dad6c71 100644
76484 --- a/include/asm-generic/bitops/fls.h
76485 +++ b/include/asm-generic/bitops/fls.h
76486 @@ -9,7 +9,7 @@
76487 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
76488 */
76489
76490 -static __always_inline int fls(int x)
76491 +static __always_inline int __intentional_overflow(-1) fls(int x)
76492 {
76493 int r = 32;
76494
76495 diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
76496 index b097cf8..3d40e14 100644
76497 --- a/include/asm-generic/bitops/fls64.h
76498 +++ b/include/asm-generic/bitops/fls64.h
76499 @@ -15,7 +15,7 @@
76500 * at position 64.
76501 */
76502 #if BITS_PER_LONG == 32
76503 -static __always_inline int fls64(__u64 x)
76504 +static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
76505 {
76506 __u32 h = x >> 32;
76507 if (h)
76508 @@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
76509 return fls(x);
76510 }
76511 #elif BITS_PER_LONG == 64
76512 -static __always_inline int fls64(__u64 x)
76513 +static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
76514 {
76515 if (x == 0)
76516 return 0;
76517 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
76518 index 1bfcfe5..e04c5c9 100644
76519 --- a/include/asm-generic/cache.h
76520 +++ b/include/asm-generic/cache.h
76521 @@ -6,7 +6,7 @@
76522 * cache lines need to provide their own cache.h.
76523 */
76524
76525 -#define L1_CACHE_SHIFT 5
76526 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
76527 +#define L1_CACHE_SHIFT 5UL
76528 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
76529
76530 #endif /* __ASM_GENERIC_CACHE_H */
76531 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
76532 index 0d68a1e..b74a761 100644
76533 --- a/include/asm-generic/emergency-restart.h
76534 +++ b/include/asm-generic/emergency-restart.h
76535 @@ -1,7 +1,7 @@
76536 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
76537 #define _ASM_GENERIC_EMERGENCY_RESTART_H
76538
76539 -static inline void machine_emergency_restart(void)
76540 +static inline __noreturn void machine_emergency_restart(void)
76541 {
76542 machine_restart(NULL);
76543 }
76544 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
76545 index 90f99c7..00ce236 100644
76546 --- a/include/asm-generic/kmap_types.h
76547 +++ b/include/asm-generic/kmap_types.h
76548 @@ -2,9 +2,9 @@
76549 #define _ASM_GENERIC_KMAP_TYPES_H
76550
76551 #ifdef __WITH_KM_FENCE
76552 -# define KM_TYPE_NR 41
76553 +# define KM_TYPE_NR 42
76554 #else
76555 -# define KM_TYPE_NR 20
76556 +# define KM_TYPE_NR 21
76557 #endif
76558
76559 #endif
76560 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
76561 index 9ceb03b..62b0b8f 100644
76562 --- a/include/asm-generic/local.h
76563 +++ b/include/asm-generic/local.h
76564 @@ -23,24 +23,37 @@ typedef struct
76565 atomic_long_t a;
76566 } local_t;
76567
76568 +typedef struct {
76569 + atomic_long_unchecked_t a;
76570 +} local_unchecked_t;
76571 +
76572 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
76573
76574 #define local_read(l) atomic_long_read(&(l)->a)
76575 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
76576 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
76577 +#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
76578 #define local_inc(l) atomic_long_inc(&(l)->a)
76579 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
76580 #define local_dec(l) atomic_long_dec(&(l)->a)
76581 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
76582 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
76583 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
76584 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
76585 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
76586
76587 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
76588 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
76589 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
76590 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
76591 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
76592 +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
76593 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
76594 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
76595 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
76596
76597 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
76598 +#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
76599 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
76600 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
76601 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
76602 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
76603 index 725612b..9cc513a 100644
76604 --- a/include/asm-generic/pgtable-nopmd.h
76605 +++ b/include/asm-generic/pgtable-nopmd.h
76606 @@ -1,14 +1,19 @@
76607 #ifndef _PGTABLE_NOPMD_H
76608 #define _PGTABLE_NOPMD_H
76609
76610 -#ifndef __ASSEMBLY__
76611 -
76612 #include <asm-generic/pgtable-nopud.h>
76613
76614 -struct mm_struct;
76615 -
76616 #define __PAGETABLE_PMD_FOLDED
76617
76618 +#define PMD_SHIFT PUD_SHIFT
76619 +#define PTRS_PER_PMD 1
76620 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
76621 +#define PMD_MASK (~(PMD_SIZE-1))
76622 +
76623 +#ifndef __ASSEMBLY__
76624 +
76625 +struct mm_struct;
76626 +
76627 /*
76628 * Having the pmd type consist of a pud gets the size right, and allows
76629 * us to conceptually access the pud entry that this pmd is folded into
76630 @@ -16,11 +21,6 @@ struct mm_struct;
76631 */
76632 typedef struct { pud_t pud; } pmd_t;
76633
76634 -#define PMD_SHIFT PUD_SHIFT
76635 -#define PTRS_PER_PMD 1
76636 -#define PMD_SIZE (1UL << PMD_SHIFT)
76637 -#define PMD_MASK (~(PMD_SIZE-1))
76638 -
76639 /*
76640 * The "pud_xxx()" functions here are trivial for a folded two-level
76641 * setup: the pmd is never bad, and a pmd always exists (as it's folded
76642 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
76643 index 810431d..0ec4804f 100644
76644 --- a/include/asm-generic/pgtable-nopud.h
76645 +++ b/include/asm-generic/pgtable-nopud.h
76646 @@ -1,10 +1,15 @@
76647 #ifndef _PGTABLE_NOPUD_H
76648 #define _PGTABLE_NOPUD_H
76649
76650 -#ifndef __ASSEMBLY__
76651 -
76652 #define __PAGETABLE_PUD_FOLDED
76653
76654 +#define PUD_SHIFT PGDIR_SHIFT
76655 +#define PTRS_PER_PUD 1
76656 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
76657 +#define PUD_MASK (~(PUD_SIZE-1))
76658 +
76659 +#ifndef __ASSEMBLY__
76660 +
76661 /*
76662 * Having the pud type consist of a pgd gets the size right, and allows
76663 * us to conceptually access the pgd entry that this pud is folded into
76664 @@ -12,11 +17,6 @@
76665 */
76666 typedef struct { pgd_t pgd; } pud_t;
76667
76668 -#define PUD_SHIFT PGDIR_SHIFT
76669 -#define PTRS_PER_PUD 1
76670 -#define PUD_SIZE (1UL << PUD_SHIFT)
76671 -#define PUD_MASK (~(PUD_SIZE-1))
76672 -
76673 /*
76674 * The "pgd_xxx()" functions here are trivial for a folded two-level
76675 * setup: the pud is never bad, and a pud always exists (as it's folded
76676 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
76677 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
76678
76679 #define pgd_populate(mm, pgd, pud) do { } while (0)
76680 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
76681 /*
76682 * (puds are folded into pgds so this doesn't get actually called,
76683 * but the define is needed for a generic inline function.)
76684 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
76685 index 8e4f41d..c5e9afd 100644
76686 --- a/include/asm-generic/pgtable.h
76687 +++ b/include/asm-generic/pgtable.h
76688 @@ -748,6 +748,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
76689 }
76690 #endif /* CONFIG_NUMA_BALANCING */
76691
76692 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
76693 +#ifdef CONFIG_PAX_KERNEXEC
76694 +#error KERNEXEC requires pax_open_kernel
76695 +#else
76696 +static inline unsigned long pax_open_kernel(void) { return 0; }
76697 +#endif
76698 +#endif
76699 +
76700 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
76701 +#ifdef CONFIG_PAX_KERNEXEC
76702 +#error KERNEXEC requires pax_close_kernel
76703 +#else
76704 +static inline unsigned long pax_close_kernel(void) { return 0; }
76705 +#endif
76706 +#endif
76707 +
76708 #endif /* CONFIG_MMU */
76709
76710 #endif /* !__ASSEMBLY__ */
76711 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
76712 index dc1269c..48a4f51 100644
76713 --- a/include/asm-generic/uaccess.h
76714 +++ b/include/asm-generic/uaccess.h
76715 @@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
76716 return __clear_user(to, n);
76717 }
76718
76719 +#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
76720 +#ifdef CONFIG_PAX_MEMORY_UDEREF
76721 +#error UDEREF requires pax_open_userland
76722 +#else
76723 +static inline unsigned long pax_open_userland(void) { return 0; }
76724 +#endif
76725 +#endif
76726 +
76727 +#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
76728 +#ifdef CONFIG_PAX_MEMORY_UDEREF
76729 +#error UDEREF requires pax_close_userland
76730 +#else
76731 +static inline unsigned long pax_close_userland(void) { return 0; }
76732 +#endif
76733 +#endif
76734 +
76735 #endif /* __ASM_GENERIC_UACCESS_H */
76736 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
76737 index bc2121f..2f41f9a 100644
76738 --- a/include/asm-generic/vmlinux.lds.h
76739 +++ b/include/asm-generic/vmlinux.lds.h
76740 @@ -232,6 +232,7 @@
76741 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
76742 VMLINUX_SYMBOL(__start_rodata) = .; \
76743 *(.rodata) *(.rodata.*) \
76744 + *(.data..read_only) \
76745 *(__vermagic) /* Kernel version magic */ \
76746 . = ALIGN(8); \
76747 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
76748 @@ -716,17 +717,18 @@
76749 * section in the linker script will go there too. @phdr should have
76750 * a leading colon.
76751 *
76752 - * Note that this macros defines __per_cpu_load as an absolute symbol.
76753 + * Note that this macros defines per_cpu_load as an absolute symbol.
76754 * If there is no need to put the percpu section at a predetermined
76755 * address, use PERCPU_SECTION.
76756 */
76757 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
76758 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
76759 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
76760 + per_cpu_load = .; \
76761 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
76762 - LOAD_OFFSET) { \
76763 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
76764 PERCPU_INPUT(cacheline) \
76765 } phdr \
76766 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
76767 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
76768
76769 /**
76770 * PERCPU_SECTION - define output section for percpu area, simple version
76771 diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
76772 index e73c19e..5b89e00 100644
76773 --- a/include/crypto/algapi.h
76774 +++ b/include/crypto/algapi.h
76775 @@ -34,7 +34,7 @@ struct crypto_type {
76776 unsigned int maskclear;
76777 unsigned int maskset;
76778 unsigned int tfmsize;
76779 -};
76780 +} __do_const;
76781
76782 struct crypto_instance {
76783 struct crypto_alg alg;
76784 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
76785 index 1d4a920..da65658 100644
76786 --- a/include/drm/drmP.h
76787 +++ b/include/drm/drmP.h
76788 @@ -66,6 +66,7 @@
76789 #include <linux/workqueue.h>
76790 #include <linux/poll.h>
76791 #include <asm/pgalloc.h>
76792 +#include <asm/local.h>
76793 #include <drm/drm.h>
76794 #include <drm/drm_sarea.h>
76795 #include <drm/drm_vma_manager.h>
76796 @@ -278,10 +279,12 @@ do { \
76797 * \param cmd command.
76798 * \param arg argument.
76799 */
76800 -typedef int drm_ioctl_t(struct drm_device *dev, void *data,
76801 +typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
76802 + struct drm_file *file_priv);
76803 +typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
76804 struct drm_file *file_priv);
76805
76806 -typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
76807 +typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
76808 unsigned long arg);
76809
76810 #define DRM_IOCTL_NR(n) _IOC_NR(n)
76811 @@ -297,10 +300,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
76812 struct drm_ioctl_desc {
76813 unsigned int cmd;
76814 int flags;
76815 - drm_ioctl_t *func;
76816 + drm_ioctl_t func;
76817 unsigned int cmd_drv;
76818 const char *name;
76819 -};
76820 +} __do_const;
76821
76822 /**
76823 * Creates a driver or general drm_ioctl_desc array entry for the given
76824 @@ -1013,7 +1016,8 @@ struct drm_info_list {
76825 int (*show)(struct seq_file*, void*); /** show callback */
76826 u32 driver_features; /**< Required driver features for this entry */
76827 void *data;
76828 -};
76829 +} __do_const;
76830 +typedef struct drm_info_list __no_const drm_info_list_no_const;
76831
76832 /**
76833 * debugfs node structure. This structure represents a debugfs file.
76834 @@ -1097,7 +1101,7 @@ struct drm_device {
76835
76836 /** \name Usage Counters */
76837 /*@{ */
76838 - int open_count; /**< Outstanding files open */
76839 + local_t open_count; /**< Outstanding files open */
76840 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
76841 atomic_t vma_count; /**< Outstanding vma areas open */
76842 int buf_use; /**< Buffers in use -- cannot alloc */
76843 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
76844 index ef6ad3a..be34b16 100644
76845 --- a/include/drm/drm_crtc_helper.h
76846 +++ b/include/drm/drm_crtc_helper.h
76847 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
76848 struct drm_connector *connector);
76849 /* disable encoder when not in use - more explicit than dpms off */
76850 void (*disable)(struct drm_encoder *encoder);
76851 -};
76852 +} __no_const;
76853
76854 /**
76855 * drm_connector_helper_funcs - helper operations for connectors
76856 diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
76857 index 940ece4..8cb727f 100644
76858 --- a/include/drm/i915_pciids.h
76859 +++ b/include/drm/i915_pciids.h
76860 @@ -37,7 +37,7 @@
76861 */
76862 #define INTEL_VGA_DEVICE(id, info) { \
76863 0x8086, id, \
76864 - ~0, ~0, \
76865 + PCI_ANY_ID, PCI_ANY_ID, \
76866 0x030000, 0xff0000, \
76867 (unsigned long) info }
76868
76869 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
76870 index 72dcbe8..8db58d7 100644
76871 --- a/include/drm/ttm/ttm_memory.h
76872 +++ b/include/drm/ttm/ttm_memory.h
76873 @@ -48,7 +48,7 @@
76874
76875 struct ttm_mem_shrink {
76876 int (*do_shrink) (struct ttm_mem_shrink *);
76877 -};
76878 +} __no_const;
76879
76880 /**
76881 * struct ttm_mem_global - Global memory accounting structure.
76882 diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
76883 index d1f61bf..2239439 100644
76884 --- a/include/drm/ttm/ttm_page_alloc.h
76885 +++ b/include/drm/ttm/ttm_page_alloc.h
76886 @@ -78,6 +78,7 @@ void ttm_dma_page_alloc_fini(void);
76887 */
76888 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
76889
76890 +struct device;
76891 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
76892 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
76893
76894 diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
76895 index 4b840e8..155d235 100644
76896 --- a/include/keys/asymmetric-subtype.h
76897 +++ b/include/keys/asymmetric-subtype.h
76898 @@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
76899 /* Verify the signature on a key of this subtype (optional) */
76900 int (*verify_signature)(const struct key *key,
76901 const struct public_key_signature *sig);
76902 -};
76903 +} __do_const;
76904
76905 /**
76906 * asymmetric_key_subtype - Get the subtype from an asymmetric key
76907 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
76908 index c1da539..1dcec55 100644
76909 --- a/include/linux/atmdev.h
76910 +++ b/include/linux/atmdev.h
76911 @@ -28,7 +28,7 @@ struct compat_atm_iobuf {
76912 #endif
76913
76914 struct k_atm_aal_stats {
76915 -#define __HANDLE_ITEM(i) atomic_t i
76916 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
76917 __AAL_STAT_ITEMS
76918 #undef __HANDLE_ITEM
76919 };
76920 @@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
76921 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
76922 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
76923 struct module *owner;
76924 -};
76925 +} __do_const ;
76926
76927 struct atmphy_ops {
76928 int (*start)(struct atm_dev *dev);
76929 diff --git a/include/linux/audit.h b/include/linux/audit.h
76930 index bf1ef22..2a55e1b 100644
76931 --- a/include/linux/audit.h
76932 +++ b/include/linux/audit.h
76933 @@ -195,7 +195,7 @@ static inline void audit_ptrace(struct task_struct *t)
76934 extern unsigned int audit_serial(void);
76935 extern int auditsc_get_stamp(struct audit_context *ctx,
76936 struct timespec *t, unsigned int *serial);
76937 -extern int audit_set_loginuid(kuid_t loginuid);
76938 +extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
76939
76940 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
76941 {
76942 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
76943 index fd8bf32..2cccd5a 100644
76944 --- a/include/linux/binfmts.h
76945 +++ b/include/linux/binfmts.h
76946 @@ -74,8 +74,10 @@ struct linux_binfmt {
76947 int (*load_binary)(struct linux_binprm *);
76948 int (*load_shlib)(struct file *);
76949 int (*core_dump)(struct coredump_params *cprm);
76950 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
76951 + void (*handle_mmap)(struct file *);
76952 unsigned long min_coredump; /* minimal dump size */
76953 -};
76954 +} __do_const;
76955
76956 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
76957
76958 diff --git a/include/linux/bitops.h b/include/linux/bitops.h
76959 index abc9ca7..e54ee27 100644
76960 --- a/include/linux/bitops.h
76961 +++ b/include/linux/bitops.h
76962 @@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
76963 * @word: value to rotate
76964 * @shift: bits to roll
76965 */
76966 -static inline __u32 rol32(__u32 word, unsigned int shift)
76967 +static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
76968 {
76969 return (word << shift) | (word >> (32 - shift));
76970 }
76971 @@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
76972 * @word: value to rotate
76973 * @shift: bits to roll
76974 */
76975 -static inline __u32 ror32(__u32 word, unsigned int shift)
76976 +static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
76977 {
76978 return (word >> shift) | (word << (32 - shift));
76979 }
76980 @@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
76981 return (__s32)(value << shift) >> shift;
76982 }
76983
76984 -static inline unsigned fls_long(unsigned long l)
76985 +static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
76986 {
76987 if (sizeof(l) == 4)
76988 return fls(l);
76989 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
76990 index 1b135d4..59fc876 100644
76991 --- a/include/linux/blkdev.h
76992 +++ b/include/linux/blkdev.h
76993 @@ -1578,7 +1578,7 @@ struct block_device_operations {
76994 /* this callback is with swap_lock and sometimes page table lock held */
76995 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
76996 struct module *owner;
76997 -};
76998 +} __do_const;
76999
77000 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
77001 unsigned long);
77002 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
77003 index afc1343..9735539 100644
77004 --- a/include/linux/blktrace_api.h
77005 +++ b/include/linux/blktrace_api.h
77006 @@ -25,7 +25,7 @@ struct blk_trace {
77007 struct dentry *dropped_file;
77008 struct dentry *msg_file;
77009 struct list_head running_list;
77010 - atomic_t dropped;
77011 + atomic_unchecked_t dropped;
77012 };
77013
77014 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
77015 diff --git a/include/linux/cache.h b/include/linux/cache.h
77016 index 4c57065..40346da 100644
77017 --- a/include/linux/cache.h
77018 +++ b/include/linux/cache.h
77019 @@ -16,6 +16,14 @@
77020 #define __read_mostly
77021 #endif
77022
77023 +#ifndef __read_only
77024 +#ifdef CONFIG_PAX_KERNEXEC
77025 +#error KERNEXEC requires __read_only
77026 +#else
77027 +#define __read_only __read_mostly
77028 +#endif
77029 +#endif
77030 +
77031 #ifndef ____cacheline_aligned
77032 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
77033 #endif
77034 diff --git a/include/linux/capability.h b/include/linux/capability.h
77035 index a6ee1f9..e1ca49d 100644
77036 --- a/include/linux/capability.h
77037 +++ b/include/linux/capability.h
77038 @@ -212,8 +212,13 @@ extern bool capable(int cap);
77039 extern bool ns_capable(struct user_namespace *ns, int cap);
77040 extern bool inode_capable(const struct inode *inode, int cap);
77041 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
77042 +extern bool capable_nolog(int cap);
77043 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
77044 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
77045
77046 /* audit system wants to get cap info from files as well */
77047 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
77048
77049 +extern int is_privileged_binary(const struct dentry *dentry);
77050 +
77051 #endif /* !_LINUX_CAPABILITY_H */
77052 diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
77053 index 8609d57..86e4d79 100644
77054 --- a/include/linux/cdrom.h
77055 +++ b/include/linux/cdrom.h
77056 @@ -87,7 +87,6 @@ struct cdrom_device_ops {
77057
77058 /* driver specifications */
77059 const int capability; /* capability flags */
77060 - int n_minors; /* number of active minor devices */
77061 /* handle uniform packets for scsi type devices (scsi,atapi) */
77062 int (*generic_packet) (struct cdrom_device_info *,
77063 struct packet_command *);
77064 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
77065 index 4ce9056..86caac6 100644
77066 --- a/include/linux/cleancache.h
77067 +++ b/include/linux/cleancache.h
77068 @@ -31,7 +31,7 @@ struct cleancache_ops {
77069 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
77070 void (*invalidate_inode)(int, struct cleancache_filekey);
77071 void (*invalidate_fs)(int);
77072 -};
77073 +} __no_const;
77074
77075 extern struct cleancache_ops *
77076 cleancache_register_ops(struct cleancache_ops *ops);
77077 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
77078 index 7e59253..d6e4cae 100644
77079 --- a/include/linux/clk-provider.h
77080 +++ b/include/linux/clk-provider.h
77081 @@ -141,6 +141,7 @@ struct clk_ops {
77082 unsigned long);
77083 void (*init)(struct clk_hw *hw);
77084 };
77085 +typedef struct clk_ops __no_const clk_ops_no_const;
77086
77087 /**
77088 * struct clk_init_data - holds init data that's common to all clocks and is
77089 diff --git a/include/linux/compat.h b/include/linux/compat.h
77090 index 19f6003..90b64f4 100644
77091 --- a/include/linux/compat.h
77092 +++ b/include/linux/compat.h
77093 @@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
77094 compat_size_t __user *len_ptr);
77095
77096 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
77097 -asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
77098 +asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
77099 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
77100 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
77101 compat_ssize_t msgsz, int msgflg);
77102 @@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
77103 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
77104 compat_ulong_t addr, compat_ulong_t data);
77105 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77106 - compat_long_t addr, compat_long_t data);
77107 + compat_ulong_t addr, compat_ulong_t data);
77108
77109 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
77110 /*
77111 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
77112 index 2507fd2..55203f8 100644
77113 --- a/include/linux/compiler-gcc4.h
77114 +++ b/include/linux/compiler-gcc4.h
77115 @@ -39,9 +39,34 @@
77116 # define __compiletime_warning(message) __attribute__((warning(message)))
77117 # define __compiletime_error(message) __attribute__((error(message)))
77118 #endif /* __CHECKER__ */
77119 +
77120 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
77121 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
77122 +#define __bos0(ptr) __bos((ptr), 0)
77123 +#define __bos1(ptr) __bos((ptr), 1)
77124 #endif /* GCC_VERSION >= 40300 */
77125
77126 #if GCC_VERSION >= 40500
77127 +
77128 +#ifdef RANDSTRUCT_PLUGIN
77129 +#define __randomize_layout __attribute__((randomize_layout))
77130 +#define __no_randomize_layout __attribute__((no_randomize_layout))
77131 +#endif
77132 +
77133 +#ifdef CONSTIFY_PLUGIN
77134 +#define __no_const __attribute__((no_const))
77135 +#define __do_const __attribute__((do_const))
77136 +#endif
77137 +
77138 +#ifdef SIZE_OVERFLOW_PLUGIN
77139 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
77140 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
77141 +#endif
77142 +
77143 +#ifdef LATENT_ENTROPY_PLUGIN
77144 +#define __latent_entropy __attribute__((latent_entropy))
77145 +#endif
77146 +
77147 /*
77148 * Mark a position in code as unreachable. This can be used to
77149 * suppress control flow warnings after asm blocks that transfer
77150 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
77151 index 92669cd..cc564c0 100644
77152 --- a/include/linux/compiler.h
77153 +++ b/include/linux/compiler.h
77154 @@ -5,11 +5,14 @@
77155
77156 #ifdef __CHECKER__
77157 # define __user __attribute__((noderef, address_space(1)))
77158 +# define __force_user __force __user
77159 # define __kernel __attribute__((address_space(0)))
77160 +# define __force_kernel __force __kernel
77161 # define __safe __attribute__((safe))
77162 # define __force __attribute__((force))
77163 # define __nocast __attribute__((nocast))
77164 # define __iomem __attribute__((noderef, address_space(2)))
77165 +# define __force_iomem __force __iomem
77166 # define __must_hold(x) __attribute__((context(x,1,1)))
77167 # define __acquires(x) __attribute__((context(x,0,1)))
77168 # define __releases(x) __attribute__((context(x,1,0)))
77169 @@ -17,20 +20,37 @@
77170 # define __release(x) __context__(x,-1)
77171 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
77172 # define __percpu __attribute__((noderef, address_space(3)))
77173 +# define __force_percpu __force __percpu
77174 #ifdef CONFIG_SPARSE_RCU_POINTER
77175 # define __rcu __attribute__((noderef, address_space(4)))
77176 +# define __force_rcu __force __rcu
77177 #else
77178 # define __rcu
77179 +# define __force_rcu
77180 #endif
77181 extern void __chk_user_ptr(const volatile void __user *);
77182 extern void __chk_io_ptr(const volatile void __iomem *);
77183 #else
77184 -# define __user
77185 -# define __kernel
77186 +# ifdef CHECKER_PLUGIN
77187 +//# define __user
77188 +//# define __force_user
77189 +//# define __kernel
77190 +//# define __force_kernel
77191 +# else
77192 +# ifdef STRUCTLEAK_PLUGIN
77193 +# define __user __attribute__((user))
77194 +# else
77195 +# define __user
77196 +# endif
77197 +# define __force_user
77198 +# define __kernel
77199 +# define __force_kernel
77200 +# endif
77201 # define __safe
77202 # define __force
77203 # define __nocast
77204 # define __iomem
77205 +# define __force_iomem
77206 # define __chk_user_ptr(x) (void)0
77207 # define __chk_io_ptr(x) (void)0
77208 # define __builtin_warning(x, y...) (1)
77209 @@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
77210 # define __release(x) (void)0
77211 # define __cond_lock(x,c) (c)
77212 # define __percpu
77213 +# define __force_percpu
77214 # define __rcu
77215 +# define __force_rcu
77216 #endif
77217
77218 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
77219 @@ -275,6 +297,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
77220 # define __attribute_const__ /* unimplemented */
77221 #endif
77222
77223 +#ifndef __randomize_layout
77224 +# define __randomize_layout
77225 +#endif
77226 +
77227 +#ifndef __no_randomize_layout
77228 +# define __no_randomize_layout
77229 +#endif
77230 +
77231 +#ifndef __no_const
77232 +# define __no_const
77233 +#endif
77234 +
77235 +#ifndef __do_const
77236 +# define __do_const
77237 +#endif
77238 +
77239 +#ifndef __size_overflow
77240 +# define __size_overflow(...)
77241 +#endif
77242 +
77243 +#ifndef __intentional_overflow
77244 +# define __intentional_overflow(...)
77245 +#endif
77246 +
77247 +#ifndef __latent_entropy
77248 +# define __latent_entropy
77249 +#endif
77250 +
77251 /*
77252 * Tell gcc if a function is cold. The compiler will assume any path
77253 * directly leading to the call is unlikely.
77254 @@ -284,6 +334,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
77255 #define __cold
77256 #endif
77257
77258 +#ifndef __alloc_size
77259 +#define __alloc_size(...)
77260 +#endif
77261 +
77262 +#ifndef __bos
77263 +#define __bos(ptr, arg)
77264 +#endif
77265 +
77266 +#ifndef __bos0
77267 +#define __bos0(ptr)
77268 +#endif
77269 +
77270 +#ifndef __bos1
77271 +#define __bos1(ptr)
77272 +#endif
77273 +
77274 /* Simple shorthand for a section definition */
77275 #ifndef __section
77276 # define __section(S) __attribute__ ((__section__(#S)))
77277 @@ -349,7 +415,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
77278 * use is to mediate communication between process-level code and irq/NMI
77279 * handlers, all running on the same CPU.
77280 */
77281 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
77282 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
77283 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
77284
77285 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
77286 #ifdef CONFIG_KPROBES
77287 diff --git a/include/linux/completion.h b/include/linux/completion.h
77288 index 5d5aaae..0ea9b84 100644
77289 --- a/include/linux/completion.h
77290 +++ b/include/linux/completion.h
77291 @@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
77292
77293 extern void wait_for_completion(struct completion *);
77294 extern void wait_for_completion_io(struct completion *);
77295 -extern int wait_for_completion_interruptible(struct completion *x);
77296 -extern int wait_for_completion_killable(struct completion *x);
77297 +extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
77298 +extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
77299 extern unsigned long wait_for_completion_timeout(struct completion *x,
77300 - unsigned long timeout);
77301 + unsigned long timeout) __intentional_overflow(-1);
77302 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
77303 - unsigned long timeout);
77304 + unsigned long timeout) __intentional_overflow(-1);
77305 extern long wait_for_completion_interruptible_timeout(
77306 - struct completion *x, unsigned long timeout);
77307 + struct completion *x, unsigned long timeout) __intentional_overflow(-1);
77308 extern long wait_for_completion_killable_timeout(
77309 - struct completion *x, unsigned long timeout);
77310 + struct completion *x, unsigned long timeout) __intentional_overflow(-1);
77311 extern bool try_wait_for_completion(struct completion *x);
77312 extern bool completion_done(struct completion *x);
77313
77314 diff --git a/include/linux/configfs.h b/include/linux/configfs.h
77315 index 34025df..d94bbbc 100644
77316 --- a/include/linux/configfs.h
77317 +++ b/include/linux/configfs.h
77318 @@ -125,7 +125,7 @@ struct configfs_attribute {
77319 const char *ca_name;
77320 struct module *ca_owner;
77321 umode_t ca_mode;
77322 -};
77323 +} __do_const;
77324
77325 /*
77326 * Users often need to create attribute structures for their configurable
77327 diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
77328 index dc196bb..c55a50f 100644
77329 --- a/include/linux/cpufreq.h
77330 +++ b/include/linux/cpufreq.h
77331 @@ -189,6 +189,7 @@ struct global_attr {
77332 ssize_t (*store)(struct kobject *a, struct attribute *b,
77333 const char *c, size_t count);
77334 };
77335 +typedef struct global_attr __no_const global_attr_no_const;
77336
77337 #define define_one_global_ro(_name) \
77338 static struct global_attr _name = \
77339 @@ -225,7 +226,7 @@ struct cpufreq_driver {
77340 int (*suspend) (struct cpufreq_policy *policy);
77341 int (*resume) (struct cpufreq_policy *policy);
77342 struct freq_attr **attr;
77343 -};
77344 +} __do_const;
77345
77346 /* flags */
77347 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
77348 diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
77349 index 50fcbb0..9d2dbd9 100644
77350 --- a/include/linux/cpuidle.h
77351 +++ b/include/linux/cpuidle.h
77352 @@ -50,7 +50,8 @@ struct cpuidle_state {
77353 int index);
77354
77355 int (*enter_dead) (struct cpuidle_device *dev, int index);
77356 -};
77357 +} __do_const;
77358 +typedef struct cpuidle_state __no_const cpuidle_state_no_const;
77359
77360 /* Idle State Flags */
77361 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
77362 @@ -192,7 +193,7 @@ struct cpuidle_governor {
77363 void (*reflect) (struct cpuidle_device *dev, int index);
77364
77365 struct module *owner;
77366 -};
77367 +} __do_const;
77368
77369 #ifdef CONFIG_CPU_IDLE
77370 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
77371 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
77372 index d08e4d2..95fad61 100644
77373 --- a/include/linux/cpumask.h
77374 +++ b/include/linux/cpumask.h
77375 @@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
77376 }
77377
77378 /* Valid inputs for n are -1 and 0. */
77379 -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
77380 +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
77381 {
77382 return n+1;
77383 }
77384
77385 -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
77386 +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
77387 {
77388 return n+1;
77389 }
77390
77391 -static inline unsigned int cpumask_next_and(int n,
77392 +static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
77393 const struct cpumask *srcp,
77394 const struct cpumask *andp)
77395 {
77396 @@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
77397 *
77398 * Returns >= nr_cpu_ids if no further cpus set.
77399 */
77400 -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
77401 +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
77402 {
77403 /* -1 is a legal arg here. */
77404 if (n != -1)
77405 @@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
77406 *
77407 * Returns >= nr_cpu_ids if no further cpus unset.
77408 */
77409 -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
77410 +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
77411 {
77412 /* -1 is a legal arg here. */
77413 if (n != -1)
77414 @@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
77415 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
77416 }
77417
77418 -int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
77419 +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
77420 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
77421
77422 /**
77423 diff --git a/include/linux/cred.h b/include/linux/cred.h
77424 index 04421e8..117e17a 100644
77425 --- a/include/linux/cred.h
77426 +++ b/include/linux/cred.h
77427 @@ -35,7 +35,7 @@ struct group_info {
77428 int nblocks;
77429 kgid_t small_block[NGROUPS_SMALL];
77430 kgid_t *blocks[0];
77431 -};
77432 +} __randomize_layout;
77433
77434 /**
77435 * get_group_info - Get a reference to a group info structure
77436 @@ -136,7 +136,7 @@ struct cred {
77437 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
77438 struct group_info *group_info; /* supplementary groups for euid/fsgid */
77439 struct rcu_head rcu; /* RCU deletion hook */
77440 -};
77441 +} __randomize_layout;
77442
77443 extern void __put_cred(struct cred *);
77444 extern void exit_creds(struct task_struct *);
77445 @@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
77446 static inline void validate_process_creds(void)
77447 {
77448 }
77449 +static inline void validate_task_creds(struct task_struct *task)
77450 +{
77451 +}
77452 #endif
77453
77454 /**
77455 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
77456 index b92eadf..b4ecdc1 100644
77457 --- a/include/linux/crypto.h
77458 +++ b/include/linux/crypto.h
77459 @@ -373,7 +373,7 @@ struct cipher_tfm {
77460 const u8 *key, unsigned int keylen);
77461 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
77462 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
77463 -};
77464 +} __no_const;
77465
77466 struct hash_tfm {
77467 int (*init)(struct hash_desc *desc);
77468 @@ -394,13 +394,13 @@ struct compress_tfm {
77469 int (*cot_decompress)(struct crypto_tfm *tfm,
77470 const u8 *src, unsigned int slen,
77471 u8 *dst, unsigned int *dlen);
77472 -};
77473 +} __no_const;
77474
77475 struct rng_tfm {
77476 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
77477 unsigned int dlen);
77478 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
77479 -};
77480 +} __no_const;
77481
77482 #define crt_ablkcipher crt_u.ablkcipher
77483 #define crt_aead crt_u.aead
77484 diff --git a/include/linux/ctype.h b/include/linux/ctype.h
77485 index 653589e..4ef254a 100644
77486 --- a/include/linux/ctype.h
77487 +++ b/include/linux/ctype.h
77488 @@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
77489 * Fast implementation of tolower() for internal usage. Do not use in your
77490 * code.
77491 */
77492 -static inline char _tolower(const char c)
77493 +static inline unsigned char _tolower(const unsigned char c)
77494 {
77495 return c | 0x20;
77496 }
77497 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
77498 index bf72e9a..4ca7927 100644
77499 --- a/include/linux/dcache.h
77500 +++ b/include/linux/dcache.h
77501 @@ -133,7 +133,7 @@ struct dentry {
77502 } d_u;
77503 struct list_head d_subdirs; /* our children */
77504 struct hlist_node d_alias; /* inode alias list */
77505 -};
77506 +} __randomize_layout;
77507
77508 /*
77509 * dentry->d_lock spinlock nesting subclasses:
77510 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
77511 index 7925bf0..d5143d2 100644
77512 --- a/include/linux/decompress/mm.h
77513 +++ b/include/linux/decompress/mm.h
77514 @@ -77,7 +77,7 @@ static void free(void *where)
77515 * warnings when not needed (indeed large_malloc / large_free are not
77516 * needed by inflate */
77517
77518 -#define malloc(a) kmalloc(a, GFP_KERNEL)
77519 +#define malloc(a) kmalloc((a), GFP_KERNEL)
77520 #define free(a) kfree(a)
77521
77522 #define large_malloc(a) vmalloc(a)
77523 diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
77524 index d48dc00..211ee54 100644
77525 --- a/include/linux/devfreq.h
77526 +++ b/include/linux/devfreq.h
77527 @@ -114,7 +114,7 @@ struct devfreq_governor {
77528 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
77529 int (*event_handler)(struct devfreq *devfreq,
77530 unsigned int event, void *data);
77531 -};
77532 +} __do_const;
77533
77534 /**
77535 * struct devfreq - Device devfreq structure
77536 diff --git a/include/linux/device.h b/include/linux/device.h
77537 index 952b010..d5b7691 100644
77538 --- a/include/linux/device.h
77539 +++ b/include/linux/device.h
77540 @@ -310,7 +310,7 @@ struct subsys_interface {
77541 struct list_head node;
77542 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
77543 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
77544 -};
77545 +} __do_const;
77546
77547 int subsys_interface_register(struct subsys_interface *sif);
77548 void subsys_interface_unregister(struct subsys_interface *sif);
77549 @@ -506,7 +506,7 @@ struct device_type {
77550 void (*release)(struct device *dev);
77551
77552 const struct dev_pm_ops *pm;
77553 -};
77554 +} __do_const;
77555
77556 /* interface for exporting device attributes */
77557 struct device_attribute {
77558 @@ -516,11 +516,12 @@ struct device_attribute {
77559 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
77560 const char *buf, size_t count);
77561 };
77562 +typedef struct device_attribute __no_const device_attribute_no_const;
77563
77564 struct dev_ext_attribute {
77565 struct device_attribute attr;
77566 void *var;
77567 -};
77568 +} __do_const;
77569
77570 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
77571 char *buf);
77572 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
77573 index fd4aee2..1f28db9 100644
77574 --- a/include/linux/dma-mapping.h
77575 +++ b/include/linux/dma-mapping.h
77576 @@ -54,7 +54,7 @@ struct dma_map_ops {
77577 u64 (*get_required_mask)(struct device *dev);
77578 #endif
77579 int is_phys;
77580 -};
77581 +} __do_const;
77582
77583 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
77584
77585 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
77586 index 41cf0c3..f3b771c 100644
77587 --- a/include/linux/dmaengine.h
77588 +++ b/include/linux/dmaengine.h
77589 @@ -1114,9 +1114,9 @@ struct dma_pinned_list {
77590 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
77591 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
77592
77593 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
77594 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
77595 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
77596 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
77597 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
77598 struct dma_pinned_list *pinned_list, struct page *page,
77599 unsigned int offset, size_t len);
77600
77601 diff --git a/include/linux/efi.h b/include/linux/efi.h
77602 index 11ce678..7b8c69c 100644
77603 --- a/include/linux/efi.h
77604 +++ b/include/linux/efi.h
77605 @@ -764,6 +764,7 @@ struct efivar_operations {
77606 efi_set_variable_t *set_variable;
77607 efi_query_variable_store_t *query_variable_store;
77608 };
77609 +typedef struct efivar_operations __no_const efivar_operations_no_const;
77610
77611 struct efivars {
77612 /*
77613 diff --git a/include/linux/elf.h b/include/linux/elf.h
77614 index 67a5fa7..b817372 100644
77615 --- a/include/linux/elf.h
77616 +++ b/include/linux/elf.h
77617 @@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
77618 #define elf_note elf32_note
77619 #define elf_addr_t Elf32_Off
77620 #define Elf_Half Elf32_Half
77621 +#define elf_dyn Elf32_Dyn
77622
77623 #else
77624
77625 @@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
77626 #define elf_note elf64_note
77627 #define elf_addr_t Elf64_Off
77628 #define Elf_Half Elf64_Half
77629 +#define elf_dyn Elf64_Dyn
77630
77631 #endif
77632
77633 diff --git a/include/linux/err.h b/include/linux/err.h
77634 index 15f92e0..e825a8e 100644
77635 --- a/include/linux/err.h
77636 +++ b/include/linux/err.h
77637 @@ -19,12 +19,12 @@
77638
77639 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
77640
77641 -static inline void * __must_check ERR_PTR(long error)
77642 +static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
77643 {
77644 return (void *) error;
77645 }
77646
77647 -static inline long __must_check PTR_ERR(__force const void *ptr)
77648 +static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
77649 {
77650 return (long) ptr;
77651 }
77652 diff --git a/include/linux/extcon.h b/include/linux/extcon.h
77653 index 21c59af..6057a03 100644
77654 --- a/include/linux/extcon.h
77655 +++ b/include/linux/extcon.h
77656 @@ -135,7 +135,7 @@ struct extcon_dev {
77657 /* /sys/class/extcon/.../mutually_exclusive/... */
77658 struct attribute_group attr_g_muex;
77659 struct attribute **attrs_muex;
77660 - struct device_attribute *d_attrs_muex;
77661 + device_attribute_no_const *d_attrs_muex;
77662 };
77663
77664 /**
77665 diff --git a/include/linux/fb.h b/include/linux/fb.h
77666 index 70c4836..ff3daec 100644
77667 --- a/include/linux/fb.h
77668 +++ b/include/linux/fb.h
77669 @@ -304,7 +304,7 @@ struct fb_ops {
77670 /* called at KDB enter and leave time to prepare the console */
77671 int (*fb_debug_enter)(struct fb_info *info);
77672 int (*fb_debug_leave)(struct fb_info *info);
77673 -};
77674 +} __do_const;
77675
77676 #ifdef CONFIG_FB_TILEBLITTING
77677 #define FB_TILE_CURSOR_NONE 0
77678 diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
77679 index 085197b..0fa6f0b 100644
77680 --- a/include/linux/fdtable.h
77681 +++ b/include/linux/fdtable.h
77682 @@ -95,7 +95,7 @@ struct files_struct *get_files_struct(struct task_struct *);
77683 void put_files_struct(struct files_struct *fs);
77684 void reset_files_struct(struct files_struct *);
77685 int unshare_files(struct files_struct **);
77686 -struct files_struct *dup_fd(struct files_struct *, int *);
77687 +struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
77688 void do_close_on_exec(struct files_struct *);
77689 int iterate_fd(struct files_struct *, unsigned,
77690 int (*)(const void *, struct file *, unsigned),
77691 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
77692 index 8293262..2b3b8bd 100644
77693 --- a/include/linux/frontswap.h
77694 +++ b/include/linux/frontswap.h
77695 @@ -11,7 +11,7 @@ struct frontswap_ops {
77696 int (*load)(unsigned, pgoff_t, struct page *);
77697 void (*invalidate_page)(unsigned, pgoff_t);
77698 void (*invalidate_area)(unsigned);
77699 -};
77700 +} __no_const;
77701
77702 extern bool frontswap_enabled;
77703 extern struct frontswap_ops *
77704 diff --git a/include/linux/fs.h b/include/linux/fs.h
77705 index 121f11f..0f2a863 100644
77706 --- a/include/linux/fs.h
77707 +++ b/include/linux/fs.h
77708 @@ -423,7 +423,7 @@ struct address_space {
77709 spinlock_t private_lock; /* for use by the address_space */
77710 struct list_head private_list; /* ditto */
77711 void *private_data; /* ditto */
77712 -} __attribute__((aligned(sizeof(long))));
77713 +} __attribute__((aligned(sizeof(long)))) __randomize_layout;
77714 /*
77715 * On most architectures that alignment is already the case; but
77716 * must be enforced here for CRIS, to let the least significant bit
77717 @@ -466,7 +466,7 @@ struct block_device {
77718 int bd_fsfreeze_count;
77719 /* Mutex for freeze */
77720 struct mutex bd_fsfreeze_mutex;
77721 -};
77722 +} __randomize_layout;
77723
77724 /*
77725 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
77726 @@ -610,7 +610,7 @@ struct inode {
77727 atomic_t i_readcount; /* struct files open RO */
77728 #endif
77729 void *i_private; /* fs or device private pointer */
77730 -};
77731 +} __randomize_layout;
77732
77733 static inline int inode_unhashed(struct inode *inode)
77734 {
77735 @@ -808,7 +808,7 @@ struct file {
77736 #ifdef CONFIG_DEBUG_WRITECOUNT
77737 unsigned long f_mnt_write_state;
77738 #endif
77739 -};
77740 +} __randomize_layout;
77741
77742 struct file_handle {
77743 __u32 handle_bytes;
77744 @@ -978,7 +978,7 @@ struct file_lock {
77745 int state; /* state of grant or error if -ve */
77746 } afs;
77747 } fl_u;
77748 -};
77749 +} __randomize_layout;
77750
77751 /* The following constant reflects the upper bound of the file/locking space */
77752 #ifndef OFFSET_MAX
77753 @@ -1325,7 +1325,7 @@ struct super_block {
77754 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
77755 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
77756 struct rcu_head rcu;
77757 -};
77758 +} __randomize_layout;
77759
77760 extern struct timespec current_fs_time(struct super_block *sb);
77761
77762 @@ -1547,7 +1547,8 @@ struct file_operations {
77763 long (*fallocate)(struct file *file, int mode, loff_t offset,
77764 loff_t len);
77765 int (*show_fdinfo)(struct seq_file *m, struct file *f);
77766 -};
77767 +} __do_const __randomize_layout;
77768 +typedef struct file_operations __no_const file_operations_no_const;
77769
77770 struct inode_operations {
77771 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
77772 @@ -2808,4 +2809,14 @@ static inline bool dir_relax(struct inode *inode)
77773 return !IS_DEADDIR(inode);
77774 }
77775
77776 +static inline bool is_sidechannel_device(const struct inode *inode)
77777 +{
77778 +#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
77779 + umode_t mode = inode->i_mode;
77780 + return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
77781 +#else
77782 + return false;
77783 +#endif
77784 +}
77785 +
77786 #endif /* _LINUX_FS_H */
77787 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
77788 index 0efc3e6..fd23610 100644
77789 --- a/include/linux/fs_struct.h
77790 +++ b/include/linux/fs_struct.h
77791 @@ -6,13 +6,13 @@
77792 #include <linux/seqlock.h>
77793
77794 struct fs_struct {
77795 - int users;
77796 + atomic_t users;
77797 spinlock_t lock;
77798 seqcount_t seq;
77799 int umask;
77800 int in_exec;
77801 struct path root, pwd;
77802 -};
77803 +} __randomize_layout;
77804
77805 extern struct kmem_cache *fs_cachep;
77806
77807 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
77808 index 7714849..a4a5c7a 100644
77809 --- a/include/linux/fscache-cache.h
77810 +++ b/include/linux/fscache-cache.h
77811 @@ -113,7 +113,7 @@ struct fscache_operation {
77812 fscache_operation_release_t release;
77813 };
77814
77815 -extern atomic_t fscache_op_debug_id;
77816 +extern atomic_unchecked_t fscache_op_debug_id;
77817 extern void fscache_op_work_func(struct work_struct *work);
77818
77819 extern void fscache_enqueue_operation(struct fscache_operation *);
77820 @@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
77821 INIT_WORK(&op->work, fscache_op_work_func);
77822 atomic_set(&op->usage, 1);
77823 op->state = FSCACHE_OP_ST_INITIALISED;
77824 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
77825 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
77826 op->processor = processor;
77827 op->release = release;
77828 INIT_LIST_HEAD(&op->pend_link);
77829 diff --git a/include/linux/fscache.h b/include/linux/fscache.h
77830 index 115bb81..e7b812b 100644
77831 --- a/include/linux/fscache.h
77832 +++ b/include/linux/fscache.h
77833 @@ -152,7 +152,7 @@ struct fscache_cookie_def {
77834 * - this is mandatory for any object that may have data
77835 */
77836 void (*now_uncached)(void *cookie_netfs_data);
77837 -};
77838 +} __do_const;
77839
77840 /*
77841 * fscache cached network filesystem type
77842 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
77843 index 1c804b0..1432c2b 100644
77844 --- a/include/linux/fsnotify.h
77845 +++ b/include/linux/fsnotify.h
77846 @@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
77847 struct inode *inode = file_inode(file);
77848 __u32 mask = FS_ACCESS;
77849
77850 + if (is_sidechannel_device(inode))
77851 + return;
77852 +
77853 if (S_ISDIR(inode->i_mode))
77854 mask |= FS_ISDIR;
77855
77856 @@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
77857 struct inode *inode = file_inode(file);
77858 __u32 mask = FS_MODIFY;
77859
77860 + if (is_sidechannel_device(inode))
77861 + return;
77862 +
77863 if (S_ISDIR(inode->i_mode))
77864 mask |= FS_ISDIR;
77865
77866 @@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
77867 */
77868 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
77869 {
77870 - return kstrdup(name, GFP_KERNEL);
77871 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
77872 }
77873
77874 /*
77875 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
77876 index 9f3c275..8bdff5d 100644
77877 --- a/include/linux/genhd.h
77878 +++ b/include/linux/genhd.h
77879 @@ -194,7 +194,7 @@ struct gendisk {
77880 struct kobject *slave_dir;
77881
77882 struct timer_rand_state *random;
77883 - atomic_t sync_io; /* RAID */
77884 + atomic_unchecked_t sync_io; /* RAID */
77885 struct disk_events *ev;
77886 #ifdef CONFIG_BLK_DEV_INTEGRITY
77887 struct blk_integrity *integrity;
77888 @@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
77889 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
77890
77891 /* drivers/char/random.c */
77892 -extern void add_disk_randomness(struct gendisk *disk);
77893 +extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
77894 extern void rand_initialize_disk(struct gendisk *disk);
77895
77896 static inline sector_t get_start_sect(struct block_device *bdev)
77897 diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
77898 index c0894dd..2fbf10c 100644
77899 --- a/include/linux/genl_magic_func.h
77900 +++ b/include/linux/genl_magic_func.h
77901 @@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
77902 },
77903
77904 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
77905 -static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
77906 +static struct genl_ops ZZZ_genl_ops[] = {
77907 #include GENL_MAGIC_INCLUDE_FILE
77908 };
77909
77910 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
77911 index 9b4dd49..61fd41d 100644
77912 --- a/include/linux/gfp.h
77913 +++ b/include/linux/gfp.h
77914 @@ -35,6 +35,13 @@ struct vm_area_struct;
77915 #define ___GFP_NO_KSWAPD 0x400000u
77916 #define ___GFP_OTHER_NODE 0x800000u
77917 #define ___GFP_WRITE 0x1000000u
77918 +
77919 +#ifdef CONFIG_PAX_USERCOPY_SLABS
77920 +#define ___GFP_USERCOPY 0x2000000u
77921 +#else
77922 +#define ___GFP_USERCOPY 0
77923 +#endif
77924 +
77925 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
77926
77927 /*
77928 @@ -92,6 +99,7 @@ struct vm_area_struct;
77929 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
77930 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
77931 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
77932 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
77933
77934 /*
77935 * This may seem redundant, but it's a way of annotating false positives vs.
77936 @@ -99,7 +107,7 @@ struct vm_area_struct;
77937 */
77938 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
77939
77940 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
77941 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
77942 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
77943
77944 /* This equals 0, but use constants in case they ever change */
77945 @@ -153,6 +161,8 @@ struct vm_area_struct;
77946 /* 4GB DMA on some platforms */
77947 #define GFP_DMA32 __GFP_DMA32
77948
77949 +#define GFP_USERCOPY __GFP_USERCOPY
77950 +
77951 /* Convert GFP flags to their corresponding migrate type */
77952 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
77953 {
77954 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
77955 new file mode 100644
77956 index 0000000..edb2cb6
77957 --- /dev/null
77958 +++ b/include/linux/gracl.h
77959 @@ -0,0 +1,340 @@
77960 +#ifndef GR_ACL_H
77961 +#define GR_ACL_H
77962 +
77963 +#include <linux/grdefs.h>
77964 +#include <linux/resource.h>
77965 +#include <linux/capability.h>
77966 +#include <linux/dcache.h>
77967 +#include <asm/resource.h>
77968 +
77969 +/* Major status information */
77970 +
77971 +#define GR_VERSION "grsecurity 3.0"
77972 +#define GRSECURITY_VERSION 0x3000
77973 +
77974 +enum {
77975 + GR_SHUTDOWN = 0,
77976 + GR_ENABLE = 1,
77977 + GR_SPROLE = 2,
77978 + GR_OLDRELOAD = 3,
77979 + GR_SEGVMOD = 4,
77980 + GR_STATUS = 5,
77981 + GR_UNSPROLE = 6,
77982 + GR_PASSSET = 7,
77983 + GR_SPROLEPAM = 8,
77984 + GR_RELOAD = 9,
77985 +};
77986 +
77987 +/* Password setup definitions
77988 + * kernel/grhash.c */
77989 +enum {
77990 + GR_PW_LEN = 128,
77991 + GR_SALT_LEN = 16,
77992 + GR_SHA_LEN = 32,
77993 +};
77994 +
77995 +enum {
77996 + GR_SPROLE_LEN = 64,
77997 +};
77998 +
77999 +enum {
78000 + GR_NO_GLOB = 0,
78001 + GR_REG_GLOB,
78002 + GR_CREATE_GLOB
78003 +};
78004 +
78005 +#define GR_NLIMITS 32
78006 +
78007 +/* Begin Data Structures */
78008 +
78009 +struct sprole_pw {
78010 + unsigned char *rolename;
78011 + unsigned char salt[GR_SALT_LEN];
78012 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
78013 +};
78014 +
78015 +struct name_entry {
78016 + __u32 key;
78017 + ino_t inode;
78018 + dev_t device;
78019 + char *name;
78020 + __u16 len;
78021 + __u8 deleted;
78022 + struct name_entry *prev;
78023 + struct name_entry *next;
78024 +};
78025 +
78026 +struct inodev_entry {
78027 + struct name_entry *nentry;
78028 + struct inodev_entry *prev;
78029 + struct inodev_entry *next;
78030 +};
78031 +
78032 +struct acl_role_db {
78033 + struct acl_role_label **r_hash;
78034 + __u32 r_size;
78035 +};
78036 +
78037 +struct inodev_db {
78038 + struct inodev_entry **i_hash;
78039 + __u32 i_size;
78040 +};
78041 +
78042 +struct name_db {
78043 + struct name_entry **n_hash;
78044 + __u32 n_size;
78045 +};
78046 +
78047 +struct crash_uid {
78048 + uid_t uid;
78049 + unsigned long expires;
78050 +};
78051 +
78052 +struct gr_hash_struct {
78053 + void **table;
78054 + void **nametable;
78055 + void *first;
78056 + __u32 table_size;
78057 + __u32 used_size;
78058 + int type;
78059 +};
78060 +
78061 +/* Userspace Grsecurity ACL data structures */
78062 +
78063 +struct acl_subject_label {
78064 + char *filename;
78065 + ino_t inode;
78066 + dev_t device;
78067 + __u32 mode;
78068 + kernel_cap_t cap_mask;
78069 + kernel_cap_t cap_lower;
78070 + kernel_cap_t cap_invert_audit;
78071 +
78072 + struct rlimit res[GR_NLIMITS];
78073 + __u32 resmask;
78074 +
78075 + __u8 user_trans_type;
78076 + __u8 group_trans_type;
78077 + uid_t *user_transitions;
78078 + gid_t *group_transitions;
78079 + __u16 user_trans_num;
78080 + __u16 group_trans_num;
78081 +
78082 + __u32 sock_families[2];
78083 + __u32 ip_proto[8];
78084 + __u32 ip_type;
78085 + struct acl_ip_label **ips;
78086 + __u32 ip_num;
78087 + __u32 inaddr_any_override;
78088 +
78089 + __u32 crashes;
78090 + unsigned long expires;
78091 +
78092 + struct acl_subject_label *parent_subject;
78093 + struct gr_hash_struct *hash;
78094 + struct acl_subject_label *prev;
78095 + struct acl_subject_label *next;
78096 +
78097 + struct acl_object_label **obj_hash;
78098 + __u32 obj_hash_size;
78099 + __u16 pax_flags;
78100 +};
78101 +
78102 +struct role_allowed_ip {
78103 + __u32 addr;
78104 + __u32 netmask;
78105 +
78106 + struct role_allowed_ip *prev;
78107 + struct role_allowed_ip *next;
78108 +};
78109 +
78110 +struct role_transition {
78111 + char *rolename;
78112 +
78113 + struct role_transition *prev;
78114 + struct role_transition *next;
78115 +};
78116 +
78117 +struct acl_role_label {
78118 + char *rolename;
78119 + uid_t uidgid;
78120 + __u16 roletype;
78121 +
78122 + __u16 auth_attempts;
78123 + unsigned long expires;
78124 +
78125 + struct acl_subject_label *root_label;
78126 + struct gr_hash_struct *hash;
78127 +
78128 + struct acl_role_label *prev;
78129 + struct acl_role_label *next;
78130 +
78131 + struct role_transition *transitions;
78132 + struct role_allowed_ip *allowed_ips;
78133 + uid_t *domain_children;
78134 + __u16 domain_child_num;
78135 +
78136 + umode_t umask;
78137 +
78138 + struct acl_subject_label **subj_hash;
78139 + __u32 subj_hash_size;
78140 +};
78141 +
78142 +struct user_acl_role_db {
78143 + struct acl_role_label **r_table;
78144 + __u32 num_pointers; /* Number of allocations to track */
78145 + __u32 num_roles; /* Number of roles */
78146 + __u32 num_domain_children; /* Number of domain children */
78147 + __u32 num_subjects; /* Number of subjects */
78148 + __u32 num_objects; /* Number of objects */
78149 +};
78150 +
78151 +struct acl_object_label {
78152 + char *filename;
78153 + ino_t inode;
78154 + dev_t device;
78155 + __u32 mode;
78156 +
78157 + struct acl_subject_label *nested;
78158 + struct acl_object_label *globbed;
78159 +
78160 + /* next two structures not used */
78161 +
78162 + struct acl_object_label *prev;
78163 + struct acl_object_label *next;
78164 +};
78165 +
78166 +struct acl_ip_label {
78167 + char *iface;
78168 + __u32 addr;
78169 + __u32 netmask;
78170 + __u16 low, high;
78171 + __u8 mode;
78172 + __u32 type;
78173 + __u32 proto[8];
78174 +
78175 + /* next two structures not used */
78176 +
78177 + struct acl_ip_label *prev;
78178 + struct acl_ip_label *next;
78179 +};
78180 +
78181 +struct gr_arg {
78182 + struct user_acl_role_db role_db;
78183 + unsigned char pw[GR_PW_LEN];
78184 + unsigned char salt[GR_SALT_LEN];
78185 + unsigned char sum[GR_SHA_LEN];
78186 + unsigned char sp_role[GR_SPROLE_LEN];
78187 + struct sprole_pw *sprole_pws;
78188 + dev_t segv_device;
78189 + ino_t segv_inode;
78190 + uid_t segv_uid;
78191 + __u16 num_sprole_pws;
78192 + __u16 mode;
78193 +};
78194 +
78195 +struct gr_arg_wrapper {
78196 + struct gr_arg *arg;
78197 + __u32 version;
78198 + __u32 size;
78199 +};
78200 +
78201 +struct subject_map {
78202 + struct acl_subject_label *user;
78203 + struct acl_subject_label *kernel;
78204 + struct subject_map *prev;
78205 + struct subject_map *next;
78206 +};
78207 +
78208 +struct acl_subj_map_db {
78209 + struct subject_map **s_hash;
78210 + __u32 s_size;
78211 +};
78212 +
78213 +struct gr_policy_state {
78214 + struct sprole_pw **acl_special_roles;
78215 + __u16 num_sprole_pws;
78216 + struct acl_role_label *kernel_role;
78217 + struct acl_role_label *role_list;
78218 + struct acl_role_label *default_role;
78219 + struct acl_role_db acl_role_set;
78220 + struct acl_subj_map_db subj_map_set;
78221 + struct name_db name_set;
78222 + struct inodev_db inodev_set;
78223 +};
78224 +
78225 +struct gr_alloc_state {
78226 + unsigned long alloc_stack_next;
78227 + unsigned long alloc_stack_size;
78228 + void **alloc_stack;
78229 +};
78230 +
78231 +struct gr_reload_state {
78232 + struct gr_policy_state oldpolicy;
78233 + struct gr_alloc_state oldalloc;
78234 + struct gr_policy_state newpolicy;
78235 + struct gr_alloc_state newalloc;
78236 + struct gr_policy_state *oldpolicy_ptr;
78237 + struct gr_alloc_state *oldalloc_ptr;
78238 + unsigned char oldmode;
78239 +};
78240 +
78241 +/* End Data Structures Section */
78242 +
78243 +/* Hash functions generated by empirical testing by Brad Spengler
78244 + Makes good use of the low bits of the inode. Generally 0-1 times
78245 + in loop for successful match. 0-3 for unsuccessful match.
78246 + Shift/add algorithm with modulus of table size and an XOR*/
78247 +
78248 +static __inline__ unsigned int
78249 +gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
78250 +{
78251 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
78252 +}
78253 +
78254 + static __inline__ unsigned int
78255 +gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
78256 +{
78257 + return ((const unsigned long)userp % sz);
78258 +}
78259 +
78260 +static __inline__ unsigned int
78261 +gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
78262 +{
78263 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
78264 +}
78265 +
78266 +static __inline__ unsigned int
78267 +gr_nhash(const char *name, const __u16 len, const unsigned int sz)
78268 +{
78269 + return full_name_hash((const unsigned char *)name, len) % sz;
78270 +}
78271 +
78272 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
78273 + subj = NULL; \
78274 + iter = 0; \
78275 + while (iter < role->subj_hash_size) { \
78276 + if (subj == NULL) \
78277 + subj = role->subj_hash[iter]; \
78278 + if (subj == NULL) { \
78279 + iter++; \
78280 + continue; \
78281 + }
78282 +
78283 +#define FOR_EACH_SUBJECT_END(subj,iter) \
78284 + subj = subj->next; \
78285 + if (subj == NULL) \
78286 + iter++; \
78287 + }
78288 +
78289 +
78290 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
78291 + subj = role->hash->first; \
78292 + while (subj != NULL) {
78293 +
78294 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
78295 + subj = subj->next; \
78296 + }
78297 +
78298 +#endif
78299 +
78300 diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
78301 new file mode 100644
78302 index 0000000..33ebd1f
78303 --- /dev/null
78304 +++ b/include/linux/gracl_compat.h
78305 @@ -0,0 +1,156 @@
78306 +#ifndef GR_ACL_COMPAT_H
78307 +#define GR_ACL_COMPAT_H
78308 +
78309 +#include <linux/resource.h>
78310 +#include <asm/resource.h>
78311 +
78312 +struct sprole_pw_compat {
78313 + compat_uptr_t rolename;
78314 + unsigned char salt[GR_SALT_LEN];
78315 + unsigned char sum[GR_SHA_LEN];
78316 +};
78317 +
78318 +struct gr_hash_struct_compat {
78319 + compat_uptr_t table;
78320 + compat_uptr_t nametable;
78321 + compat_uptr_t first;
78322 + __u32 table_size;
78323 + __u32 used_size;
78324 + int type;
78325 +};
78326 +
78327 +struct acl_subject_label_compat {
78328 + compat_uptr_t filename;
78329 + compat_ino_t inode;
78330 + __u32 device;
78331 + __u32 mode;
78332 + kernel_cap_t cap_mask;
78333 + kernel_cap_t cap_lower;
78334 + kernel_cap_t cap_invert_audit;
78335 +
78336 + struct compat_rlimit res[GR_NLIMITS];
78337 + __u32 resmask;
78338 +
78339 + __u8 user_trans_type;
78340 + __u8 group_trans_type;
78341 + compat_uptr_t user_transitions;
78342 + compat_uptr_t group_transitions;
78343 + __u16 user_trans_num;
78344 + __u16 group_trans_num;
78345 +
78346 + __u32 sock_families[2];
78347 + __u32 ip_proto[8];
78348 + __u32 ip_type;
78349 + compat_uptr_t ips;
78350 + __u32 ip_num;
78351 + __u32 inaddr_any_override;
78352 +
78353 + __u32 crashes;
78354 + compat_ulong_t expires;
78355 +
78356 + compat_uptr_t parent_subject;
78357 + compat_uptr_t hash;
78358 + compat_uptr_t prev;
78359 + compat_uptr_t next;
78360 +
78361 + compat_uptr_t obj_hash;
78362 + __u32 obj_hash_size;
78363 + __u16 pax_flags;
78364 +};
78365 +
78366 +struct role_allowed_ip_compat {
78367 + __u32 addr;
78368 + __u32 netmask;
78369 +
78370 + compat_uptr_t prev;
78371 + compat_uptr_t next;
78372 +};
78373 +
78374 +struct role_transition_compat {
78375 + compat_uptr_t rolename;
78376 +
78377 + compat_uptr_t prev;
78378 + compat_uptr_t next;
78379 +};
78380 +
78381 +struct acl_role_label_compat {
78382 + compat_uptr_t rolename;
78383 + uid_t uidgid;
78384 + __u16 roletype;
78385 +
78386 + __u16 auth_attempts;
78387 + compat_ulong_t expires;
78388 +
78389 + compat_uptr_t root_label;
78390 + compat_uptr_t hash;
78391 +
78392 + compat_uptr_t prev;
78393 + compat_uptr_t next;
78394 +
78395 + compat_uptr_t transitions;
78396 + compat_uptr_t allowed_ips;
78397 + compat_uptr_t domain_children;
78398 + __u16 domain_child_num;
78399 +
78400 + umode_t umask;
78401 +
78402 + compat_uptr_t subj_hash;
78403 + __u32 subj_hash_size;
78404 +};
78405 +
78406 +struct user_acl_role_db_compat {
78407 + compat_uptr_t r_table;
78408 + __u32 num_pointers;
78409 + __u32 num_roles;
78410 + __u32 num_domain_children;
78411 + __u32 num_subjects;
78412 + __u32 num_objects;
78413 +};
78414 +
78415 +struct acl_object_label_compat {
78416 + compat_uptr_t filename;
78417 + compat_ino_t inode;
78418 + __u32 device;
78419 + __u32 mode;
78420 +
78421 + compat_uptr_t nested;
78422 + compat_uptr_t globbed;
78423 +
78424 + compat_uptr_t prev;
78425 + compat_uptr_t next;
78426 +};
78427 +
78428 +struct acl_ip_label_compat {
78429 + compat_uptr_t iface;
78430 + __u32 addr;
78431 + __u32 netmask;
78432 + __u16 low, high;
78433 + __u8 mode;
78434 + __u32 type;
78435 + __u32 proto[8];
78436 +
78437 + compat_uptr_t prev;
78438 + compat_uptr_t next;
78439 +};
78440 +
78441 +struct gr_arg_compat {
78442 + struct user_acl_role_db_compat role_db;
78443 + unsigned char pw[GR_PW_LEN];
78444 + unsigned char salt[GR_SALT_LEN];
78445 + unsigned char sum[GR_SHA_LEN];
78446 + unsigned char sp_role[GR_SPROLE_LEN];
78447 + compat_uptr_t sprole_pws;
78448 + __u32 segv_device;
78449 + compat_ino_t segv_inode;
78450 + uid_t segv_uid;
78451 + __u16 num_sprole_pws;
78452 + __u16 mode;
78453 +};
78454 +
78455 +struct gr_arg_wrapper_compat {
78456 + compat_uptr_t arg;
78457 + __u32 version;
78458 + __u32 size;
78459 +};
78460 +
78461 +#endif
78462 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
78463 new file mode 100644
78464 index 0000000..323ecf2
78465 --- /dev/null
78466 +++ b/include/linux/gralloc.h
78467 @@ -0,0 +1,9 @@
78468 +#ifndef __GRALLOC_H
78469 +#define __GRALLOC_H
78470 +
78471 +void acl_free_all(void);
78472 +int acl_alloc_stack_init(unsigned long size);
78473 +void *acl_alloc(unsigned long len);
78474 +void *acl_alloc_num(unsigned long num, unsigned long len);
78475 +
78476 +#endif
78477 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
78478 new file mode 100644
78479 index 0000000..be66033
78480 --- /dev/null
78481 +++ b/include/linux/grdefs.h
78482 @@ -0,0 +1,140 @@
78483 +#ifndef GRDEFS_H
78484 +#define GRDEFS_H
78485 +
78486 +/* Begin grsecurity status declarations */
78487 +
78488 +enum {
78489 + GR_READY = 0x01,
78490 + GR_STATUS_INIT = 0x00 // disabled state
78491 +};
78492 +
78493 +/* Begin ACL declarations */
78494 +
78495 +/* Role flags */
78496 +
78497 +enum {
78498 + GR_ROLE_USER = 0x0001,
78499 + GR_ROLE_GROUP = 0x0002,
78500 + GR_ROLE_DEFAULT = 0x0004,
78501 + GR_ROLE_SPECIAL = 0x0008,
78502 + GR_ROLE_AUTH = 0x0010,
78503 + GR_ROLE_NOPW = 0x0020,
78504 + GR_ROLE_GOD = 0x0040,
78505 + GR_ROLE_LEARN = 0x0080,
78506 + GR_ROLE_TPE = 0x0100,
78507 + GR_ROLE_DOMAIN = 0x0200,
78508 + GR_ROLE_PAM = 0x0400,
78509 + GR_ROLE_PERSIST = 0x0800
78510 +};
78511 +
78512 +/* ACL Subject and Object mode flags */
78513 +enum {
78514 + GR_DELETED = 0x80000000
78515 +};
78516 +
78517 +/* ACL Object-only mode flags */
78518 +enum {
78519 + GR_READ = 0x00000001,
78520 + GR_APPEND = 0x00000002,
78521 + GR_WRITE = 0x00000004,
78522 + GR_EXEC = 0x00000008,
78523 + GR_FIND = 0x00000010,
78524 + GR_INHERIT = 0x00000020,
78525 + GR_SETID = 0x00000040,
78526 + GR_CREATE = 0x00000080,
78527 + GR_DELETE = 0x00000100,
78528 + GR_LINK = 0x00000200,
78529 + GR_AUDIT_READ = 0x00000400,
78530 + GR_AUDIT_APPEND = 0x00000800,
78531 + GR_AUDIT_WRITE = 0x00001000,
78532 + GR_AUDIT_EXEC = 0x00002000,
78533 + GR_AUDIT_FIND = 0x00004000,
78534 + GR_AUDIT_INHERIT= 0x00008000,
78535 + GR_AUDIT_SETID = 0x00010000,
78536 + GR_AUDIT_CREATE = 0x00020000,
78537 + GR_AUDIT_DELETE = 0x00040000,
78538 + GR_AUDIT_LINK = 0x00080000,
78539 + GR_PTRACERD = 0x00100000,
78540 + GR_NOPTRACE = 0x00200000,
78541 + GR_SUPPRESS = 0x00400000,
78542 + GR_NOLEARN = 0x00800000,
78543 + GR_INIT_TRANSFER= 0x01000000
78544 +};
78545 +
78546 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
78547 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
78548 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
78549 +
78550 +/* ACL subject-only mode flags */
78551 +enum {
78552 + GR_KILL = 0x00000001,
78553 + GR_VIEW = 0x00000002,
78554 + GR_PROTECTED = 0x00000004,
78555 + GR_LEARN = 0x00000008,
78556 + GR_OVERRIDE = 0x00000010,
78557 + /* just a placeholder, this mode is only used in userspace */
78558 + GR_DUMMY = 0x00000020,
78559 + GR_PROTSHM = 0x00000040,
78560 + GR_KILLPROC = 0x00000080,
78561 + GR_KILLIPPROC = 0x00000100,
78562 + /* just a placeholder, this mode is only used in userspace */
78563 + GR_NOTROJAN = 0x00000200,
78564 + GR_PROTPROCFD = 0x00000400,
78565 + GR_PROCACCT = 0x00000800,
78566 + GR_RELAXPTRACE = 0x00001000,
78567 + //GR_NESTED = 0x00002000,
78568 + GR_INHERITLEARN = 0x00004000,
78569 + GR_PROCFIND = 0x00008000,
78570 + GR_POVERRIDE = 0x00010000,
78571 + GR_KERNELAUTH = 0x00020000,
78572 + GR_ATSECURE = 0x00040000,
78573 + GR_SHMEXEC = 0x00080000
78574 +};
78575 +
78576 +enum {
78577 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
78578 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
78579 + GR_PAX_ENABLE_MPROTECT = 0x0004,
78580 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
78581 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
78582 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
78583 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
78584 + GR_PAX_DISABLE_MPROTECT = 0x0400,
78585 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
78586 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
78587 +};
78588 +
78589 +enum {
78590 + GR_ID_USER = 0x01,
78591 + GR_ID_GROUP = 0x02,
78592 +};
78593 +
78594 +enum {
78595 + GR_ID_ALLOW = 0x01,
78596 + GR_ID_DENY = 0x02,
78597 +};
78598 +
78599 +#define GR_CRASH_RES 31
78600 +#define GR_UIDTABLE_MAX 500
78601 +
78602 +/* begin resource learning section */
78603 +enum {
78604 + GR_RLIM_CPU_BUMP = 60,
78605 + GR_RLIM_FSIZE_BUMP = 50000,
78606 + GR_RLIM_DATA_BUMP = 10000,
78607 + GR_RLIM_STACK_BUMP = 1000,
78608 + GR_RLIM_CORE_BUMP = 10000,
78609 + GR_RLIM_RSS_BUMP = 500000,
78610 + GR_RLIM_NPROC_BUMP = 1,
78611 + GR_RLIM_NOFILE_BUMP = 5,
78612 + GR_RLIM_MEMLOCK_BUMP = 50000,
78613 + GR_RLIM_AS_BUMP = 500000,
78614 + GR_RLIM_LOCKS_BUMP = 2,
78615 + GR_RLIM_SIGPENDING_BUMP = 5,
78616 + GR_RLIM_MSGQUEUE_BUMP = 10000,
78617 + GR_RLIM_NICE_BUMP = 1,
78618 + GR_RLIM_RTPRIO_BUMP = 1,
78619 + GR_RLIM_RTTIME_BUMP = 1000000
78620 +};
78621 +
78622 +#endif
78623 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
78624 new file mode 100644
78625 index 0000000..d25522e
78626 --- /dev/null
78627 +++ b/include/linux/grinternal.h
78628 @@ -0,0 +1,229 @@
78629 +#ifndef __GRINTERNAL_H
78630 +#define __GRINTERNAL_H
78631 +
78632 +#ifdef CONFIG_GRKERNSEC
78633 +
78634 +#include <linux/fs.h>
78635 +#include <linux/mnt_namespace.h>
78636 +#include <linux/nsproxy.h>
78637 +#include <linux/gracl.h>
78638 +#include <linux/grdefs.h>
78639 +#include <linux/grmsg.h>
78640 +
78641 +void gr_add_learn_entry(const char *fmt, ...)
78642 + __attribute__ ((format (printf, 1, 2)));
78643 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
78644 + const struct vfsmount *mnt);
78645 +__u32 gr_check_create(const struct dentry *new_dentry,
78646 + const struct dentry *parent,
78647 + const struct vfsmount *mnt, const __u32 mode);
78648 +int gr_check_protected_task(const struct task_struct *task);
78649 +__u32 to_gr_audit(const __u32 reqmode);
78650 +int gr_set_acls(const int type);
78651 +int gr_acl_is_enabled(void);
78652 +char gr_roletype_to_char(void);
78653 +
78654 +void gr_handle_alertkill(struct task_struct *task);
78655 +char *gr_to_filename(const struct dentry *dentry,
78656 + const struct vfsmount *mnt);
78657 +char *gr_to_filename1(const struct dentry *dentry,
78658 + const struct vfsmount *mnt);
78659 +char *gr_to_filename2(const struct dentry *dentry,
78660 + const struct vfsmount *mnt);
78661 +char *gr_to_filename3(const struct dentry *dentry,
78662 + const struct vfsmount *mnt);
78663 +
78664 +extern int grsec_enable_ptrace_readexec;
78665 +extern int grsec_enable_harden_ptrace;
78666 +extern int grsec_enable_link;
78667 +extern int grsec_enable_fifo;
78668 +extern int grsec_enable_execve;
78669 +extern int grsec_enable_shm;
78670 +extern int grsec_enable_execlog;
78671 +extern int grsec_enable_signal;
78672 +extern int grsec_enable_audit_ptrace;
78673 +extern int grsec_enable_forkfail;
78674 +extern int grsec_enable_time;
78675 +extern int grsec_enable_rofs;
78676 +extern int grsec_deny_new_usb;
78677 +extern int grsec_enable_chroot_shmat;
78678 +extern int grsec_enable_chroot_mount;
78679 +extern int grsec_enable_chroot_double;
78680 +extern int grsec_enable_chroot_pivot;
78681 +extern int grsec_enable_chroot_chdir;
78682 +extern int grsec_enable_chroot_chmod;
78683 +extern int grsec_enable_chroot_mknod;
78684 +extern int grsec_enable_chroot_fchdir;
78685 +extern int grsec_enable_chroot_nice;
78686 +extern int grsec_enable_chroot_execlog;
78687 +extern int grsec_enable_chroot_caps;
78688 +extern int grsec_enable_chroot_sysctl;
78689 +extern int grsec_enable_chroot_unix;
78690 +extern int grsec_enable_symlinkown;
78691 +extern kgid_t grsec_symlinkown_gid;
78692 +extern int grsec_enable_tpe;
78693 +extern kgid_t grsec_tpe_gid;
78694 +extern int grsec_enable_tpe_all;
78695 +extern int grsec_enable_tpe_invert;
78696 +extern int grsec_enable_socket_all;
78697 +extern kgid_t grsec_socket_all_gid;
78698 +extern int grsec_enable_socket_client;
78699 +extern kgid_t grsec_socket_client_gid;
78700 +extern int grsec_enable_socket_server;
78701 +extern kgid_t grsec_socket_server_gid;
78702 +extern kgid_t grsec_audit_gid;
78703 +extern int grsec_enable_group;
78704 +extern int grsec_enable_log_rwxmaps;
78705 +extern int grsec_enable_mount;
78706 +extern int grsec_enable_chdir;
78707 +extern int grsec_resource_logging;
78708 +extern int grsec_enable_blackhole;
78709 +extern int grsec_lastack_retries;
78710 +extern int grsec_enable_brute;
78711 +extern int grsec_enable_harden_ipc;
78712 +extern int grsec_lock;
78713 +
78714 +extern spinlock_t grsec_alert_lock;
78715 +extern unsigned long grsec_alert_wtime;
78716 +extern unsigned long grsec_alert_fyet;
78717 +
78718 +extern spinlock_t grsec_audit_lock;
78719 +
78720 +extern rwlock_t grsec_exec_file_lock;
78721 +
78722 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
78723 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
78724 + (tsk)->exec_file->f_path.mnt) : "/")
78725 +
78726 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
78727 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
78728 + (tsk)->real_parent->exec_file->f_path.mnt) : "/")
78729 +
78730 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
78731 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
78732 + (tsk)->exec_file->f_path.mnt) : "/")
78733 +
78734 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
78735 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
78736 + (tsk)->real_parent->exec_file->f_path.mnt) : "/")
78737 +
78738 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
78739 +
78740 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
78741 +
78742 +static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
78743 +{
78744 + if (file1 && file2) {
78745 + const struct inode *inode1 = file1->f_path.dentry->d_inode;
78746 + const struct inode *inode2 = file2->f_path.dentry->d_inode;
78747 + if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
78748 + return true;
78749 + }
78750 +
78751 + return false;
78752 +}
78753 +
78754 +#define GR_CHROOT_CAPS {{ \
78755 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
78756 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
78757 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
78758 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
78759 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
78760 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
78761 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
78762 +
78763 +#define security_learn(normal_msg,args...) \
78764 +({ \
78765 + read_lock(&grsec_exec_file_lock); \
78766 + gr_add_learn_entry(normal_msg "\n", ## args); \
78767 + read_unlock(&grsec_exec_file_lock); \
78768 +})
78769 +
78770 +enum {
78771 + GR_DO_AUDIT,
78772 + GR_DONT_AUDIT,
78773 + /* used for non-audit messages that we shouldn't kill the task on */
78774 + GR_DONT_AUDIT_GOOD
78775 +};
78776 +
78777 +enum {
78778 + GR_TTYSNIFF,
78779 + GR_RBAC,
78780 + GR_RBAC_STR,
78781 + GR_STR_RBAC,
78782 + GR_RBAC_MODE2,
78783 + GR_RBAC_MODE3,
78784 + GR_FILENAME,
78785 + GR_SYSCTL_HIDDEN,
78786 + GR_NOARGS,
78787 + GR_ONE_INT,
78788 + GR_ONE_INT_TWO_STR,
78789 + GR_ONE_STR,
78790 + GR_STR_INT,
78791 + GR_TWO_STR_INT,
78792 + GR_TWO_INT,
78793 + GR_TWO_U64,
78794 + GR_THREE_INT,
78795 + GR_FIVE_INT_TWO_STR,
78796 + GR_TWO_STR,
78797 + GR_THREE_STR,
78798 + GR_FOUR_STR,
78799 + GR_STR_FILENAME,
78800 + GR_FILENAME_STR,
78801 + GR_FILENAME_TWO_INT,
78802 + GR_FILENAME_TWO_INT_STR,
78803 + GR_TEXTREL,
78804 + GR_PTRACE,
78805 + GR_RESOURCE,
78806 + GR_CAP,
78807 + GR_SIG,
78808 + GR_SIG2,
78809 + GR_CRASH1,
78810 + GR_CRASH2,
78811 + GR_PSACCT,
78812 + GR_RWXMAP,
78813 + GR_RWXMAPVMA
78814 +};
78815 +
78816 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
78817 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
78818 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
78819 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
78820 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
78821 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
78822 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
78823 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
78824 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
78825 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
78826 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
78827 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
78828 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
78829 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
78830 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
78831 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
78832 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
78833 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
78834 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
78835 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
78836 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
78837 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
78838 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
78839 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
78840 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
78841 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
78842 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
78843 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
78844 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
78845 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
78846 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
78847 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
78848 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
78849 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
78850 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
78851 +#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
78852 +
78853 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
78854 +
78855 +#endif
78856 +
78857 +#endif
78858 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
78859 new file mode 100644
78860 index 0000000..ba93581
78861 --- /dev/null
78862 +++ b/include/linux/grmsg.h
78863 @@ -0,0 +1,116 @@
78864 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
78865 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
78866 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
78867 +#define GR_STOPMOD_MSG "denied modification of module state by "
78868 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
78869 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
78870 +#define GR_IOPERM_MSG "denied use of ioperm() by "
78871 +#define GR_IOPL_MSG "denied use of iopl() by "
78872 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
78873 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
78874 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
78875 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
78876 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
78877 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
78878 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
78879 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
78880 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
78881 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
78882 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
78883 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
78884 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
78885 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
78886 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
78887 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
78888 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
78889 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
78890 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
78891 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
78892 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
78893 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
78894 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
78895 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
78896 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
78897 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
78898 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
78899 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
78900 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
78901 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
78902 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
78903 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
78904 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
78905 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
78906 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
78907 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
78908 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
78909 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
78910 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
78911 +#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
78912 +#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
78913 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
78914 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
78915 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
78916 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
78917 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
78918 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
78919 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
78920 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
78921 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
78922 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
78923 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
78924 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
78925 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
78926 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
78927 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
78928 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
78929 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
78930 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
78931 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
78932 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
78933 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
78934 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
78935 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
78936 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
78937 +#define GR_NICE_CHROOT_MSG "denied priority change by "
78938 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
78939 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
78940 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
78941 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
78942 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
78943 +#define GR_TIME_MSG "time set by "
78944 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
78945 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
78946 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
78947 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
78948 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
78949 +#define GR_BIND_MSG "denied bind() by "
78950 +#define GR_CONNECT_MSG "denied connect() by "
78951 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
78952 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
78953 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
78954 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
78955 +#define GR_CAP_ACL_MSG "use of %s denied for "
78956 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
78957 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
78958 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
78959 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
78960 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
78961 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
78962 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
78963 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
78964 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
78965 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
78966 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
78967 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
78968 +#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
78969 +#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
78970 +#define GR_VM86_MSG "denied use of vm86 by "
78971 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
78972 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
78973 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
78974 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
78975 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
78976 +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
78977 +#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
78978 +#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
78979 +#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
78980 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
78981 new file mode 100644
78982 index 0000000..8108301
78983 --- /dev/null
78984 +++ b/include/linux/grsecurity.h
78985 @@ -0,0 +1,246 @@
78986 +#ifndef GR_SECURITY_H
78987 +#define GR_SECURITY_H
78988 +#include <linux/fs.h>
78989 +#include <linux/fs_struct.h>
78990 +#include <linux/binfmts.h>
78991 +#include <linux/gracl.h>
78992 +
78993 +/* notify of brain-dead configs */
78994 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78995 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
78996 +#endif
78997 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
78998 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
78999 +#endif
79000 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
79001 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
79002 +#endif
79003 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
79004 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
79005 +#endif
79006 +
79007 +int gr_handle_new_usb(void);
79008 +
79009 +void gr_handle_brute_attach(int dumpable);
79010 +void gr_handle_brute_check(void);
79011 +void gr_handle_kernel_exploit(void);
79012 +
79013 +char gr_roletype_to_char(void);
79014 +
79015 +int gr_acl_enable_at_secure(void);
79016 +
79017 +int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
79018 +int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
79019 +
79020 +void gr_del_task_from_ip_table(struct task_struct *p);
79021 +
79022 +int gr_pid_is_chrooted(struct task_struct *p);
79023 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
79024 +int gr_handle_chroot_nice(void);
79025 +int gr_handle_chroot_sysctl(const int op);
79026 +int gr_handle_chroot_setpriority(struct task_struct *p,
79027 + const int niceval);
79028 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
79029 +int gr_handle_chroot_chroot(const struct dentry *dentry,
79030 + const struct vfsmount *mnt);
79031 +void gr_handle_chroot_chdir(const struct path *path);
79032 +int gr_handle_chroot_chmod(const struct dentry *dentry,
79033 + const struct vfsmount *mnt, const int mode);
79034 +int gr_handle_chroot_mknod(const struct dentry *dentry,
79035 + const struct vfsmount *mnt, const int mode);
79036 +int gr_handle_chroot_mount(const struct dentry *dentry,
79037 + const struct vfsmount *mnt,
79038 + const char *dev_name);
79039 +int gr_handle_chroot_pivot(void);
79040 +int gr_handle_chroot_unix(const pid_t pid);
79041 +
79042 +int gr_handle_rawio(const struct inode *inode);
79043 +
79044 +void gr_handle_ioperm(void);
79045 +void gr_handle_iopl(void);
79046 +void gr_handle_msr_write(void);
79047 +
79048 +umode_t gr_acl_umask(void);
79049 +
79050 +int gr_tpe_allow(const struct file *file);
79051 +
79052 +void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
79053 +void gr_clear_chroot_entries(struct task_struct *task);
79054 +
79055 +void gr_log_forkfail(const int retval);
79056 +void gr_log_timechange(void);
79057 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
79058 +void gr_log_chdir(const struct dentry *dentry,
79059 + const struct vfsmount *mnt);
79060 +void gr_log_chroot_exec(const struct dentry *dentry,
79061 + const struct vfsmount *mnt);
79062 +void gr_log_remount(const char *devname, const int retval);
79063 +void gr_log_unmount(const char *devname, const int retval);
79064 +void gr_log_mount(const char *from, const char *to, const int retval);
79065 +void gr_log_textrel(struct vm_area_struct *vma);
79066 +void gr_log_ptgnustack(struct file *file);
79067 +void gr_log_rwxmmap(struct file *file);
79068 +void gr_log_rwxmprotect(struct vm_area_struct *vma);
79069 +
79070 +int gr_handle_follow_link(const struct inode *parent,
79071 + const struct inode *inode,
79072 + const struct dentry *dentry,
79073 + const struct vfsmount *mnt);
79074 +int gr_handle_fifo(const struct dentry *dentry,
79075 + const struct vfsmount *mnt,
79076 + const struct dentry *dir, const int flag,
79077 + const int acc_mode);
79078 +int gr_handle_hardlink(const struct dentry *dentry,
79079 + const struct vfsmount *mnt,
79080 + struct inode *inode,
79081 + const int mode, const struct filename *to);
79082 +
79083 +int gr_is_capable(const int cap);
79084 +int gr_is_capable_nolog(const int cap);
79085 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
79086 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
79087 +
79088 +void gr_copy_label(struct task_struct *tsk);
79089 +void gr_handle_crash(struct task_struct *task, const int sig);
79090 +int gr_handle_signal(const struct task_struct *p, const int sig);
79091 +int gr_check_crash_uid(const kuid_t uid);
79092 +int gr_check_protected_task(const struct task_struct *task);
79093 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
79094 +int gr_acl_handle_mmap(const struct file *file,
79095 + const unsigned long prot);
79096 +int gr_acl_handle_mprotect(const struct file *file,
79097 + const unsigned long prot);
79098 +int gr_check_hidden_task(const struct task_struct *tsk);
79099 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
79100 + const struct vfsmount *mnt);
79101 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
79102 + const struct vfsmount *mnt);
79103 +__u32 gr_acl_handle_access(const struct dentry *dentry,
79104 + const struct vfsmount *mnt, const int fmode);
79105 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
79106 + const struct vfsmount *mnt, umode_t *mode);
79107 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
79108 + const struct vfsmount *mnt);
79109 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
79110 + const struct vfsmount *mnt);
79111 +__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
79112 + const struct vfsmount *mnt);
79113 +int gr_handle_ptrace(struct task_struct *task, const long request);
79114 +int gr_handle_proc_ptrace(struct task_struct *task);
79115 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
79116 + const struct vfsmount *mnt);
79117 +int gr_check_crash_exec(const struct file *filp);
79118 +int gr_acl_is_enabled(void);
79119 +void gr_set_role_label(struct task_struct *task, const kuid_t uid,
79120 + const kgid_t gid);
79121 +int gr_set_proc_label(const struct dentry *dentry,
79122 + const struct vfsmount *mnt,
79123 + const int unsafe_flags);
79124 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
79125 + const struct vfsmount *mnt);
79126 +__u32 gr_acl_handle_open(const struct dentry *dentry,
79127 + const struct vfsmount *mnt, int acc_mode);
79128 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
79129 + const struct dentry *p_dentry,
79130 + const struct vfsmount *p_mnt,
79131 + int open_flags, int acc_mode, const int imode);
79132 +void gr_handle_create(const struct dentry *dentry,
79133 + const struct vfsmount *mnt);
79134 +void gr_handle_proc_create(const struct dentry *dentry,
79135 + const struct inode *inode);
79136 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
79137 + const struct dentry *parent_dentry,
79138 + const struct vfsmount *parent_mnt,
79139 + const int mode);
79140 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
79141 + const struct dentry *parent_dentry,
79142 + const struct vfsmount *parent_mnt);
79143 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
79144 + const struct vfsmount *mnt);
79145 +void gr_handle_delete(const ino_t ino, const dev_t dev);
79146 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
79147 + const struct vfsmount *mnt);
79148 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
79149 + const struct dentry *parent_dentry,
79150 + const struct vfsmount *parent_mnt,
79151 + const struct filename *from);
79152 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
79153 + const struct dentry *parent_dentry,
79154 + const struct vfsmount *parent_mnt,
79155 + const struct dentry *old_dentry,
79156 + const struct vfsmount *old_mnt, const struct filename *to);
79157 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
79158 +int gr_acl_handle_rename(struct dentry *new_dentry,
79159 + struct dentry *parent_dentry,
79160 + const struct vfsmount *parent_mnt,
79161 + struct dentry *old_dentry,
79162 + struct inode *old_parent_inode,
79163 + struct vfsmount *old_mnt, const struct filename *newname);
79164 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
79165 + struct dentry *old_dentry,
79166 + struct dentry *new_dentry,
79167 + struct vfsmount *mnt, const __u8 replace);
79168 +__u32 gr_check_link(const struct dentry *new_dentry,
79169 + const struct dentry *parent_dentry,
79170 + const struct vfsmount *parent_mnt,
79171 + const struct dentry *old_dentry,
79172 + const struct vfsmount *old_mnt);
79173 +int gr_acl_handle_filldir(const struct file *file, const char *name,
79174 + const unsigned int namelen, const ino_t ino);
79175 +
79176 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
79177 + const struct vfsmount *mnt);
79178 +void gr_acl_handle_exit(void);
79179 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
79180 +int gr_acl_handle_procpidmem(const struct task_struct *task);
79181 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
79182 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
79183 +void gr_audit_ptrace(struct task_struct *task);
79184 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
79185 +void gr_put_exec_file(struct task_struct *task);
79186 +
79187 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
79188 +
79189 +#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
79190 +extern void gr_learn_resource(const struct task_struct *task, const int res,
79191 + const unsigned long wanted, const int gt);
79192 +#else
79193 +static inline void gr_learn_resource(const struct task_struct *task, const int res,
79194 + const unsigned long wanted, const int gt)
79195 +{
79196 +}
79197 +#endif
79198 +
79199 +#ifdef CONFIG_GRKERNSEC_RESLOG
79200 +extern void gr_log_resource(const struct task_struct *task, const int res,
79201 + const unsigned long wanted, const int gt);
79202 +#else
79203 +static inline void gr_log_resource(const struct task_struct *task, const int res,
79204 + const unsigned long wanted, const int gt)
79205 +{
79206 +}
79207 +#endif
79208 +
79209 +#ifdef CONFIG_GRKERNSEC
79210 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
79211 +void gr_handle_vm86(void);
79212 +void gr_handle_mem_readwrite(u64 from, u64 to);
79213 +
79214 +void gr_log_badprocpid(const char *entry);
79215 +
79216 +extern int grsec_enable_dmesg;
79217 +extern int grsec_disable_privio;
79218 +
79219 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79220 +extern kgid_t grsec_proc_gid;
79221 +#endif
79222 +
79223 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79224 +extern int grsec_enable_chroot_findtask;
79225 +#endif
79226 +#ifdef CONFIG_GRKERNSEC_SETXID
79227 +extern int grsec_enable_setxid;
79228 +#endif
79229 +#endif
79230 +
79231 +#endif
79232 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
79233 new file mode 100644
79234 index 0000000..e7ffaaf
79235 --- /dev/null
79236 +++ b/include/linux/grsock.h
79237 @@ -0,0 +1,19 @@
79238 +#ifndef __GRSOCK_H
79239 +#define __GRSOCK_H
79240 +
79241 +extern void gr_attach_curr_ip(const struct sock *sk);
79242 +extern int gr_handle_sock_all(const int family, const int type,
79243 + const int protocol);
79244 +extern int gr_handle_sock_server(const struct sockaddr *sck);
79245 +extern int gr_handle_sock_server_other(const struct sock *sck);
79246 +extern int gr_handle_sock_client(const struct sockaddr *sck);
79247 +extern int gr_search_connect(struct socket * sock,
79248 + struct sockaddr_in * addr);
79249 +extern int gr_search_bind(struct socket * sock,
79250 + struct sockaddr_in * addr);
79251 +extern int gr_search_listen(struct socket * sock);
79252 +extern int gr_search_accept(struct socket * sock);
79253 +extern int gr_search_socket(const int domain, const int type,
79254 + const int protocol);
79255 +
79256 +#endif
79257 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
79258 index 7fb31da..08b5114 100644
79259 --- a/include/linux/highmem.h
79260 +++ b/include/linux/highmem.h
79261 @@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
79262 kunmap_atomic(kaddr);
79263 }
79264
79265 +static inline void sanitize_highpage(struct page *page)
79266 +{
79267 + void *kaddr;
79268 + unsigned long flags;
79269 +
79270 + local_irq_save(flags);
79271 + kaddr = kmap_atomic(page);
79272 + clear_page(kaddr);
79273 + kunmap_atomic(kaddr);
79274 + local_irq_restore(flags);
79275 +}
79276 +
79277 static inline void zero_user_segments(struct page *page,
79278 unsigned start1, unsigned end1,
79279 unsigned start2, unsigned end2)
79280 diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
79281 index 1c7b89a..7dda400 100644
79282 --- a/include/linux/hwmon-sysfs.h
79283 +++ b/include/linux/hwmon-sysfs.h
79284 @@ -25,7 +25,8 @@
79285 struct sensor_device_attribute{
79286 struct device_attribute dev_attr;
79287 int index;
79288 -};
79289 +} __do_const;
79290 +typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
79291 #define to_sensor_dev_attr(_dev_attr) \
79292 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
79293
79294 @@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
79295 struct device_attribute dev_attr;
79296 u8 index;
79297 u8 nr;
79298 -};
79299 +} __do_const;
79300 +typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
79301 #define to_sensor_dev_attr_2(_dev_attr) \
79302 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
79303
79304 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
79305 index d9c8dbd3..def6e5a 100644
79306 --- a/include/linux/i2c.h
79307 +++ b/include/linux/i2c.h
79308 @@ -364,6 +364,7 @@ struct i2c_algorithm {
79309 /* To determine what the adapter supports */
79310 u32 (*functionality) (struct i2c_adapter *);
79311 };
79312 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
79313
79314 /**
79315 * struct i2c_bus_recovery_info - I2C bus recovery information
79316 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
79317 index d23c3c2..eb63c81 100644
79318 --- a/include/linux/i2o.h
79319 +++ b/include/linux/i2o.h
79320 @@ -565,7 +565,7 @@ struct i2o_controller {
79321 struct i2o_device *exec; /* Executive */
79322 #if BITS_PER_LONG == 64
79323 spinlock_t context_list_lock; /* lock for context_list */
79324 - atomic_t context_list_counter; /* needed for unique contexts */
79325 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
79326 struct list_head context_list; /* list of context id's
79327 and pointers */
79328 #endif
79329 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
79330 index aff7ad8..3942bbd 100644
79331 --- a/include/linux/if_pppox.h
79332 +++ b/include/linux/if_pppox.h
79333 @@ -76,7 +76,7 @@ struct pppox_proto {
79334 int (*ioctl)(struct socket *sock, unsigned int cmd,
79335 unsigned long arg);
79336 struct module *owner;
79337 -};
79338 +} __do_const;
79339
79340 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
79341 extern void unregister_pppox_proto(int proto_num);
79342 diff --git a/include/linux/init.h b/include/linux/init.h
79343 index 8e68a64..3f977a0 100644
79344 --- a/include/linux/init.h
79345 +++ b/include/linux/init.h
79346 @@ -37,9 +37,17 @@
79347 * section.
79348 */
79349
79350 +#define add_init_latent_entropy __latent_entropy
79351 +
79352 +#ifdef CONFIG_MEMORY_HOTPLUG
79353 +#define add_meminit_latent_entropy
79354 +#else
79355 +#define add_meminit_latent_entropy __latent_entropy
79356 +#endif
79357 +
79358 /* These are for everybody (although not all archs will actually
79359 discard it in modules) */
79360 -#define __init __section(.init.text) __cold notrace
79361 +#define __init __section(.init.text) __cold notrace add_init_latent_entropy
79362 #define __initdata __section(.init.data)
79363 #define __initconst __constsection(.init.rodata)
79364 #define __exitdata __section(.exit.data)
79365 @@ -100,7 +108,7 @@
79366 #define __cpuexitconst
79367
79368 /* Used for MEMORY_HOTPLUG */
79369 -#define __meminit __section(.meminit.text) __cold notrace
79370 +#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
79371 #define __meminitdata __section(.meminit.data)
79372 #define __meminitconst __constsection(.meminit.rodata)
79373 #define __memexit __section(.memexit.text) __exitused __cold notrace
79374 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
79375 index b0ed422..d79ea23 100644
79376 --- a/include/linux/init_task.h
79377 +++ b/include/linux/init_task.h
79378 @@ -154,6 +154,12 @@ extern struct task_group root_task_group;
79379
79380 #define INIT_TASK_COMM "swapper"
79381
79382 +#ifdef CONFIG_X86
79383 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
79384 +#else
79385 +#define INIT_TASK_THREAD_INFO
79386 +#endif
79387 +
79388 /*
79389 * INIT_TASK is used to set up the first task table, touch at
79390 * your own risk!. Base=0, limit=0x1fffff (=2MB)
79391 @@ -193,6 +199,7 @@ extern struct task_group root_task_group;
79392 RCU_POINTER_INITIALIZER(cred, &init_cred), \
79393 .comm = INIT_TASK_COMM, \
79394 .thread = INIT_THREAD, \
79395 + INIT_TASK_THREAD_INFO \
79396 .fs = &init_fs, \
79397 .files = &init_files, \
79398 .signal = &init_signals, \
79399 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
79400 index db43b58..5d5084b 100644
79401 --- a/include/linux/interrupt.h
79402 +++ b/include/linux/interrupt.h
79403 @@ -360,7 +360,7 @@ enum
79404 /* map softirq index to softirq name. update 'softirq_to_name' in
79405 * kernel/softirq.c when adding a new softirq.
79406 */
79407 -extern char *softirq_to_name[NR_SOFTIRQS];
79408 +extern const char * const softirq_to_name[NR_SOFTIRQS];
79409
79410 /* softirq mask and active fields moved to irq_cpustat_t in
79411 * asm/hardirq.h to get better cache usage. KAO
79412 @@ -368,8 +368,8 @@ extern char *softirq_to_name[NR_SOFTIRQS];
79413
79414 struct softirq_action
79415 {
79416 - void (*action)(struct softirq_action *);
79417 -};
79418 + void (*action)(void);
79419 +} __no_const;
79420
79421 asmlinkage void do_softirq(void);
79422 asmlinkage void __do_softirq(void);
79423 @@ -383,7 +383,7 @@ static inline void do_softirq_own_stack(void)
79424 }
79425 #endif
79426
79427 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
79428 +extern void open_softirq(int nr, void (*action)(void));
79429 extern void softirq_init(void);
79430 extern void __raise_softirq_irqoff(unsigned int nr);
79431
79432 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
79433 index a444c79..8c41ea9 100644
79434 --- a/include/linux/iommu.h
79435 +++ b/include/linux/iommu.h
79436 @@ -130,7 +130,7 @@ struct iommu_ops {
79437 u32 (*domain_get_windows)(struct iommu_domain *domain);
79438
79439 unsigned long pgsize_bitmap;
79440 -};
79441 +} __do_const;
79442
79443 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
79444 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
79445 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
79446 index 89b7c24..382af74 100644
79447 --- a/include/linux/ioport.h
79448 +++ b/include/linux/ioport.h
79449 @@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
79450 int adjust_resource(struct resource *res, resource_size_t start,
79451 resource_size_t size);
79452 resource_size_t resource_alignment(struct resource *res);
79453 -static inline resource_size_t resource_size(const struct resource *res)
79454 +static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
79455 {
79456 return res->end - res->start + 1;
79457 }
79458 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
79459 index d6ad91f..f10f279 100644
79460 --- a/include/linux/ipc_namespace.h
79461 +++ b/include/linux/ipc_namespace.h
79462 @@ -70,7 +70,7 @@ struct ipc_namespace {
79463 struct user_namespace *user_ns;
79464
79465 unsigned int proc_inum;
79466 -};
79467 +} __randomize_layout;
79468
79469 extern struct ipc_namespace init_ipc_ns;
79470 extern atomic_t nr_ipc_ns;
79471 diff --git a/include/linux/irq.h b/include/linux/irq.h
79472 index 7dc1003..407327b 100644
79473 --- a/include/linux/irq.h
79474 +++ b/include/linux/irq.h
79475 @@ -338,7 +338,8 @@ struct irq_chip {
79476 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
79477
79478 unsigned long flags;
79479 -};
79480 +} __do_const;
79481 +typedef struct irq_chip __no_const irq_chip_no_const;
79482
79483 /*
79484 * irq_chip specific flags
79485 diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
79486 index cac496b..ffa0567 100644
79487 --- a/include/linux/irqchip/arm-gic.h
79488 +++ b/include/linux/irqchip/arm-gic.h
79489 @@ -61,9 +61,11 @@
79490
79491 #ifndef __ASSEMBLY__
79492
79493 +#include <linux/irq.h>
79494 +
79495 struct device_node;
79496
79497 -extern struct irq_chip gic_arch_extn;
79498 +extern irq_chip_no_const gic_arch_extn;
79499
79500 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
79501 u32 offset, struct device_node *);
79502 diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
79503 index d235e88..8ccbe74 100644
79504 --- a/include/linux/jiffies.h
79505 +++ b/include/linux/jiffies.h
79506 @@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
79507 /*
79508 * Convert various time units to each other:
79509 */
79510 -extern unsigned int jiffies_to_msecs(const unsigned long j);
79511 -extern unsigned int jiffies_to_usecs(const unsigned long j);
79512 -extern unsigned long msecs_to_jiffies(const unsigned int m);
79513 -extern unsigned long usecs_to_jiffies(const unsigned int u);
79514 -extern unsigned long timespec_to_jiffies(const struct timespec *value);
79515 +extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
79516 +extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
79517 +extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
79518 +extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
79519 +extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
79520 extern void jiffies_to_timespec(const unsigned long jiffies,
79521 struct timespec *value);
79522 -extern unsigned long timeval_to_jiffies(const struct timeval *value);
79523 +extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
79524 extern void jiffies_to_timeval(const unsigned long jiffies,
79525 struct timeval *value);
79526
79527 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
79528 index 6883e19..e854fcb 100644
79529 --- a/include/linux/kallsyms.h
79530 +++ b/include/linux/kallsyms.h
79531 @@ -15,7 +15,8 @@
79532
79533 struct module;
79534
79535 -#ifdef CONFIG_KALLSYMS
79536 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
79537 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
79538 /* Lookup the address for a symbol. Returns 0 if not found. */
79539 unsigned long kallsyms_lookup_name(const char *name);
79540
79541 @@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
79542 /* Stupid that this does nothing, but I didn't create this mess. */
79543 #define __print_symbol(fmt, addr)
79544 #endif /*CONFIG_KALLSYMS*/
79545 +#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
79546 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
79547 +extern unsigned long kallsyms_lookup_name(const char *name);
79548 +extern void __print_symbol(const char *fmt, unsigned long address);
79549 +extern int sprint_backtrace(char *buffer, unsigned long address);
79550 +extern int sprint_symbol(char *buffer, unsigned long address);
79551 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
79552 +const char *kallsyms_lookup(unsigned long addr,
79553 + unsigned long *symbolsize,
79554 + unsigned long *offset,
79555 + char **modname, char *namebuf);
79556 +extern int kallsyms_lookup_size_offset(unsigned long addr,
79557 + unsigned long *symbolsize,
79558 + unsigned long *offset);
79559 +#endif
79560
79561 /* This macro allows us to keep printk typechecking */
79562 static __printf(1, 2)
79563 diff --git a/include/linux/key-type.h b/include/linux/key-type.h
79564 index a74c3a8..28d3f21 100644
79565 --- a/include/linux/key-type.h
79566 +++ b/include/linux/key-type.h
79567 @@ -131,7 +131,7 @@ struct key_type {
79568 /* internal fields */
79569 struct list_head link; /* link in types list */
79570 struct lock_class_key lock_class; /* key->sem lock class */
79571 -};
79572 +} __do_const;
79573
79574 extern struct key_type key_type_keyring;
79575
79576 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
79577 index dfb4f2f..7927e62 100644
79578 --- a/include/linux/kgdb.h
79579 +++ b/include/linux/kgdb.h
79580 @@ -52,7 +52,7 @@ extern int kgdb_connected;
79581 extern int kgdb_io_module_registered;
79582
79583 extern atomic_t kgdb_setting_breakpoint;
79584 -extern atomic_t kgdb_cpu_doing_single_step;
79585 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
79586
79587 extern struct task_struct *kgdb_usethread;
79588 extern struct task_struct *kgdb_contthread;
79589 @@ -254,7 +254,7 @@ struct kgdb_arch {
79590 void (*correct_hw_break)(void);
79591
79592 void (*enable_nmi)(bool on);
79593 -};
79594 +} __do_const;
79595
79596 /**
79597 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
79598 @@ -279,7 +279,7 @@ struct kgdb_io {
79599 void (*pre_exception) (void);
79600 void (*post_exception) (void);
79601 int is_console;
79602 -};
79603 +} __do_const;
79604
79605 extern struct kgdb_arch arch_kgdb_ops;
79606
79607 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
79608 index 0555cc6..40116ce 100644
79609 --- a/include/linux/kmod.h
79610 +++ b/include/linux/kmod.h
79611 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
79612 * usually useless though. */
79613 extern __printf(2, 3)
79614 int __request_module(bool wait, const char *name, ...);
79615 +extern __printf(3, 4)
79616 +int ___request_module(bool wait, char *param_name, const char *name, ...);
79617 #define request_module(mod...) __request_module(true, mod)
79618 #define request_module_nowait(mod...) __request_module(false, mod)
79619 #define try_then_request_module(x, mod...) \
79620 @@ -57,6 +59,9 @@ struct subprocess_info {
79621 struct work_struct work;
79622 struct completion *complete;
79623 char *path;
79624 +#ifdef CONFIG_GRKERNSEC
79625 + char *origpath;
79626 +#endif
79627 char **argv;
79628 char **envp;
79629 int wait;
79630 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
79631 index e7ba650..0af3acb 100644
79632 --- a/include/linux/kobject.h
79633 +++ b/include/linux/kobject.h
79634 @@ -116,7 +116,7 @@ struct kobj_type {
79635 struct attribute **default_attrs;
79636 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
79637 const void *(*namespace)(struct kobject *kobj);
79638 -};
79639 +} __do_const;
79640
79641 struct kobj_uevent_env {
79642 char *envp[UEVENT_NUM_ENVP];
79643 @@ -139,6 +139,7 @@ struct kobj_attribute {
79644 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
79645 const char *buf, size_t count);
79646 };
79647 +typedef struct kobj_attribute __no_const kobj_attribute_no_const;
79648
79649 extern const struct sysfs_ops kobj_sysfs_ops;
79650
79651 @@ -166,7 +167,7 @@ struct kset {
79652 spinlock_t list_lock;
79653 struct kobject kobj;
79654 const struct kset_uevent_ops *uevent_ops;
79655 -};
79656 +} __randomize_layout;
79657
79658 extern void kset_init(struct kset *kset);
79659 extern int __must_check kset_register(struct kset *kset);
79660 diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
79661 index df32d25..fb52e27 100644
79662 --- a/include/linux/kobject_ns.h
79663 +++ b/include/linux/kobject_ns.h
79664 @@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
79665 const void *(*netlink_ns)(struct sock *sk);
79666 const void *(*initial_ns)(void);
79667 void (*drop_ns)(void *);
79668 -};
79669 +} __do_const;
79670
79671 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
79672 int kobj_ns_type_registered(enum kobj_ns_type type);
79673 diff --git a/include/linux/kref.h b/include/linux/kref.h
79674 index 484604d..0f6c5b6 100644
79675 --- a/include/linux/kref.h
79676 +++ b/include/linux/kref.h
79677 @@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
79678 static inline int kref_sub(struct kref *kref, unsigned int count,
79679 void (*release)(struct kref *kref))
79680 {
79681 - WARN_ON(release == NULL);
79682 + BUG_ON(release == NULL);
79683
79684 if (atomic_sub_and_test((int) count, &kref->refcount)) {
79685 release(kref);
79686 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
79687 index 9523d2a..16c0424 100644
79688 --- a/include/linux/kvm_host.h
79689 +++ b/include/linux/kvm_host.h
79690 @@ -457,7 +457,7 @@ static inline void kvm_irqfd_exit(void)
79691 {
79692 }
79693 #endif
79694 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79695 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79696 struct module *module);
79697 void kvm_exit(void);
79698
79699 @@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
79700 struct kvm_guest_debug *dbg);
79701 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
79702
79703 -int kvm_arch_init(void *opaque);
79704 +int kvm_arch_init(const void *opaque);
79705 void kvm_arch_exit(void);
79706
79707 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
79708 diff --git a/include/linux/libata.h b/include/linux/libata.h
79709 index bec6dbe..2873d64 100644
79710 --- a/include/linux/libata.h
79711 +++ b/include/linux/libata.h
79712 @@ -975,7 +975,7 @@ struct ata_port_operations {
79713 * fields must be pointers.
79714 */
79715 const struct ata_port_operations *inherits;
79716 -};
79717 +} __do_const;
79718
79719 struct ata_port_info {
79720 unsigned long flags;
79721 diff --git a/include/linux/linkage.h b/include/linux/linkage.h
79722 index d3e8ad2..a949f68 100644
79723 --- a/include/linux/linkage.h
79724 +++ b/include/linux/linkage.h
79725 @@ -31,6 +31,7 @@
79726 #endif
79727
79728 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
79729 +#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
79730 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
79731
79732 /*
79733 diff --git a/include/linux/list.h b/include/linux/list.h
79734 index ef95941..82db65a 100644
79735 --- a/include/linux/list.h
79736 +++ b/include/linux/list.h
79737 @@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
79738 extern void list_del(struct list_head *entry);
79739 #endif
79740
79741 +extern void __pax_list_add(struct list_head *new,
79742 + struct list_head *prev,
79743 + struct list_head *next);
79744 +static inline void pax_list_add(struct list_head *new, struct list_head *head)
79745 +{
79746 + __pax_list_add(new, head, head->next);
79747 +}
79748 +static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
79749 +{
79750 + __pax_list_add(new, head->prev, head);
79751 +}
79752 +extern void pax_list_del(struct list_head *entry);
79753 +
79754 /**
79755 * list_replace - replace old entry by new one
79756 * @old : the element to be replaced
79757 @@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
79758 INIT_LIST_HEAD(entry);
79759 }
79760
79761 +extern void pax_list_del_init(struct list_head *entry);
79762 +
79763 /**
79764 * list_move - delete from one list and add as another's head
79765 * @list: the entry to move
79766 diff --git a/include/linux/math64.h b/include/linux/math64.h
79767 index c45c089..298841c 100644
79768 --- a/include/linux/math64.h
79769 +++ b/include/linux/math64.h
79770 @@ -15,7 +15,7 @@
79771 * This is commonly provided by 32bit archs to provide an optimized 64bit
79772 * divide.
79773 */
79774 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79775 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79776 {
79777 *remainder = dividend % divisor;
79778 return dividend / divisor;
79779 @@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
79780 /**
79781 * div64_u64 - unsigned 64bit divide with 64bit divisor
79782 */
79783 -static inline u64 div64_u64(u64 dividend, u64 divisor)
79784 +static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
79785 {
79786 return dividend / divisor;
79787 }
79788 @@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
79789 #define div64_ul(x, y) div_u64((x), (y))
79790
79791 #ifndef div_u64_rem
79792 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79793 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79794 {
79795 *remainder = do_div(dividend, divisor);
79796 return dividend;
79797 @@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
79798 #endif
79799
79800 #ifndef div64_u64
79801 -extern u64 div64_u64(u64 dividend, u64 divisor);
79802 +extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
79803 #endif
79804
79805 #ifndef div64_s64
79806 @@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
79807 * divide.
79808 */
79809 #ifndef div_u64
79810 -static inline u64 div_u64(u64 dividend, u32 divisor)
79811 +static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
79812 {
79813 u32 remainder;
79814 return div_u64_rem(dividend, divisor, &remainder);
79815 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
79816 index 9fe426b..8148be6 100644
79817 --- a/include/linux/mempolicy.h
79818 +++ b/include/linux/mempolicy.h
79819 @@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
79820 }
79821
79822 #define vma_policy(vma) ((vma)->vm_policy)
79823 +static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
79824 +{
79825 + vma->vm_policy = pol;
79826 +}
79827
79828 static inline void mpol_get(struct mempolicy *pol)
79829 {
79830 @@ -241,6 +245,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
79831 }
79832
79833 #define vma_policy(vma) NULL
79834 +static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
79835 +{
79836 +}
79837
79838 static inline int
79839 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
79840 diff --git a/include/linux/mm.h b/include/linux/mm.h
79841 index 0ab5439..2859c61 100644
79842 --- a/include/linux/mm.h
79843 +++ b/include/linux/mm.h
79844 @@ -117,6 +117,11 @@ extern unsigned int kobjsize(const void *objp);
79845 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
79846 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
79847 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
79848 +
79849 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
79850 +#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
79851 +#endif
79852 +
79853 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
79854
79855 #ifdef CONFIG_MEM_SOFT_DIRTY
79856 @@ -219,8 +224,8 @@ struct vm_operations_struct {
79857 /* called by access_process_vm when get_user_pages() fails, typically
79858 * for use by special VMAs that can switch between memory and hardware
79859 */
79860 - int (*access)(struct vm_area_struct *vma, unsigned long addr,
79861 - void *buf, int len, int write);
79862 + ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
79863 + void *buf, size_t len, int write);
79864 #ifdef CONFIG_NUMA
79865 /*
79866 * set_policy() op must add a reference to any non-NULL @new mempolicy
79867 @@ -250,6 +255,7 @@ struct vm_operations_struct {
79868 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
79869 unsigned long size, pgoff_t pgoff);
79870 };
79871 +typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
79872
79873 struct mmu_gather;
79874 struct inode;
79875 @@ -1064,8 +1070,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
79876 unsigned long *pfn);
79877 int follow_phys(struct vm_area_struct *vma, unsigned long address,
79878 unsigned int flags, unsigned long *prot, resource_size_t *phys);
79879 -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
79880 - void *buf, int len, int write);
79881 +ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
79882 + void *buf, size_t len, int write);
79883
79884 static inline void unmap_shared_mapping_range(struct address_space *mapping,
79885 loff_t const holebegin, loff_t const holelen)
79886 @@ -1104,9 +1110,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
79887 }
79888 #endif
79889
79890 -extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
79891 -extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
79892 - void *buf, int len, int write);
79893 +extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
79894 +extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
79895 + void *buf, size_t len, int write);
79896
79897 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79898 unsigned long start, unsigned long nr_pages,
79899 @@ -1138,34 +1144,6 @@ int set_page_dirty(struct page *page);
79900 int set_page_dirty_lock(struct page *page);
79901 int clear_page_dirty_for_io(struct page *page);
79902
79903 -/* Is the vma a continuation of the stack vma above it? */
79904 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
79905 -{
79906 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
79907 -}
79908 -
79909 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
79910 - unsigned long addr)
79911 -{
79912 - return (vma->vm_flags & VM_GROWSDOWN) &&
79913 - (vma->vm_start == addr) &&
79914 - !vma_growsdown(vma->vm_prev, addr);
79915 -}
79916 -
79917 -/* Is the vma a continuation of the stack vma below it? */
79918 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
79919 -{
79920 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
79921 -}
79922 -
79923 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
79924 - unsigned long addr)
79925 -{
79926 - return (vma->vm_flags & VM_GROWSUP) &&
79927 - (vma->vm_end == addr) &&
79928 - !vma_growsup(vma->vm_next, addr);
79929 -}
79930 -
79931 extern pid_t
79932 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
79933
79934 @@ -1265,6 +1243,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
79935 }
79936 #endif
79937
79938 +#ifdef CONFIG_MMU
79939 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
79940 +#else
79941 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
79942 +{
79943 + return __pgprot(0);
79944 +}
79945 +#endif
79946 +
79947 int vma_wants_writenotify(struct vm_area_struct *vma);
79948
79949 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
79950 @@ -1283,8 +1270,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
79951 {
79952 return 0;
79953 }
79954 +
79955 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
79956 + unsigned long address)
79957 +{
79958 + return 0;
79959 +}
79960 #else
79961 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79962 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79963 #endif
79964
79965 #ifdef __PAGETABLE_PMD_FOLDED
79966 @@ -1293,8 +1287,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
79967 {
79968 return 0;
79969 }
79970 +
79971 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
79972 + unsigned long address)
79973 +{
79974 + return 0;
79975 +}
79976 #else
79977 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
79978 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
79979 #endif
79980
79981 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
79982 @@ -1312,11 +1313,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
79983 NULL: pud_offset(pgd, address);
79984 }
79985
79986 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79987 +{
79988 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
79989 + NULL: pud_offset(pgd, address);
79990 +}
79991 +
79992 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
79993 {
79994 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
79995 NULL: pmd_offset(pud, address);
79996 }
79997 +
79998 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
79999 +{
80000 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
80001 + NULL: pmd_offset(pud, address);
80002 +}
80003 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
80004
80005 #if USE_SPLIT_PTE_PTLOCKS
80006 @@ -1694,7 +1707,7 @@ extern int install_special_mapping(struct mm_struct *mm,
80007 unsigned long addr, unsigned long len,
80008 unsigned long flags, struct page **pages);
80009
80010 -extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
80011 +extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
80012
80013 extern unsigned long mmap_region(struct file *file, unsigned long addr,
80014 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
80015 @@ -1702,6 +1715,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80016 unsigned long len, unsigned long prot, unsigned long flags,
80017 unsigned long pgoff, unsigned long *populate);
80018 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
80019 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
80020
80021 #ifdef CONFIG_MMU
80022 extern int __mm_populate(unsigned long addr, unsigned long len,
80023 @@ -1730,10 +1744,11 @@ struct vm_unmapped_area_info {
80024 unsigned long high_limit;
80025 unsigned long align_mask;
80026 unsigned long align_offset;
80027 + unsigned long threadstack_offset;
80028 };
80029
80030 -extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
80031 -extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
80032 +extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
80033 +extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
80034
80035 /*
80036 * Search for an unmapped address range.
80037 @@ -1745,7 +1760,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
80038 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
80039 */
80040 static inline unsigned long
80041 -vm_unmapped_area(struct vm_unmapped_area_info *info)
80042 +vm_unmapped_area(const struct vm_unmapped_area_info *info)
80043 {
80044 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
80045 return unmapped_area(info);
80046 @@ -1808,6 +1823,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
80047 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
80048 struct vm_area_struct **pprev);
80049
80050 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
80051 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
80052 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
80053 +
80054 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
80055 NULL if none. Assume start_addr < end_addr. */
80056 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
80057 @@ -1836,15 +1855,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
80058 return vma;
80059 }
80060
80061 -#ifdef CONFIG_MMU
80062 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
80063 -#else
80064 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
80065 -{
80066 - return __pgprot(0);
80067 -}
80068 -#endif
80069 -
80070 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
80071 unsigned long change_prot_numa(struct vm_area_struct *vma,
80072 unsigned long start, unsigned long end);
80073 @@ -1896,6 +1906,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
80074 static inline void vm_stat_account(struct mm_struct *mm,
80075 unsigned long flags, struct file *file, long pages)
80076 {
80077 +
80078 +#ifdef CONFIG_PAX_RANDMMAP
80079 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
80080 +#endif
80081 +
80082 mm->total_vm += pages;
80083 }
80084 #endif /* CONFIG_PROC_FS */
80085 @@ -1977,7 +1992,7 @@ extern int unpoison_memory(unsigned long pfn);
80086 extern int sysctl_memory_failure_early_kill;
80087 extern int sysctl_memory_failure_recovery;
80088 extern void shake_page(struct page *p, int access);
80089 -extern atomic_long_t num_poisoned_pages;
80090 +extern atomic_long_unchecked_t num_poisoned_pages;
80091 extern int soft_offline_page(struct page *page, int flags);
80092
80093 extern void dump_page(struct page *page);
80094 @@ -2014,5 +2029,11 @@ void __init setup_nr_node_ids(void);
80095 static inline void setup_nr_node_ids(void) {}
80096 #endif
80097
80098 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
80099 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
80100 +#else
80101 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
80102 +#endif
80103 +
80104 #endif /* __KERNEL__ */
80105 #endif /* _LINUX_MM_H */
80106 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
80107 index 290901a..e99b01c 100644
80108 --- a/include/linux/mm_types.h
80109 +++ b/include/linux/mm_types.h
80110 @@ -307,7 +307,9 @@ struct vm_area_struct {
80111 #ifdef CONFIG_NUMA
80112 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
80113 #endif
80114 -};
80115 +
80116 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
80117 +} __randomize_layout;
80118
80119 struct core_thread {
80120 struct task_struct *task;
80121 @@ -453,7 +455,25 @@ struct mm_struct {
80122 bool tlb_flush_pending;
80123 #endif
80124 struct uprobes_state uprobes_state;
80125 -};
80126 +
80127 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
80128 + unsigned long pax_flags;
80129 +#endif
80130 +
80131 +#ifdef CONFIG_PAX_DLRESOLVE
80132 + unsigned long call_dl_resolve;
80133 +#endif
80134 +
80135 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
80136 + unsigned long call_syscall;
80137 +#endif
80138 +
80139 +#ifdef CONFIG_PAX_ASLR
80140 + unsigned long delta_mmap; /* randomized offset */
80141 + unsigned long delta_stack; /* randomized offset */
80142 +#endif
80143 +
80144 +} __randomize_layout;
80145
80146 static inline void mm_init_cpumask(struct mm_struct *mm)
80147 {
80148 diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
80149 index c5d5278..f0b68c8 100644
80150 --- a/include/linux/mmiotrace.h
80151 +++ b/include/linux/mmiotrace.h
80152 @@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
80153 /* Called from ioremap.c */
80154 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
80155 void __iomem *addr);
80156 -extern void mmiotrace_iounmap(volatile void __iomem *addr);
80157 +extern void mmiotrace_iounmap(const volatile void __iomem *addr);
80158
80159 /* For anyone to insert markers. Remember trailing newline. */
80160 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
80161 @@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
80162 {
80163 }
80164
80165 -static inline void mmiotrace_iounmap(volatile void __iomem *addr)
80166 +static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
80167 {
80168 }
80169
80170 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
80171 index bd791e4..8617c34f 100644
80172 --- a/include/linux/mmzone.h
80173 +++ b/include/linux/mmzone.h
80174 @@ -396,7 +396,7 @@ struct zone {
80175 unsigned long flags; /* zone flags, see below */
80176
80177 /* Zone statistics */
80178 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80179 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80180
80181 /*
80182 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
80183 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
80184 index 45e9214..a7227d6 100644
80185 --- a/include/linux/mod_devicetable.h
80186 +++ b/include/linux/mod_devicetable.h
80187 @@ -13,7 +13,7 @@
80188 typedef unsigned long kernel_ulong_t;
80189 #endif
80190
80191 -#define PCI_ANY_ID (~0)
80192 +#define PCI_ANY_ID ((__u16)~0)
80193
80194 struct pci_device_id {
80195 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
80196 @@ -139,7 +139,7 @@ struct usb_device_id {
80197 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
80198 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
80199
80200 -#define HID_ANY_ID (~0)
80201 +#define HID_ANY_ID (~0U)
80202 #define HID_BUS_ANY 0xffff
80203 #define HID_GROUP_ANY 0x0000
80204
80205 @@ -467,7 +467,7 @@ struct dmi_system_id {
80206 const char *ident;
80207 struct dmi_strmatch matches[4];
80208 void *driver_data;
80209 -};
80210 +} __do_const;
80211 /*
80212 * struct dmi_device_id appears during expansion of
80213 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
80214 diff --git a/include/linux/module.h b/include/linux/module.h
80215 index 15cd6b1..f6e2e6a 100644
80216 --- a/include/linux/module.h
80217 +++ b/include/linux/module.h
80218 @@ -17,9 +17,11 @@
80219 #include <linux/moduleparam.h>
80220 #include <linux/tracepoint.h>
80221 #include <linux/export.h>
80222 +#include <linux/fs.h>
80223
80224 #include <linux/percpu.h>
80225 #include <asm/module.h>
80226 +#include <asm/pgtable.h>
80227
80228 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
80229 #define MODULE_SIG_STRING "~Module signature appended~\n"
80230 @@ -43,7 +45,7 @@ struct module_kobject {
80231 struct kobject *drivers_dir;
80232 struct module_param_attrs *mp;
80233 struct completion *kobj_completion;
80234 -};
80235 +} __randomize_layout;
80236
80237 struct module_attribute {
80238 struct attribute attr;
80239 @@ -55,12 +57,13 @@ struct module_attribute {
80240 int (*test)(struct module *);
80241 void (*free)(struct module *);
80242 };
80243 +typedef struct module_attribute __no_const module_attribute_no_const;
80244
80245 struct module_version_attribute {
80246 struct module_attribute mattr;
80247 const char *module_name;
80248 const char *version;
80249 -} __attribute__ ((__aligned__(sizeof(void *))));
80250 +} __do_const __attribute__ ((__aligned__(sizeof(void *))));
80251
80252 extern ssize_t __modver_version_show(struct module_attribute *,
80253 struct module_kobject *, char *);
80254 @@ -238,7 +241,7 @@ struct module
80255
80256 /* Sysfs stuff. */
80257 struct module_kobject mkobj;
80258 - struct module_attribute *modinfo_attrs;
80259 + module_attribute_no_const *modinfo_attrs;
80260 const char *version;
80261 const char *srcversion;
80262 struct kobject *holders_dir;
80263 @@ -287,19 +290,16 @@ struct module
80264 int (*init)(void);
80265
80266 /* If this is non-NULL, vfree after init() returns */
80267 - void *module_init;
80268 + void *module_init_rx, *module_init_rw;
80269
80270 /* Here is the actual code + data, vfree'd on unload. */
80271 - void *module_core;
80272 + void *module_core_rx, *module_core_rw;
80273
80274 /* Here are the sizes of the init and core sections */
80275 - unsigned int init_size, core_size;
80276 + unsigned int init_size_rw, core_size_rw;
80277
80278 /* The size of the executable code in each section. */
80279 - unsigned int init_text_size, core_text_size;
80280 -
80281 - /* Size of RO sections of the module (text+rodata) */
80282 - unsigned int init_ro_size, core_ro_size;
80283 + unsigned int init_size_rx, core_size_rx;
80284
80285 /* Arch-specific module values */
80286 struct mod_arch_specific arch;
80287 @@ -355,6 +355,10 @@ struct module
80288 #ifdef CONFIG_EVENT_TRACING
80289 struct ftrace_event_call **trace_events;
80290 unsigned int num_trace_events;
80291 + struct file_operations trace_id;
80292 + struct file_operations trace_enable;
80293 + struct file_operations trace_format;
80294 + struct file_operations trace_filter;
80295 #endif
80296 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
80297 unsigned int num_ftrace_callsites;
80298 @@ -378,7 +382,7 @@ struct module
80299 ctor_fn_t *ctors;
80300 unsigned int num_ctors;
80301 #endif
80302 -};
80303 +} __randomize_layout;
80304 #ifndef MODULE_ARCH_INIT
80305 #define MODULE_ARCH_INIT {}
80306 #endif
80307 @@ -399,16 +403,46 @@ bool is_module_address(unsigned long addr);
80308 bool is_module_percpu_address(unsigned long addr);
80309 bool is_module_text_address(unsigned long addr);
80310
80311 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
80312 +{
80313 +
80314 +#ifdef CONFIG_PAX_KERNEXEC
80315 + if (ktla_ktva(addr) >= (unsigned long)start &&
80316 + ktla_ktva(addr) < (unsigned long)start + size)
80317 + return 1;
80318 +#endif
80319 +
80320 + return ((void *)addr >= start && (void *)addr < start + size);
80321 +}
80322 +
80323 +static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
80324 +{
80325 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
80326 +}
80327 +
80328 +static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
80329 +{
80330 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
80331 +}
80332 +
80333 +static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
80334 +{
80335 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
80336 +}
80337 +
80338 +static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
80339 +{
80340 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
80341 +}
80342 +
80343 static inline int within_module_core(unsigned long addr, const struct module *mod)
80344 {
80345 - return (unsigned long)mod->module_core <= addr &&
80346 - addr < (unsigned long)mod->module_core + mod->core_size;
80347 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
80348 }
80349
80350 static inline int within_module_init(unsigned long addr, const struct module *mod)
80351 {
80352 - return (unsigned long)mod->module_init <= addr &&
80353 - addr < (unsigned long)mod->module_init + mod->init_size;
80354 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
80355 }
80356
80357 /* Search for module by name: must hold module_mutex. */
80358 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
80359 index 560ca53..ef621ef 100644
80360 --- a/include/linux/moduleloader.h
80361 +++ b/include/linux/moduleloader.h
80362 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
80363 sections. Returns NULL on failure. */
80364 void *module_alloc(unsigned long size);
80365
80366 +#ifdef CONFIG_PAX_KERNEXEC
80367 +void *module_alloc_exec(unsigned long size);
80368 +#else
80369 +#define module_alloc_exec(x) module_alloc(x)
80370 +#endif
80371 +
80372 /* Free memory returned from module_alloc. */
80373 void module_free(struct module *mod, void *module_region);
80374
80375 +#ifdef CONFIG_PAX_KERNEXEC
80376 +void module_free_exec(struct module *mod, void *module_region);
80377 +#else
80378 +#define module_free_exec(x, y) module_free((x), (y))
80379 +#endif
80380 +
80381 /*
80382 * Apply the given relocation to the (simplified) ELF. Return -error
80383 * or 0.
80384 @@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
80385 unsigned int relsec,
80386 struct module *me)
80387 {
80388 +#ifdef CONFIG_MODULES
80389 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
80390 +#endif
80391 return -ENOEXEC;
80392 }
80393 #endif
80394 @@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
80395 unsigned int relsec,
80396 struct module *me)
80397 {
80398 +#ifdef CONFIG_MODULES
80399 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
80400 +#endif
80401 return -ENOEXEC;
80402 }
80403 #endif
80404 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
80405 index c3eb102..073c4a6 100644
80406 --- a/include/linux/moduleparam.h
80407 +++ b/include/linux/moduleparam.h
80408 @@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
80409 * @len is usually just sizeof(string).
80410 */
80411 #define module_param_string(name, string, len, perm) \
80412 - static const struct kparam_string __param_string_##name \
80413 + static const struct kparam_string __param_string_##name __used \
80414 = { len, string }; \
80415 __module_param_call(MODULE_PARAM_PREFIX, name, \
80416 &param_ops_string, \
80417 @@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
80418 */
80419 #define module_param_array_named(name, array, type, nump, perm) \
80420 param_check_##type(name, &(array)[0]); \
80421 - static const struct kparam_array __param_arr_##name \
80422 + static const struct kparam_array __param_arr_##name __used \
80423 = { .max = ARRAY_SIZE(array), .num = nump, \
80424 .ops = &param_ops_##type, \
80425 .elemsize = sizeof(array[0]), .elem = array }; \
80426 diff --git a/include/linux/mount.h b/include/linux/mount.h
80427 index 371d346..fba2819 100644
80428 --- a/include/linux/mount.h
80429 +++ b/include/linux/mount.h
80430 @@ -56,7 +56,7 @@ struct vfsmount {
80431 struct dentry *mnt_root; /* root of the mounted tree */
80432 struct super_block *mnt_sb; /* pointer to superblock */
80433 int mnt_flags;
80434 -};
80435 +} __randomize_layout;
80436
80437 struct file; /* forward dec */
80438
80439 diff --git a/include/linux/namei.h b/include/linux/namei.h
80440 index 492de72..1bddcd4 100644
80441 --- a/include/linux/namei.h
80442 +++ b/include/linux/namei.h
80443 @@ -19,7 +19,7 @@ struct nameidata {
80444 unsigned seq, m_seq;
80445 int last_type;
80446 unsigned depth;
80447 - char *saved_names[MAX_NESTED_LINKS + 1];
80448 + const char *saved_names[MAX_NESTED_LINKS + 1];
80449 };
80450
80451 /*
80452 @@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
80453
80454 extern void nd_jump_link(struct nameidata *nd, struct path *path);
80455
80456 -static inline void nd_set_link(struct nameidata *nd, char *path)
80457 +static inline void nd_set_link(struct nameidata *nd, const char *path)
80458 {
80459 nd->saved_names[nd->depth] = path;
80460 }
80461
80462 -static inline char *nd_get_link(struct nameidata *nd)
80463 +static inline const char *nd_get_link(const struct nameidata *nd)
80464 {
80465 return nd->saved_names[nd->depth];
80466 }
80467 diff --git a/include/linux/net.h b/include/linux/net.h
80468 index 69be3e6..0fb422d 100644
80469 --- a/include/linux/net.h
80470 +++ b/include/linux/net.h
80471 @@ -192,7 +192,7 @@ struct net_proto_family {
80472 int (*create)(struct net *net, struct socket *sock,
80473 int protocol, int kern);
80474 struct module *owner;
80475 -};
80476 +} __do_const;
80477
80478 struct iovec;
80479 struct kvec;
80480 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
80481 index 2177a6b..67fc561 100644
80482 --- a/include/linux/netdevice.h
80483 +++ b/include/linux/netdevice.h
80484 @@ -1129,6 +1129,7 @@ struct net_device_ops {
80485 struct net_device *dev,
80486 void *priv);
80487 };
80488 +typedef struct net_device_ops __no_const net_device_ops_no_const;
80489
80490 /*
80491 * The DEVICE structure.
80492 @@ -1211,7 +1212,7 @@ struct net_device {
80493 int iflink;
80494
80495 struct net_device_stats stats;
80496 - atomic_long_t rx_dropped; /* dropped packets by core network
80497 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
80498 * Do not use this in drivers.
80499 */
80500
80501 diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
80502 index 2077489..a15e561 100644
80503 --- a/include/linux/netfilter.h
80504 +++ b/include/linux/netfilter.h
80505 @@ -84,7 +84,7 @@ struct nf_sockopt_ops {
80506 #endif
80507 /* Use the module struct to lock set/get code in place */
80508 struct module *owner;
80509 -};
80510 +} __do_const;
80511
80512 /* Function to register/unregister hook points. */
80513 int nf_register_hook(struct nf_hook_ops *reg);
80514 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
80515 index 28c7436..2d6156a 100644
80516 --- a/include/linux/netfilter/nfnetlink.h
80517 +++ b/include/linux/netfilter/nfnetlink.h
80518 @@ -19,7 +19,7 @@ struct nfnl_callback {
80519 const struct nlattr * const cda[]);
80520 const struct nla_policy *policy; /* netlink attribute policy */
80521 const u_int16_t attr_count; /* number of nlattr's */
80522 -};
80523 +} __do_const;
80524
80525 struct nfnetlink_subsystem {
80526 const char *name;
80527 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
80528 new file mode 100644
80529 index 0000000..33f4af8
80530 --- /dev/null
80531 +++ b/include/linux/netfilter/xt_gradm.h
80532 @@ -0,0 +1,9 @@
80533 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
80534 +#define _LINUX_NETFILTER_XT_GRADM_H 1
80535 +
80536 +struct xt_gradm_mtinfo {
80537 + __u16 flags;
80538 + __u16 invflags;
80539 +};
80540 +
80541 +#endif
80542 diff --git a/include/linux/nls.h b/include/linux/nls.h
80543 index 5dc635f..35f5e11 100644
80544 --- a/include/linux/nls.h
80545 +++ b/include/linux/nls.h
80546 @@ -31,7 +31,7 @@ struct nls_table {
80547 const unsigned char *charset2upper;
80548 struct module *owner;
80549 struct nls_table *next;
80550 -};
80551 +} __do_const;
80552
80553 /* this value hold the maximum octet of charset */
80554 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
80555 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
80556 index d14a4c3..a078786 100644
80557 --- a/include/linux/notifier.h
80558 +++ b/include/linux/notifier.h
80559 @@ -54,7 +54,8 @@ struct notifier_block {
80560 notifier_fn_t notifier_call;
80561 struct notifier_block __rcu *next;
80562 int priority;
80563 -};
80564 +} __do_const;
80565 +typedef struct notifier_block __no_const notifier_block_no_const;
80566
80567 struct atomic_notifier_head {
80568 spinlock_t lock;
80569 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
80570 index b2a0f15..4d7da32 100644
80571 --- a/include/linux/oprofile.h
80572 +++ b/include/linux/oprofile.h
80573 @@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
80574 int oprofilefs_create_ro_ulong(struct dentry * root,
80575 char const * name, ulong * val);
80576
80577 -/** Create a file for read-only access to an atomic_t. */
80578 +/** Create a file for read-only access to an atomic_unchecked_t. */
80579 int oprofilefs_create_ro_atomic(struct dentry * root,
80580 - char const * name, atomic_t * val);
80581 + char const * name, atomic_unchecked_t * val);
80582
80583 /** create a directory */
80584 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
80585 diff --git a/include/linux/padata.h b/include/linux/padata.h
80586 index 4386946..f50c615 100644
80587 --- a/include/linux/padata.h
80588 +++ b/include/linux/padata.h
80589 @@ -129,7 +129,7 @@ struct parallel_data {
80590 struct padata_serial_queue __percpu *squeue;
80591 atomic_t reorder_objects;
80592 atomic_t refcnt;
80593 - atomic_t seq_nr;
80594 + atomic_unchecked_t seq_nr;
80595 struct padata_cpumask cpumask;
80596 spinlock_t lock ____cacheline_aligned;
80597 unsigned int processed;
80598 diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
80599 index a2e2f1d..8a391d2 100644
80600 --- a/include/linux/pci_hotplug.h
80601 +++ b/include/linux/pci_hotplug.h
80602 @@ -71,7 +71,8 @@ struct hotplug_slot_ops {
80603 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
80604 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
80605 int (*reset_slot) (struct hotplug_slot *slot, int probe);
80606 -};
80607 +} __do_const;
80608 +typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
80609
80610 /**
80611 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
80612 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
80613 index 2e069d1..27054b8 100644
80614 --- a/include/linux/perf_event.h
80615 +++ b/include/linux/perf_event.h
80616 @@ -327,8 +327,8 @@ struct perf_event {
80617
80618 enum perf_event_active_state state;
80619 unsigned int attach_state;
80620 - local64_t count;
80621 - atomic64_t child_count;
80622 + local64_t count; /* PaX: fix it one day */
80623 + atomic64_unchecked_t child_count;
80624
80625 /*
80626 * These are the total time in nanoseconds that the event
80627 @@ -379,8 +379,8 @@ struct perf_event {
80628 * These accumulate total time (in nanoseconds) that children
80629 * events have been enabled and running, respectively.
80630 */
80631 - atomic64_t child_total_time_enabled;
80632 - atomic64_t child_total_time_running;
80633 + atomic64_unchecked_t child_total_time_enabled;
80634 + atomic64_unchecked_t child_total_time_running;
80635
80636 /*
80637 * Protect attach/detach and child_list:
80638 @@ -707,7 +707,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
80639 entry->ip[entry->nr++] = ip;
80640 }
80641
80642 -extern int sysctl_perf_event_paranoid;
80643 +extern int sysctl_perf_event_legitimately_concerned;
80644 extern int sysctl_perf_event_mlock;
80645 extern int sysctl_perf_event_sample_rate;
80646 extern int sysctl_perf_cpu_time_max_percent;
80647 @@ -722,19 +722,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
80648 loff_t *ppos);
80649
80650
80651 +static inline bool perf_paranoid_any(void)
80652 +{
80653 + return sysctl_perf_event_legitimately_concerned > 2;
80654 +}
80655 +
80656 static inline bool perf_paranoid_tracepoint_raw(void)
80657 {
80658 - return sysctl_perf_event_paranoid > -1;
80659 + return sysctl_perf_event_legitimately_concerned > -1;
80660 }
80661
80662 static inline bool perf_paranoid_cpu(void)
80663 {
80664 - return sysctl_perf_event_paranoid > 0;
80665 + return sysctl_perf_event_legitimately_concerned > 0;
80666 }
80667
80668 static inline bool perf_paranoid_kernel(void)
80669 {
80670 - return sysctl_perf_event_paranoid > 1;
80671 + return sysctl_perf_event_legitimately_concerned > 1;
80672 }
80673
80674 extern void perf_event_init(void);
80675 @@ -850,7 +855,7 @@ struct perf_pmu_events_attr {
80676 struct device_attribute attr;
80677 u64 id;
80678 const char *event_str;
80679 -};
80680 +} __do_const;
80681
80682 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
80683 static struct perf_pmu_events_attr _var = { \
80684 diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
80685 index 7246ef3..1539ea4 100644
80686 --- a/include/linux/pid_namespace.h
80687 +++ b/include/linux/pid_namespace.h
80688 @@ -43,7 +43,7 @@ struct pid_namespace {
80689 int hide_pid;
80690 int reboot; /* group exit code if this pidns was rebooted */
80691 unsigned int proc_inum;
80692 -};
80693 +} __randomize_layout;
80694
80695 extern struct pid_namespace init_pid_ns;
80696
80697 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
80698 index ab57526..94598804 100644
80699 --- a/include/linux/pipe_fs_i.h
80700 +++ b/include/linux/pipe_fs_i.h
80701 @@ -47,10 +47,10 @@ struct pipe_inode_info {
80702 struct mutex mutex;
80703 wait_queue_head_t wait;
80704 unsigned int nrbufs, curbuf, buffers;
80705 - unsigned int readers;
80706 - unsigned int writers;
80707 - unsigned int files;
80708 - unsigned int waiting_writers;
80709 + atomic_t readers;
80710 + atomic_t writers;
80711 + atomic_t files;
80712 + atomic_t waiting_writers;
80713 unsigned int r_counter;
80714 unsigned int w_counter;
80715 struct page *tmp_page;
80716 diff --git a/include/linux/pm.h b/include/linux/pm.h
80717 index a224c7f..92d8a97 100644
80718 --- a/include/linux/pm.h
80719 +++ b/include/linux/pm.h
80720 @@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
80721 struct dev_pm_domain {
80722 struct dev_pm_ops ops;
80723 };
80724 +typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
80725
80726 /*
80727 * The PM_EVENT_ messages are also used by drivers implementing the legacy
80728 diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
80729 index 7c1d252..0e7061d 100644
80730 --- a/include/linux/pm_domain.h
80731 +++ b/include/linux/pm_domain.h
80732 @@ -44,11 +44,11 @@ struct gpd_dev_ops {
80733 int (*thaw_early)(struct device *dev);
80734 int (*thaw)(struct device *dev);
80735 bool (*active_wakeup)(struct device *dev);
80736 -};
80737 +} __no_const;
80738
80739 struct gpd_cpu_data {
80740 unsigned int saved_exit_latency;
80741 - struct cpuidle_state *idle_state;
80742 + cpuidle_state_no_const *idle_state;
80743 };
80744
80745 struct generic_pm_domain {
80746 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
80747 index 6fa7cea..7bf6415 100644
80748 --- a/include/linux/pm_runtime.h
80749 +++ b/include/linux/pm_runtime.h
80750 @@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
80751
80752 static inline void pm_runtime_mark_last_busy(struct device *dev)
80753 {
80754 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
80755 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
80756 }
80757
80758 #else /* !CONFIG_PM_RUNTIME */
80759 diff --git a/include/linux/pnp.h b/include/linux/pnp.h
80760 index 195aafc..49a7bc2 100644
80761 --- a/include/linux/pnp.h
80762 +++ b/include/linux/pnp.h
80763 @@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
80764 struct pnp_fixup {
80765 char id[7];
80766 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
80767 -};
80768 +} __do_const;
80769
80770 /* config parameters */
80771 #define PNP_CONFIG_NORMAL 0x0001
80772 diff --git a/include/linux/poison.h b/include/linux/poison.h
80773 index 2110a81..13a11bb 100644
80774 --- a/include/linux/poison.h
80775 +++ b/include/linux/poison.h
80776 @@ -19,8 +19,8 @@
80777 * under normal circumstances, used to verify that nobody uses
80778 * non-initialized list entries.
80779 */
80780 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
80781 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
80782 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
80783 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
80784
80785 /********** include/linux/timer.h **********/
80786 /*
80787 diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
80788 index d8b187c3..9a9257a 100644
80789 --- a/include/linux/power/smartreflex.h
80790 +++ b/include/linux/power/smartreflex.h
80791 @@ -238,7 +238,7 @@ struct omap_sr_class_data {
80792 int (*notify)(struct omap_sr *sr, u32 status);
80793 u8 notify_flags;
80794 u8 class_type;
80795 -};
80796 +} __do_const;
80797
80798 /**
80799 * struct omap_sr_nvalue_table - Smartreflex n-target value info
80800 diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
80801 index 4ea1d37..80f4b33 100644
80802 --- a/include/linux/ppp-comp.h
80803 +++ b/include/linux/ppp-comp.h
80804 @@ -84,7 +84,7 @@ struct compressor {
80805 struct module *owner;
80806 /* Extra skb space needed by the compressor algorithm */
80807 unsigned int comp_extra;
80808 -};
80809 +} __do_const;
80810
80811 /*
80812 * The return value from decompress routine is the length of the
80813 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
80814 index a3d9dc8..8af9922 100644
80815 --- a/include/linux/preempt.h
80816 +++ b/include/linux/preempt.h
80817 @@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
80818 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
80819 #endif
80820
80821 +#define raw_preempt_count_add(val) __preempt_count_add(val)
80822 +#define raw_preempt_count_sub(val) __preempt_count_sub(val)
80823 +
80824 #define __preempt_count_inc() __preempt_count_add(1)
80825 #define __preempt_count_dec() __preempt_count_sub(1)
80826
80827 #define preempt_count_inc() preempt_count_add(1)
80828 +#define raw_preempt_count_inc() raw_preempt_count_add(1)
80829 #define preempt_count_dec() preempt_count_sub(1)
80830 +#define raw_preempt_count_dec() raw_preempt_count_sub(1)
80831
80832 #ifdef CONFIG_PREEMPT_COUNT
80833
80834 @@ -41,6 +46,12 @@ do { \
80835 barrier(); \
80836 } while (0)
80837
80838 +#define raw_preempt_disable() \
80839 +do { \
80840 + raw_preempt_count_inc(); \
80841 + barrier(); \
80842 +} while (0)
80843 +
80844 #define sched_preempt_enable_no_resched() \
80845 do { \
80846 barrier(); \
80847 @@ -49,6 +60,12 @@ do { \
80848
80849 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
80850
80851 +#define raw_preempt_enable_no_resched() \
80852 +do { \
80853 + barrier(); \
80854 + raw_preempt_count_dec(); \
80855 +} while (0)
80856 +
80857 #ifdef CONFIG_PREEMPT
80858 #define preempt_enable() \
80859 do { \
80860 @@ -105,8 +122,10 @@ do { \
80861 * region.
80862 */
80863 #define preempt_disable() barrier()
80864 +#define raw_preempt_disable() barrier()
80865 #define sched_preempt_enable_no_resched() barrier()
80866 #define preempt_enable_no_resched() barrier()
80867 +#define raw_preempt_enable_no_resched() barrier()
80868 #define preempt_enable() barrier()
80869 #define preempt_check_resched() do { } while (0)
80870
80871 diff --git a/include/linux/printk.h b/include/linux/printk.h
80872 index 6949258..7c4730e 100644
80873 --- a/include/linux/printk.h
80874 +++ b/include/linux/printk.h
80875 @@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
80876 void early_printk(const char *s, ...) { }
80877 #endif
80878
80879 +extern int kptr_restrict;
80880 +
80881 #ifdef CONFIG_PRINTK
80882 asmlinkage __printf(5, 0)
80883 int vprintk_emit(int facility, int level,
80884 @@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
80885
80886 extern int printk_delay_msec;
80887 extern int dmesg_restrict;
80888 -extern int kptr_restrict;
80889
80890 extern void wake_up_klogd(void);
80891
80892 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
80893 index 608e60a..c26f864 100644
80894 --- a/include/linux/proc_fs.h
80895 +++ b/include/linux/proc_fs.h
80896 @@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
80897 return proc_create_data(name, mode, parent, proc_fops, NULL);
80898 }
80899
80900 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
80901 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
80902 +{
80903 +#ifdef CONFIG_GRKERNSEC_PROC_USER
80904 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
80905 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80906 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
80907 +#else
80908 + return proc_create_data(name, mode, parent, proc_fops, NULL);
80909 +#endif
80910 +}
80911 +
80912 +
80913 extern void proc_set_size(struct proc_dir_entry *, loff_t);
80914 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
80915 extern void *PDE_DATA(const struct inode *);
80916 diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
80917 index 34a1e10..70f6bde 100644
80918 --- a/include/linux/proc_ns.h
80919 +++ b/include/linux/proc_ns.h
80920 @@ -14,7 +14,7 @@ struct proc_ns_operations {
80921 void (*put)(void *ns);
80922 int (*install)(struct nsproxy *nsproxy, void *ns);
80923 unsigned int (*inum)(void *ns);
80924 -};
80925 +} __do_const __randomize_layout;
80926
80927 struct proc_ns {
80928 void *ns;
80929 diff --git a/include/linux/quota.h b/include/linux/quota.h
80930 index cc7494a..1e27036 100644
80931 --- a/include/linux/quota.h
80932 +++ b/include/linux/quota.h
80933 @@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
80934
80935 extern bool qid_eq(struct kqid left, struct kqid right);
80936 extern bool qid_lt(struct kqid left, struct kqid right);
80937 -extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80938 +extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
80939 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
80940 extern bool qid_valid(struct kqid qid);
80941
80942 diff --git a/include/linux/random.h b/include/linux/random.h
80943 index 4002b3d..d5ad855 100644
80944 --- a/include/linux/random.h
80945 +++ b/include/linux/random.h
80946 @@ -10,9 +10,19 @@
80947
80948
80949 extern void add_device_randomness(const void *, unsigned int);
80950 +
80951 +static inline void add_latent_entropy(void)
80952 +{
80953 +
80954 +#ifdef LATENT_ENTROPY_PLUGIN
80955 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
80956 +#endif
80957 +
80958 +}
80959 +
80960 extern void add_input_randomness(unsigned int type, unsigned int code,
80961 - unsigned int value);
80962 -extern void add_interrupt_randomness(int irq, int irq_flags);
80963 + unsigned int value) __latent_entropy;
80964 +extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
80965
80966 extern void get_random_bytes(void *buf, int nbytes);
80967 extern void get_random_bytes_arch(void *buf, int nbytes);
80968 @@ -23,10 +33,10 @@ extern int random_int_secret_init(void);
80969 extern const struct file_operations random_fops, urandom_fops;
80970 #endif
80971
80972 -unsigned int get_random_int(void);
80973 +unsigned int __intentional_overflow(-1) get_random_int(void);
80974 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
80975
80976 -u32 prandom_u32(void);
80977 +u32 prandom_u32(void) __intentional_overflow(-1);
80978 void prandom_bytes(void *buf, int nbytes);
80979 void prandom_seed(u32 seed);
80980 void prandom_reseed_late(void);
80981 @@ -38,6 +48,11 @@ struct rnd_state {
80982 u32 prandom_u32_state(struct rnd_state *state);
80983 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
80984
80985 +static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
80986 +{
80987 + return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
80988 +}
80989 +
80990 /*
80991 * Handle minimum values for seeds
80992 */
80993 diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
80994 index fea49b5..2ac22bb 100644
80995 --- a/include/linux/rbtree_augmented.h
80996 +++ b/include/linux/rbtree_augmented.h
80997 @@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
80998 old->rbaugmented = rbcompute(old); \
80999 } \
81000 rbstatic const struct rb_augment_callbacks rbname = { \
81001 - rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
81002 + .propagate = rbname ## _propagate, \
81003 + .copy = rbname ## _copy, \
81004 + .rotate = rbname ## _rotate \
81005 };
81006
81007
81008 diff --git a/include/linux/rculist.h b/include/linux/rculist.h
81009 index 45a0a9e..e83788e 100644
81010 --- a/include/linux/rculist.h
81011 +++ b/include/linux/rculist.h
81012 @@ -29,8 +29,8 @@
81013 */
81014 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
81015 {
81016 - ACCESS_ONCE(list->next) = list;
81017 - ACCESS_ONCE(list->prev) = list;
81018 + ACCESS_ONCE_RW(list->next) = list;
81019 + ACCESS_ONCE_RW(list->prev) = list;
81020 }
81021
81022 /*
81023 @@ -59,6 +59,9 @@ extern void __list_add_rcu(struct list_head *new,
81024 struct list_head *prev, struct list_head *next);
81025 #endif
81026
81027 +extern void __pax_list_add_rcu(struct list_head *new,
81028 + struct list_head *prev, struct list_head *next);
81029 +
81030 /**
81031 * list_add_rcu - add a new entry to rcu-protected list
81032 * @new: new entry to be added
81033 @@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
81034 __list_add_rcu(new, head, head->next);
81035 }
81036
81037 +static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
81038 +{
81039 + __pax_list_add_rcu(new, head, head->next);
81040 +}
81041 +
81042 /**
81043 * list_add_tail_rcu - add a new entry to rcu-protected list
81044 * @new: new entry to be added
81045 @@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
81046 __list_add_rcu(new, head->prev, head);
81047 }
81048
81049 +static inline void pax_list_add_tail_rcu(struct list_head *new,
81050 + struct list_head *head)
81051 +{
81052 + __pax_list_add_rcu(new, head->prev, head);
81053 +}
81054 +
81055 /**
81056 * list_del_rcu - deletes entry from list without re-initialization
81057 * @entry: the element to delete from the list.
81058 @@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
81059 entry->prev = LIST_POISON2;
81060 }
81061
81062 +extern void pax_list_del_rcu(struct list_head *entry);
81063 +
81064 /**
81065 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
81066 * @n: the element to delete from the hash list.
81067 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
81068 index 9e7db9e..7d4fd72 100644
81069 --- a/include/linux/reboot.h
81070 +++ b/include/linux/reboot.h
81071 @@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
81072 */
81073
81074 extern void migrate_to_reboot_cpu(void);
81075 -extern void machine_restart(char *cmd);
81076 -extern void machine_halt(void);
81077 -extern void machine_power_off(void);
81078 +extern void machine_restart(char *cmd) __noreturn;
81079 +extern void machine_halt(void) __noreturn;
81080 +extern void machine_power_off(void) __noreturn;
81081
81082 extern void machine_shutdown(void);
81083 struct pt_regs;
81084 @@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
81085 */
81086
81087 extern void kernel_restart_prepare(char *cmd);
81088 -extern void kernel_restart(char *cmd);
81089 -extern void kernel_halt(void);
81090 -extern void kernel_power_off(void);
81091 +extern void kernel_restart(char *cmd) __noreturn;
81092 +extern void kernel_halt(void) __noreturn;
81093 +extern void kernel_power_off(void) __noreturn;
81094
81095 extern int C_A_D; /* for sysctl */
81096 void ctrl_alt_del(void);
81097 @@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
81098 * Emergency restart, callable from an interrupt handler.
81099 */
81100
81101 -extern void emergency_restart(void);
81102 +extern void emergency_restart(void) __noreturn;
81103 #include <asm/emergency-restart.h>
81104
81105 #endif /* _LINUX_REBOOT_H */
81106 diff --git a/include/linux/regset.h b/include/linux/regset.h
81107 index 8e0c9fe..ac4d221 100644
81108 --- a/include/linux/regset.h
81109 +++ b/include/linux/regset.h
81110 @@ -161,7 +161,8 @@ struct user_regset {
81111 unsigned int align;
81112 unsigned int bias;
81113 unsigned int core_note_type;
81114 -};
81115 +} __do_const;
81116 +typedef struct user_regset __no_const user_regset_no_const;
81117
81118 /**
81119 * struct user_regset_view - available regsets
81120 diff --git a/include/linux/relay.h b/include/linux/relay.h
81121 index d7c8359..818daf5 100644
81122 --- a/include/linux/relay.h
81123 +++ b/include/linux/relay.h
81124 @@ -157,7 +157,7 @@ struct rchan_callbacks
81125 * The callback should return 0 if successful, negative if not.
81126 */
81127 int (*remove_buf_file)(struct dentry *dentry);
81128 -};
81129 +} __no_const;
81130
81131 /*
81132 * CONFIG_RELAY kernel API, kernel/relay.c
81133 diff --git a/include/linux/rio.h b/include/linux/rio.h
81134 index b71d573..2f940bd 100644
81135 --- a/include/linux/rio.h
81136 +++ b/include/linux/rio.h
81137 @@ -355,7 +355,7 @@ struct rio_ops {
81138 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
81139 u64 rstart, u32 size, u32 flags);
81140 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
81141 -};
81142 +} __no_const;
81143
81144 #define RIO_RESOURCE_MEM 0x00000100
81145 #define RIO_RESOURCE_DOORBELL 0x00000200
81146 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
81147 index 6dacb93..6174423 100644
81148 --- a/include/linux/rmap.h
81149 +++ b/include/linux/rmap.h
81150 @@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
81151 void anon_vma_init(void); /* create anon_vma_cachep */
81152 int anon_vma_prepare(struct vm_area_struct *);
81153 void unlink_anon_vmas(struct vm_area_struct *);
81154 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
81155 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
81156 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
81157 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
81158
81159 static inline void anon_vma_merge(struct vm_area_struct *vma,
81160 struct vm_area_struct *next)
81161 diff --git a/include/linux/sched.h b/include/linux/sched.h
81162 index 53f97eb..1d90705 100644
81163 --- a/include/linux/sched.h
81164 +++ b/include/linux/sched.h
81165 @@ -63,6 +63,7 @@ struct bio_list;
81166 struct fs_struct;
81167 struct perf_event_context;
81168 struct blk_plug;
81169 +struct linux_binprm;
81170
81171 /*
81172 * List of flags we want to share for kernel threads,
81173 @@ -304,7 +305,7 @@ extern char __sched_text_start[], __sched_text_end[];
81174 extern int in_sched_functions(unsigned long addr);
81175
81176 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
81177 -extern signed long schedule_timeout(signed long timeout);
81178 +extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
81179 extern signed long schedule_timeout_interruptible(signed long timeout);
81180 extern signed long schedule_timeout_killable(signed long timeout);
81181 extern signed long schedule_timeout_uninterruptible(signed long timeout);
81182 @@ -315,6 +316,19 @@ struct nsproxy;
81183 struct user_namespace;
81184
81185 #ifdef CONFIG_MMU
81186 +
81187 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
81188 +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
81189 +#else
81190 +static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
81191 +{
81192 + return 0;
81193 +}
81194 +#endif
81195 +
81196 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
81197 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
81198 +
81199 extern void arch_pick_mmap_layout(struct mm_struct *mm);
81200 extern unsigned long
81201 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
81202 @@ -600,6 +614,17 @@ struct signal_struct {
81203 #ifdef CONFIG_TASKSTATS
81204 struct taskstats *stats;
81205 #endif
81206 +
81207 +#ifdef CONFIG_GRKERNSEC
81208 + u32 curr_ip;
81209 + u32 saved_ip;
81210 + u32 gr_saddr;
81211 + u32 gr_daddr;
81212 + u16 gr_sport;
81213 + u16 gr_dport;
81214 + u8 used_accept:1;
81215 +#endif
81216 +
81217 #ifdef CONFIG_AUDIT
81218 unsigned audit_tty;
81219 unsigned audit_tty_log_passwd;
81220 @@ -626,7 +651,7 @@ struct signal_struct {
81221 struct mutex cred_guard_mutex; /* guard against foreign influences on
81222 * credential calculations
81223 * (notably. ptrace) */
81224 -};
81225 +} __randomize_layout;
81226
81227 /*
81228 * Bits in flags field of signal_struct.
81229 @@ -680,6 +705,14 @@ struct user_struct {
81230 struct key *session_keyring; /* UID's default session keyring */
81231 #endif
81232
81233 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
81234 + unsigned char kernel_banned;
81235 +#endif
81236 +#ifdef CONFIG_GRKERNSEC_BRUTE
81237 + unsigned char suid_banned;
81238 + unsigned long suid_ban_expires;
81239 +#endif
81240 +
81241 /* Hash table maintenance information */
81242 struct hlist_node uidhash_node;
81243 kuid_t uid;
81244 @@ -687,7 +720,7 @@ struct user_struct {
81245 #ifdef CONFIG_PERF_EVENTS
81246 atomic_long_t locked_vm;
81247 #endif
81248 -};
81249 +} __randomize_layout;
81250
81251 extern int uids_sysfs_init(void);
81252
81253 @@ -1162,8 +1195,8 @@ struct task_struct {
81254 struct list_head thread_group;
81255
81256 struct completion *vfork_done; /* for vfork() */
81257 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
81258 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
81259 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
81260 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
81261
81262 cputime_t utime, stime, utimescaled, stimescaled;
81263 cputime_t gtime;
81264 @@ -1188,11 +1221,6 @@ struct task_struct {
81265 struct task_cputime cputime_expires;
81266 struct list_head cpu_timers[3];
81267
81268 -/* process credentials */
81269 - const struct cred __rcu *real_cred; /* objective and real subjective task
81270 - * credentials (COW) */
81271 - const struct cred __rcu *cred; /* effective (overridable) subjective task
81272 - * credentials (COW) */
81273 char comm[TASK_COMM_LEN]; /* executable name excluding path
81274 - access with [gs]et_task_comm (which lock
81275 it with task_lock())
81276 @@ -1209,6 +1237,10 @@ struct task_struct {
81277 #endif
81278 /* CPU-specific state of this task */
81279 struct thread_struct thread;
81280 +/* thread_info moved to task_struct */
81281 +#ifdef CONFIG_X86
81282 + struct thread_info tinfo;
81283 +#endif
81284 /* filesystem information */
81285 struct fs_struct *fs;
81286 /* open file information */
81287 @@ -1282,6 +1314,10 @@ struct task_struct {
81288 gfp_t lockdep_reclaim_gfp;
81289 #endif
81290
81291 +/* process credentials */
81292 + const struct cred __rcu *real_cred; /* objective and real subjective task
81293 + * credentials (COW) */
81294 +
81295 /* journalling filesystem info */
81296 void *journal_info;
81297
81298 @@ -1320,6 +1356,10 @@ struct task_struct {
81299 /* cg_list protected by css_set_lock and tsk->alloc_lock */
81300 struct list_head cg_list;
81301 #endif
81302 +
81303 + const struct cred __rcu *cred; /* effective (overridable) subjective task
81304 + * credentials (COW) */
81305 +
81306 #ifdef CONFIG_FUTEX
81307 struct robust_list_head __user *robust_list;
81308 #ifdef CONFIG_COMPAT
81309 @@ -1454,7 +1494,78 @@ struct task_struct {
81310 unsigned int sequential_io;
81311 unsigned int sequential_io_avg;
81312 #endif
81313 -};
81314 +
81315 +#ifdef CONFIG_GRKERNSEC
81316 + /* grsecurity */
81317 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81318 + u64 exec_id;
81319 +#endif
81320 +#ifdef CONFIG_GRKERNSEC_SETXID
81321 + const struct cred *delayed_cred;
81322 +#endif
81323 + struct dentry *gr_chroot_dentry;
81324 + struct acl_subject_label *acl;
81325 + struct acl_subject_label *tmpacl;
81326 + struct acl_role_label *role;
81327 + struct file *exec_file;
81328 + unsigned long brute_expires;
81329 + u16 acl_role_id;
81330 + u8 inherited;
81331 + /* is this the task that authenticated to the special role */
81332 + u8 acl_sp_role;
81333 + u8 is_writable;
81334 + u8 brute;
81335 + u8 gr_is_chrooted;
81336 +#endif
81337 +
81338 +} __randomize_layout;
81339 +
81340 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
81341 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
81342 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
81343 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
81344 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
81345 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
81346 +
81347 +#ifdef CONFIG_PAX_SOFTMODE
81348 +extern int pax_softmode;
81349 +#endif
81350 +
81351 +extern int pax_check_flags(unsigned long *);
81352 +#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
81353 +
81354 +/* if tsk != current then task_lock must be held on it */
81355 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
81356 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
81357 +{
81358 + if (likely(tsk->mm))
81359 + return tsk->mm->pax_flags;
81360 + else
81361 + return 0UL;
81362 +}
81363 +
81364 +/* if tsk != current then task_lock must be held on it */
81365 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
81366 +{
81367 + if (likely(tsk->mm)) {
81368 + tsk->mm->pax_flags = flags;
81369 + return 0;
81370 + }
81371 + return -EINVAL;
81372 +}
81373 +#endif
81374 +
81375 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
81376 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
81377 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
81378 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
81379 +#endif
81380 +
81381 +struct path;
81382 +extern char *pax_get_path(const struct path *path, char *buf, int buflen);
81383 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
81384 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
81385 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
81386
81387 /* Future-safe accessor for struct task_struct's cpus_allowed. */
81388 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
81389 @@ -1531,7 +1642,7 @@ struct pid_namespace;
81390 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
81391 struct pid_namespace *ns);
81392
81393 -static inline pid_t task_pid_nr(struct task_struct *tsk)
81394 +static inline pid_t task_pid_nr(const struct task_struct *tsk)
81395 {
81396 return tsk->pid;
81397 }
81398 @@ -1981,7 +2092,9 @@ void yield(void);
81399 extern struct exec_domain default_exec_domain;
81400
81401 union thread_union {
81402 +#ifndef CONFIG_X86
81403 struct thread_info thread_info;
81404 +#endif
81405 unsigned long stack[THREAD_SIZE/sizeof(long)];
81406 };
81407
81408 @@ -2014,6 +2127,7 @@ extern struct pid_namespace init_pid_ns;
81409 */
81410
81411 extern struct task_struct *find_task_by_vpid(pid_t nr);
81412 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
81413 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
81414 struct pid_namespace *ns);
81415
81416 @@ -2178,7 +2292,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
81417 extern void exit_itimers(struct signal_struct *);
81418 extern void flush_itimer_signals(void);
81419
81420 -extern void do_group_exit(int);
81421 +extern __noreturn void do_group_exit(int);
81422
81423 extern int allow_signal(int);
81424 extern int disallow_signal(int);
81425 @@ -2369,9 +2483,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
81426
81427 #endif
81428
81429 -static inline int object_is_on_stack(void *obj)
81430 +static inline int object_starts_on_stack(void *obj)
81431 {
81432 - void *stack = task_stack_page(current);
81433 + const void *stack = task_stack_page(current);
81434
81435 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
81436 }
81437 diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
81438 index e3347c5..f682891 100644
81439 --- a/include/linux/sched/sysctl.h
81440 +++ b/include/linux/sched/sysctl.h
81441 @@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
81442 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
81443
81444 extern int sysctl_max_map_count;
81445 +extern unsigned long sysctl_heap_stack_gap;
81446
81447 extern unsigned int sysctl_sched_latency;
81448 extern unsigned int sysctl_sched_min_granularity;
81449 diff --git a/include/linux/security.h b/include/linux/security.h
81450 index 5623a7f..b352409 100644
81451 --- a/include/linux/security.h
81452 +++ b/include/linux/security.h
81453 @@ -27,6 +27,7 @@
81454 #include <linux/slab.h>
81455 #include <linux/err.h>
81456 #include <linux/string.h>
81457 +#include <linux/grsecurity.h>
81458
81459 struct linux_binprm;
81460 struct cred;
81461 @@ -116,8 +117,6 @@ struct seq_file;
81462
81463 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
81464
81465 -void reset_security_ops(void);
81466 -
81467 #ifdef CONFIG_MMU
81468 extern unsigned long mmap_min_addr;
81469 extern unsigned long dac_mmap_min_addr;
81470 diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
81471 index dc368b8..e895209 100644
81472 --- a/include/linux/semaphore.h
81473 +++ b/include/linux/semaphore.h
81474 @@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
81475 }
81476
81477 extern void down(struct semaphore *sem);
81478 -extern int __must_check down_interruptible(struct semaphore *sem);
81479 +extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
81480 extern int __must_check down_killable(struct semaphore *sem);
81481 extern int __must_check down_trylock(struct semaphore *sem);
81482 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
81483 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
81484 index 52e0097..09625ef 100644
81485 --- a/include/linux/seq_file.h
81486 +++ b/include/linux/seq_file.h
81487 @@ -27,6 +27,9 @@ struct seq_file {
81488 struct mutex lock;
81489 const struct seq_operations *op;
81490 int poll_event;
81491 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81492 + u64 exec_id;
81493 +#endif
81494 #ifdef CONFIG_USER_NS
81495 struct user_namespace *user_ns;
81496 #endif
81497 @@ -39,6 +42,7 @@ struct seq_operations {
81498 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
81499 int (*show) (struct seq_file *m, void *v);
81500 };
81501 +typedef struct seq_operations __no_const seq_operations_no_const;
81502
81503 #define SEQ_SKIP 1
81504
81505 diff --git a/include/linux/shm.h b/include/linux/shm.h
81506 index 429c199..4d42e38 100644
81507 --- a/include/linux/shm.h
81508 +++ b/include/linux/shm.h
81509 @@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
81510
81511 /* The task created the shm object. NULL if the task is dead. */
81512 struct task_struct *shm_creator;
81513 +#ifdef CONFIG_GRKERNSEC
81514 + time_t shm_createtime;
81515 + pid_t shm_lapid;
81516 +#endif
81517 };
81518
81519 /* shm_mode upper byte flags */
81520 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
81521 index 37cb679..dbaebc0 100644
81522 --- a/include/linux/skbuff.h
81523 +++ b/include/linux/skbuff.h
81524 @@ -643,7 +643,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
81525 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
81526 int node);
81527 struct sk_buff *build_skb(void *data, unsigned int frag_size);
81528 -static inline struct sk_buff *alloc_skb(unsigned int size,
81529 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
81530 gfp_t priority)
81531 {
81532 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
81533 @@ -750,7 +750,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
81534 */
81535 static inline int skb_queue_empty(const struct sk_buff_head *list)
81536 {
81537 - return list->next == (struct sk_buff *)list;
81538 + return list->next == (const struct sk_buff *)list;
81539 }
81540
81541 /**
81542 @@ -763,7 +763,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
81543 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
81544 const struct sk_buff *skb)
81545 {
81546 - return skb->next == (struct sk_buff *)list;
81547 + return skb->next == (const struct sk_buff *)list;
81548 }
81549
81550 /**
81551 @@ -776,7 +776,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
81552 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
81553 const struct sk_buff *skb)
81554 {
81555 - return skb->prev == (struct sk_buff *)list;
81556 + return skb->prev == (const struct sk_buff *)list;
81557 }
81558
81559 /**
81560 @@ -1686,7 +1686,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
81561 return skb->inner_transport_header - skb->inner_network_header;
81562 }
81563
81564 -static inline int skb_network_offset(const struct sk_buff *skb)
81565 +static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
81566 {
81567 return skb_network_header(skb) - skb->data;
81568 }
81569 @@ -1746,7 +1746,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
81570 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
81571 */
81572 #ifndef NET_SKB_PAD
81573 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
81574 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
81575 #endif
81576
81577 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
81578 @@ -2345,7 +2345,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
81579 int *err);
81580 unsigned int datagram_poll(struct file *file, struct socket *sock,
81581 struct poll_table_struct *wait);
81582 -int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
81583 +int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
81584 struct iovec *to, int size);
81585 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
81586 struct iovec *iov);
81587 @@ -2618,6 +2618,9 @@ static inline void nf_reset(struct sk_buff *skb)
81588 nf_bridge_put(skb->nf_bridge);
81589 skb->nf_bridge = NULL;
81590 #endif
81591 +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
81592 + skb->nf_trace = 0;
81593 +#endif
81594 }
81595
81596 static inline void nf_reset_trace(struct sk_buff *skb)
81597 diff --git a/include/linux/slab.h b/include/linux/slab.h
81598 index 1e2f4fe..df49ca6 100644
81599 --- a/include/linux/slab.h
81600 +++ b/include/linux/slab.h
81601 @@ -14,15 +14,29 @@
81602 #include <linux/gfp.h>
81603 #include <linux/types.h>
81604 #include <linux/workqueue.h>
81605 -
81606 +#include <linux/err.h>
81607
81608 /*
81609 * Flags to pass to kmem_cache_create().
81610 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
81611 */
81612 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
81613 +
81614 +#ifdef CONFIG_PAX_USERCOPY_SLABS
81615 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
81616 +#else
81617 +#define SLAB_USERCOPY 0x00000000UL
81618 +#endif
81619 +
81620 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
81621 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
81622 +
81623 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
81624 +#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
81625 +#else
81626 +#define SLAB_NO_SANITIZE 0x00000000UL
81627 +#endif
81628 +
81629 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
81630 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
81631 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
81632 @@ -98,10 +112,13 @@
81633 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
81634 * Both make kfree a no-op.
81635 */
81636 -#define ZERO_SIZE_PTR ((void *)16)
81637 +#define ZERO_SIZE_PTR \
81638 +({ \
81639 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
81640 + (void *)(-MAX_ERRNO-1L); \
81641 +})
81642
81643 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
81644 - (unsigned long)ZERO_SIZE_PTR)
81645 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
81646
81647 #include <linux/kmemleak.h>
81648
81649 @@ -142,6 +159,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
81650 void kfree(const void *);
81651 void kzfree(const void *);
81652 size_t ksize(const void *);
81653 +const char *check_heap_object(const void *ptr, unsigned long n);
81654 +bool is_usercopy_object(const void *ptr);
81655
81656 /*
81657 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
81658 @@ -174,7 +193,7 @@ struct kmem_cache {
81659 unsigned int align; /* Alignment as calculated */
81660 unsigned long flags; /* Active flags on the slab */
81661 const char *name; /* Slab name for sysfs */
81662 - int refcount; /* Use counter */
81663 + atomic_t refcount; /* Use counter */
81664 void (*ctor)(void *); /* Called on object slot creation */
81665 struct list_head list; /* List of all slab caches on the system */
81666 };
81667 @@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
81668 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
81669 #endif
81670
81671 +#ifdef CONFIG_PAX_USERCOPY_SLABS
81672 +extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
81673 +#endif
81674 +
81675 /*
81676 * Figure out which kmalloc slab an allocation of a certain size
81677 * belongs to.
81678 @@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
81679 * 2 = 120 .. 192 bytes
81680 * n = 2^(n-1) .. 2^n -1
81681 */
81682 -static __always_inline int kmalloc_index(size_t size)
81683 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
81684 {
81685 if (!size)
81686 return 0;
81687 @@ -299,11 +322,11 @@ static __always_inline int kmalloc_index(size_t size)
81688 }
81689 #endif /* !CONFIG_SLOB */
81690
81691 -void *__kmalloc(size_t size, gfp_t flags);
81692 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
81693 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
81694
81695 #ifdef CONFIG_NUMA
81696 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
81697 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
81698 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
81699 #else
81700 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
81701 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
81702 index 09bfffb..4fc80fb 100644
81703 --- a/include/linux/slab_def.h
81704 +++ b/include/linux/slab_def.h
81705 @@ -36,7 +36,7 @@ struct kmem_cache {
81706 /* 4) cache creation/removal */
81707 const char *name;
81708 struct list_head list;
81709 - int refcount;
81710 + atomic_t refcount;
81711 int object_size;
81712 int align;
81713
81714 @@ -52,10 +52,14 @@ struct kmem_cache {
81715 unsigned long node_allocs;
81716 unsigned long node_frees;
81717 unsigned long node_overflow;
81718 - atomic_t allochit;
81719 - atomic_t allocmiss;
81720 - atomic_t freehit;
81721 - atomic_t freemiss;
81722 + atomic_unchecked_t allochit;
81723 + atomic_unchecked_t allocmiss;
81724 + atomic_unchecked_t freehit;
81725 + atomic_unchecked_t freemiss;
81726 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
81727 + atomic_unchecked_t sanitized;
81728 + atomic_unchecked_t not_sanitized;
81729 +#endif
81730
81731 /*
81732 * If debugging is enabled, then the allocator can add additional
81733 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
81734 index f56bfa9..8378a26 100644
81735 --- a/include/linux/slub_def.h
81736 +++ b/include/linux/slub_def.h
81737 @@ -74,7 +74,7 @@ struct kmem_cache {
81738 struct kmem_cache_order_objects max;
81739 struct kmem_cache_order_objects min;
81740 gfp_t allocflags; /* gfp flags to use on each alloc */
81741 - int refcount; /* Refcount for slab cache destroy */
81742 + atomic_t refcount; /* Refcount for slab cache destroy */
81743 void (*ctor)(void *);
81744 int inuse; /* Offset to metadata */
81745 int align; /* Alignment */
81746 diff --git a/include/linux/smp.h b/include/linux/smp.h
81747 index 5da22ee..71d8a28 100644
81748 --- a/include/linux/smp.h
81749 +++ b/include/linux/smp.h
81750 @@ -176,7 +176,9 @@ static inline void kick_all_cpus_sync(void) { }
81751 #endif
81752
81753 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
81754 +#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
81755 #define put_cpu() preempt_enable()
81756 +#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
81757
81758 /*
81759 * Callback to arch code if there's nosmp or maxcpus=0 on the
81760 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
81761 index 54f91d3..be2c379 100644
81762 --- a/include/linux/sock_diag.h
81763 +++ b/include/linux/sock_diag.h
81764 @@ -11,7 +11,7 @@ struct sock;
81765 struct sock_diag_handler {
81766 __u8 family;
81767 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
81768 -};
81769 +} __do_const;
81770
81771 int sock_diag_register(const struct sock_diag_handler *h);
81772 void sock_diag_unregister(const struct sock_diag_handler *h);
81773 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
81774 index 680f9a3..f13aeb0 100644
81775 --- a/include/linux/sonet.h
81776 +++ b/include/linux/sonet.h
81777 @@ -7,7 +7,7 @@
81778 #include <uapi/linux/sonet.h>
81779
81780 struct k_sonet_stats {
81781 -#define __HANDLE_ITEM(i) atomic_t i
81782 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
81783 __SONET_ITEMS
81784 #undef __HANDLE_ITEM
81785 };
81786 diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
81787 index 07d8e53..dc934c9 100644
81788 --- a/include/linux/sunrpc/addr.h
81789 +++ b/include/linux/sunrpc/addr.h
81790 @@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
81791 {
81792 switch (sap->sa_family) {
81793 case AF_INET:
81794 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
81795 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
81796 case AF_INET6:
81797 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
81798 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
81799 }
81800 return 0;
81801 }
81802 @@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
81803 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
81804 const struct sockaddr *src)
81805 {
81806 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
81807 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
81808 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
81809
81810 dsin->sin_family = ssin->sin_family;
81811 @@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
81812 if (sa->sa_family != AF_INET6)
81813 return 0;
81814
81815 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
81816 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
81817 }
81818
81819 #endif /* _LINUX_SUNRPC_ADDR_H */
81820 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
81821 index 8af2804..c7414ef 100644
81822 --- a/include/linux/sunrpc/clnt.h
81823 +++ b/include/linux/sunrpc/clnt.h
81824 @@ -97,7 +97,7 @@ struct rpc_procinfo {
81825 unsigned int p_timer; /* Which RTT timer to use */
81826 u32 p_statidx; /* Which procedure to account */
81827 const char * p_name; /* name of procedure */
81828 -};
81829 +} __do_const;
81830
81831 #ifdef __KERNEL__
81832
81833 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
81834 index 6eecfc2..7ada79d 100644
81835 --- a/include/linux/sunrpc/svc.h
81836 +++ b/include/linux/sunrpc/svc.h
81837 @@ -410,7 +410,7 @@ struct svc_procedure {
81838 unsigned int pc_count; /* call count */
81839 unsigned int pc_cachetype; /* cache info (NFS) */
81840 unsigned int pc_xdrressize; /* maximum size of XDR reply */
81841 -};
81842 +} __do_const;
81843
81844 /*
81845 * Function prototypes.
81846 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
81847 index 0b8e3e6..33e0a01 100644
81848 --- a/include/linux/sunrpc/svc_rdma.h
81849 +++ b/include/linux/sunrpc/svc_rdma.h
81850 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
81851 extern unsigned int svcrdma_max_requests;
81852 extern unsigned int svcrdma_max_req_size;
81853
81854 -extern atomic_t rdma_stat_recv;
81855 -extern atomic_t rdma_stat_read;
81856 -extern atomic_t rdma_stat_write;
81857 -extern atomic_t rdma_stat_sq_starve;
81858 -extern atomic_t rdma_stat_rq_starve;
81859 -extern atomic_t rdma_stat_rq_poll;
81860 -extern atomic_t rdma_stat_rq_prod;
81861 -extern atomic_t rdma_stat_sq_poll;
81862 -extern atomic_t rdma_stat_sq_prod;
81863 +extern atomic_unchecked_t rdma_stat_recv;
81864 +extern atomic_unchecked_t rdma_stat_read;
81865 +extern atomic_unchecked_t rdma_stat_write;
81866 +extern atomic_unchecked_t rdma_stat_sq_starve;
81867 +extern atomic_unchecked_t rdma_stat_rq_starve;
81868 +extern atomic_unchecked_t rdma_stat_rq_poll;
81869 +extern atomic_unchecked_t rdma_stat_rq_prod;
81870 +extern atomic_unchecked_t rdma_stat_sq_poll;
81871 +extern atomic_unchecked_t rdma_stat_sq_prod;
81872
81873 #define RPCRDMA_VERSION 1
81874
81875 diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
81876 index 8d71d65..f79586e 100644
81877 --- a/include/linux/sunrpc/svcauth.h
81878 +++ b/include/linux/sunrpc/svcauth.h
81879 @@ -120,7 +120,7 @@ struct auth_ops {
81880 int (*release)(struct svc_rqst *rq);
81881 void (*domain_release)(struct auth_domain *);
81882 int (*set_client)(struct svc_rqst *rq);
81883 -};
81884 +} __do_const;
81885
81886 #define SVC_GARBAGE 1
81887 #define SVC_SYSERR 2
81888 diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
81889 index a5ffd32..0935dea 100644
81890 --- a/include/linux/swiotlb.h
81891 +++ b/include/linux/swiotlb.h
81892 @@ -60,7 +60,8 @@ extern void
81893
81894 extern void
81895 swiotlb_free_coherent(struct device *hwdev, size_t size,
81896 - void *vaddr, dma_addr_t dma_handle);
81897 + void *vaddr, dma_addr_t dma_handle,
81898 + struct dma_attrs *attrs);
81899
81900 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
81901 unsigned long offset, size_t size,
81902 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
81903 index 94273bb..c2e05fc 100644
81904 --- a/include/linux/syscalls.h
81905 +++ b/include/linux/syscalls.h
81906 @@ -97,8 +97,14 @@ struct sigaltstack;
81907 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
81908
81909 #define __SC_DECL(t, a) t a
81910 +#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
81911 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
81912 -#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
81913 +#define __SC_LONG(t, a) __typeof( \
81914 + __builtin_choose_expr( \
81915 + sizeof(t) > sizeof(int), \
81916 + (t) 0, \
81917 + __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
81918 + )) a
81919 #define __SC_CAST(t, a) (t) a
81920 #define __SC_ARGS(t, a) a
81921 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
81922 @@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
81923 asmlinkage long sys_fsync(unsigned int fd);
81924 asmlinkage long sys_fdatasync(unsigned int fd);
81925 asmlinkage long sys_bdflush(int func, long data);
81926 -asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
81927 - char __user *type, unsigned long flags,
81928 +asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
81929 + const char __user *type, unsigned long flags,
81930 void __user *data);
81931 -asmlinkage long sys_umount(char __user *name, int flags);
81932 -asmlinkage long sys_oldumount(char __user *name);
81933 +asmlinkage long sys_umount(const char __user *name, int flags);
81934 +asmlinkage long sys_oldumount(const char __user *name);
81935 asmlinkage long sys_truncate(const char __user *path, long length);
81936 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
81937 asmlinkage long sys_stat(const char __user *filename,
81938 @@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
81939 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
81940 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
81941 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
81942 - struct sockaddr __user *, int);
81943 + struct sockaddr __user *, int) __intentional_overflow(0);
81944 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
81945 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
81946 unsigned int vlen, unsigned flags);
81947 diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
81948 index 27b3b0b..e093dd9 100644
81949 --- a/include/linux/syscore_ops.h
81950 +++ b/include/linux/syscore_ops.h
81951 @@ -16,7 +16,7 @@ struct syscore_ops {
81952 int (*suspend)(void);
81953 void (*resume)(void);
81954 void (*shutdown)(void);
81955 -};
81956 +} __do_const;
81957
81958 extern void register_syscore_ops(struct syscore_ops *ops);
81959 extern void unregister_syscore_ops(struct syscore_ops *ops);
81960 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
81961 index 14a8ff2..fa95f3a 100644
81962 --- a/include/linux/sysctl.h
81963 +++ b/include/linux/sysctl.h
81964 @@ -34,13 +34,13 @@ struct ctl_table_root;
81965 struct ctl_table_header;
81966 struct ctl_dir;
81967
81968 -typedef struct ctl_table ctl_table;
81969 -
81970 typedef int proc_handler (struct ctl_table *ctl, int write,
81971 void __user *buffer, size_t *lenp, loff_t *ppos);
81972
81973 extern int proc_dostring(struct ctl_table *, int,
81974 void __user *, size_t *, loff_t *);
81975 +extern int proc_dostring_modpriv(struct ctl_table *, int,
81976 + void __user *, size_t *, loff_t *);
81977 extern int proc_dointvec(struct ctl_table *, int,
81978 void __user *, size_t *, loff_t *);
81979 extern int proc_dointvec_minmax(struct ctl_table *, int,
81980 @@ -115,7 +115,9 @@ struct ctl_table
81981 struct ctl_table_poll *poll;
81982 void *extra1;
81983 void *extra2;
81984 -};
81985 +} __do_const __randomize_layout;
81986 +typedef struct ctl_table __no_const ctl_table_no_const;
81987 +typedef struct ctl_table ctl_table;
81988
81989 struct ctl_node {
81990 struct rb_node node;
81991 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
81992 index 6695040..3d4192d 100644
81993 --- a/include/linux/sysfs.h
81994 +++ b/include/linux/sysfs.h
81995 @@ -33,7 +33,8 @@ struct attribute {
81996 struct lock_class_key *key;
81997 struct lock_class_key skey;
81998 #endif
81999 -};
82000 +} __do_const;
82001 +typedef struct attribute __no_const attribute_no_const;
82002
82003 /**
82004 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
82005 @@ -62,7 +63,8 @@ struct attribute_group {
82006 struct attribute *, int);
82007 struct attribute **attrs;
82008 struct bin_attribute **bin_attrs;
82009 -};
82010 +} __do_const;
82011 +typedef struct attribute_group __no_const attribute_group_no_const;
82012
82013 /**
82014 * Use these macros to make defining attributes easier. See include/linux/device.h
82015 @@ -126,7 +128,8 @@ struct bin_attribute {
82016 char *, loff_t, size_t);
82017 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
82018 struct vm_area_struct *vma);
82019 -};
82020 +} __do_const;
82021 +typedef struct bin_attribute __no_const bin_attribute_no_const;
82022
82023 /**
82024 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
82025 diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
82026 index 387fa7d..3fcde6b 100644
82027 --- a/include/linux/sysrq.h
82028 +++ b/include/linux/sysrq.h
82029 @@ -16,6 +16,7 @@
82030
82031 #include <linux/errno.h>
82032 #include <linux/types.h>
82033 +#include <linux/compiler.h>
82034
82035 /* Possible values of bitmask for enabling sysrq functions */
82036 /* 0x0001 is reserved for enable everything */
82037 @@ -33,7 +34,7 @@ struct sysrq_key_op {
82038 char *help_msg;
82039 char *action_msg;
82040 int enable_mask;
82041 -};
82042 +} __do_const;
82043
82044 #ifdef CONFIG_MAGIC_SYSRQ
82045
82046 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
82047 index fddbe20..0312de8 100644
82048 --- a/include/linux/thread_info.h
82049 +++ b/include/linux/thread_info.h
82050 @@ -161,6 +161,15 @@ static inline bool test_and_clear_restore_sigmask(void)
82051 #error "no set_restore_sigmask() provided and default one won't work"
82052 #endif
82053
82054 +extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
82055 +static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
82056 +{
82057 +#ifndef CONFIG_PAX_USERCOPY_DEBUG
82058 + if (!__builtin_constant_p(n))
82059 +#endif
82060 + __check_object_size(ptr, n, to_user);
82061 +}
82062 +
82063 #endif /* __KERNEL__ */
82064
82065 #endif /* _LINUX_THREAD_INFO_H */
82066 diff --git a/include/linux/tty.h b/include/linux/tty.h
82067 index 97d660e..6356755 100644
82068 --- a/include/linux/tty.h
82069 +++ b/include/linux/tty.h
82070 @@ -196,7 +196,7 @@ struct tty_port {
82071 const struct tty_port_operations *ops; /* Port operations */
82072 spinlock_t lock; /* Lock protecting tty field */
82073 int blocked_open; /* Waiting to open */
82074 - int count; /* Usage count */
82075 + atomic_t count; /* Usage count */
82076 wait_queue_head_t open_wait; /* Open waiters */
82077 wait_queue_head_t close_wait; /* Close waiters */
82078 wait_queue_head_t delta_msr_wait; /* Modem status change */
82079 @@ -278,7 +278,7 @@ struct tty_struct {
82080 /* If the tty has a pending do_SAK, queue it here - akpm */
82081 struct work_struct SAK_work;
82082 struct tty_port *port;
82083 -};
82084 +} __randomize_layout;
82085
82086 /* Each of a tty's open files has private_data pointing to tty_file_private */
82087 struct tty_file_private {
82088 @@ -545,7 +545,7 @@ extern int tty_port_open(struct tty_port *port,
82089 struct tty_struct *tty, struct file *filp);
82090 static inline int tty_port_users(struct tty_port *port)
82091 {
82092 - return port->count + port->blocked_open;
82093 + return atomic_read(&port->count) + port->blocked_open;
82094 }
82095
82096 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
82097 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
82098 index 756a609..f61242d 100644
82099 --- a/include/linux/tty_driver.h
82100 +++ b/include/linux/tty_driver.h
82101 @@ -285,7 +285,7 @@ struct tty_operations {
82102 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
82103 #endif
82104 const struct file_operations *proc_fops;
82105 -};
82106 +} __do_const;
82107
82108 struct tty_driver {
82109 int magic; /* magic number for this structure */
82110 @@ -319,7 +319,7 @@ struct tty_driver {
82111
82112 const struct tty_operations *ops;
82113 struct list_head tty_drivers;
82114 -};
82115 +} __randomize_layout;
82116
82117 extern struct list_head tty_drivers;
82118
82119 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
82120 index f15c898..207b7d1 100644
82121 --- a/include/linux/tty_ldisc.h
82122 +++ b/include/linux/tty_ldisc.h
82123 @@ -211,7 +211,7 @@ struct tty_ldisc_ops {
82124
82125 struct module *owner;
82126
82127 - int refcount;
82128 + atomic_t refcount;
82129 };
82130
82131 struct tty_ldisc {
82132 diff --git a/include/linux/types.h b/include/linux/types.h
82133 index 4d118ba..c3ee9bf 100644
82134 --- a/include/linux/types.h
82135 +++ b/include/linux/types.h
82136 @@ -176,10 +176,26 @@ typedef struct {
82137 int counter;
82138 } atomic_t;
82139
82140 +#ifdef CONFIG_PAX_REFCOUNT
82141 +typedef struct {
82142 + int counter;
82143 +} atomic_unchecked_t;
82144 +#else
82145 +typedef atomic_t atomic_unchecked_t;
82146 +#endif
82147 +
82148 #ifdef CONFIG_64BIT
82149 typedef struct {
82150 long counter;
82151 } atomic64_t;
82152 +
82153 +#ifdef CONFIG_PAX_REFCOUNT
82154 +typedef struct {
82155 + long counter;
82156 +} atomic64_unchecked_t;
82157 +#else
82158 +typedef atomic64_t atomic64_unchecked_t;
82159 +#endif
82160 #endif
82161
82162 struct list_head {
82163 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
82164 index 9d8cf05..0ed74dd 100644
82165 --- a/include/linux/uaccess.h
82166 +++ b/include/linux/uaccess.h
82167 @@ -72,11 +72,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
82168 long ret; \
82169 mm_segment_t old_fs = get_fs(); \
82170 \
82171 - set_fs(KERNEL_DS); \
82172 pagefault_disable(); \
82173 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
82174 - pagefault_enable(); \
82175 + set_fs(KERNEL_DS); \
82176 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
82177 set_fs(old_fs); \
82178 + pagefault_enable(); \
82179 ret; \
82180 })
82181
82182 diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
82183 index 8e522cbc..aa8572d 100644
82184 --- a/include/linux/uidgid.h
82185 +++ b/include/linux/uidgid.h
82186 @@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
82187
82188 #endif /* CONFIG_USER_NS */
82189
82190 +#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
82191 +#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
82192 +#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
82193 +#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
82194 +
82195 #endif /* _LINUX_UIDGID_H */
82196 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
82197 index 99c1b4d..562e6f3 100644
82198 --- a/include/linux/unaligned/access_ok.h
82199 +++ b/include/linux/unaligned/access_ok.h
82200 @@ -4,34 +4,34 @@
82201 #include <linux/kernel.h>
82202 #include <asm/byteorder.h>
82203
82204 -static inline u16 get_unaligned_le16(const void *p)
82205 +static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
82206 {
82207 - return le16_to_cpup((__le16 *)p);
82208 + return le16_to_cpup((const __le16 *)p);
82209 }
82210
82211 -static inline u32 get_unaligned_le32(const void *p)
82212 +static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
82213 {
82214 - return le32_to_cpup((__le32 *)p);
82215 + return le32_to_cpup((const __le32 *)p);
82216 }
82217
82218 -static inline u64 get_unaligned_le64(const void *p)
82219 +static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
82220 {
82221 - return le64_to_cpup((__le64 *)p);
82222 + return le64_to_cpup((const __le64 *)p);
82223 }
82224
82225 -static inline u16 get_unaligned_be16(const void *p)
82226 +static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
82227 {
82228 - return be16_to_cpup((__be16 *)p);
82229 + return be16_to_cpup((const __be16 *)p);
82230 }
82231
82232 -static inline u32 get_unaligned_be32(const void *p)
82233 +static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
82234 {
82235 - return be32_to_cpup((__be32 *)p);
82236 + return be32_to_cpup((const __be32 *)p);
82237 }
82238
82239 -static inline u64 get_unaligned_be64(const void *p)
82240 +static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
82241 {
82242 - return be64_to_cpup((__be64 *)p);
82243 + return be64_to_cpup((const __be64 *)p);
82244 }
82245
82246 static inline void put_unaligned_le16(u16 val, void *p)
82247 diff --git a/include/linux/usb.h b/include/linux/usb.h
82248 index 7454865..29f4bfa 100644
82249 --- a/include/linux/usb.h
82250 +++ b/include/linux/usb.h
82251 @@ -563,7 +563,7 @@ struct usb_device {
82252 int maxchild;
82253
82254 u32 quirks;
82255 - atomic_t urbnum;
82256 + atomic_unchecked_t urbnum;
82257
82258 unsigned long active_duration;
82259
82260 @@ -1641,7 +1641,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
82261
82262 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
82263 __u8 request, __u8 requesttype, __u16 value, __u16 index,
82264 - void *data, __u16 size, int timeout);
82265 + void *data, __u16 size, int timeout) __intentional_overflow(-1);
82266 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
82267 void *data, int len, int *actual_length, int timeout);
82268 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
82269 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
82270 index e452ba6..78f8e80 100644
82271 --- a/include/linux/usb/renesas_usbhs.h
82272 +++ b/include/linux/usb/renesas_usbhs.h
82273 @@ -39,7 +39,7 @@ enum {
82274 */
82275 struct renesas_usbhs_driver_callback {
82276 int (*notify_hotplug)(struct platform_device *pdev);
82277 -};
82278 +} __no_const;
82279
82280 /*
82281 * callback functions for platform
82282 diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
82283 index 4836ba3..603f6ee 100644
82284 --- a/include/linux/user_namespace.h
82285 +++ b/include/linux/user_namespace.h
82286 @@ -33,7 +33,7 @@ struct user_namespace {
82287 struct key *persistent_keyring_register;
82288 struct rw_semaphore persistent_keyring_register_sem;
82289 #endif
82290 -};
82291 +} __randomize_layout;
82292
82293 extern struct user_namespace init_user_ns;
82294
82295 diff --git a/include/linux/utsname.h b/include/linux/utsname.h
82296 index 239e277..22a5cf5 100644
82297 --- a/include/linux/utsname.h
82298 +++ b/include/linux/utsname.h
82299 @@ -24,7 +24,7 @@ struct uts_namespace {
82300 struct new_utsname name;
82301 struct user_namespace *user_ns;
82302 unsigned int proc_inum;
82303 -};
82304 +} __randomize_layout;
82305 extern struct uts_namespace init_uts_ns;
82306
82307 #ifdef CONFIG_UTS_NS
82308 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
82309 index 6f8fbcf..4efc177 100644
82310 --- a/include/linux/vermagic.h
82311 +++ b/include/linux/vermagic.h
82312 @@ -25,9 +25,42 @@
82313 #define MODULE_ARCH_VERMAGIC ""
82314 #endif
82315
82316 +#ifdef CONFIG_PAX_REFCOUNT
82317 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
82318 +#else
82319 +#define MODULE_PAX_REFCOUNT ""
82320 +#endif
82321 +
82322 +#ifdef CONSTIFY_PLUGIN
82323 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
82324 +#else
82325 +#define MODULE_CONSTIFY_PLUGIN ""
82326 +#endif
82327 +
82328 +#ifdef STACKLEAK_PLUGIN
82329 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
82330 +#else
82331 +#define MODULE_STACKLEAK_PLUGIN ""
82332 +#endif
82333 +
82334 +#ifdef RANDSTRUCT_PLUGIN
82335 +#include <generated/randomize_layout_hash.h>
82336 +#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
82337 +#else
82338 +#define MODULE_RANDSTRUCT_PLUGIN
82339 +#endif
82340 +
82341 +#ifdef CONFIG_GRKERNSEC
82342 +#define MODULE_GRSEC "GRSEC "
82343 +#else
82344 +#define MODULE_GRSEC ""
82345 +#endif
82346 +
82347 #define VERMAGIC_STRING \
82348 UTS_RELEASE " " \
82349 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
82350 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
82351 - MODULE_ARCH_VERMAGIC
82352 + MODULE_ARCH_VERMAGIC \
82353 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
82354 + MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
82355
82356 diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
82357 index 502073a..a7de024 100644
82358 --- a/include/linux/vga_switcheroo.h
82359 +++ b/include/linux/vga_switcheroo.h
82360 @@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
82361
82362 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
82363
82364 -int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
82365 -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
82366 +int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
82367 +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
82368 #else
82369
82370 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
82371 @@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
82372
82373 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
82374
82375 -static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
82376 -static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
82377 +static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
82378 +static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
82379
82380 #endif
82381 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
82382 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
82383 index 4b8a891..cb8df6e 100644
82384 --- a/include/linux/vmalloc.h
82385 +++ b/include/linux/vmalloc.h
82386 @@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
82387 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
82388 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
82389 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
82390 +
82391 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
82392 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
82393 +#endif
82394 +
82395 /* bits [20..32] reserved for arch specific ioremap internals */
82396
82397 /*
82398 @@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
82399
82400 /* for /dev/kmem */
82401 extern long vread(char *buf, char *addr, unsigned long count);
82402 -extern long vwrite(char *buf, char *addr, unsigned long count);
82403 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
82404
82405 /*
82406 * Internals. Dont't use..
82407 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
82408 index a67b384..f52a537 100644
82409 --- a/include/linux/vmstat.h
82410 +++ b/include/linux/vmstat.h
82411 @@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
82412 /*
82413 * Zone based page accounting with per cpu differentials.
82414 */
82415 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
82416 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
82417
82418 static inline void zone_page_state_add(long x, struct zone *zone,
82419 enum zone_stat_item item)
82420 {
82421 - atomic_long_add(x, &zone->vm_stat[item]);
82422 - atomic_long_add(x, &vm_stat[item]);
82423 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
82424 + atomic_long_add_unchecked(x, &vm_stat[item]);
82425 }
82426
82427 -static inline unsigned long global_page_state(enum zone_stat_item item)
82428 +static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
82429 {
82430 - long x = atomic_long_read(&vm_stat[item]);
82431 + long x = atomic_long_read_unchecked(&vm_stat[item]);
82432 #ifdef CONFIG_SMP
82433 if (x < 0)
82434 x = 0;
82435 @@ -109,10 +109,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
82436 return x;
82437 }
82438
82439 -static inline unsigned long zone_page_state(struct zone *zone,
82440 +static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
82441 enum zone_stat_item item)
82442 {
82443 - long x = atomic_long_read(&zone->vm_stat[item]);
82444 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
82445 #ifdef CONFIG_SMP
82446 if (x < 0)
82447 x = 0;
82448 @@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
82449 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
82450 enum zone_stat_item item)
82451 {
82452 - long x = atomic_long_read(&zone->vm_stat[item]);
82453 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
82454
82455 #ifdef CONFIG_SMP
82456 int cpu;
82457 @@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
82458
82459 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
82460 {
82461 - atomic_long_inc(&zone->vm_stat[item]);
82462 - atomic_long_inc(&vm_stat[item]);
82463 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
82464 + atomic_long_inc_unchecked(&vm_stat[item]);
82465 }
82466
82467 static inline void __inc_zone_page_state(struct page *page,
82468 @@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
82469
82470 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
82471 {
82472 - atomic_long_dec(&zone->vm_stat[item]);
82473 - atomic_long_dec(&vm_stat[item]);
82474 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
82475 + atomic_long_dec_unchecked(&vm_stat[item]);
82476 }
82477
82478 static inline void __dec_zone_page_state(struct page *page,
82479 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
82480 index 91b0a68..0e9adf6 100644
82481 --- a/include/linux/xattr.h
82482 +++ b/include/linux/xattr.h
82483 @@ -28,7 +28,7 @@ struct xattr_handler {
82484 size_t size, int handler_flags);
82485 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
82486 size_t size, int flags, int handler_flags);
82487 -};
82488 +} __do_const;
82489
82490 struct xattr {
82491 const char *name;
82492 @@ -37,6 +37,9 @@ struct xattr {
82493 };
82494
82495 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
82496 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
82497 +ssize_t pax_getxattr(struct dentry *, void *, size_t);
82498 +#endif
82499 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
82500 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
82501 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
82502 diff --git a/include/linux/zlib.h b/include/linux/zlib.h
82503 index 9c5a6b4..09c9438 100644
82504 --- a/include/linux/zlib.h
82505 +++ b/include/linux/zlib.h
82506 @@ -31,6 +31,7 @@
82507 #define _ZLIB_H
82508
82509 #include <linux/zconf.h>
82510 +#include <linux/compiler.h>
82511
82512 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
82513 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
82514 @@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
82515
82516 /* basic functions */
82517
82518 -extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
82519 +extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
82520 /*
82521 Returns the number of bytes that needs to be allocated for a per-
82522 stream workspace with the specified parameters. A pointer to this
82523 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
82524 index c768c9f..bdcaa5a 100644
82525 --- a/include/media/v4l2-dev.h
82526 +++ b/include/media/v4l2-dev.h
82527 @@ -76,7 +76,7 @@ struct v4l2_file_operations {
82528 int (*mmap) (struct file *, struct vm_area_struct *);
82529 int (*open) (struct file *);
82530 int (*release) (struct file *);
82531 -};
82532 +} __do_const;
82533
82534 /*
82535 * Newer version of video_device, handled by videodev2.c
82536 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
82537 index c9b1593..a572459 100644
82538 --- a/include/media/v4l2-device.h
82539 +++ b/include/media/v4l2-device.h
82540 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
82541 this function returns 0. If the name ends with a digit (e.g. cx18),
82542 then the name will be set to cx18-0 since cx180 looks really odd. */
82543 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
82544 - atomic_t *instance);
82545 + atomic_unchecked_t *instance);
82546
82547 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
82548 Since the parent disappears this ensures that v4l2_dev doesn't have an
82549 diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
82550 index 9a36d92..0aafe2a 100644
82551 --- a/include/net/9p/transport.h
82552 +++ b/include/net/9p/transport.h
82553 @@ -60,7 +60,7 @@ struct p9_trans_module {
82554 int (*cancel) (struct p9_client *, struct p9_req_t *req);
82555 int (*zc_request)(struct p9_client *, struct p9_req_t *,
82556 char *, char *, int , int, int, int);
82557 -};
82558 +} __do_const;
82559
82560 void v9fs_register_trans(struct p9_trans_module *m);
82561 void v9fs_unregister_trans(struct p9_trans_module *m);
82562 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
82563 index c853b16d..37fccb7 100644
82564 --- a/include/net/bluetooth/l2cap.h
82565 +++ b/include/net/bluetooth/l2cap.h
82566 @@ -557,7 +557,7 @@ struct l2cap_ops {
82567 long (*get_sndtimeo) (struct l2cap_chan *chan);
82568 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
82569 unsigned long len, int nb);
82570 -};
82571 +} __do_const;
82572
82573 struct l2cap_conn {
82574 struct hci_conn *hcon;
82575 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
82576 index f2ae33d..c457cf0 100644
82577 --- a/include/net/caif/cfctrl.h
82578 +++ b/include/net/caif/cfctrl.h
82579 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
82580 void (*radioset_rsp)(void);
82581 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
82582 struct cflayer *client_layer);
82583 -};
82584 +} __no_const;
82585
82586 /* Link Setup Parameters for CAIF-Links. */
82587 struct cfctrl_link_param {
82588 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
82589 struct cfctrl {
82590 struct cfsrvl serv;
82591 struct cfctrl_rsp res;
82592 - atomic_t req_seq_no;
82593 - atomic_t rsp_seq_no;
82594 + atomic_unchecked_t req_seq_no;
82595 + atomic_unchecked_t rsp_seq_no;
82596 struct list_head list;
82597 /* Protects from simultaneous access to first_req list */
82598 spinlock_t info_list_lock;
82599 diff --git a/include/net/flow.h b/include/net/flow.h
82600 index 65ce471..b7bbe9b 100644
82601 --- a/include/net/flow.h
82602 +++ b/include/net/flow.h
82603 @@ -222,6 +222,6 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
82604
82605 void flow_cache_flush(void);
82606 void flow_cache_flush_deferred(void);
82607 -extern atomic_t flow_cache_genid;
82608 +extern atomic_unchecked_t flow_cache_genid;
82609
82610 #endif
82611 diff --git a/include/net/genetlink.h b/include/net/genetlink.h
82612 index 1b177ed..a24a138 100644
82613 --- a/include/net/genetlink.h
82614 +++ b/include/net/genetlink.h
82615 @@ -118,7 +118,7 @@ struct genl_ops {
82616 u8 cmd;
82617 u8 internal_flags;
82618 u8 flags;
82619 -};
82620 +} __do_const;
82621
82622 int __genl_register_family(struct genl_family *family);
82623
82624 diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
82625 index 734d9b5..48a9a4b 100644
82626 --- a/include/net/gro_cells.h
82627 +++ b/include/net/gro_cells.h
82628 @@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
82629 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
82630
82631 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
82632 - atomic_long_inc(&dev->rx_dropped);
82633 + atomic_long_inc_unchecked(&dev->rx_dropped);
82634 kfree_skb(skb);
82635 return;
82636 }
82637 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
82638 index c55aeed..b3393f4 100644
82639 --- a/include/net/inet_connection_sock.h
82640 +++ b/include/net/inet_connection_sock.h
82641 @@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
82642 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
82643 int (*bind_conflict)(const struct sock *sk,
82644 const struct inet_bind_bucket *tb, bool relax);
82645 -};
82646 +} __do_const;
82647
82648 /** inet_connection_sock - INET connection oriented sock
82649 *
82650 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
82651 index f4e127a..c3d5e9c 100644
82652 --- a/include/net/inetpeer.h
82653 +++ b/include/net/inetpeer.h
82654 @@ -47,8 +47,8 @@ struct inet_peer {
82655 */
82656 union {
82657 struct {
82658 - atomic_t rid; /* Frag reception counter */
82659 - atomic_t ip_id_count; /* IP ID for the next packet */
82660 + atomic_unchecked_t rid; /* Frag reception counter */
82661 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
82662 };
82663 struct rcu_head rcu;
82664 struct inet_peer *gc_next;
82665 @@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
82666 /* can be called with or without local BH being disabled */
82667 static inline int inet_getid(struct inet_peer *p, int more)
82668 {
82669 - int old, new;
82670 + int id;
82671 more++;
82672 inet_peer_refcheck(p);
82673 - do {
82674 - old = atomic_read(&p->ip_id_count);
82675 - new = old + more;
82676 - if (!new)
82677 - new = 1;
82678 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
82679 - return new;
82680 + id = atomic_add_return_unchecked(more, &p->ip_id_count);
82681 + if (!id)
82682 + id = atomic_inc_return_unchecked(&p->ip_id_count);
82683 + return id;
82684 }
82685
82686 #endif /* _NET_INETPEER_H */
82687 diff --git a/include/net/ip.h b/include/net/ip.h
82688 index 5a25f36..2e73203 100644
82689 --- a/include/net/ip.h
82690 +++ b/include/net/ip.h
82691 @@ -219,7 +219,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
82692
82693 void inet_get_local_port_range(struct net *net, int *low, int *high);
82694
82695 -extern unsigned long *sysctl_local_reserved_ports;
82696 +extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
82697 static inline int inet_is_reserved_local_port(int port)
82698 {
82699 return test_bit(port, sysctl_local_reserved_ports);
82700 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
82701 index 9922093..a1755d6 100644
82702 --- a/include/net/ip_fib.h
82703 +++ b/include/net/ip_fib.h
82704 @@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
82705
82706 #define FIB_RES_SADDR(net, res) \
82707 ((FIB_RES_NH(res).nh_saddr_genid == \
82708 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
82709 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
82710 FIB_RES_NH(res).nh_saddr : \
82711 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
82712 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
82713 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
82714 index 5679d92..2e7a690 100644
82715 --- a/include/net/ip_vs.h
82716 +++ b/include/net/ip_vs.h
82717 @@ -558,7 +558,7 @@ struct ip_vs_conn {
82718 struct ip_vs_conn *control; /* Master control connection */
82719 atomic_t n_control; /* Number of controlled ones */
82720 struct ip_vs_dest *dest; /* real server */
82721 - atomic_t in_pkts; /* incoming packet counter */
82722 + atomic_unchecked_t in_pkts; /* incoming packet counter */
82723
82724 /* packet transmitter for different forwarding methods. If it
82725 mangles the packet, it must return NF_DROP or better NF_STOLEN,
82726 @@ -705,7 +705,7 @@ struct ip_vs_dest {
82727 __be16 port; /* port number of the server */
82728 union nf_inet_addr addr; /* IP address of the server */
82729 volatile unsigned int flags; /* dest status flags */
82730 - atomic_t conn_flags; /* flags to copy to conn */
82731 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
82732 atomic_t weight; /* server weight */
82733
82734 atomic_t refcnt; /* reference counter */
82735 @@ -960,11 +960,11 @@ struct netns_ipvs {
82736 /* ip_vs_lblc */
82737 int sysctl_lblc_expiration;
82738 struct ctl_table_header *lblc_ctl_header;
82739 - struct ctl_table *lblc_ctl_table;
82740 + ctl_table_no_const *lblc_ctl_table;
82741 /* ip_vs_lblcr */
82742 int sysctl_lblcr_expiration;
82743 struct ctl_table_header *lblcr_ctl_header;
82744 - struct ctl_table *lblcr_ctl_table;
82745 + ctl_table_no_const *lblcr_ctl_table;
82746 /* ip_vs_est */
82747 struct list_head est_list; /* estimator list */
82748 spinlock_t est_lock;
82749 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
82750 index 0224402..dafaf94a 100644
82751 --- a/include/net/irda/ircomm_tty.h
82752 +++ b/include/net/irda/ircomm_tty.h
82753 @@ -35,6 +35,7 @@
82754 #include <linux/termios.h>
82755 #include <linux/timer.h>
82756 #include <linux/tty.h> /* struct tty_struct */
82757 +#include <asm/local.h>
82758
82759 #include <net/irda/irias_object.h>
82760 #include <net/irda/ircomm_core.h>
82761 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
82762 index 714cc9a..ea05f3e 100644
82763 --- a/include/net/iucv/af_iucv.h
82764 +++ b/include/net/iucv/af_iucv.h
82765 @@ -149,7 +149,7 @@ struct iucv_skb_cb {
82766 struct iucv_sock_list {
82767 struct hlist_head head;
82768 rwlock_t lock;
82769 - atomic_t autobind_name;
82770 + atomic_unchecked_t autobind_name;
82771 };
82772
82773 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
82774 diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
82775 index f3be818..bf46196 100644
82776 --- a/include/net/llc_c_ac.h
82777 +++ b/include/net/llc_c_ac.h
82778 @@ -87,7 +87,7 @@
82779 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
82780 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
82781
82782 -typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
82783 +typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
82784
82785 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
82786 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
82787 diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
82788 index 3948cf1..83b28c4 100644
82789 --- a/include/net/llc_c_ev.h
82790 +++ b/include/net/llc_c_ev.h
82791 @@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
82792 return (struct llc_conn_state_ev *)skb->cb;
82793 }
82794
82795 -typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
82796 -typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
82797 +typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
82798 +typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
82799
82800 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
82801 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
82802 diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
82803 index 0e79cfb..f46db31 100644
82804 --- a/include/net/llc_c_st.h
82805 +++ b/include/net/llc_c_st.h
82806 @@ -37,7 +37,7 @@ struct llc_conn_state_trans {
82807 u8 next_state;
82808 llc_conn_ev_qfyr_t *ev_qualifiers;
82809 llc_conn_action_t *ev_actions;
82810 -};
82811 +} __do_const;
82812
82813 struct llc_conn_state {
82814 u8 current_state;
82815 diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
82816 index a61b98c..aade1eb 100644
82817 --- a/include/net/llc_s_ac.h
82818 +++ b/include/net/llc_s_ac.h
82819 @@ -23,7 +23,7 @@
82820 #define SAP_ACT_TEST_IND 9
82821
82822 /* All action functions must look like this */
82823 -typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
82824 +typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
82825
82826 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
82827 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
82828 diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
82829 index 567c681..cd73ac0 100644
82830 --- a/include/net/llc_s_st.h
82831 +++ b/include/net/llc_s_st.h
82832 @@ -20,7 +20,7 @@ struct llc_sap_state_trans {
82833 llc_sap_ev_t ev;
82834 u8 next_state;
82835 llc_sap_action_t *ev_actions;
82836 -};
82837 +} __do_const;
82838
82839 struct llc_sap_state {
82840 u8 curr_state;
82841 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
82842 index 7ceed99..d3ffaa2 100644
82843 --- a/include/net/mac80211.h
82844 +++ b/include/net/mac80211.h
82845 @@ -4407,7 +4407,7 @@ struct rate_control_ops {
82846 void (*add_sta_debugfs)(void *priv, void *priv_sta,
82847 struct dentry *dir);
82848 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
82849 -};
82850 +} __do_const;
82851
82852 static inline int rate_supported(struct ieee80211_sta *sta,
82853 enum ieee80211_band band,
82854 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
82855 index 536501a..47b7982 100644
82856 --- a/include/net/neighbour.h
82857 +++ b/include/net/neighbour.h
82858 @@ -123,7 +123,7 @@ struct neigh_ops {
82859 void (*error_report)(struct neighbour *, struct sk_buff *);
82860 int (*output)(struct neighbour *, struct sk_buff *);
82861 int (*connected_output)(struct neighbour *, struct sk_buff *);
82862 -};
82863 +} __do_const;
82864
82865 struct pneigh_entry {
82866 struct pneigh_entry *next;
82867 @@ -163,7 +163,6 @@ struct neigh_table {
82868 void (*proxy_redo)(struct sk_buff *skb);
82869 char *id;
82870 struct neigh_parms parms;
82871 - /* HACK. gc_* should follow parms without a gap! */
82872 int gc_interval;
82873 int gc_thresh1;
82874 int gc_thresh2;
82875 @@ -178,7 +177,7 @@ struct neigh_table {
82876 struct neigh_statistics __percpu *stats;
82877 struct neigh_hash_table __rcu *nht;
82878 struct pneigh_entry **phash_buckets;
82879 -};
82880 +} __randomize_layout;
82881
82882 #define NEIGH_PRIV_ALIGN sizeof(long long)
82883 #define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
82884 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
82885 index da68c9a..c4a0720 100644
82886 --- a/include/net/net_namespace.h
82887 +++ b/include/net/net_namespace.h
82888 @@ -124,8 +124,8 @@ struct net {
82889 struct netns_ipvs *ipvs;
82890 #endif
82891 struct sock *diag_nlsk;
82892 - atomic_t fnhe_genid;
82893 -};
82894 + atomic_unchecked_t fnhe_genid;
82895 +} __randomize_layout;
82896
82897 /*
82898 * ifindex generation is per-net namespace, and loopback is
82899 @@ -281,7 +281,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
82900 #define __net_init __init
82901 #define __net_exit __exit_refok
82902 #define __net_initdata __initdata
82903 +#ifdef CONSTIFY_PLUGIN
82904 #define __net_initconst __initconst
82905 +#else
82906 +#define __net_initconst __initdata
82907 +#endif
82908 #endif
82909
82910 struct pernet_operations {
82911 @@ -291,7 +295,7 @@ struct pernet_operations {
82912 void (*exit_batch)(struct list_head *net_exit_list);
82913 int *id;
82914 size_t size;
82915 -};
82916 +} __do_const;
82917
82918 /*
82919 * Use these carefully. If you implement a network device and it
82920 @@ -339,23 +343,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
82921
82922 static inline int rt_genid_ipv4(struct net *net)
82923 {
82924 - return atomic_read(&net->ipv4.rt_genid);
82925 + return atomic_read_unchecked(&net->ipv4.rt_genid);
82926 }
82927
82928 static inline void rt_genid_bump_ipv4(struct net *net)
82929 {
82930 - atomic_inc(&net->ipv4.rt_genid);
82931 + atomic_inc_unchecked(&net->ipv4.rt_genid);
82932 }
82933
82934 #if IS_ENABLED(CONFIG_IPV6)
82935 static inline int rt_genid_ipv6(struct net *net)
82936 {
82937 - return atomic_read(&net->ipv6.rt_genid);
82938 + return atomic_read_unchecked(&net->ipv6.rt_genid);
82939 }
82940
82941 static inline void rt_genid_bump_ipv6(struct net *net)
82942 {
82943 - atomic_inc(&net->ipv6.rt_genid);
82944 + atomic_inc_unchecked(&net->ipv6.rt_genid);
82945 }
82946 #else
82947 static inline int rt_genid_ipv6(struct net *net)
82948 @@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
82949
82950 static inline int fnhe_genid(struct net *net)
82951 {
82952 - return atomic_read(&net->fnhe_genid);
82953 + return atomic_read_unchecked(&net->fnhe_genid);
82954 }
82955
82956 static inline void fnhe_genid_bump(struct net *net)
82957 {
82958 - atomic_inc(&net->fnhe_genid);
82959 + atomic_inc_unchecked(&net->fnhe_genid);
82960 }
82961
82962 #endif /* __NET_NET_NAMESPACE_H */
82963 diff --git a/include/net/netdma.h b/include/net/netdma.h
82964 index 8ba8ce2..99b7fff 100644
82965 --- a/include/net/netdma.h
82966 +++ b/include/net/netdma.h
82967 @@ -24,7 +24,7 @@
82968 #include <linux/dmaengine.h>
82969 #include <linux/skbuff.h>
82970
82971 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82972 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82973 struct sk_buff *skb, int offset, struct iovec *to,
82974 size_t len, struct dma_pinned_list *pinned_list);
82975
82976 diff --git a/include/net/netlink.h b/include/net/netlink.h
82977 index 2b47eaa..6d5bcc2 100644
82978 --- a/include/net/netlink.h
82979 +++ b/include/net/netlink.h
82980 @@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
82981 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
82982 {
82983 if (mark)
82984 - skb_trim(skb, (unsigned char *) mark - skb->data);
82985 + skb_trim(skb, (const unsigned char *) mark - skb->data);
82986 }
82987
82988 /**
82989 diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
82990 index c9c0c53..53f24c3 100644
82991 --- a/include/net/netns/conntrack.h
82992 +++ b/include/net/netns/conntrack.h
82993 @@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
82994 struct nf_proto_net {
82995 #ifdef CONFIG_SYSCTL
82996 struct ctl_table_header *ctl_table_header;
82997 - struct ctl_table *ctl_table;
82998 + ctl_table_no_const *ctl_table;
82999 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
83000 struct ctl_table_header *ctl_compat_header;
83001 - struct ctl_table *ctl_compat_table;
83002 + ctl_table_no_const *ctl_compat_table;
83003 #endif
83004 #endif
83005 unsigned int users;
83006 @@ -58,7 +58,7 @@ struct nf_ip_net {
83007 struct nf_icmp_net icmpv6;
83008 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
83009 struct ctl_table_header *ctl_table_header;
83010 - struct ctl_table *ctl_table;
83011 + ctl_table_no_const *ctl_table;
83012 #endif
83013 };
83014
83015 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
83016 index ee520cb..9a0fd88 100644
83017 --- a/include/net/netns/ipv4.h
83018 +++ b/include/net/netns/ipv4.h
83019 @@ -72,7 +72,7 @@ struct netns_ipv4 {
83020
83021 kgid_t sysctl_ping_group_range[2];
83022
83023 - atomic_t dev_addr_genid;
83024 + atomic_unchecked_t dev_addr_genid;
83025
83026 #ifdef CONFIG_IP_MROUTE
83027 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
83028 @@ -82,6 +82,6 @@ struct netns_ipv4 {
83029 struct fib_rules_ops *mr_rules_ops;
83030 #endif
83031 #endif
83032 - atomic_t rt_genid;
83033 + atomic_unchecked_t rt_genid;
83034 };
83035 #endif
83036 diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
83037 index 0fb2401..477d81c 100644
83038 --- a/include/net/netns/ipv6.h
83039 +++ b/include/net/netns/ipv6.h
83040 @@ -71,8 +71,8 @@ struct netns_ipv6 {
83041 struct fib_rules_ops *mr6_rules_ops;
83042 #endif
83043 #endif
83044 - atomic_t dev_addr_genid;
83045 - atomic_t rt_genid;
83046 + atomic_unchecked_t dev_addr_genid;
83047 + atomic_unchecked_t rt_genid;
83048 };
83049
83050 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
83051 diff --git a/include/net/ping.h b/include/net/ping.h
83052 index 90f4841..74446a8 100644
83053 --- a/include/net/ping.h
83054 +++ b/include/net/ping.h
83055 @@ -56,7 +56,7 @@ struct ping_iter_state {
83056 extern struct proto ping_prot;
83057 extern struct ping_table ping_table;
83058 #if IS_ENABLED(CONFIG_IPV6)
83059 -extern struct pingv6_ops pingv6_ops;
83060 +extern struct pingv6_ops *pingv6_ops;
83061 #endif
83062
83063 struct pingfakehdr {
83064 diff --git a/include/net/protocol.h b/include/net/protocol.h
83065 index fbf7676..a5e21c3 100644
83066 --- a/include/net/protocol.h
83067 +++ b/include/net/protocol.h
83068 @@ -44,7 +44,7 @@ struct net_protocol {
83069 void (*err_handler)(struct sk_buff *skb, u32 info);
83070 unsigned int no_policy:1,
83071 netns_ok:1;
83072 -};
83073 +} __do_const;
83074
83075 #if IS_ENABLED(CONFIG_IPV6)
83076 struct inet6_protocol {
83077 @@ -57,7 +57,7 @@ struct inet6_protocol {
83078 u8 type, u8 code, int offset,
83079 __be32 info);
83080 unsigned int flags; /* INET6_PROTO_xxx */
83081 -};
83082 +} __do_const;
83083
83084 #define INET6_PROTO_NOPOLICY 0x1
83085 #define INET6_PROTO_FINAL 0x2
83086 diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
83087 index bb13a18..e734116 100644
83088 --- a/include/net/rtnetlink.h
83089 +++ b/include/net/rtnetlink.h
83090 @@ -79,7 +79,7 @@ struct rtnl_link_ops {
83091 const struct net_device *dev);
83092 unsigned int (*get_num_tx_queues)(void);
83093 unsigned int (*get_num_rx_queues)(void);
83094 -};
83095 +} __do_const;
83096
83097 int __rtnl_link_register(struct rtnl_link_ops *ops);
83098 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
83099 diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
83100 index 6bd44fe..96f364e 100644
83101 --- a/include/net/sctp/checksum.h
83102 +++ b/include/net/sctp/checksum.h
83103 @@ -62,8 +62,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
83104 unsigned int offset)
83105 {
83106 struct sctphdr *sh = sctp_hdr(skb);
83107 - __le32 ret, old = sh->checksum;
83108 - const struct skb_checksum_ops ops = {
83109 + __le32 ret, old = sh->checksum;
83110 + static const struct skb_checksum_ops ops = {
83111 .update = sctp_csum_update,
83112 .combine = sctp_csum_combine,
83113 };
83114 diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
83115 index 4ef75af..5aa073a 100644
83116 --- a/include/net/sctp/sm.h
83117 +++ b/include/net/sctp/sm.h
83118 @@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
83119 typedef struct {
83120 sctp_state_fn_t *fn;
83121 const char *name;
83122 -} sctp_sm_table_entry_t;
83123 +} __do_const sctp_sm_table_entry_t;
83124
83125 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
83126 * currently in use.
83127 @@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
83128 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
83129
83130 /* Extern declarations for major data structures. */
83131 -extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
83132 +extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
83133
83134
83135 /* Get the size of a DATA chunk payload. */
83136 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
83137 index 0a248b3..4dcbe5c 100644
83138 --- a/include/net/sctp/structs.h
83139 +++ b/include/net/sctp/structs.h
83140 @@ -508,7 +508,7 @@ struct sctp_pf {
83141 struct sctp_association *asoc);
83142 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
83143 struct sctp_af *af;
83144 -};
83145 +} __do_const;
83146
83147
83148 /* Structure to track chunk fragments that have been acked, but peer
83149 diff --git a/include/net/sock.h b/include/net/sock.h
83150 index 2ef3c3e..e02013e 100644
83151 --- a/include/net/sock.h
83152 +++ b/include/net/sock.h
83153 @@ -348,7 +348,7 @@ struct sock {
83154 unsigned int sk_napi_id;
83155 unsigned int sk_ll_usec;
83156 #endif
83157 - atomic_t sk_drops;
83158 + atomic_unchecked_t sk_drops;
83159 int sk_rcvbuf;
83160
83161 struct sk_filter __rcu *sk_filter;
83162 @@ -1209,7 +1209,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
83163 return ret >> PAGE_SHIFT;
83164 }
83165
83166 -static inline long
83167 +static inline long __intentional_overflow(-1)
83168 sk_memory_allocated(const struct sock *sk)
83169 {
83170 struct proto *prot = sk->sk_prot;
83171 @@ -1813,7 +1813,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
83172 }
83173
83174 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
83175 - char __user *from, char *to,
83176 + char __user *from, unsigned char *to,
83177 int copy, int offset)
83178 {
83179 if (skb->ip_summed == CHECKSUM_NONE) {
83180 @@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
83181 }
83182 }
83183
83184 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
83185 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
83186
83187 /**
83188 * sk_page_frag - return an appropriate page_frag
83189 diff --git a/include/net/tcp.h b/include/net/tcp.h
83190 index 9250d62..10a7f03 100644
83191 --- a/include/net/tcp.h
83192 +++ b/include/net/tcp.h
83193 @@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
83194 #ifdef CONFIG_SYN_COOKIES
83195 #include <linux/ktime.h>
83196
83197 -/* Syncookies use a monotonic timer which increments every 64 seconds.
83198 +/* Syncookies use a monotonic timer which increments every 60 seconds.
83199 * This counter is used both as a hash input and partially encoded into
83200 * the cookie value. A cookie is only validated further if the delta
83201 * between the current counter value and the encoded one is less than this,
83202 - * i.e. a sent cookie is valid only at most for 128 seconds (or less if
83203 + * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
83204 * the counter advances immediately after a cookie is generated).
83205 */
83206 #define MAX_SYNCOOKIE_AGE 2
83207
83208 static inline u32 tcp_cookie_time(void)
83209 {
83210 - struct timespec now;
83211 - getnstimeofday(&now);
83212 - return now.tv_sec >> 6; /* 64 seconds granularity */
83213 + u64 val = get_jiffies_64();
83214 +
83215 + do_div(val, 60 * HZ);
83216 + return val;
83217 }
83218
83219 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
83220 @@ -540,7 +541,7 @@ void tcp_retransmit_timer(struct sock *sk);
83221 void tcp_xmit_retransmit_queue(struct sock *);
83222 void tcp_simple_retransmit(struct sock *);
83223 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
83224 -int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
83225 +int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
83226
83227 void tcp_send_probe0(struct sock *);
83228 void tcp_send_partial(struct sock *);
83229 @@ -711,8 +712,8 @@ struct tcp_skb_cb {
83230 struct inet6_skb_parm h6;
83231 #endif
83232 } header; /* For incoming frames */
83233 - __u32 seq; /* Starting sequence number */
83234 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
83235 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
83236 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
83237 __u32 when; /* used to compute rtt's */
83238 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
83239
83240 @@ -726,7 +727,7 @@ struct tcp_skb_cb {
83241
83242 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
83243 /* 1 byte hole */
83244 - __u32 ack_seq; /* Sequence number ACK'd */
83245 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
83246 };
83247
83248 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
83249 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
83250 index 6b82fdf..14d74d2 100644
83251 --- a/include/net/xfrm.h
83252 +++ b/include/net/xfrm.h
83253 @@ -287,7 +287,6 @@ struct xfrm_dst;
83254 struct xfrm_policy_afinfo {
83255 unsigned short family;
83256 struct dst_ops *dst_ops;
83257 - void (*garbage_collect)(struct net *net);
83258 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
83259 const xfrm_address_t *saddr,
83260 const xfrm_address_t *daddr);
83261 @@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
83262 struct net_device *dev,
83263 const struct flowi *fl);
83264 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
83265 -};
83266 +} __do_const;
83267
83268 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
83269 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
83270 @@ -344,7 +343,7 @@ struct xfrm_state_afinfo {
83271 int (*transport_finish)(struct sk_buff *skb,
83272 int async);
83273 void (*local_error)(struct sk_buff *skb, u32 mtu);
83274 -};
83275 +} __do_const;
83276
83277 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
83278 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
83279 @@ -429,7 +428,7 @@ struct xfrm_mode {
83280 struct module *owner;
83281 unsigned int encap;
83282 int flags;
83283 -};
83284 +} __do_const;
83285
83286 /* Flags for xfrm_mode. */
83287 enum {
83288 @@ -526,7 +525,7 @@ struct xfrm_policy {
83289 struct timer_list timer;
83290
83291 struct flow_cache_object flo;
83292 - atomic_t genid;
83293 + atomic_unchecked_t genid;
83294 u32 priority;
83295 u32 index;
83296 struct xfrm_mark mark;
83297 @@ -1166,6 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
83298 }
83299
83300 void xfrm_garbage_collect(struct net *net);
83301 +void xfrm_garbage_collect_deferred(struct net *net);
83302
83303 #else
83304
83305 @@ -1204,6 +1204,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
83306 static inline void xfrm_garbage_collect(struct net *net)
83307 {
83308 }
83309 +static inline void xfrm_garbage_collect_deferred(struct net *net)
83310 +{
83311 +}
83312 #endif
83313
83314 static __inline__
83315 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
83316 index 1017e0b..227aa4d 100644
83317 --- a/include/rdma/iw_cm.h
83318 +++ b/include/rdma/iw_cm.h
83319 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
83320 int backlog);
83321
83322 int (*destroy_listen)(struct iw_cm_id *cm_id);
83323 -};
83324 +} __no_const;
83325
83326 /**
83327 * iw_create_cm_id - Create an IW CM identifier.
83328 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
83329 index 52beadf..598734c 100644
83330 --- a/include/scsi/libfc.h
83331 +++ b/include/scsi/libfc.h
83332 @@ -771,6 +771,7 @@ struct libfc_function_template {
83333 */
83334 void (*disc_stop_final) (struct fc_lport *);
83335 };
83336 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
83337
83338 /**
83339 * struct fc_disc - Discovery context
83340 @@ -875,7 +876,7 @@ struct fc_lport {
83341 struct fc_vport *vport;
83342
83343 /* Operational Information */
83344 - struct libfc_function_template tt;
83345 + libfc_function_template_no_const tt;
83346 u8 link_up;
83347 u8 qfull;
83348 enum fc_lport_state state;
83349 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
83350 index d65fbec..f80fef2 100644
83351 --- a/include/scsi/scsi_device.h
83352 +++ b/include/scsi/scsi_device.h
83353 @@ -180,9 +180,9 @@ struct scsi_device {
83354 unsigned int max_device_blocked; /* what device_blocked counts down from */
83355 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
83356
83357 - atomic_t iorequest_cnt;
83358 - atomic_t iodone_cnt;
83359 - atomic_t ioerr_cnt;
83360 + atomic_unchecked_t iorequest_cnt;
83361 + atomic_unchecked_t iodone_cnt;
83362 + atomic_unchecked_t ioerr_cnt;
83363
83364 struct device sdev_gendev,
83365 sdev_dev;
83366 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
83367 index b797e8f..8e2c3aa 100644
83368 --- a/include/scsi/scsi_transport_fc.h
83369 +++ b/include/scsi/scsi_transport_fc.h
83370 @@ -751,7 +751,8 @@ struct fc_function_template {
83371 unsigned long show_host_system_hostname:1;
83372
83373 unsigned long disable_target_scan:1;
83374 -};
83375 +} __do_const;
83376 +typedef struct fc_function_template __no_const fc_function_template_no_const;
83377
83378
83379 /**
83380 diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
83381 index ae6c3b8..fd748ac 100644
83382 --- a/include/sound/compress_driver.h
83383 +++ b/include/sound/compress_driver.h
83384 @@ -128,7 +128,7 @@ struct snd_compr_ops {
83385 struct snd_compr_caps *caps);
83386 int (*get_codec_caps) (struct snd_compr_stream *stream,
83387 struct snd_compr_codec_caps *codec);
83388 -};
83389 +} __no_const;
83390
83391 /**
83392 * struct snd_compr: Compressed device
83393 diff --git a/include/sound/soc.h b/include/sound/soc.h
83394 index 1f741cb..8cefc08 100644
83395 --- a/include/sound/soc.h
83396 +++ b/include/sound/soc.h
83397 @@ -763,7 +763,7 @@ struct snd_soc_codec_driver {
83398 /* probe ordering - for components with runtime dependencies */
83399 int probe_order;
83400 int remove_order;
83401 -};
83402 +} __do_const;
83403
83404 /* SoC platform interface */
83405 struct snd_soc_platform_driver {
83406 @@ -809,7 +809,7 @@ struct snd_soc_platform_driver {
83407 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
83408 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
83409 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
83410 -};
83411 +} __do_const;
83412
83413 struct snd_soc_platform {
83414 const char *name;
83415 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
83416 index e3569f8..6544ffd 100644
83417 --- a/include/target/target_core_base.h
83418 +++ b/include/target/target_core_base.h
83419 @@ -687,7 +687,7 @@ struct se_device {
83420 atomic_long_t write_bytes;
83421 /* Active commands on this virtual SE device */
83422 atomic_t simple_cmds;
83423 - atomic_t dev_ordered_id;
83424 + atomic_unchecked_t dev_ordered_id;
83425 atomic_t dev_ordered_sync;
83426 atomic_t dev_qf_count;
83427 int export_count;
83428 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
83429 new file mode 100644
83430 index 0000000..fb634b7
83431 --- /dev/null
83432 +++ b/include/trace/events/fs.h
83433 @@ -0,0 +1,53 @@
83434 +#undef TRACE_SYSTEM
83435 +#define TRACE_SYSTEM fs
83436 +
83437 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
83438 +#define _TRACE_FS_H
83439 +
83440 +#include <linux/fs.h>
83441 +#include <linux/tracepoint.h>
83442 +
83443 +TRACE_EVENT(do_sys_open,
83444 +
83445 + TP_PROTO(const char *filename, int flags, int mode),
83446 +
83447 + TP_ARGS(filename, flags, mode),
83448 +
83449 + TP_STRUCT__entry(
83450 + __string( filename, filename )
83451 + __field( int, flags )
83452 + __field( int, mode )
83453 + ),
83454 +
83455 + TP_fast_assign(
83456 + __assign_str(filename, filename);
83457 + __entry->flags = flags;
83458 + __entry->mode = mode;
83459 + ),
83460 +
83461 + TP_printk("\"%s\" %x %o",
83462 + __get_str(filename), __entry->flags, __entry->mode)
83463 +);
83464 +
83465 +TRACE_EVENT(open_exec,
83466 +
83467 + TP_PROTO(const char *filename),
83468 +
83469 + TP_ARGS(filename),
83470 +
83471 + TP_STRUCT__entry(
83472 + __string( filename, filename )
83473 + ),
83474 +
83475 + TP_fast_assign(
83476 + __assign_str(filename, filename);
83477 + ),
83478 +
83479 + TP_printk("\"%s\"",
83480 + __get_str(filename))
83481 +);
83482 +
83483 +#endif /* _TRACE_FS_H */
83484 +
83485 +/* This part must be outside protection */
83486 +#include <trace/define_trace.h>
83487 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
83488 index 1c09820..7f5ec79 100644
83489 --- a/include/trace/events/irq.h
83490 +++ b/include/trace/events/irq.h
83491 @@ -36,7 +36,7 @@ struct softirq_action;
83492 */
83493 TRACE_EVENT(irq_handler_entry,
83494
83495 - TP_PROTO(int irq, struct irqaction *action),
83496 + TP_PROTO(int irq, const struct irqaction *action),
83497
83498 TP_ARGS(irq, action),
83499
83500 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
83501 */
83502 TRACE_EVENT(irq_handler_exit,
83503
83504 - TP_PROTO(int irq, struct irqaction *action, int ret),
83505 + TP_PROTO(int irq, const struct irqaction *action, int ret),
83506
83507 TP_ARGS(irq, action, ret),
83508
83509 diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
83510 index 7caf44c..23c6f27 100644
83511 --- a/include/uapi/linux/a.out.h
83512 +++ b/include/uapi/linux/a.out.h
83513 @@ -39,6 +39,14 @@ enum machine_type {
83514 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
83515 };
83516
83517 +/* Constants for the N_FLAGS field */
83518 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
83519 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
83520 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
83521 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
83522 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
83523 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
83524 +
83525 #if !defined (N_MAGIC)
83526 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
83527 #endif
83528 diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
83529 index d876736..ccce5c0 100644
83530 --- a/include/uapi/linux/byteorder/little_endian.h
83531 +++ b/include/uapi/linux/byteorder/little_endian.h
83532 @@ -42,51 +42,51 @@
83533
83534 static inline __le64 __cpu_to_le64p(const __u64 *p)
83535 {
83536 - return (__force __le64)*p;
83537 + return (__force const __le64)*p;
83538 }
83539 -static inline __u64 __le64_to_cpup(const __le64 *p)
83540 +static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
83541 {
83542 - return (__force __u64)*p;
83543 + return (__force const __u64)*p;
83544 }
83545 static inline __le32 __cpu_to_le32p(const __u32 *p)
83546 {
83547 - return (__force __le32)*p;
83548 + return (__force const __le32)*p;
83549 }
83550 static inline __u32 __le32_to_cpup(const __le32 *p)
83551 {
83552 - return (__force __u32)*p;
83553 + return (__force const __u32)*p;
83554 }
83555 static inline __le16 __cpu_to_le16p(const __u16 *p)
83556 {
83557 - return (__force __le16)*p;
83558 + return (__force const __le16)*p;
83559 }
83560 static inline __u16 __le16_to_cpup(const __le16 *p)
83561 {
83562 - return (__force __u16)*p;
83563 + return (__force const __u16)*p;
83564 }
83565 static inline __be64 __cpu_to_be64p(const __u64 *p)
83566 {
83567 - return (__force __be64)__swab64p(p);
83568 + return (__force const __be64)__swab64p(p);
83569 }
83570 static inline __u64 __be64_to_cpup(const __be64 *p)
83571 {
83572 - return __swab64p((__u64 *)p);
83573 + return __swab64p((const __u64 *)p);
83574 }
83575 static inline __be32 __cpu_to_be32p(const __u32 *p)
83576 {
83577 - return (__force __be32)__swab32p(p);
83578 + return (__force const __be32)__swab32p(p);
83579 }
83580 -static inline __u32 __be32_to_cpup(const __be32 *p)
83581 +static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
83582 {
83583 - return __swab32p((__u32 *)p);
83584 + return __swab32p((const __u32 *)p);
83585 }
83586 static inline __be16 __cpu_to_be16p(const __u16 *p)
83587 {
83588 - return (__force __be16)__swab16p(p);
83589 + return (__force const __be16)__swab16p(p);
83590 }
83591 static inline __u16 __be16_to_cpup(const __be16 *p)
83592 {
83593 - return __swab16p((__u16 *)p);
83594 + return __swab16p((const __u16 *)p);
83595 }
83596 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
83597 #define __le64_to_cpus(x) do { (void)(x); } while (0)
83598 diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
83599 index ef6103b..d4e65dd 100644
83600 --- a/include/uapi/linux/elf.h
83601 +++ b/include/uapi/linux/elf.h
83602 @@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
83603 #define PT_GNU_EH_FRAME 0x6474e550
83604
83605 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
83606 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
83607 +
83608 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
83609 +
83610 +/* Constants for the e_flags field */
83611 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
83612 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
83613 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
83614 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
83615 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
83616 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
83617
83618 /*
83619 * Extended Numbering
83620 @@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
83621 #define DT_DEBUG 21
83622 #define DT_TEXTREL 22
83623 #define DT_JMPREL 23
83624 +#define DT_FLAGS 30
83625 + #define DF_TEXTREL 0x00000004
83626 #define DT_ENCODING 32
83627 #define OLD_DT_LOOS 0x60000000
83628 #define DT_LOOS 0x6000000d
83629 @@ -240,6 +253,19 @@ typedef struct elf64_hdr {
83630 #define PF_W 0x2
83631 #define PF_X 0x1
83632
83633 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
83634 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
83635 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
83636 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
83637 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
83638 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
83639 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
83640 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
83641 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
83642 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
83643 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
83644 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
83645 +
83646 typedef struct elf32_phdr{
83647 Elf32_Word p_type;
83648 Elf32_Off p_offset;
83649 @@ -332,6 +358,8 @@ typedef struct elf64_shdr {
83650 #define EI_OSABI 7
83651 #define EI_PAD 8
83652
83653 +#define EI_PAX 14
83654 +
83655 #define ELFMAG0 0x7f /* EI_MAG */
83656 #define ELFMAG1 'E'
83657 #define ELFMAG2 'L'
83658 diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
83659 index aa169c4..6a2771d 100644
83660 --- a/include/uapi/linux/personality.h
83661 +++ b/include/uapi/linux/personality.h
83662 @@ -30,6 +30,7 @@ enum {
83663 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
83664 ADDR_NO_RANDOMIZE | \
83665 ADDR_COMPAT_LAYOUT | \
83666 + ADDR_LIMIT_3GB | \
83667 MMAP_PAGE_ZERO)
83668
83669 /*
83670 diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
83671 index 7530e74..e714828 100644
83672 --- a/include/uapi/linux/screen_info.h
83673 +++ b/include/uapi/linux/screen_info.h
83674 @@ -43,7 +43,8 @@ struct screen_info {
83675 __u16 pages; /* 0x32 */
83676 __u16 vesa_attributes; /* 0x34 */
83677 __u32 capabilities; /* 0x36 */
83678 - __u8 _reserved[6]; /* 0x3a */
83679 + __u16 vesapm_size; /* 0x3a */
83680 + __u8 _reserved[4]; /* 0x3c */
83681 } __attribute__((packed));
83682
83683 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
83684 diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
83685 index 0e011eb..82681b1 100644
83686 --- a/include/uapi/linux/swab.h
83687 +++ b/include/uapi/linux/swab.h
83688 @@ -43,7 +43,7 @@
83689 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
83690 */
83691
83692 -static inline __attribute_const__ __u16 __fswab16(__u16 val)
83693 +static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
83694 {
83695 #ifdef __HAVE_BUILTIN_BSWAP16__
83696 return __builtin_bswap16(val);
83697 @@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
83698 #endif
83699 }
83700
83701 -static inline __attribute_const__ __u32 __fswab32(__u32 val)
83702 +static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
83703 {
83704 #ifdef __HAVE_BUILTIN_BSWAP32__
83705 return __builtin_bswap32(val);
83706 @@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
83707 #endif
83708 }
83709
83710 -static inline __attribute_const__ __u64 __fswab64(__u64 val)
83711 +static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
83712 {
83713 #ifdef __HAVE_BUILTIN_BSWAP64__
83714 return __builtin_bswap64(val);
83715 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
83716 index 6d67213..552fdd9 100644
83717 --- a/include/uapi/linux/sysctl.h
83718 +++ b/include/uapi/linux/sysctl.h
83719 @@ -155,8 +155,6 @@ enum
83720 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
83721 };
83722
83723 -
83724 -
83725 /* CTL_VM names: */
83726 enum
83727 {
83728 diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
83729 index 437f1b0..0eeb38d 100644
83730 --- a/include/uapi/linux/videodev2.h
83731 +++ b/include/uapi/linux/videodev2.h
83732 @@ -1227,7 +1227,7 @@ struct v4l2_ext_control {
83733 union {
83734 __s32 value;
83735 __s64 value64;
83736 - char *string;
83737 + char __user *string;
83738 };
83739 } __attribute__ ((packed));
83740
83741 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
83742 index e4629b9..6958086 100644
83743 --- a/include/uapi/linux/xattr.h
83744 +++ b/include/uapi/linux/xattr.h
83745 @@ -63,5 +63,9 @@
83746 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
83747 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
83748
83749 +/* User namespace */
83750 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
83751 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
83752 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
83753
83754 #endif /* _UAPI_LINUX_XATTR_H */
83755 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
83756 index f9466fa..f4e2b81 100644
83757 --- a/include/video/udlfb.h
83758 +++ b/include/video/udlfb.h
83759 @@ -53,10 +53,10 @@ struct dlfb_data {
83760 u32 pseudo_palette[256];
83761 int blank_mode; /*one of FB_BLANK_ */
83762 /* blit-only rendering path metrics, exposed through sysfs */
83763 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
83764 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
83765 - atomic_t bytes_sent; /* to usb, after compression including overhead */
83766 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
83767 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
83768 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
83769 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
83770 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
83771 };
83772
83773 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
83774 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
83775 index 30f5362..8ed8ac9 100644
83776 --- a/include/video/uvesafb.h
83777 +++ b/include/video/uvesafb.h
83778 @@ -122,6 +122,7 @@ struct uvesafb_par {
83779 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
83780 u8 pmi_setpal; /* PMI for palette changes */
83781 u16 *pmi_base; /* protected mode interface location */
83782 + u8 *pmi_code; /* protected mode code location */
83783 void *pmi_start;
83784 void *pmi_pal;
83785 u8 *vbe_state_orig; /*
83786 diff --git a/init/Kconfig b/init/Kconfig
83787 index 4e5d96a..93cd8a1 100644
83788 --- a/init/Kconfig
83789 +++ b/init/Kconfig
83790 @@ -1079,6 +1079,7 @@ endif # CGROUPS
83791
83792 config CHECKPOINT_RESTORE
83793 bool "Checkpoint/restore support" if EXPERT
83794 + depends on !GRKERNSEC
83795 default n
83796 help
83797 Enables additional kernel features in a sake of checkpoint/restore.
83798 @@ -1550,7 +1551,7 @@ config SLUB_DEBUG
83799
83800 config COMPAT_BRK
83801 bool "Disable heap randomization"
83802 - default y
83803 + default n
83804 help
83805 Randomizing heap placement makes heap exploits harder, but it
83806 also breaks ancient binaries (including anything libc5 based).
83807 @@ -1838,7 +1839,7 @@ config INIT_ALL_POSSIBLE
83808 config STOP_MACHINE
83809 bool
83810 default y
83811 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
83812 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
83813 help
83814 Need stop_machine() primitive.
83815
83816 diff --git a/init/Makefile b/init/Makefile
83817 index 7bc47ee..6da2dc7 100644
83818 --- a/init/Makefile
83819 +++ b/init/Makefile
83820 @@ -2,6 +2,9 @@
83821 # Makefile for the linux kernel.
83822 #
83823
83824 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
83825 +asflags-y := $(GCC_PLUGINS_AFLAGS)
83826 +
83827 obj-y := main.o version.o mounts.o
83828 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
83829 obj-y += noinitramfs.o
83830 diff --git a/init/do_mounts.c b/init/do_mounts.c
83831 index 8e5addc..c96ea61 100644
83832 --- a/init/do_mounts.c
83833 +++ b/init/do_mounts.c
83834 @@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
83835 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
83836 {
83837 struct super_block *s;
83838 - int err = sys_mount(name, "/root", fs, flags, data);
83839 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
83840 if (err)
83841 return err;
83842
83843 - sys_chdir("/root");
83844 + sys_chdir((const char __force_user *)"/root");
83845 s = current->fs->pwd.dentry->d_sb;
83846 ROOT_DEV = s->s_dev;
83847 printk(KERN_INFO
83848 @@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
83849 va_start(args, fmt);
83850 vsprintf(buf, fmt, args);
83851 va_end(args);
83852 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
83853 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
83854 if (fd >= 0) {
83855 sys_ioctl(fd, FDEJECT, 0);
83856 sys_close(fd);
83857 }
83858 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
83859 - fd = sys_open("/dev/console", O_RDWR, 0);
83860 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
83861 if (fd >= 0) {
83862 sys_ioctl(fd, TCGETS, (long)&termios);
83863 termios.c_lflag &= ~ICANON;
83864 sys_ioctl(fd, TCSETSF, (long)&termios);
83865 - sys_read(fd, &c, 1);
83866 + sys_read(fd, (char __user *)&c, 1);
83867 termios.c_lflag |= ICANON;
83868 sys_ioctl(fd, TCSETSF, (long)&termios);
83869 sys_close(fd);
83870 @@ -589,8 +589,8 @@ void __init prepare_namespace(void)
83871 mount_root();
83872 out:
83873 devtmpfs_mount("dev");
83874 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
83875 - sys_chroot(".");
83876 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
83877 + sys_chroot((const char __force_user *)".");
83878 }
83879
83880 static bool is_tmpfs;
83881 diff --git a/init/do_mounts.h b/init/do_mounts.h
83882 index f5b978a..69dbfe8 100644
83883 --- a/init/do_mounts.h
83884 +++ b/init/do_mounts.h
83885 @@ -15,15 +15,15 @@ extern int root_mountflags;
83886
83887 static inline int create_dev(char *name, dev_t dev)
83888 {
83889 - sys_unlink(name);
83890 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
83891 + sys_unlink((char __force_user *)name);
83892 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
83893 }
83894
83895 #if BITS_PER_LONG == 32
83896 static inline u32 bstat(char *name)
83897 {
83898 struct stat64 stat;
83899 - if (sys_stat64(name, &stat) != 0)
83900 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
83901 return 0;
83902 if (!S_ISBLK(stat.st_mode))
83903 return 0;
83904 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
83905 static inline u32 bstat(char *name)
83906 {
83907 struct stat stat;
83908 - if (sys_newstat(name, &stat) != 0)
83909 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
83910 return 0;
83911 if (!S_ISBLK(stat.st_mode))
83912 return 0;
83913 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
83914 index 3e0878e..8a9d7a0 100644
83915 --- a/init/do_mounts_initrd.c
83916 +++ b/init/do_mounts_initrd.c
83917 @@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
83918 {
83919 sys_unshare(CLONE_FS | CLONE_FILES);
83920 /* stdin/stdout/stderr for /linuxrc */
83921 - sys_open("/dev/console", O_RDWR, 0);
83922 + sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
83923 sys_dup(0);
83924 sys_dup(0);
83925 /* move initrd over / and chdir/chroot in initrd root */
83926 - sys_chdir("/root");
83927 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
83928 - sys_chroot(".");
83929 + sys_chdir((const char __force_user *)"/root");
83930 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
83931 + sys_chroot((const char __force_user *)".");
83932 sys_setsid();
83933 return 0;
83934 }
83935 @@ -59,8 +59,8 @@ static void __init handle_initrd(void)
83936 create_dev("/dev/root.old", Root_RAM0);
83937 /* mount initrd on rootfs' /root */
83938 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
83939 - sys_mkdir("/old", 0700);
83940 - sys_chdir("/old");
83941 + sys_mkdir((const char __force_user *)"/old", 0700);
83942 + sys_chdir((const char __force_user *)"/old");
83943
83944 /* try loading default modules from initrd */
83945 load_default_modules();
83946 @@ -80,31 +80,31 @@ static void __init handle_initrd(void)
83947 current->flags &= ~PF_FREEZER_SKIP;
83948
83949 /* move initrd to rootfs' /old */
83950 - sys_mount("..", ".", NULL, MS_MOVE, NULL);
83951 + sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
83952 /* switch root and cwd back to / of rootfs */
83953 - sys_chroot("..");
83954 + sys_chroot((const char __force_user *)"..");
83955
83956 if (new_decode_dev(real_root_dev) == Root_RAM0) {
83957 - sys_chdir("/old");
83958 + sys_chdir((const char __force_user *)"/old");
83959 return;
83960 }
83961
83962 - sys_chdir("/");
83963 + sys_chdir((const char __force_user *)"/");
83964 ROOT_DEV = new_decode_dev(real_root_dev);
83965 mount_root();
83966
83967 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
83968 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
83969 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
83970 if (!error)
83971 printk("okay\n");
83972 else {
83973 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
83974 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
83975 if (error == -ENOENT)
83976 printk("/initrd does not exist. Ignored.\n");
83977 else
83978 printk("failed\n");
83979 printk(KERN_NOTICE "Unmounting old root\n");
83980 - sys_umount("/old", MNT_DETACH);
83981 + sys_umount((char __force_user *)"/old", MNT_DETACH);
83982 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
83983 if (fd < 0) {
83984 error = fd;
83985 @@ -127,11 +127,11 @@ int __init initrd_load(void)
83986 * mounted in the normal path.
83987 */
83988 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
83989 - sys_unlink("/initrd.image");
83990 + sys_unlink((const char __force_user *)"/initrd.image");
83991 handle_initrd();
83992 return 1;
83993 }
83994 }
83995 - sys_unlink("/initrd.image");
83996 + sys_unlink((const char __force_user *)"/initrd.image");
83997 return 0;
83998 }
83999 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
84000 index 8cb6db5..d729f50 100644
84001 --- a/init/do_mounts_md.c
84002 +++ b/init/do_mounts_md.c
84003 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
84004 partitioned ? "_d" : "", minor,
84005 md_setup_args[ent].device_names);
84006
84007 - fd = sys_open(name, 0, 0);
84008 + fd = sys_open((char __force_user *)name, 0, 0);
84009 if (fd < 0) {
84010 printk(KERN_ERR "md: open failed - cannot start "
84011 "array %s\n", name);
84012 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
84013 * array without it
84014 */
84015 sys_close(fd);
84016 - fd = sys_open(name, 0, 0);
84017 + fd = sys_open((char __force_user *)name, 0, 0);
84018 sys_ioctl(fd, BLKRRPART, 0);
84019 }
84020 sys_close(fd);
84021 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
84022
84023 wait_for_device_probe();
84024
84025 - fd = sys_open("/dev/md0", 0, 0);
84026 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
84027 if (fd >= 0) {
84028 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
84029 sys_close(fd);
84030 diff --git a/init/init_task.c b/init/init_task.c
84031 index ba0a7f36..2bcf1d5 100644
84032 --- a/init/init_task.c
84033 +++ b/init/init_task.c
84034 @@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
84035 * Initial thread structure. Alignment of this is handled by a special
84036 * linker map entry.
84037 */
84038 +#ifdef CONFIG_X86
84039 +union thread_union init_thread_union __init_task_data;
84040 +#else
84041 union thread_union init_thread_union __init_task_data =
84042 { INIT_THREAD_INFO(init_task) };
84043 +#endif
84044 diff --git a/init/initramfs.c b/init/initramfs.c
84045 index a67ef9d..2d17ed9 100644
84046 --- a/init/initramfs.c
84047 +++ b/init/initramfs.c
84048 @@ -84,7 +84,7 @@ static void __init free_hash(void)
84049 }
84050 }
84051
84052 -static long __init do_utime(char *filename, time_t mtime)
84053 +static long __init do_utime(char __force_user *filename, time_t mtime)
84054 {
84055 struct timespec t[2];
84056
84057 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
84058 struct dir_entry *de, *tmp;
84059 list_for_each_entry_safe(de, tmp, &dir_list, list) {
84060 list_del(&de->list);
84061 - do_utime(de->name, de->mtime);
84062 + do_utime((char __force_user *)de->name, de->mtime);
84063 kfree(de->name);
84064 kfree(de);
84065 }
84066 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
84067 if (nlink >= 2) {
84068 char *old = find_link(major, minor, ino, mode, collected);
84069 if (old)
84070 - return (sys_link(old, collected) < 0) ? -1 : 1;
84071 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
84072 }
84073 return 0;
84074 }
84075 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
84076 {
84077 struct stat st;
84078
84079 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
84080 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
84081 if (S_ISDIR(st.st_mode))
84082 - sys_rmdir(path);
84083 + sys_rmdir((char __force_user *)path);
84084 else
84085 - sys_unlink(path);
84086 + sys_unlink((char __force_user *)path);
84087 }
84088 }
84089
84090 @@ -315,7 +315,7 @@ static int __init do_name(void)
84091 int openflags = O_WRONLY|O_CREAT;
84092 if (ml != 1)
84093 openflags |= O_TRUNC;
84094 - wfd = sys_open(collected, openflags, mode);
84095 + wfd = sys_open((char __force_user *)collected, openflags, mode);
84096
84097 if (wfd >= 0) {
84098 sys_fchown(wfd, uid, gid);
84099 @@ -327,17 +327,17 @@ static int __init do_name(void)
84100 }
84101 }
84102 } else if (S_ISDIR(mode)) {
84103 - sys_mkdir(collected, mode);
84104 - sys_chown(collected, uid, gid);
84105 - sys_chmod(collected, mode);
84106 + sys_mkdir((char __force_user *)collected, mode);
84107 + sys_chown((char __force_user *)collected, uid, gid);
84108 + sys_chmod((char __force_user *)collected, mode);
84109 dir_add(collected, mtime);
84110 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
84111 S_ISFIFO(mode) || S_ISSOCK(mode)) {
84112 if (maybe_link() == 0) {
84113 - sys_mknod(collected, mode, rdev);
84114 - sys_chown(collected, uid, gid);
84115 - sys_chmod(collected, mode);
84116 - do_utime(collected, mtime);
84117 + sys_mknod((char __force_user *)collected, mode, rdev);
84118 + sys_chown((char __force_user *)collected, uid, gid);
84119 + sys_chmod((char __force_user *)collected, mode);
84120 + do_utime((char __force_user *)collected, mtime);
84121 }
84122 }
84123 return 0;
84124 @@ -346,15 +346,15 @@ static int __init do_name(void)
84125 static int __init do_copy(void)
84126 {
84127 if (count >= body_len) {
84128 - sys_write(wfd, victim, body_len);
84129 + sys_write(wfd, (char __force_user *)victim, body_len);
84130 sys_close(wfd);
84131 - do_utime(vcollected, mtime);
84132 + do_utime((char __force_user *)vcollected, mtime);
84133 kfree(vcollected);
84134 eat(body_len);
84135 state = SkipIt;
84136 return 0;
84137 } else {
84138 - sys_write(wfd, victim, count);
84139 + sys_write(wfd, (char __force_user *)victim, count);
84140 body_len -= count;
84141 eat(count);
84142 return 1;
84143 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
84144 {
84145 collected[N_ALIGN(name_len) + body_len] = '\0';
84146 clean_path(collected, 0);
84147 - sys_symlink(collected + N_ALIGN(name_len), collected);
84148 - sys_lchown(collected, uid, gid);
84149 - do_utime(collected, mtime);
84150 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
84151 + sys_lchown((char __force_user *)collected, uid, gid);
84152 + do_utime((char __force_user *)collected, mtime);
84153 state = SkipIt;
84154 next_state = Reset;
84155 return 0;
84156 @@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
84157 {
84158 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
84159 if (err)
84160 - panic(err); /* Failed to decompress INTERNAL initramfs */
84161 + panic("%s", err); /* Failed to decompress INTERNAL initramfs */
84162 if (initrd_start) {
84163 #ifdef CONFIG_BLK_DEV_RAM
84164 int fd;
84165 diff --git a/init/main.c b/init/main.c
84166 index febc511..f0851763 100644
84167 --- a/init/main.c
84168 +++ b/init/main.c
84169 @@ -103,6 +103,8 @@ static inline void mark_rodata_ro(void) { }
84170 extern void tc_init(void);
84171 #endif
84172
84173 +extern void grsecurity_init(void);
84174 +
84175 /*
84176 * Debug helper: via this flag we know that we are in 'early bootup code'
84177 * where only the boot processor is running with IRQ disabled. This means
84178 @@ -164,6 +166,75 @@ static int __init set_reset_devices(char *str)
84179
84180 __setup("reset_devices", set_reset_devices);
84181
84182 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84183 +kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
84184 +static int __init setup_grsec_proc_gid(char *str)
84185 +{
84186 + grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
84187 + return 1;
84188 +}
84189 +__setup("grsec_proc_gid=", setup_grsec_proc_gid);
84190 +#endif
84191 +
84192 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
84193 +unsigned long pax_user_shadow_base __read_only;
84194 +EXPORT_SYMBOL(pax_user_shadow_base);
84195 +extern char pax_enter_kernel_user[];
84196 +extern char pax_exit_kernel_user[];
84197 +#endif
84198 +
84199 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
84200 +static int __init setup_pax_nouderef(char *str)
84201 +{
84202 +#ifdef CONFIG_X86_32
84203 + unsigned int cpu;
84204 + struct desc_struct *gdt;
84205 +
84206 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
84207 + gdt = get_cpu_gdt_table(cpu);
84208 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
84209 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
84210 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
84211 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
84212 + }
84213 + loadsegment(ds, __KERNEL_DS);
84214 + loadsegment(es, __KERNEL_DS);
84215 + loadsegment(ss, __KERNEL_DS);
84216 +#else
84217 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
84218 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
84219 + clone_pgd_mask = ~(pgdval_t)0UL;
84220 + pax_user_shadow_base = 0UL;
84221 + setup_clear_cpu_cap(X86_FEATURE_PCID);
84222 + setup_clear_cpu_cap(X86_FEATURE_INVPCID);
84223 +#endif
84224 +
84225 + return 0;
84226 +}
84227 +early_param("pax_nouderef", setup_pax_nouderef);
84228 +
84229 +#ifdef CONFIG_X86_64
84230 +static int __init setup_pax_weakuderef(char *str)
84231 +{
84232 + if (clone_pgd_mask != ~(pgdval_t)0UL)
84233 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
84234 + return 1;
84235 +}
84236 +__setup("pax_weakuderef", setup_pax_weakuderef);
84237 +#endif
84238 +#endif
84239 +
84240 +#ifdef CONFIG_PAX_SOFTMODE
84241 +int pax_softmode;
84242 +
84243 +static int __init setup_pax_softmode(char *str)
84244 +{
84245 + get_option(&str, &pax_softmode);
84246 + return 1;
84247 +}
84248 +__setup("pax_softmode=", setup_pax_softmode);
84249 +#endif
84250 +
84251 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
84252 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
84253 static const char *panic_later, *panic_param;
84254 @@ -691,25 +762,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
84255 {
84256 int count = preempt_count();
84257 int ret;
84258 - char msgbuf[64];
84259 + const char *msg1 = "", *msg2 = "";
84260
84261 if (initcall_debug)
84262 ret = do_one_initcall_debug(fn);
84263 else
84264 ret = fn();
84265
84266 - msgbuf[0] = 0;
84267 -
84268 if (preempt_count() != count) {
84269 - sprintf(msgbuf, "preemption imbalance ");
84270 + msg1 = " preemption imbalance";
84271 preempt_count_set(count);
84272 }
84273 if (irqs_disabled()) {
84274 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
84275 + msg2 = " disabled interrupts";
84276 local_irq_enable();
84277 }
84278 - WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
84279 + WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
84280
84281 + add_latent_entropy();
84282 return ret;
84283 }
84284
84285 @@ -816,8 +886,8 @@ static int run_init_process(const char *init_filename)
84286 {
84287 argv_init[0] = init_filename;
84288 return do_execve(init_filename,
84289 - (const char __user *const __user *)argv_init,
84290 - (const char __user *const __user *)envp_init);
84291 + (const char __user *const __force_user *)argv_init,
84292 + (const char __user *const __force_user *)envp_init);
84293 }
84294
84295 static int try_to_run_init_process(const char *init_filename)
84296 @@ -834,6 +904,10 @@ static int try_to_run_init_process(const char *init_filename)
84297 return ret;
84298 }
84299
84300 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84301 +extern int gr_init_ran;
84302 +#endif
84303 +
84304 static noinline void __init kernel_init_freeable(void);
84305
84306 static int __ref kernel_init(void *unused)
84307 @@ -858,6 +932,11 @@ static int __ref kernel_init(void *unused)
84308 ramdisk_execute_command, ret);
84309 }
84310
84311 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84312 + /* if no initrd was used, be extra sure we enforce chroot restrictions */
84313 + gr_init_ran = 1;
84314 +#endif
84315 +
84316 /*
84317 * We try each of these until one succeeds.
84318 *
84319 @@ -913,7 +992,7 @@ static noinline void __init kernel_init_freeable(void)
84320 do_basic_setup();
84321
84322 /* Open the /dev/console on the rootfs, this should never fail */
84323 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
84324 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
84325 pr_err("Warning: unable to open an initial console.\n");
84326
84327 (void) sys_dup(0);
84328 @@ -926,11 +1005,13 @@ static noinline void __init kernel_init_freeable(void)
84329 if (!ramdisk_execute_command)
84330 ramdisk_execute_command = "/init";
84331
84332 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
84333 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
84334 ramdisk_execute_command = NULL;
84335 prepare_namespace();
84336 }
84337
84338 + grsecurity_init();
84339 +
84340 /*
84341 * Ok, we have completed the initial bootup, and
84342 * we're essentially up and running. Get rid of the
84343 diff --git a/ipc/compat.c b/ipc/compat.c
84344 index 892f658..e7c6320 100644
84345 --- a/ipc/compat.c
84346 +++ b/ipc/compat.c
84347 @@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
84348 COMPAT_SHMLBA);
84349 if (err < 0)
84350 return err;
84351 - return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
84352 + return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
84353 }
84354 case SHMDT:
84355 return sys_shmdt(compat_ptr(ptr));
84356 diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
84357 index b0e99de..09f385c 100644
84358 --- a/ipc/ipc_sysctl.c
84359 +++ b/ipc/ipc_sysctl.c
84360 @@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
84361 static int proc_ipc_dointvec(ctl_table *table, int write,
84362 void __user *buffer, size_t *lenp, loff_t *ppos)
84363 {
84364 - struct ctl_table ipc_table;
84365 + ctl_table_no_const ipc_table;
84366
84367 memcpy(&ipc_table, table, sizeof(ipc_table));
84368 ipc_table.data = get_ipc(table);
84369 @@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
84370 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
84371 void __user *buffer, size_t *lenp, loff_t *ppos)
84372 {
84373 - struct ctl_table ipc_table;
84374 + ctl_table_no_const ipc_table;
84375
84376 memcpy(&ipc_table, table, sizeof(ipc_table));
84377 ipc_table.data = get_ipc(table);
84378 @@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
84379 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
84380 void __user *buffer, size_t *lenp, loff_t *ppos)
84381 {
84382 - struct ctl_table ipc_table;
84383 + ctl_table_no_const ipc_table;
84384 size_t lenp_bef = *lenp;
84385 int rc;
84386
84387 @@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
84388 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
84389 void __user *buffer, size_t *lenp, loff_t *ppos)
84390 {
84391 - struct ctl_table ipc_table;
84392 + ctl_table_no_const ipc_table;
84393 memcpy(&ipc_table, table, sizeof(ipc_table));
84394 ipc_table.data = get_ipc(table);
84395
84396 @@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
84397 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
84398 void __user *buffer, size_t *lenp, loff_t *ppos)
84399 {
84400 - struct ctl_table ipc_table;
84401 + ctl_table_no_const ipc_table;
84402 size_t lenp_bef = *lenp;
84403 int oldval;
84404 int rc;
84405 diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
84406 index 5bb8bfe..a38ec05 100644
84407 --- a/ipc/mq_sysctl.c
84408 +++ b/ipc/mq_sysctl.c
84409 @@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
84410 static int proc_mq_dointvec(ctl_table *table, int write,
84411 void __user *buffer, size_t *lenp, loff_t *ppos)
84412 {
84413 - struct ctl_table mq_table;
84414 + ctl_table_no_const mq_table;
84415 memcpy(&mq_table, table, sizeof(mq_table));
84416 mq_table.data = get_mq(table);
84417
84418 @@ -35,7 +35,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
84419 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
84420 void __user *buffer, size_t *lenp, loff_t *ppos)
84421 {
84422 - struct ctl_table mq_table;
84423 + ctl_table_no_const mq_table;
84424 memcpy(&mq_table, table, sizeof(mq_table));
84425 mq_table.data = get_mq(table);
84426
84427 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
84428 index b8d4aed..96a4fe8 100644
84429 --- a/ipc/mqueue.c
84430 +++ b/ipc/mqueue.c
84431 @@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
84432 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
84433 info->attr.mq_msgsize);
84434
84435 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
84436 spin_lock(&mq_lock);
84437 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
84438 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
84439 diff --git a/ipc/msg.c b/ipc/msg.c
84440 index 52770bf..1c60a6f 100644
84441 --- a/ipc/msg.c
84442 +++ b/ipc/msg.c
84443 @@ -297,18 +297,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
84444 return security_msg_queue_associate(msq, msgflg);
84445 }
84446
84447 +static struct ipc_ops msg_ops = {
84448 + .getnew = newque,
84449 + .associate = msg_security,
84450 + .more_checks = NULL
84451 +};
84452 +
84453 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
84454 {
84455 struct ipc_namespace *ns;
84456 - struct ipc_ops msg_ops;
84457 struct ipc_params msg_params;
84458
84459 ns = current->nsproxy->ipc_ns;
84460
84461 - msg_ops.getnew = newque;
84462 - msg_ops.associate = msg_security;
84463 - msg_ops.more_checks = NULL;
84464 -
84465 msg_params.key = key;
84466 msg_params.flg = msgflg;
84467
84468 diff --git a/ipc/sem.c b/ipc/sem.c
84469 index db9d241..bc8427c 100644
84470 --- a/ipc/sem.c
84471 +++ b/ipc/sem.c
84472 @@ -562,10 +562,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
84473 return 0;
84474 }
84475
84476 +static struct ipc_ops sem_ops = {
84477 + .getnew = newary,
84478 + .associate = sem_security,
84479 + .more_checks = sem_more_checks
84480 +};
84481 +
84482 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
84483 {
84484 struct ipc_namespace *ns;
84485 - struct ipc_ops sem_ops;
84486 struct ipc_params sem_params;
84487
84488 ns = current->nsproxy->ipc_ns;
84489 @@ -573,10 +578,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
84490 if (nsems < 0 || nsems > ns->sc_semmsl)
84491 return -EINVAL;
84492
84493 - sem_ops.getnew = newary;
84494 - sem_ops.associate = sem_security;
84495 - sem_ops.more_checks = sem_more_checks;
84496 -
84497 sem_params.key = key;
84498 sem_params.flg = semflg;
84499 sem_params.u.nsems = nsems;
84500 diff --git a/ipc/shm.c b/ipc/shm.c
84501 index 7a51443..3a257d8 100644
84502 --- a/ipc/shm.c
84503 +++ b/ipc/shm.c
84504 @@ -72,6 +72,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
84505 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
84506 #endif
84507
84508 +#ifdef CONFIG_GRKERNSEC
84509 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84510 + const time_t shm_createtime, const kuid_t cuid,
84511 + const int shmid);
84512 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84513 + const time_t shm_createtime);
84514 +#endif
84515 +
84516 void shm_init_ns(struct ipc_namespace *ns)
84517 {
84518 ns->shm_ctlmax = SHMMAX;
84519 @@ -554,6 +562,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
84520 shp->shm_lprid = 0;
84521 shp->shm_atim = shp->shm_dtim = 0;
84522 shp->shm_ctim = get_seconds();
84523 +#ifdef CONFIG_GRKERNSEC
84524 + {
84525 + struct timespec timeval;
84526 + do_posix_clock_monotonic_gettime(&timeval);
84527 +
84528 + shp->shm_createtime = timeval.tv_sec;
84529 + }
84530 +#endif
84531 shp->shm_segsz = size;
84532 shp->shm_nattch = 0;
84533 shp->shm_file = file;
84534 @@ -607,18 +623,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
84535 return 0;
84536 }
84537
84538 +static struct ipc_ops shm_ops = {
84539 + .getnew = newseg,
84540 + .associate = shm_security,
84541 + .more_checks = shm_more_checks
84542 +};
84543 +
84544 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
84545 {
84546 struct ipc_namespace *ns;
84547 - struct ipc_ops shm_ops;
84548 struct ipc_params shm_params;
84549
84550 ns = current->nsproxy->ipc_ns;
84551
84552 - shm_ops.getnew = newseg;
84553 - shm_ops.associate = shm_security;
84554 - shm_ops.more_checks = shm_more_checks;
84555 -
84556 shm_params.key = key;
84557 shm_params.flg = shmflg;
84558 shm_params.u.size = size;
84559 @@ -1089,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
84560 f_mode = FMODE_READ | FMODE_WRITE;
84561 }
84562 if (shmflg & SHM_EXEC) {
84563 +
84564 +#ifdef CONFIG_PAX_MPROTECT
84565 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
84566 + goto out;
84567 +#endif
84568 +
84569 prot |= PROT_EXEC;
84570 acc_mode |= S_IXUGO;
84571 }
84572 @@ -1113,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
84573 if (err)
84574 goto out_unlock;
84575
84576 +#ifdef CONFIG_GRKERNSEC
84577 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
84578 + shp->shm_perm.cuid, shmid) ||
84579 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
84580 + err = -EACCES;
84581 + goto out_unlock;
84582 + }
84583 +#endif
84584 +
84585 ipc_lock_object(&shp->shm_perm);
84586
84587 /* check if shm_destroy() is tearing down shp */
84588 @@ -1125,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
84589 path = shp->shm_file->f_path;
84590 path_get(&path);
84591 shp->shm_nattch++;
84592 +#ifdef CONFIG_GRKERNSEC
84593 + shp->shm_lapid = current->pid;
84594 +#endif
84595 size = i_size_read(path.dentry->d_inode);
84596 ipc_unlock_object(&shp->shm_perm);
84597 rcu_read_unlock();
84598 diff --git a/ipc/util.c b/ipc/util.c
84599 index 3ae17a4..d67c32f 100644
84600 --- a/ipc/util.c
84601 +++ b/ipc/util.c
84602 @@ -71,6 +71,8 @@ struct ipc_proc_iface {
84603 int (*show)(struct seq_file *, void *);
84604 };
84605
84606 +extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
84607 +
84608 static void ipc_memory_notifier(struct work_struct *work)
84609 {
84610 ipcns_notify(IPCNS_MEMCHANGED);
84611 @@ -558,6 +560,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
84612 granted_mode >>= 6;
84613 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
84614 granted_mode >>= 3;
84615 +
84616 + if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
84617 + return -1;
84618 +
84619 /* is there some bit set in requested_mode but not in granted_mode? */
84620 if ((requested_mode & ~granted_mode & 0007) &&
84621 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
84622 diff --git a/kernel/acct.c b/kernel/acct.c
84623 index 8d6e145..33e0b1e 100644
84624 --- a/kernel/acct.c
84625 +++ b/kernel/acct.c
84626 @@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
84627 */
84628 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
84629 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
84630 - file->f_op->write(file, (char *)&ac,
84631 + file->f_op->write(file, (char __force_user *)&ac,
84632 sizeof(acct_t), &file->f_pos);
84633 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
84634 set_fs(fs);
84635 diff --git a/kernel/audit.c b/kernel/audit.c
84636 index 15ec13a..986322e 100644
84637 --- a/kernel/audit.c
84638 +++ b/kernel/audit.c
84639 @@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
84640 3) suppressed due to audit_rate_limit
84641 4) suppressed due to audit_backlog_limit
84642 */
84643 -static atomic_t audit_lost = ATOMIC_INIT(0);
84644 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
84645
84646 /* The netlink socket. */
84647 static struct sock *audit_sock;
84648 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
84649 unsigned long now;
84650 int print;
84651
84652 - atomic_inc(&audit_lost);
84653 + atomic_inc_unchecked(&audit_lost);
84654
84655 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
84656
84657 @@ -270,7 +270,7 @@ void audit_log_lost(const char *message)
84658 printk(KERN_WARNING
84659 "audit: audit_lost=%d audit_rate_limit=%d "
84660 "audit_backlog_limit=%d\n",
84661 - atomic_read(&audit_lost),
84662 + atomic_read_unchecked(&audit_lost),
84663 audit_rate_limit,
84664 audit_backlog_limit);
84665 audit_panic(message);
84666 @@ -766,7 +766,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
84667 status_set.pid = audit_pid;
84668 status_set.rate_limit = audit_rate_limit;
84669 status_set.backlog_limit = audit_backlog_limit;
84670 - status_set.lost = atomic_read(&audit_lost);
84671 + status_set.lost = atomic_read_unchecked(&audit_lost);
84672 status_set.backlog = skb_queue_len(&audit_skb_queue);
84673 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
84674 &status_set, sizeof(status_set));
84675 @@ -1359,7 +1359,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
84676 int i, avail, new_len;
84677 unsigned char *ptr;
84678 struct sk_buff *skb;
84679 - static const unsigned char *hex = "0123456789ABCDEF";
84680 + static const unsigned char hex[] = "0123456789ABCDEF";
84681
84682 if (!ab)
84683 return;
84684 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
84685 index ff32843..27fc708 100644
84686 --- a/kernel/auditsc.c
84687 +++ b/kernel/auditsc.c
84688 @@ -1945,7 +1945,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
84689 }
84690
84691 /* global counter which is incremented every time something logs in */
84692 -static atomic_t session_id = ATOMIC_INIT(0);
84693 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
84694
84695 static int audit_set_loginuid_perm(kuid_t loginuid)
84696 {
84697 @@ -2011,7 +2011,7 @@ int audit_set_loginuid(kuid_t loginuid)
84698
84699 /* are we setting or clearing? */
84700 if (uid_valid(loginuid))
84701 - sessionid = atomic_inc_return(&session_id);
84702 + sessionid = atomic_inc_return_unchecked(&session_id);
84703
84704 task->sessionid = sessionid;
84705 task->loginuid = loginuid;
84706 diff --git a/kernel/capability.c b/kernel/capability.c
84707 index 4e66bf9..cdccecf 100644
84708 --- a/kernel/capability.c
84709 +++ b/kernel/capability.c
84710 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
84711 * before modification is attempted and the application
84712 * fails.
84713 */
84714 + if (tocopy > ARRAY_SIZE(kdata))
84715 + return -EFAULT;
84716 +
84717 if (copy_to_user(dataptr, kdata, tocopy
84718 * sizeof(struct __user_cap_data_struct))) {
84719 return -EFAULT;
84720 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
84721 int ret;
84722
84723 rcu_read_lock();
84724 - ret = security_capable(__task_cred(t), ns, cap);
84725 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
84726 + gr_task_is_capable(t, __task_cred(t), cap);
84727 rcu_read_unlock();
84728
84729 - return (ret == 0);
84730 + return ret;
84731 }
84732
84733 /**
84734 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
84735 int ret;
84736
84737 rcu_read_lock();
84738 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
84739 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
84740 rcu_read_unlock();
84741
84742 - return (ret == 0);
84743 + return ret;
84744 }
84745
84746 /**
84747 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
84748 BUG();
84749 }
84750
84751 - if (security_capable(current_cred(), ns, cap) == 0) {
84752 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
84753 current->flags |= PF_SUPERPRIV;
84754 return true;
84755 }
84756 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
84757 }
84758 EXPORT_SYMBOL(ns_capable);
84759
84760 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
84761 +{
84762 + if (unlikely(!cap_valid(cap))) {
84763 + printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
84764 + BUG();
84765 + }
84766 +
84767 + if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
84768 + current->flags |= PF_SUPERPRIV;
84769 + return true;
84770 + }
84771 + return false;
84772 +}
84773 +EXPORT_SYMBOL(ns_capable_nolog);
84774 +
84775 /**
84776 * file_ns_capable - Determine if the file's opener had a capability in effect
84777 * @file: The file we want to check
84778 @@ -432,6 +451,12 @@ bool capable(int cap)
84779 }
84780 EXPORT_SYMBOL(capable);
84781
84782 +bool capable_nolog(int cap)
84783 +{
84784 + return ns_capable_nolog(&init_user_ns, cap);
84785 +}
84786 +EXPORT_SYMBOL(capable_nolog);
84787 +
84788 /**
84789 * inode_capable - Check superior capability over inode
84790 * @inode: The inode in question
84791 @@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
84792 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
84793 }
84794 EXPORT_SYMBOL(inode_capable);
84795 +
84796 +bool inode_capable_nolog(const struct inode *inode, int cap)
84797 +{
84798 + struct user_namespace *ns = current_user_ns();
84799 +
84800 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
84801 +}
84802 +EXPORT_SYMBOL(inode_capable_nolog);
84803 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
84804 index 271acd8..54b70fe 100644
84805 --- a/kernel/cgroup.c
84806 +++ b/kernel/cgroup.c
84807 @@ -5609,7 +5609,7 @@ static int cgroup_css_links_read(struct cgroup_subsys_state *css,
84808 struct css_set *cset = link->cset;
84809 struct task_struct *task;
84810 int count = 0;
84811 - seq_printf(seq, "css_set %p\n", cset);
84812 + seq_printf(seq, "css_set %pK\n", cset);
84813 list_for_each_entry(task, &cset->tasks, cg_list) {
84814 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
84815 seq_puts(seq, " ...\n");
84816 diff --git a/kernel/compat.c b/kernel/compat.c
84817 index 0a09e48..b46b3d78 100644
84818 --- a/kernel/compat.c
84819 +++ b/kernel/compat.c
84820 @@ -13,6 +13,7 @@
84821
84822 #include <linux/linkage.h>
84823 #include <linux/compat.h>
84824 +#include <linux/module.h>
84825 #include <linux/errno.h>
84826 #include <linux/time.h>
84827 #include <linux/signal.h>
84828 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
84829 mm_segment_t oldfs;
84830 long ret;
84831
84832 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
84833 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
84834 oldfs = get_fs();
84835 set_fs(KERNEL_DS);
84836 ret = hrtimer_nanosleep_restart(restart);
84837 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
84838 oldfs = get_fs();
84839 set_fs(KERNEL_DS);
84840 ret = hrtimer_nanosleep(&tu,
84841 - rmtp ? (struct timespec __user *)&rmt : NULL,
84842 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
84843 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
84844 set_fs(oldfs);
84845
84846 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
84847 mm_segment_t old_fs = get_fs();
84848
84849 set_fs(KERNEL_DS);
84850 - ret = sys_sigpending((old_sigset_t __user *) &s);
84851 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
84852 set_fs(old_fs);
84853 if (ret == 0)
84854 ret = put_user(s, set);
84855 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
84856 mm_segment_t old_fs = get_fs();
84857
84858 set_fs(KERNEL_DS);
84859 - ret = sys_old_getrlimit(resource, &r);
84860 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
84861 set_fs(old_fs);
84862
84863 if (!ret) {
84864 @@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
84865 set_fs (KERNEL_DS);
84866 ret = sys_wait4(pid,
84867 (stat_addr ?
84868 - (unsigned int __user *) &status : NULL),
84869 - options, (struct rusage __user *) &r);
84870 + (unsigned int __force_user *) &status : NULL),
84871 + options, (struct rusage __force_user *) &r);
84872 set_fs (old_fs);
84873
84874 if (ret > 0) {
84875 @@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
84876 memset(&info, 0, sizeof(info));
84877
84878 set_fs(KERNEL_DS);
84879 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
84880 - uru ? (struct rusage __user *)&ru : NULL);
84881 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
84882 + uru ? (struct rusage __force_user *)&ru : NULL);
84883 set_fs(old_fs);
84884
84885 if ((ret < 0) || (info.si_signo == 0))
84886 @@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
84887 oldfs = get_fs();
84888 set_fs(KERNEL_DS);
84889 err = sys_timer_settime(timer_id, flags,
84890 - (struct itimerspec __user *) &newts,
84891 - (struct itimerspec __user *) &oldts);
84892 + (struct itimerspec __force_user *) &newts,
84893 + (struct itimerspec __force_user *) &oldts);
84894 set_fs(oldfs);
84895 if (!err && old && put_compat_itimerspec(old, &oldts))
84896 return -EFAULT;
84897 @@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
84898 oldfs = get_fs();
84899 set_fs(KERNEL_DS);
84900 err = sys_timer_gettime(timer_id,
84901 - (struct itimerspec __user *) &ts);
84902 + (struct itimerspec __force_user *) &ts);
84903 set_fs(oldfs);
84904 if (!err && put_compat_itimerspec(setting, &ts))
84905 return -EFAULT;
84906 @@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
84907 oldfs = get_fs();
84908 set_fs(KERNEL_DS);
84909 err = sys_clock_settime(which_clock,
84910 - (struct timespec __user *) &ts);
84911 + (struct timespec __force_user *) &ts);
84912 set_fs(oldfs);
84913 return err;
84914 }
84915 @@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
84916 oldfs = get_fs();
84917 set_fs(KERNEL_DS);
84918 err = sys_clock_gettime(which_clock,
84919 - (struct timespec __user *) &ts);
84920 + (struct timespec __force_user *) &ts);
84921 set_fs(oldfs);
84922 if (!err && put_compat_timespec(&ts, tp))
84923 return -EFAULT;
84924 @@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
84925
84926 oldfs = get_fs();
84927 set_fs(KERNEL_DS);
84928 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
84929 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
84930 set_fs(oldfs);
84931
84932 err = compat_put_timex(utp, &txc);
84933 @@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
84934 oldfs = get_fs();
84935 set_fs(KERNEL_DS);
84936 err = sys_clock_getres(which_clock,
84937 - (struct timespec __user *) &ts);
84938 + (struct timespec __force_user *) &ts);
84939 set_fs(oldfs);
84940 if (!err && tp && put_compat_timespec(&ts, tp))
84941 return -EFAULT;
84942 @@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
84943 long err;
84944 mm_segment_t oldfs;
84945 struct timespec tu;
84946 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
84947 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
84948
84949 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
84950 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
84951 oldfs = get_fs();
84952 set_fs(KERNEL_DS);
84953 err = clock_nanosleep_restart(restart);
84954 @@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
84955 oldfs = get_fs();
84956 set_fs(KERNEL_DS);
84957 err = sys_clock_nanosleep(which_clock, flags,
84958 - (struct timespec __user *) &in,
84959 - (struct timespec __user *) &out);
84960 + (struct timespec __force_user *) &in,
84961 + (struct timespec __force_user *) &out);
84962 set_fs(oldfs);
84963
84964 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
84965 @@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
84966 mm_segment_t old_fs = get_fs();
84967
84968 set_fs(KERNEL_DS);
84969 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
84970 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
84971 set_fs(old_fs);
84972 if (put_compat_timespec(&t, interval))
84973 return -EFAULT;
84974 diff --git a/kernel/configs.c b/kernel/configs.c
84975 index c18b1f1..b9a0132 100644
84976 --- a/kernel/configs.c
84977 +++ b/kernel/configs.c
84978 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
84979 struct proc_dir_entry *entry;
84980
84981 /* create the current config file */
84982 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
84983 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
84984 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
84985 + &ikconfig_file_ops);
84986 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84987 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
84988 + &ikconfig_file_ops);
84989 +#endif
84990 +#else
84991 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
84992 &ikconfig_file_ops);
84993 +#endif
84994 +
84995 if (!entry)
84996 return -ENOMEM;
84997
84998 diff --git a/kernel/cred.c b/kernel/cred.c
84999 index e0573a4..3874e41 100644
85000 --- a/kernel/cred.c
85001 +++ b/kernel/cred.c
85002 @@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
85003 validate_creds(cred);
85004 alter_cred_subscribers(cred, -1);
85005 put_cred(cred);
85006 +
85007 +#ifdef CONFIG_GRKERNSEC_SETXID
85008 + cred = (struct cred *) tsk->delayed_cred;
85009 + if (cred != NULL) {
85010 + tsk->delayed_cred = NULL;
85011 + validate_creds(cred);
85012 + alter_cred_subscribers(cred, -1);
85013 + put_cred(cred);
85014 + }
85015 +#endif
85016 }
85017
85018 /**
85019 @@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
85020 * Always returns 0 thus allowing this function to be tail-called at the end
85021 * of, say, sys_setgid().
85022 */
85023 -int commit_creds(struct cred *new)
85024 +static int __commit_creds(struct cred *new)
85025 {
85026 struct task_struct *task = current;
85027 const struct cred *old = task->real_cred;
85028 @@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
85029
85030 get_cred(new); /* we will require a ref for the subj creds too */
85031
85032 + gr_set_role_label(task, new->uid, new->gid);
85033 +
85034 /* dumpability changes */
85035 if (!uid_eq(old->euid, new->euid) ||
85036 !gid_eq(old->egid, new->egid) ||
85037 @@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
85038 put_cred(old);
85039 return 0;
85040 }
85041 +#ifdef CONFIG_GRKERNSEC_SETXID
85042 +extern int set_user(struct cred *new);
85043 +
85044 +void gr_delayed_cred_worker(void)
85045 +{
85046 + const struct cred *new = current->delayed_cred;
85047 + struct cred *ncred;
85048 +
85049 + current->delayed_cred = NULL;
85050 +
85051 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
85052 + // from doing get_cred on it when queueing this
85053 + put_cred(new);
85054 + return;
85055 + } else if (new == NULL)
85056 + return;
85057 +
85058 + ncred = prepare_creds();
85059 + if (!ncred)
85060 + goto die;
85061 + // uids
85062 + ncred->uid = new->uid;
85063 + ncred->euid = new->euid;
85064 + ncred->suid = new->suid;
85065 + ncred->fsuid = new->fsuid;
85066 + // gids
85067 + ncred->gid = new->gid;
85068 + ncred->egid = new->egid;
85069 + ncred->sgid = new->sgid;
85070 + ncred->fsgid = new->fsgid;
85071 + // groups
85072 + if (set_groups(ncred, new->group_info) < 0) {
85073 + abort_creds(ncred);
85074 + goto die;
85075 + }
85076 + // caps
85077 + ncred->securebits = new->securebits;
85078 + ncred->cap_inheritable = new->cap_inheritable;
85079 + ncred->cap_permitted = new->cap_permitted;
85080 + ncred->cap_effective = new->cap_effective;
85081 + ncred->cap_bset = new->cap_bset;
85082 +
85083 + if (set_user(ncred)) {
85084 + abort_creds(ncred);
85085 + goto die;
85086 + }
85087 +
85088 + // from doing get_cred on it when queueing this
85089 + put_cred(new);
85090 +
85091 + __commit_creds(ncred);
85092 + return;
85093 +die:
85094 + // from doing get_cred on it when queueing this
85095 + put_cred(new);
85096 + do_group_exit(SIGKILL);
85097 +}
85098 +#endif
85099 +
85100 +int commit_creds(struct cred *new)
85101 +{
85102 +#ifdef CONFIG_GRKERNSEC_SETXID
85103 + int ret;
85104 + int schedule_it = 0;
85105 + struct task_struct *t;
85106 +
85107 + /* we won't get called with tasklist_lock held for writing
85108 + and interrupts disabled as the cred struct in that case is
85109 + init_cred
85110 + */
85111 + if (grsec_enable_setxid && !current_is_single_threaded() &&
85112 + uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
85113 + !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
85114 + schedule_it = 1;
85115 + }
85116 + ret = __commit_creds(new);
85117 + if (schedule_it) {
85118 + rcu_read_lock();
85119 + read_lock(&tasklist_lock);
85120 + for (t = next_thread(current); t != current;
85121 + t = next_thread(t)) {
85122 + if (t->delayed_cred == NULL) {
85123 + t->delayed_cred = get_cred(new);
85124 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
85125 + set_tsk_need_resched(t);
85126 + }
85127 + }
85128 + read_unlock(&tasklist_lock);
85129 + rcu_read_unlock();
85130 + }
85131 + return ret;
85132 +#else
85133 + return __commit_creds(new);
85134 +#endif
85135 +}
85136 +
85137 EXPORT_SYMBOL(commit_creds);
85138
85139 /**
85140 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
85141 index 7d2f35e..1bafcd0 100644
85142 --- a/kernel/debug/debug_core.c
85143 +++ b/kernel/debug/debug_core.c
85144 @@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
85145 */
85146 static atomic_t masters_in_kgdb;
85147 static atomic_t slaves_in_kgdb;
85148 -static atomic_t kgdb_break_tasklet_var;
85149 +static atomic_unchecked_t kgdb_break_tasklet_var;
85150 atomic_t kgdb_setting_breakpoint;
85151
85152 struct task_struct *kgdb_usethread;
85153 @@ -133,7 +133,7 @@ int kgdb_single_step;
85154 static pid_t kgdb_sstep_pid;
85155
85156 /* to keep track of the CPU which is doing the single stepping*/
85157 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
85158 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
85159
85160 /*
85161 * If you are debugging a problem where roundup (the collection of
85162 @@ -541,7 +541,7 @@ return_normal:
85163 * kernel will only try for the value of sstep_tries before
85164 * giving up and continuing on.
85165 */
85166 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
85167 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
85168 (kgdb_info[cpu].task &&
85169 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
85170 atomic_set(&kgdb_active, -1);
85171 @@ -639,8 +639,8 @@ cpu_master_loop:
85172 }
85173
85174 kgdb_restore:
85175 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
85176 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
85177 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
85178 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
85179 if (kgdb_info[sstep_cpu].task)
85180 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
85181 else
85182 @@ -916,18 +916,18 @@ static void kgdb_unregister_callbacks(void)
85183 static void kgdb_tasklet_bpt(unsigned long ing)
85184 {
85185 kgdb_breakpoint();
85186 - atomic_set(&kgdb_break_tasklet_var, 0);
85187 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
85188 }
85189
85190 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
85191
85192 void kgdb_schedule_breakpoint(void)
85193 {
85194 - if (atomic_read(&kgdb_break_tasklet_var) ||
85195 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
85196 atomic_read(&kgdb_active) != -1 ||
85197 atomic_read(&kgdb_setting_breakpoint))
85198 return;
85199 - atomic_inc(&kgdb_break_tasklet_var);
85200 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
85201 tasklet_schedule(&kgdb_tasklet_breakpoint);
85202 }
85203 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
85204 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
85205 index 0b097c8..11dd5c5 100644
85206 --- a/kernel/debug/kdb/kdb_main.c
85207 +++ b/kernel/debug/kdb/kdb_main.c
85208 @@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
85209 continue;
85210
85211 kdb_printf("%-20s%8u 0x%p ", mod->name,
85212 - mod->core_size, (void *)mod);
85213 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
85214 #ifdef CONFIG_MODULE_UNLOAD
85215 kdb_printf("%4ld ", module_refcount(mod));
85216 #endif
85217 @@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
85218 kdb_printf(" (Loading)");
85219 else
85220 kdb_printf(" (Live)");
85221 - kdb_printf(" 0x%p", mod->module_core);
85222 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
85223
85224 #ifdef CONFIG_MODULE_UNLOAD
85225 {
85226 diff --git a/kernel/events/core.c b/kernel/events/core.c
85227 index 6ed1163..f36346e 100644
85228 --- a/kernel/events/core.c
85229 +++ b/kernel/events/core.c
85230 @@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
85231 * 0 - disallow raw tracepoint access for unpriv
85232 * 1 - disallow cpu events for unpriv
85233 * 2 - disallow kernel profiling for unpriv
85234 + * 3 - disallow all unpriv perf event use
85235 */
85236 -int sysctl_perf_event_paranoid __read_mostly = 1;
85237 +#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
85238 +int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
85239 +#elif defined(CONFIG_GRKERNSEC_HIDESYM)
85240 +int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
85241 +#else
85242 +int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
85243 +#endif
85244
85245 /* Minimum for 512 kiB + 1 user control page */
85246 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
85247 @@ -184,7 +191,7 @@ void update_perf_cpu_limits(void)
85248
85249 tmp *= sysctl_perf_cpu_time_max_percent;
85250 do_div(tmp, 100);
85251 - ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
85252 + ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
85253 }
85254
85255 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
85256 @@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_len_ns)
85257 update_perf_cpu_limits();
85258 }
85259
85260 -static atomic64_t perf_event_id;
85261 +static atomic64_unchecked_t perf_event_id;
85262
85263 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
85264 enum event_type_t event_type);
85265 @@ -2985,7 +2992,7 @@ static void __perf_event_read(void *info)
85266
85267 static inline u64 perf_event_count(struct perf_event *event)
85268 {
85269 - return local64_read(&event->count) + atomic64_read(&event->child_count);
85270 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
85271 }
85272
85273 static u64 perf_event_read(struct perf_event *event)
85274 @@ -3353,9 +3360,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
85275 mutex_lock(&event->child_mutex);
85276 total += perf_event_read(event);
85277 *enabled += event->total_time_enabled +
85278 - atomic64_read(&event->child_total_time_enabled);
85279 + atomic64_read_unchecked(&event->child_total_time_enabled);
85280 *running += event->total_time_running +
85281 - atomic64_read(&event->child_total_time_running);
85282 + atomic64_read_unchecked(&event->child_total_time_running);
85283
85284 list_for_each_entry(child, &event->child_list, child_list) {
85285 total += perf_event_read(child);
85286 @@ -3770,10 +3777,10 @@ void perf_event_update_userpage(struct perf_event *event)
85287 userpg->offset -= local64_read(&event->hw.prev_count);
85288
85289 userpg->time_enabled = enabled +
85290 - atomic64_read(&event->child_total_time_enabled);
85291 + atomic64_read_unchecked(&event->child_total_time_enabled);
85292
85293 userpg->time_running = running +
85294 - atomic64_read(&event->child_total_time_running);
85295 + atomic64_read_unchecked(&event->child_total_time_running);
85296
85297 arch_perf_update_userpage(userpg, now);
85298
85299 @@ -4324,7 +4331,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
85300
85301 /* Data. */
85302 sp = perf_user_stack_pointer(regs);
85303 - rem = __output_copy_user(handle, (void *) sp, dump_size);
85304 + rem = __output_copy_user(handle, (void __user *) sp, dump_size);
85305 dyn_size = dump_size - rem;
85306
85307 perf_output_skip(handle, rem);
85308 @@ -4415,11 +4422,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
85309 values[n++] = perf_event_count(event);
85310 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
85311 values[n++] = enabled +
85312 - atomic64_read(&event->child_total_time_enabled);
85313 + atomic64_read_unchecked(&event->child_total_time_enabled);
85314 }
85315 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
85316 values[n++] = running +
85317 - atomic64_read(&event->child_total_time_running);
85318 + atomic64_read_unchecked(&event->child_total_time_running);
85319 }
85320 if (read_format & PERF_FORMAT_ID)
85321 values[n++] = primary_event_id(event);
85322 @@ -6686,7 +6693,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
85323 event->parent = parent_event;
85324
85325 event->ns = get_pid_ns(task_active_pid_ns(current));
85326 - event->id = atomic64_inc_return(&perf_event_id);
85327 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
85328
85329 event->state = PERF_EVENT_STATE_INACTIVE;
85330
85331 @@ -6985,6 +6992,11 @@ SYSCALL_DEFINE5(perf_event_open,
85332 if (flags & ~PERF_FLAG_ALL)
85333 return -EINVAL;
85334
85335 +#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
85336 + if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
85337 + return -EACCES;
85338 +#endif
85339 +
85340 err = perf_copy_attr(attr_uptr, &attr);
85341 if (err)
85342 return err;
85343 @@ -7316,10 +7328,10 @@ static void sync_child_event(struct perf_event *child_event,
85344 /*
85345 * Add back the child's count to the parent's count:
85346 */
85347 - atomic64_add(child_val, &parent_event->child_count);
85348 - atomic64_add(child_event->total_time_enabled,
85349 + atomic64_add_unchecked(child_val, &parent_event->child_count);
85350 + atomic64_add_unchecked(child_event->total_time_enabled,
85351 &parent_event->child_total_time_enabled);
85352 - atomic64_add(child_event->total_time_running,
85353 + atomic64_add_unchecked(child_event->total_time_running,
85354 &parent_event->child_total_time_running);
85355
85356 /*
85357 diff --git a/kernel/events/internal.h b/kernel/events/internal.h
85358 index 569b2187..19940d9 100644
85359 --- a/kernel/events/internal.h
85360 +++ b/kernel/events/internal.h
85361 @@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
85362 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
85363 }
85364
85365 -#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
85366 +#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
85367 static inline unsigned long \
85368 func_name(struct perf_output_handle *handle, \
85369 - const void *buf, unsigned long len) \
85370 + const void user *buf, unsigned long len) \
85371 { \
85372 unsigned long size, written; \
85373 \
85374 @@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
85375 return 0;
85376 }
85377
85378 -DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
85379 +DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
85380
85381 static inline unsigned long
85382 memcpy_skip(void *dst, const void *src, unsigned long n)
85383 @@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
85384 return 0;
85385 }
85386
85387 -DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
85388 +DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
85389
85390 #ifndef arch_perf_out_copy_user
85391 #define arch_perf_out_copy_user arch_perf_out_copy_user
85392 @@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
85393 }
85394 #endif
85395
85396 -DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
85397 +DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
85398
85399 /* Callchain handling */
85400 extern struct perf_callchain_entry *
85401 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
85402 index 24b7d6c..40cf797 100644
85403 --- a/kernel/events/uprobes.c
85404 +++ b/kernel/events/uprobes.c
85405 @@ -1640,7 +1640,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
85406 {
85407 struct page *page;
85408 uprobe_opcode_t opcode;
85409 - int result;
85410 + long result;
85411
85412 pagefault_disable();
85413 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
85414 diff --git a/kernel/exit.c b/kernel/exit.c
85415 index a949819..a5f127d 100644
85416 --- a/kernel/exit.c
85417 +++ b/kernel/exit.c
85418 @@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
85419 struct task_struct *leader;
85420 int zap_leader;
85421 repeat:
85422 +#ifdef CONFIG_NET
85423 + gr_del_task_from_ip_table(p);
85424 +#endif
85425 +
85426 /* don't need to get the RCU readlock here - the process is dead and
85427 * can't be modifying its own credentials. But shut RCU-lockdep up */
85428 rcu_read_lock();
85429 @@ -329,7 +333,7 @@ int allow_signal(int sig)
85430 * know it'll be handled, so that they don't get converted to
85431 * SIGKILL or just silently dropped.
85432 */
85433 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
85434 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
85435 recalc_sigpending();
85436 spin_unlock_irq(&current->sighand->siglock);
85437 return 0;
85438 @@ -698,6 +702,8 @@ void do_exit(long code)
85439 struct task_struct *tsk = current;
85440 int group_dead;
85441
85442 + set_fs(USER_DS);
85443 +
85444 profile_task_exit(tsk);
85445
85446 WARN_ON(blk_needs_flush_plug(tsk));
85447 @@ -714,7 +720,6 @@ void do_exit(long code)
85448 * mm_release()->clear_child_tid() from writing to a user-controlled
85449 * kernel address.
85450 */
85451 - set_fs(USER_DS);
85452
85453 ptrace_event(PTRACE_EVENT_EXIT, code);
85454
85455 @@ -773,6 +778,9 @@ void do_exit(long code)
85456 tsk->exit_code = code;
85457 taskstats_exit(tsk, group_dead);
85458
85459 + gr_acl_handle_psacct(tsk, code);
85460 + gr_acl_handle_exit();
85461 +
85462 exit_mm(tsk);
85463
85464 if (group_dead)
85465 @@ -894,7 +902,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
85466 * Take down every thread in the group. This is called by fatal signals
85467 * as well as by sys_exit_group (below).
85468 */
85469 -void
85470 +__noreturn void
85471 do_group_exit(int exit_code)
85472 {
85473 struct signal_struct *sig = current->signal;
85474 diff --git a/kernel/fork.c b/kernel/fork.c
85475 index dfa736c..d170f9b 100644
85476 --- a/kernel/fork.c
85477 +++ b/kernel/fork.c
85478 @@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
85479 *stackend = STACK_END_MAGIC; /* for overflow detection */
85480
85481 #ifdef CONFIG_CC_STACKPROTECTOR
85482 - tsk->stack_canary = get_random_int();
85483 + tsk->stack_canary = pax_get_random_long();
85484 #endif
85485
85486 /*
85487 @@ -345,12 +345,80 @@ free_tsk:
85488 }
85489
85490 #ifdef CONFIG_MMU
85491 -static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85492 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
85493 +{
85494 + struct vm_area_struct *tmp;
85495 + unsigned long charge;
85496 + struct file *file;
85497 + int retval;
85498 +
85499 + charge = 0;
85500 + if (mpnt->vm_flags & VM_ACCOUNT) {
85501 + unsigned long len = vma_pages(mpnt);
85502 +
85503 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
85504 + goto fail_nomem;
85505 + charge = len;
85506 + }
85507 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
85508 + if (!tmp)
85509 + goto fail_nomem;
85510 + *tmp = *mpnt;
85511 + tmp->vm_mm = mm;
85512 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
85513 + retval = vma_dup_policy(mpnt, tmp);
85514 + if (retval)
85515 + goto fail_nomem_policy;
85516 + if (anon_vma_fork(tmp, mpnt))
85517 + goto fail_nomem_anon_vma_fork;
85518 + tmp->vm_flags &= ~VM_LOCKED;
85519 + tmp->vm_next = tmp->vm_prev = NULL;
85520 + tmp->vm_mirror = NULL;
85521 + file = tmp->vm_file;
85522 + if (file) {
85523 + struct inode *inode = file_inode(file);
85524 + struct address_space *mapping = file->f_mapping;
85525 +
85526 + get_file(file);
85527 + if (tmp->vm_flags & VM_DENYWRITE)
85528 + atomic_dec(&inode->i_writecount);
85529 + mutex_lock(&mapping->i_mmap_mutex);
85530 + if (tmp->vm_flags & VM_SHARED)
85531 + mapping->i_mmap_writable++;
85532 + flush_dcache_mmap_lock(mapping);
85533 + /* insert tmp into the share list, just after mpnt */
85534 + if (unlikely(tmp->vm_flags & VM_NONLINEAR))
85535 + vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
85536 + else
85537 + vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
85538 + flush_dcache_mmap_unlock(mapping);
85539 + mutex_unlock(&mapping->i_mmap_mutex);
85540 + }
85541 +
85542 + /*
85543 + * Clear hugetlb-related page reserves for children. This only
85544 + * affects MAP_PRIVATE mappings. Faults generated by the child
85545 + * are not guaranteed to succeed, even if read-only
85546 + */
85547 + if (is_vm_hugetlb_page(tmp))
85548 + reset_vma_resv_huge_pages(tmp);
85549 +
85550 + return tmp;
85551 +
85552 +fail_nomem_anon_vma_fork:
85553 + mpol_put(vma_policy(tmp));
85554 +fail_nomem_policy:
85555 + kmem_cache_free(vm_area_cachep, tmp);
85556 +fail_nomem:
85557 + vm_unacct_memory(charge);
85558 + return NULL;
85559 +}
85560 +
85561 +static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85562 {
85563 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
85564 struct rb_node **rb_link, *rb_parent;
85565 int retval;
85566 - unsigned long charge;
85567
85568 uprobe_start_dup_mmap();
85569 down_write(&oldmm->mmap_sem);
85570 @@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85571
85572 prev = NULL;
85573 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
85574 - struct file *file;
85575 -
85576 if (mpnt->vm_flags & VM_DONTCOPY) {
85577 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
85578 -vma_pages(mpnt));
85579 continue;
85580 }
85581 - charge = 0;
85582 - if (mpnt->vm_flags & VM_ACCOUNT) {
85583 - unsigned long len = vma_pages(mpnt);
85584 -
85585 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
85586 - goto fail_nomem;
85587 - charge = len;
85588 - }
85589 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
85590 - if (!tmp)
85591 - goto fail_nomem;
85592 - *tmp = *mpnt;
85593 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
85594 - retval = vma_dup_policy(mpnt, tmp);
85595 - if (retval)
85596 - goto fail_nomem_policy;
85597 - tmp->vm_mm = mm;
85598 - if (anon_vma_fork(tmp, mpnt))
85599 - goto fail_nomem_anon_vma_fork;
85600 - tmp->vm_flags &= ~VM_LOCKED;
85601 - tmp->vm_next = tmp->vm_prev = NULL;
85602 - file = tmp->vm_file;
85603 - if (file) {
85604 - struct inode *inode = file_inode(file);
85605 - struct address_space *mapping = file->f_mapping;
85606 -
85607 - get_file(file);
85608 - if (tmp->vm_flags & VM_DENYWRITE)
85609 - atomic_dec(&inode->i_writecount);
85610 - mutex_lock(&mapping->i_mmap_mutex);
85611 - if (tmp->vm_flags & VM_SHARED)
85612 - mapping->i_mmap_writable++;
85613 - flush_dcache_mmap_lock(mapping);
85614 - /* insert tmp into the share list, just after mpnt */
85615 - if (unlikely(tmp->vm_flags & VM_NONLINEAR))
85616 - vma_nonlinear_insert(tmp,
85617 - &mapping->i_mmap_nonlinear);
85618 - else
85619 - vma_interval_tree_insert_after(tmp, mpnt,
85620 - &mapping->i_mmap);
85621 - flush_dcache_mmap_unlock(mapping);
85622 - mutex_unlock(&mapping->i_mmap_mutex);
85623 + tmp = dup_vma(mm, oldmm, mpnt);
85624 + if (!tmp) {
85625 + retval = -ENOMEM;
85626 + goto out;
85627 }
85628
85629 /*
85630 @@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85631 if (retval)
85632 goto out;
85633 }
85634 +
85635 +#ifdef CONFIG_PAX_SEGMEXEC
85636 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
85637 + struct vm_area_struct *mpnt_m;
85638 +
85639 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
85640 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
85641 +
85642 + if (!mpnt->vm_mirror)
85643 + continue;
85644 +
85645 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
85646 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
85647 + mpnt->vm_mirror = mpnt_m;
85648 + } else {
85649 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
85650 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
85651 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
85652 + mpnt->vm_mirror->vm_mirror = mpnt;
85653 + }
85654 + }
85655 + BUG_ON(mpnt_m);
85656 + }
85657 +#endif
85658 +
85659 /* a new mm has just been created */
85660 arch_dup_mmap(oldmm, mm);
85661 retval = 0;
85662 @@ -468,14 +521,6 @@ out:
85663 up_write(&oldmm->mmap_sem);
85664 uprobe_end_dup_mmap();
85665 return retval;
85666 -fail_nomem_anon_vma_fork:
85667 - mpol_put(vma_policy(tmp));
85668 -fail_nomem_policy:
85669 - kmem_cache_free(vm_area_cachep, tmp);
85670 -fail_nomem:
85671 - retval = -ENOMEM;
85672 - vm_unacct_memory(charge);
85673 - goto out;
85674 }
85675
85676 static inline int mm_alloc_pgd(struct mm_struct *mm)
85677 @@ -689,8 +734,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
85678 return ERR_PTR(err);
85679
85680 mm = get_task_mm(task);
85681 - if (mm && mm != current->mm &&
85682 - !ptrace_may_access(task, mode)) {
85683 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
85684 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
85685 mmput(mm);
85686 mm = ERR_PTR(-EACCES);
85687 }
85688 @@ -909,13 +954,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
85689 spin_unlock(&fs->lock);
85690 return -EAGAIN;
85691 }
85692 - fs->users++;
85693 + atomic_inc(&fs->users);
85694 spin_unlock(&fs->lock);
85695 return 0;
85696 }
85697 tsk->fs = copy_fs_struct(fs);
85698 if (!tsk->fs)
85699 return -ENOMEM;
85700 + /* Carry through gr_chroot_dentry and is_chrooted instead
85701 + of recomputing it here. Already copied when the task struct
85702 + is duplicated. This allows pivot_root to not be treated as
85703 + a chroot
85704 + */
85705 + //gr_set_chroot_entries(tsk, &tsk->fs->root);
85706 +
85707 return 0;
85708 }
85709
85710 @@ -1126,7 +1178,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
85711 * parts of the process environment (as per the clone
85712 * flags). The actual kick-off is left to the caller.
85713 */
85714 -static struct task_struct *copy_process(unsigned long clone_flags,
85715 +static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
85716 unsigned long stack_start,
85717 unsigned long stack_size,
85718 int __user *child_tidptr,
85719 @@ -1198,6 +1250,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
85720 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
85721 #endif
85722 retval = -EAGAIN;
85723 +
85724 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
85725 +
85726 if (atomic_read(&p->real_cred->user->processes) >=
85727 task_rlimit(p, RLIMIT_NPROC)) {
85728 if (p->real_cred->user != INIT_USER &&
85729 @@ -1446,6 +1501,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
85730 goto bad_fork_free_pid;
85731 }
85732
85733 + /* synchronizes with gr_set_acls()
85734 + we need to call this past the point of no return for fork()
85735 + */
85736 + gr_copy_label(p);
85737 +
85738 if (likely(p->pid)) {
85739 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
85740
85741 @@ -1532,6 +1592,8 @@ bad_fork_cleanup_count:
85742 bad_fork_free:
85743 free_task(p);
85744 fork_out:
85745 + gr_log_forkfail(retval);
85746 +
85747 return ERR_PTR(retval);
85748 }
85749
85750 @@ -1593,6 +1655,7 @@ long do_fork(unsigned long clone_flags,
85751
85752 p = copy_process(clone_flags, stack_start, stack_size,
85753 child_tidptr, NULL, trace);
85754 + add_latent_entropy();
85755 /*
85756 * Do this prior waking up the new thread - the thread pointer
85757 * might get invalid after that point, if the thread exits quickly.
85758 @@ -1607,6 +1670,8 @@ long do_fork(unsigned long clone_flags,
85759 if (clone_flags & CLONE_PARENT_SETTID)
85760 put_user(nr, parent_tidptr);
85761
85762 + gr_handle_brute_check();
85763 +
85764 if (clone_flags & CLONE_VFORK) {
85765 p->vfork_done = &vfork;
85766 init_completion(&vfork);
85767 @@ -1723,7 +1788,7 @@ void __init proc_caches_init(void)
85768 mm_cachep = kmem_cache_create("mm_struct",
85769 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
85770 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
85771 - vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
85772 + vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
85773 mmap_init();
85774 nsproxy_cache_init();
85775 }
85776 @@ -1763,7 +1828,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
85777 return 0;
85778
85779 /* don't need lock here; in the worst case we'll do useless copy */
85780 - if (fs->users == 1)
85781 + if (atomic_read(&fs->users) == 1)
85782 return 0;
85783
85784 *new_fsp = copy_fs_struct(fs);
85785 @@ -1870,7 +1935,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
85786 fs = current->fs;
85787 spin_lock(&fs->lock);
85788 current->fs = new_fs;
85789 - if (--fs->users)
85790 + gr_set_chroot_entries(current, &current->fs->root);
85791 + if (atomic_dec_return(&fs->users))
85792 new_fs = NULL;
85793 else
85794 new_fs = fs;
85795 diff --git a/kernel/futex.c b/kernel/futex.c
85796 index f6ff019..ac53307 100644
85797 --- a/kernel/futex.c
85798 +++ b/kernel/futex.c
85799 @@ -54,6 +54,7 @@
85800 #include <linux/mount.h>
85801 #include <linux/pagemap.h>
85802 #include <linux/syscalls.h>
85803 +#include <linux/ptrace.h>
85804 #include <linux/signal.h>
85805 #include <linux/export.h>
85806 #include <linux/magic.h>
85807 @@ -243,6 +244,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
85808 struct page *page, *page_head;
85809 int err, ro = 0;
85810
85811 +#ifdef CONFIG_PAX_SEGMEXEC
85812 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
85813 + return -EFAULT;
85814 +#endif
85815 +
85816 /*
85817 * The futex address must be "naturally" aligned.
85818 */
85819 @@ -442,7 +448,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
85820
85821 static int get_futex_value_locked(u32 *dest, u32 __user *from)
85822 {
85823 - int ret;
85824 + unsigned long ret;
85825
85826 pagefault_disable();
85827 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
85828 @@ -2735,6 +2741,7 @@ static int __init futex_init(void)
85829 {
85830 u32 curval;
85831 int i;
85832 + mm_segment_t oldfs;
85833
85834 /*
85835 * This will fail and we want it. Some arch implementations do
85836 @@ -2746,8 +2753,11 @@ static int __init futex_init(void)
85837 * implementation, the non-functional ones will return
85838 * -ENOSYS.
85839 */
85840 + oldfs = get_fs();
85841 + set_fs(USER_DS);
85842 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
85843 futex_cmpxchg_enabled = 1;
85844 + set_fs(oldfs);
85845
85846 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
85847 plist_head_init(&futex_queues[i].chain);
85848 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
85849 index f9f44fd..29885e4 100644
85850 --- a/kernel/futex_compat.c
85851 +++ b/kernel/futex_compat.c
85852 @@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
85853 return 0;
85854 }
85855
85856 -static void __user *futex_uaddr(struct robust_list __user *entry,
85857 +static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
85858 compat_long_t futex_offset)
85859 {
85860 compat_uptr_t base = ptr_to_compat(entry);
85861 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
85862 index f45b75b..bfac6d5 100644
85863 --- a/kernel/gcov/base.c
85864 +++ b/kernel/gcov/base.c
85865 @@ -108,11 +108,6 @@ void gcov_enable_events(void)
85866 }
85867
85868 #ifdef CONFIG_MODULES
85869 -static inline int within(void *addr, void *start, unsigned long size)
85870 -{
85871 - return ((addr >= start) && (addr < start + size));
85872 -}
85873 -
85874 /* Update list and generate events when modules are unloaded. */
85875 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
85876 void *data)
85877 @@ -127,7 +122,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
85878
85879 /* Remove entries located in module from linked list. */
85880 while ((info = gcov_info_next(info))) {
85881 - if (within(info, mod->module_core, mod->core_size)) {
85882 + if (within_module_core_rw((unsigned long)info, mod)) {
85883 gcov_info_unlink(prev, info);
85884 if (gcov_events_enabled)
85885 gcov_event(GCOV_REMOVE, info);
85886 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
85887 index 383319b..56ebb13 100644
85888 --- a/kernel/hrtimer.c
85889 +++ b/kernel/hrtimer.c
85890 @@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
85891 local_irq_restore(flags);
85892 }
85893
85894 -static void run_hrtimer_softirq(struct softirq_action *h)
85895 +static __latent_entropy void run_hrtimer_softirq(void)
85896 {
85897 hrtimer_peek_ahead_timers();
85898 }
85899 diff --git a/kernel/irq_work.c b/kernel/irq_work.c
85900 index 55fcce6..0e4cf34 100644
85901 --- a/kernel/irq_work.c
85902 +++ b/kernel/irq_work.c
85903 @@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
85904 return NOTIFY_OK;
85905 }
85906
85907 -static struct notifier_block cpu_notify;
85908 +static struct notifier_block cpu_notify = {
85909 + .notifier_call = irq_work_cpu_notify,
85910 + .priority = 0,
85911 +};
85912
85913 static __init int irq_work_init_cpu_notifier(void)
85914 {
85915 - cpu_notify.notifier_call = irq_work_cpu_notify;
85916 - cpu_notify.priority = 0;
85917 register_cpu_notifier(&cpu_notify);
85918 return 0;
85919 }
85920 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
85921 index 9019f15..9a3c42e 100644
85922 --- a/kernel/jump_label.c
85923 +++ b/kernel/jump_label.c
85924 @@ -14,6 +14,7 @@
85925 #include <linux/err.h>
85926 #include <linux/static_key.h>
85927 #include <linux/jump_label_ratelimit.h>
85928 +#include <linux/mm.h>
85929
85930 #ifdef HAVE_JUMP_LABEL
85931
85932 @@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
85933
85934 size = (((unsigned long)stop - (unsigned long)start)
85935 / sizeof(struct jump_entry));
85936 + pax_open_kernel();
85937 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
85938 + pax_close_kernel();
85939 }
85940
85941 static void jump_label_update(struct static_key *key, int enable);
85942 @@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
85943 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
85944 struct jump_entry *iter;
85945
85946 + pax_open_kernel();
85947 for (iter = iter_start; iter < iter_stop; iter++) {
85948 if (within_module_init(iter->code, mod))
85949 iter->code = 0;
85950 }
85951 + pax_close_kernel();
85952 }
85953
85954 static int
85955 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
85956 index 3127ad5..159d880 100644
85957 --- a/kernel/kallsyms.c
85958 +++ b/kernel/kallsyms.c
85959 @@ -11,6 +11,9 @@
85960 * Changed the compression method from stem compression to "table lookup"
85961 * compression (see scripts/kallsyms.c for a more complete description)
85962 */
85963 +#ifdef CONFIG_GRKERNSEC_HIDESYM
85964 +#define __INCLUDED_BY_HIDESYM 1
85965 +#endif
85966 #include <linux/kallsyms.h>
85967 #include <linux/module.h>
85968 #include <linux/init.h>
85969 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
85970
85971 static inline int is_kernel_inittext(unsigned long addr)
85972 {
85973 + if (system_state != SYSTEM_BOOTING)
85974 + return 0;
85975 +
85976 if (addr >= (unsigned long)_sinittext
85977 && addr <= (unsigned long)_einittext)
85978 return 1;
85979 return 0;
85980 }
85981
85982 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85983 +#ifdef CONFIG_MODULES
85984 +static inline int is_module_text(unsigned long addr)
85985 +{
85986 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
85987 + return 1;
85988 +
85989 + addr = ktla_ktva(addr);
85990 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
85991 +}
85992 +#else
85993 +static inline int is_module_text(unsigned long addr)
85994 +{
85995 + return 0;
85996 +}
85997 +#endif
85998 +#endif
85999 +
86000 static inline int is_kernel_text(unsigned long addr)
86001 {
86002 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
86003 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
86004
86005 static inline int is_kernel(unsigned long addr)
86006 {
86007 +
86008 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
86009 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
86010 + return 1;
86011 +
86012 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
86013 +#else
86014 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
86015 +#endif
86016 +
86017 return 1;
86018 return in_gate_area_no_mm(addr);
86019 }
86020
86021 static int is_ksym_addr(unsigned long addr)
86022 {
86023 +
86024 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
86025 + if (is_module_text(addr))
86026 + return 0;
86027 +#endif
86028 +
86029 if (all_var)
86030 return is_kernel(addr);
86031
86032 @@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
86033
86034 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
86035 {
86036 - iter->name[0] = '\0';
86037 iter->nameoff = get_symbol_offset(new_pos);
86038 iter->pos = new_pos;
86039 }
86040 @@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
86041 {
86042 struct kallsym_iter *iter = m->private;
86043
86044 +#ifdef CONFIG_GRKERNSEC_HIDESYM
86045 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
86046 + return 0;
86047 +#endif
86048 +
86049 /* Some debugging symbols have no name. Ignore them. */
86050 if (!iter->name[0])
86051 return 0;
86052 @@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
86053 */
86054 type = iter->exported ? toupper(iter->type) :
86055 tolower(iter->type);
86056 +
86057 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
86058 type, iter->name, iter->module_name);
86059 } else
86060 @@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
86061 struct kallsym_iter *iter;
86062 int ret;
86063
86064 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
86065 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
86066 if (!iter)
86067 return -ENOMEM;
86068 reset_iter(iter, 0);
86069 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
86070 index e30ac0f..3528cac 100644
86071 --- a/kernel/kcmp.c
86072 +++ b/kernel/kcmp.c
86073 @@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
86074 struct task_struct *task1, *task2;
86075 int ret;
86076
86077 +#ifdef CONFIG_GRKERNSEC
86078 + return -ENOSYS;
86079 +#endif
86080 +
86081 rcu_read_lock();
86082
86083 /*
86084 diff --git a/kernel/kexec.c b/kernel/kexec.c
86085 index 9c97016..df438f8 100644
86086 --- a/kernel/kexec.c
86087 +++ b/kernel/kexec.c
86088 @@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
86089 unsigned long flags)
86090 {
86091 struct compat_kexec_segment in;
86092 - struct kexec_segment out, __user *ksegments;
86093 + struct kexec_segment out;
86094 + struct kexec_segment __user *ksegments;
86095 unsigned long i, result;
86096
86097 /* Don't allow clients that don't understand the native
86098 diff --git a/kernel/kmod.c b/kernel/kmod.c
86099 index b086006..b66f630 100644
86100 --- a/kernel/kmod.c
86101 +++ b/kernel/kmod.c
86102 @@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
86103 kfree(info->argv);
86104 }
86105
86106 -static int call_modprobe(char *module_name, int wait)
86107 +static int call_modprobe(char *module_name, char *module_param, int wait)
86108 {
86109 struct subprocess_info *info;
86110 static char *envp[] = {
86111 @@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
86112 NULL
86113 };
86114
86115 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
86116 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
86117 if (!argv)
86118 goto out;
86119
86120 @@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
86121 argv[1] = "-q";
86122 argv[2] = "--";
86123 argv[3] = module_name; /* check free_modprobe_argv() */
86124 - argv[4] = NULL;
86125 + argv[4] = module_param;
86126 + argv[5] = NULL;
86127
86128 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
86129 NULL, free_modprobe_argv, NULL);
86130 @@ -129,9 +130,8 @@ out:
86131 * If module auto-loading support is disabled then this function
86132 * becomes a no-operation.
86133 */
86134 -int __request_module(bool wait, const char *fmt, ...)
86135 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
86136 {
86137 - va_list args;
86138 char module_name[MODULE_NAME_LEN];
86139 unsigned int max_modprobes;
86140 int ret;
86141 @@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
86142 if (!modprobe_path[0])
86143 return 0;
86144
86145 - va_start(args, fmt);
86146 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
86147 - va_end(args);
86148 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
86149 if (ret >= MODULE_NAME_LEN)
86150 return -ENAMETOOLONG;
86151
86152 @@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
86153 if (ret)
86154 return ret;
86155
86156 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
86157 + if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
86158 + /* hack to workaround consolekit/udisks stupidity */
86159 + read_lock(&tasklist_lock);
86160 + if (!strcmp(current->comm, "mount") &&
86161 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
86162 + read_unlock(&tasklist_lock);
86163 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
86164 + return -EPERM;
86165 + }
86166 + read_unlock(&tasklist_lock);
86167 + }
86168 +#endif
86169 +
86170 /* If modprobe needs a service that is in a module, we get a recursive
86171 * loop. Limit the number of running kmod threads to max_threads/2 or
86172 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
86173 @@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
86174
86175 trace_module_request(module_name, wait, _RET_IP_);
86176
86177 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
86178 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
86179
86180 atomic_dec(&kmod_concurrent);
86181 return ret;
86182 }
86183 +
86184 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
86185 +{
86186 + va_list args;
86187 + int ret;
86188 +
86189 + va_start(args, fmt);
86190 + ret = ____request_module(wait, module_param, fmt, args);
86191 + va_end(args);
86192 +
86193 + return ret;
86194 +}
86195 +
86196 +int __request_module(bool wait, const char *fmt, ...)
86197 +{
86198 + va_list args;
86199 + int ret;
86200 +
86201 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
86202 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
86203 + char module_param[MODULE_NAME_LEN];
86204 +
86205 + memset(module_param, 0, sizeof(module_param));
86206 +
86207 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
86208 +
86209 + va_start(args, fmt);
86210 + ret = ____request_module(wait, module_param, fmt, args);
86211 + va_end(args);
86212 +
86213 + return ret;
86214 + }
86215 +#endif
86216 +
86217 + va_start(args, fmt);
86218 + ret = ____request_module(wait, NULL, fmt, args);
86219 + va_end(args);
86220 +
86221 + return ret;
86222 +}
86223 +
86224 EXPORT_SYMBOL(__request_module);
86225 #endif /* CONFIG_MODULES */
86226
86227 @@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
86228 */
86229 set_user_nice(current, 0);
86230
86231 +#ifdef CONFIG_GRKERNSEC
86232 + /* this is race-free as far as userland is concerned as we copied
86233 + out the path to be used prior to this point and are now operating
86234 + on that copy
86235 + */
86236 + if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
86237 + strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
86238 + strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
86239 + printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
86240 + retval = -EPERM;
86241 + goto fail;
86242 + }
86243 +#endif
86244 +
86245 retval = -ENOMEM;
86246 new = prepare_kernel_cred(current);
86247 if (!new)
86248 @@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
86249 commit_creds(new);
86250
86251 retval = do_execve(sub_info->path,
86252 - (const char __user *const __user *)sub_info->argv,
86253 - (const char __user *const __user *)sub_info->envp);
86254 + (const char __user *const __force_user *)sub_info->argv,
86255 + (const char __user *const __force_user *)sub_info->envp);
86256 if (!retval)
86257 return 0;
86258
86259 @@ -260,6 +327,10 @@ static int call_helper(void *data)
86260
86261 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
86262 {
86263 +#ifdef CONFIG_GRKERNSEC
86264 + kfree(info->path);
86265 + info->path = info->origpath;
86266 +#endif
86267 if (info->cleanup)
86268 (*info->cleanup)(info);
86269 kfree(info);
86270 @@ -303,7 +374,7 @@ static int wait_for_helper(void *data)
86271 *
86272 * Thus the __user pointer cast is valid here.
86273 */
86274 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
86275 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
86276
86277 /*
86278 * If ret is 0, either ____call_usermodehelper failed and the
86279 @@ -542,7 +613,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
86280 goto out;
86281
86282 INIT_WORK(&sub_info->work, __call_usermodehelper);
86283 +#ifdef CONFIG_GRKERNSEC
86284 + sub_info->origpath = path;
86285 + sub_info->path = kstrdup(path, gfp_mask);
86286 +#else
86287 sub_info->path = path;
86288 +#endif
86289 sub_info->argv = argv;
86290 sub_info->envp = envp;
86291
86292 @@ -650,7 +726,7 @@ EXPORT_SYMBOL(call_usermodehelper);
86293 static int proc_cap_handler(struct ctl_table *table, int write,
86294 void __user *buffer, size_t *lenp, loff_t *ppos)
86295 {
86296 - struct ctl_table t;
86297 + ctl_table_no_const t;
86298 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
86299 kernel_cap_t new_cap;
86300 int err, i;
86301 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
86302 index ceeadfc..11c18b6 100644
86303 --- a/kernel/kprobes.c
86304 +++ b/kernel/kprobes.c
86305 @@ -31,6 +31,9 @@
86306 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
86307 * <prasanna@in.ibm.com> added function-return probes.
86308 */
86309 +#ifdef CONFIG_GRKERNSEC_HIDESYM
86310 +#define __INCLUDED_BY_HIDESYM 1
86311 +#endif
86312 #include <linux/kprobes.h>
86313 #include <linux/hash.h>
86314 #include <linux/init.h>
86315 @@ -135,12 +138,12 @@ enum kprobe_slot_state {
86316
86317 static void *alloc_insn_page(void)
86318 {
86319 - return module_alloc(PAGE_SIZE);
86320 + return module_alloc_exec(PAGE_SIZE);
86321 }
86322
86323 static void free_insn_page(void *page)
86324 {
86325 - module_free(NULL, page);
86326 + module_free_exec(NULL, page);
86327 }
86328
86329 struct kprobe_insn_cache kprobe_insn_slots = {
86330 @@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
86331 kprobe_type = "k";
86332
86333 if (sym)
86334 - seq_printf(pi, "%p %s %s+0x%x %s ",
86335 + seq_printf(pi, "%pK %s %s+0x%x %s ",
86336 p->addr, kprobe_type, sym, offset,
86337 (modname ? modname : " "));
86338 else
86339 - seq_printf(pi, "%p %s %p ",
86340 + seq_printf(pi, "%pK %s %pK ",
86341 p->addr, kprobe_type, p->addr);
86342
86343 if (!pp)
86344 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
86345 index 9659d38..bffd520 100644
86346 --- a/kernel/ksysfs.c
86347 +++ b/kernel/ksysfs.c
86348 @@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
86349 {
86350 if (count+1 > UEVENT_HELPER_PATH_LEN)
86351 return -ENOENT;
86352 + if (!capable(CAP_SYS_ADMIN))
86353 + return -EPERM;
86354 memcpy(uevent_helper, buf, count);
86355 uevent_helper[count] = '\0';
86356 if (count && uevent_helper[count-1] == '\n')
86357 @@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
86358 return count;
86359 }
86360
86361 -static struct bin_attribute notes_attr = {
86362 +static bin_attribute_no_const notes_attr __read_only = {
86363 .attr = {
86364 .name = "notes",
86365 .mode = S_IRUGO,
86366 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
86367 index 576ba75..7c256e4 100644
86368 --- a/kernel/locking/lockdep.c
86369 +++ b/kernel/locking/lockdep.c
86370 @@ -596,6 +596,10 @@ static int static_obj(void *obj)
86371 end = (unsigned long) &_end,
86372 addr = (unsigned long) obj;
86373
86374 +#ifdef CONFIG_PAX_KERNEXEC
86375 + start = ktla_ktva(start);
86376 +#endif
86377 +
86378 /*
86379 * static variable?
86380 */
86381 @@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
86382 if (!static_obj(lock->key)) {
86383 debug_locks_off();
86384 printk("INFO: trying to register non-static key.\n");
86385 + printk("lock:%pS key:%pS.\n", lock, lock->key);
86386 printk("the code is fine but needs lockdep annotation.\n");
86387 printk("turning off the locking correctness validator.\n");
86388 dump_stack();
86389 @@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
86390 if (!class)
86391 return 0;
86392 }
86393 - atomic_inc((atomic_t *)&class->ops);
86394 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
86395 if (very_verbose(class)) {
86396 printk("\nacquire class [%p] %s", class->key, class->name);
86397 if (class->name_version > 1)
86398 diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
86399 index ef43ac4..2720dfa 100644
86400 --- a/kernel/locking/lockdep_proc.c
86401 +++ b/kernel/locking/lockdep_proc.c
86402 @@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
86403 return 0;
86404 }
86405
86406 - seq_printf(m, "%p", class->key);
86407 + seq_printf(m, "%pK", class->key);
86408 #ifdef CONFIG_DEBUG_LOCKDEP
86409 seq_printf(m, " OPS:%8ld", class->ops);
86410 #endif
86411 @@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
86412
86413 list_for_each_entry(entry, &class->locks_after, entry) {
86414 if (entry->distance == 1) {
86415 - seq_printf(m, " -> [%p] ", entry->class->key);
86416 + seq_printf(m, " -> [%pK] ", entry->class->key);
86417 print_name(m, entry->class);
86418 seq_puts(m, "\n");
86419 }
86420 @@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
86421 if (!class->key)
86422 continue;
86423
86424 - seq_printf(m, "[%p] ", class->key);
86425 + seq_printf(m, "[%pK] ", class->key);
86426 print_name(m, class);
86427 seq_puts(m, "\n");
86428 }
86429 @@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
86430 if (!i)
86431 seq_line(m, '-', 40-namelen, namelen);
86432
86433 - snprintf(ip, sizeof(ip), "[<%p>]",
86434 + snprintf(ip, sizeof(ip), "[<%pK>]",
86435 (void *)class->contention_point[i]);
86436 seq_printf(m, "%40s %14lu %29s %pS\n",
86437 name, stats->contention_point[i],
86438 @@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
86439 if (!i)
86440 seq_line(m, '-', 40-namelen, namelen);
86441
86442 - snprintf(ip, sizeof(ip), "[<%p>]",
86443 + snprintf(ip, sizeof(ip), "[<%pK>]",
86444 (void *)class->contending_point[i]);
86445 seq_printf(m, "%40s %14lu %29s %pS\n",
86446 name, stats->contending_point[i],
86447 diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
86448 index 7e3443f..b2a1e6b 100644
86449 --- a/kernel/locking/mutex-debug.c
86450 +++ b/kernel/locking/mutex-debug.c
86451 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
86452 }
86453
86454 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
86455 - struct thread_info *ti)
86456 + struct task_struct *task)
86457 {
86458 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
86459
86460 /* Mark the current thread as blocked on the lock: */
86461 - ti->task->blocked_on = waiter;
86462 + task->blocked_on = waiter;
86463 }
86464
86465 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
86466 - struct thread_info *ti)
86467 + struct task_struct *task)
86468 {
86469 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
86470 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
86471 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
86472 - ti->task->blocked_on = NULL;
86473 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
86474 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
86475 + task->blocked_on = NULL;
86476
86477 list_del_init(&waiter->list);
86478 waiter->task = NULL;
86479 diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
86480 index 0799fd3..d06ae3b 100644
86481 --- a/kernel/locking/mutex-debug.h
86482 +++ b/kernel/locking/mutex-debug.h
86483 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
86484 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
86485 extern void debug_mutex_add_waiter(struct mutex *lock,
86486 struct mutex_waiter *waiter,
86487 - struct thread_info *ti);
86488 + struct task_struct *task);
86489 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
86490 - struct thread_info *ti);
86491 + struct task_struct *task);
86492 extern void debug_mutex_unlock(struct mutex *lock);
86493 extern void debug_mutex_init(struct mutex *lock, const char *name,
86494 struct lock_class_key *key);
86495 diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
86496 index 4dd6e4c..df52693 100644
86497 --- a/kernel/locking/mutex.c
86498 +++ b/kernel/locking/mutex.c
86499 @@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
86500 node->locked = 1;
86501 return;
86502 }
86503 - ACCESS_ONCE(prev->next) = node;
86504 + ACCESS_ONCE_RW(prev->next) = node;
86505 smp_wmb();
86506 /* Wait until the lock holder passes the lock down */
86507 while (!ACCESS_ONCE(node->locked))
86508 @@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
86509 while (!(next = ACCESS_ONCE(node->next)))
86510 arch_mutex_cpu_relax();
86511 }
86512 - ACCESS_ONCE(next->locked) = 1;
86513 + ACCESS_ONCE_RW(next->locked) = 1;
86514 smp_wmb();
86515 }
86516
86517 @@ -520,7 +520,7 @@ slowpath:
86518 goto skip_wait;
86519
86520 debug_mutex_lock_common(lock, &waiter);
86521 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
86522 + debug_mutex_add_waiter(lock, &waiter, task);
86523
86524 /* add waiting tasks to the end of the waitqueue (FIFO): */
86525 list_add_tail(&waiter.list, &lock->wait_list);
86526 @@ -564,7 +564,7 @@ slowpath:
86527 schedule_preempt_disabled();
86528 spin_lock_mutex(&lock->wait_lock, flags);
86529 }
86530 - mutex_remove_waiter(lock, &waiter, current_thread_info());
86531 + mutex_remove_waiter(lock, &waiter, task);
86532 /* set it to 0 if there are no waiters left: */
86533 if (likely(list_empty(&lock->wait_list)))
86534 atomic_set(&lock->count, 0);
86535 @@ -601,7 +601,7 @@ skip_wait:
86536 return 0;
86537
86538 err:
86539 - mutex_remove_waiter(lock, &waiter, task_thread_info(task));
86540 + mutex_remove_waiter(lock, &waiter, task);
86541 spin_unlock_mutex(&lock->wait_lock, flags);
86542 debug_mutex_free_waiter(&waiter);
86543 mutex_release(&lock->dep_map, 1, ip);
86544 diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
86545 index 1d96dd0..994ff19 100644
86546 --- a/kernel/locking/rtmutex-tester.c
86547 +++ b/kernel/locking/rtmutex-tester.c
86548 @@ -22,7 +22,7 @@
86549 #define MAX_RT_TEST_MUTEXES 8
86550
86551 static spinlock_t rttest_lock;
86552 -static atomic_t rttest_event;
86553 +static atomic_unchecked_t rttest_event;
86554
86555 struct test_thread_data {
86556 int opcode;
86557 @@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86558
86559 case RTTEST_LOCKCONT:
86560 td->mutexes[td->opdata] = 1;
86561 - td->event = atomic_add_return(1, &rttest_event);
86562 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86563 return 0;
86564
86565 case RTTEST_RESET:
86566 @@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86567 return 0;
86568
86569 case RTTEST_RESETEVENT:
86570 - atomic_set(&rttest_event, 0);
86571 + atomic_set_unchecked(&rttest_event, 0);
86572 return 0;
86573
86574 default:
86575 @@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86576 return ret;
86577
86578 td->mutexes[id] = 1;
86579 - td->event = atomic_add_return(1, &rttest_event);
86580 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86581 rt_mutex_lock(&mutexes[id]);
86582 - td->event = atomic_add_return(1, &rttest_event);
86583 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86584 td->mutexes[id] = 4;
86585 return 0;
86586
86587 @@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86588 return ret;
86589
86590 td->mutexes[id] = 1;
86591 - td->event = atomic_add_return(1, &rttest_event);
86592 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86593 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
86594 - td->event = atomic_add_return(1, &rttest_event);
86595 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86596 td->mutexes[id] = ret ? 0 : 4;
86597 return ret ? -EINTR : 0;
86598
86599 @@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86600 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
86601 return ret;
86602
86603 - td->event = atomic_add_return(1, &rttest_event);
86604 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86605 rt_mutex_unlock(&mutexes[id]);
86606 - td->event = atomic_add_return(1, &rttest_event);
86607 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86608 td->mutexes[id] = 0;
86609 return 0;
86610
86611 @@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
86612 break;
86613
86614 td->mutexes[dat] = 2;
86615 - td->event = atomic_add_return(1, &rttest_event);
86616 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86617 break;
86618
86619 default:
86620 @@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
86621 return;
86622
86623 td->mutexes[dat] = 3;
86624 - td->event = atomic_add_return(1, &rttest_event);
86625 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86626 break;
86627
86628 case RTTEST_LOCKNOWAIT:
86629 @@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
86630 return;
86631
86632 td->mutexes[dat] = 1;
86633 - td->event = atomic_add_return(1, &rttest_event);
86634 + td->event = atomic_add_return_unchecked(1, &rttest_event);
86635 return;
86636
86637 default:
86638 diff --git a/kernel/module.c b/kernel/module.c
86639 index f5a3b1e..97ebb15 100644
86640 --- a/kernel/module.c
86641 +++ b/kernel/module.c
86642 @@ -61,6 +61,7 @@
86643 #include <linux/pfn.h>
86644 #include <linux/bsearch.h>
86645 #include <linux/fips.h>
86646 +#include <linux/grsecurity.h>
86647 #include <uapi/linux/module.h>
86648 #include "module-internal.h"
86649
86650 @@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
86651
86652 /* Bounds of module allocation, for speeding __module_address.
86653 * Protected by module_mutex. */
86654 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
86655 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
86656 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
86657
86658 int register_module_notifier(struct notifier_block * nb)
86659 {
86660 @@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
86661 return true;
86662
86663 list_for_each_entry_rcu(mod, &modules, list) {
86664 - struct symsearch arr[] = {
86665 + struct symsearch modarr[] = {
86666 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
86667 NOT_GPL_ONLY, false },
86668 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
86669 @@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
86670 if (mod->state == MODULE_STATE_UNFORMED)
86671 continue;
86672
86673 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
86674 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
86675 return true;
86676 }
86677 return false;
86678 @@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
86679 if (!pcpusec->sh_size)
86680 return 0;
86681
86682 - if (align > PAGE_SIZE) {
86683 + if (align-1 >= PAGE_SIZE) {
86684 pr_warn("%s: per-cpu alignment %li > %li\n",
86685 mod->name, align, PAGE_SIZE);
86686 align = PAGE_SIZE;
86687 @@ -1064,7 +1066,7 @@ struct module_attribute module_uevent =
86688 static ssize_t show_coresize(struct module_attribute *mattr,
86689 struct module_kobject *mk, char *buffer)
86690 {
86691 - return sprintf(buffer, "%u\n", mk->mod->core_size);
86692 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
86693 }
86694
86695 static struct module_attribute modinfo_coresize =
86696 @@ -1073,7 +1075,7 @@ static struct module_attribute modinfo_coresize =
86697 static ssize_t show_initsize(struct module_attribute *mattr,
86698 struct module_kobject *mk, char *buffer)
86699 {
86700 - return sprintf(buffer, "%u\n", mk->mod->init_size);
86701 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
86702 }
86703
86704 static struct module_attribute modinfo_initsize =
86705 @@ -1165,12 +1167,29 @@ static int check_version(Elf_Shdr *sechdrs,
86706 goto bad_version;
86707 }
86708
86709 +#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86710 + /*
86711 + * avoid potentially printing jibberish on attempted load
86712 + * of a module randomized with a different seed
86713 + */
86714 + pr_warn("no symbol version for %s\n", symname);
86715 +#else
86716 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
86717 +#endif
86718 return 0;
86719
86720 bad_version:
86721 +#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86722 + /*
86723 + * avoid potentially printing jibberish on attempted load
86724 + * of a module randomized with a different seed
86725 + */
86726 + printk("attempted module disagrees about version of symbol %s\n",
86727 + symname);
86728 +#else
86729 printk("%s: disagrees about version of symbol %s\n",
86730 mod->name, symname);
86731 +#endif
86732 return 0;
86733 }
86734
86735 @@ -1286,7 +1305,7 @@ resolve_symbol_wait(struct module *mod,
86736 */
86737 #ifdef CONFIG_SYSFS
86738
86739 -#ifdef CONFIG_KALLSYMS
86740 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86741 static inline bool sect_empty(const Elf_Shdr *sect)
86742 {
86743 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
86744 @@ -1426,7 +1445,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
86745 {
86746 unsigned int notes, loaded, i;
86747 struct module_notes_attrs *notes_attrs;
86748 - struct bin_attribute *nattr;
86749 + bin_attribute_no_const *nattr;
86750
86751 /* failed to create section attributes, so can't create notes */
86752 if (!mod->sect_attrs)
86753 @@ -1538,7 +1557,7 @@ static void del_usage_links(struct module *mod)
86754 static int module_add_modinfo_attrs(struct module *mod)
86755 {
86756 struct module_attribute *attr;
86757 - struct module_attribute *temp_attr;
86758 + module_attribute_no_const *temp_attr;
86759 int error = 0;
86760 int i;
86761
86762 @@ -1759,21 +1778,21 @@ static void set_section_ro_nx(void *base,
86763
86764 static void unset_module_core_ro_nx(struct module *mod)
86765 {
86766 - set_page_attributes(mod->module_core + mod->core_text_size,
86767 - mod->module_core + mod->core_size,
86768 + set_page_attributes(mod->module_core_rw,
86769 + mod->module_core_rw + mod->core_size_rw,
86770 set_memory_x);
86771 - set_page_attributes(mod->module_core,
86772 - mod->module_core + mod->core_ro_size,
86773 + set_page_attributes(mod->module_core_rx,
86774 + mod->module_core_rx + mod->core_size_rx,
86775 set_memory_rw);
86776 }
86777
86778 static void unset_module_init_ro_nx(struct module *mod)
86779 {
86780 - set_page_attributes(mod->module_init + mod->init_text_size,
86781 - mod->module_init + mod->init_size,
86782 + set_page_attributes(mod->module_init_rw,
86783 + mod->module_init_rw + mod->init_size_rw,
86784 set_memory_x);
86785 - set_page_attributes(mod->module_init,
86786 - mod->module_init + mod->init_ro_size,
86787 + set_page_attributes(mod->module_init_rx,
86788 + mod->module_init_rx + mod->init_size_rx,
86789 set_memory_rw);
86790 }
86791
86792 @@ -1786,14 +1805,14 @@ void set_all_modules_text_rw(void)
86793 list_for_each_entry_rcu(mod, &modules, list) {
86794 if (mod->state == MODULE_STATE_UNFORMED)
86795 continue;
86796 - if ((mod->module_core) && (mod->core_text_size)) {
86797 - set_page_attributes(mod->module_core,
86798 - mod->module_core + mod->core_text_size,
86799 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
86800 + set_page_attributes(mod->module_core_rx,
86801 + mod->module_core_rx + mod->core_size_rx,
86802 set_memory_rw);
86803 }
86804 - if ((mod->module_init) && (mod->init_text_size)) {
86805 - set_page_attributes(mod->module_init,
86806 - mod->module_init + mod->init_text_size,
86807 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
86808 + set_page_attributes(mod->module_init_rx,
86809 + mod->module_init_rx + mod->init_size_rx,
86810 set_memory_rw);
86811 }
86812 }
86813 @@ -1809,14 +1828,14 @@ void set_all_modules_text_ro(void)
86814 list_for_each_entry_rcu(mod, &modules, list) {
86815 if (mod->state == MODULE_STATE_UNFORMED)
86816 continue;
86817 - if ((mod->module_core) && (mod->core_text_size)) {
86818 - set_page_attributes(mod->module_core,
86819 - mod->module_core + mod->core_text_size,
86820 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
86821 + set_page_attributes(mod->module_core_rx,
86822 + mod->module_core_rx + mod->core_size_rx,
86823 set_memory_ro);
86824 }
86825 - if ((mod->module_init) && (mod->init_text_size)) {
86826 - set_page_attributes(mod->module_init,
86827 - mod->module_init + mod->init_text_size,
86828 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
86829 + set_page_attributes(mod->module_init_rx,
86830 + mod->module_init_rx + mod->init_size_rx,
86831 set_memory_ro);
86832 }
86833 }
86834 @@ -1867,16 +1886,19 @@ static void free_module(struct module *mod)
86835
86836 /* This may be NULL, but that's OK */
86837 unset_module_init_ro_nx(mod);
86838 - module_free(mod, mod->module_init);
86839 + module_free(mod, mod->module_init_rw);
86840 + module_free_exec(mod, mod->module_init_rx);
86841 kfree(mod->args);
86842 percpu_modfree(mod);
86843
86844 /* Free lock-classes: */
86845 - lockdep_free_key_range(mod->module_core, mod->core_size);
86846 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
86847 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
86848
86849 /* Finally, free the core (containing the module structure) */
86850 unset_module_core_ro_nx(mod);
86851 - module_free(mod, mod->module_core);
86852 + module_free_exec(mod, mod->module_core_rx);
86853 + module_free(mod, mod->module_core_rw);
86854
86855 #ifdef CONFIG_MPU
86856 update_protections(current->mm);
86857 @@ -1945,9 +1967,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
86858 int ret = 0;
86859 const struct kernel_symbol *ksym;
86860
86861 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
86862 + int is_fs_load = 0;
86863 + int register_filesystem_found = 0;
86864 + char *p;
86865 +
86866 + p = strstr(mod->args, "grsec_modharden_fs");
86867 + if (p) {
86868 + char *endptr = p + sizeof("grsec_modharden_fs") - 1;
86869 + /* copy \0 as well */
86870 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
86871 + is_fs_load = 1;
86872 + }
86873 +#endif
86874 +
86875 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
86876 const char *name = info->strtab + sym[i].st_name;
86877
86878 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
86879 + /* it's a real shame this will never get ripped and copied
86880 + upstream! ;(
86881 + */
86882 + if (is_fs_load && !strcmp(name, "register_filesystem"))
86883 + register_filesystem_found = 1;
86884 +#endif
86885 +
86886 switch (sym[i].st_shndx) {
86887 case SHN_COMMON:
86888 /* We compiled with -fno-common. These are not
86889 @@ -1968,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
86890 ksym = resolve_symbol_wait(mod, info, name);
86891 /* Ok if resolved. */
86892 if (ksym && !IS_ERR(ksym)) {
86893 + pax_open_kernel();
86894 sym[i].st_value = ksym->value;
86895 + pax_close_kernel();
86896 break;
86897 }
86898
86899 @@ -1987,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
86900 secbase = (unsigned long)mod_percpu(mod);
86901 else
86902 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
86903 + pax_open_kernel();
86904 sym[i].st_value += secbase;
86905 + pax_close_kernel();
86906 break;
86907 }
86908 }
86909
86910 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
86911 + if (is_fs_load && !register_filesystem_found) {
86912 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
86913 + ret = -EPERM;
86914 + }
86915 +#endif
86916 +
86917 return ret;
86918 }
86919
86920 @@ -2075,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
86921 || s->sh_entsize != ~0UL
86922 || strstarts(sname, ".init"))
86923 continue;
86924 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
86925 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
86926 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
86927 + else
86928 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
86929 pr_debug("\t%s\n", sname);
86930 }
86931 - switch (m) {
86932 - case 0: /* executable */
86933 - mod->core_size = debug_align(mod->core_size);
86934 - mod->core_text_size = mod->core_size;
86935 - break;
86936 - case 1: /* RO: text and ro-data */
86937 - mod->core_size = debug_align(mod->core_size);
86938 - mod->core_ro_size = mod->core_size;
86939 - break;
86940 - case 3: /* whole core */
86941 - mod->core_size = debug_align(mod->core_size);
86942 - break;
86943 - }
86944 }
86945
86946 pr_debug("Init section allocation order:\n");
86947 @@ -2104,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
86948 || s->sh_entsize != ~0UL
86949 || !strstarts(sname, ".init"))
86950 continue;
86951 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
86952 - | INIT_OFFSET_MASK);
86953 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
86954 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
86955 + else
86956 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
86957 + s->sh_entsize |= INIT_OFFSET_MASK;
86958 pr_debug("\t%s\n", sname);
86959 }
86960 - switch (m) {
86961 - case 0: /* executable */
86962 - mod->init_size = debug_align(mod->init_size);
86963 - mod->init_text_size = mod->init_size;
86964 - break;
86965 - case 1: /* RO: text and ro-data */
86966 - mod->init_size = debug_align(mod->init_size);
86967 - mod->init_ro_size = mod->init_size;
86968 - break;
86969 - case 3: /* whole init */
86970 - mod->init_size = debug_align(mod->init_size);
86971 - break;
86972 - }
86973 }
86974 }
86975
86976 @@ -2293,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
86977
86978 /* Put symbol section at end of init part of module. */
86979 symsect->sh_flags |= SHF_ALLOC;
86980 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
86981 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
86982 info->index.sym) | INIT_OFFSET_MASK;
86983 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
86984
86985 @@ -2310,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
86986 }
86987
86988 /* Append room for core symbols at end of core part. */
86989 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
86990 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
86991 - mod->core_size += strtab_size;
86992 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
86993 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
86994 + mod->core_size_rx += strtab_size;
86995
86996 /* Put string table section at end of init part of module. */
86997 strsect->sh_flags |= SHF_ALLOC;
86998 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
86999 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
87000 info->index.str) | INIT_OFFSET_MASK;
87001 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
87002 }
87003 @@ -2334,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
87004 /* Make sure we get permanent strtab: don't use info->strtab. */
87005 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
87006
87007 + pax_open_kernel();
87008 +
87009 /* Set types up while we still have access to sections. */
87010 for (i = 0; i < mod->num_symtab; i++)
87011 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
87012
87013 - mod->core_symtab = dst = mod->module_core + info->symoffs;
87014 - mod->core_strtab = s = mod->module_core + info->stroffs;
87015 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
87016 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
87017 src = mod->symtab;
87018 for (ndst = i = 0; i < mod->num_symtab; i++) {
87019 if (i == 0 ||
87020 @@ -2351,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
87021 }
87022 }
87023 mod->core_num_syms = ndst;
87024 +
87025 + pax_close_kernel();
87026 }
87027 #else
87028 static inline void layout_symtab(struct module *mod, struct load_info *info)
87029 @@ -2384,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
87030 return vmalloc_exec(size);
87031 }
87032
87033 -static void *module_alloc_update_bounds(unsigned long size)
87034 +static void *module_alloc_update_bounds_rw(unsigned long size)
87035 {
87036 void *ret = module_alloc(size);
87037
87038 if (ret) {
87039 mutex_lock(&module_mutex);
87040 /* Update module bounds. */
87041 - if ((unsigned long)ret < module_addr_min)
87042 - module_addr_min = (unsigned long)ret;
87043 - if ((unsigned long)ret + size > module_addr_max)
87044 - module_addr_max = (unsigned long)ret + size;
87045 + if ((unsigned long)ret < module_addr_min_rw)
87046 + module_addr_min_rw = (unsigned long)ret;
87047 + if ((unsigned long)ret + size > module_addr_max_rw)
87048 + module_addr_max_rw = (unsigned long)ret + size;
87049 + mutex_unlock(&module_mutex);
87050 + }
87051 + return ret;
87052 +}
87053 +
87054 +static void *module_alloc_update_bounds_rx(unsigned long size)
87055 +{
87056 + void *ret = module_alloc_exec(size);
87057 +
87058 + if (ret) {
87059 + mutex_lock(&module_mutex);
87060 + /* Update module bounds. */
87061 + if ((unsigned long)ret < module_addr_min_rx)
87062 + module_addr_min_rx = (unsigned long)ret;
87063 + if ((unsigned long)ret + size > module_addr_max_rx)
87064 + module_addr_max_rx = (unsigned long)ret + size;
87065 mutex_unlock(&module_mutex);
87066 }
87067 return ret;
87068 @@ -2651,7 +2706,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
87069 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
87070
87071 if (info->index.sym == 0) {
87072 +#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
87073 + /*
87074 + * avoid potentially printing jibberish on attempted load
87075 + * of a module randomized with a different seed
87076 + */
87077 + pr_warn("module has no symbols (stripped?)\n");
87078 +#else
87079 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
87080 +#endif
87081 return ERR_PTR(-ENOEXEC);
87082 }
87083
87084 @@ -2667,8 +2730,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
87085 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
87086 {
87087 const char *modmagic = get_modinfo(info, "vermagic");
87088 + const char *license = get_modinfo(info, "license");
87089 int err;
87090
87091 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
87092 + if (!license || !license_is_gpl_compatible(license))
87093 + return -ENOEXEC;
87094 +#endif
87095 +
87096 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
87097 modmagic = NULL;
87098
87099 @@ -2693,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
87100 }
87101
87102 /* Set up license info based on the info section */
87103 - set_license(mod, get_modinfo(info, "license"));
87104 + set_license(mod, license);
87105
87106 return 0;
87107 }
87108 @@ -2787,7 +2856,7 @@ static int move_module(struct module *mod, struct load_info *info)
87109 void *ptr;
87110
87111 /* Do the allocs. */
87112 - ptr = module_alloc_update_bounds(mod->core_size);
87113 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
87114 /*
87115 * The pointer to this block is stored in the module structure
87116 * which is inside the block. Just mark it as not being a
87117 @@ -2797,11 +2866,11 @@ static int move_module(struct module *mod, struct load_info *info)
87118 if (!ptr)
87119 return -ENOMEM;
87120
87121 - memset(ptr, 0, mod->core_size);
87122 - mod->module_core = ptr;
87123 + memset(ptr, 0, mod->core_size_rw);
87124 + mod->module_core_rw = ptr;
87125
87126 - if (mod->init_size) {
87127 - ptr = module_alloc_update_bounds(mod->init_size);
87128 + if (mod->init_size_rw) {
87129 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
87130 /*
87131 * The pointer to this block is stored in the module structure
87132 * which is inside the block. This block doesn't need to be
87133 @@ -2810,13 +2879,45 @@ static int move_module(struct module *mod, struct load_info *info)
87134 */
87135 kmemleak_ignore(ptr);
87136 if (!ptr) {
87137 - module_free(mod, mod->module_core);
87138 + module_free(mod, mod->module_core_rw);
87139 return -ENOMEM;
87140 }
87141 - memset(ptr, 0, mod->init_size);
87142 - mod->module_init = ptr;
87143 + memset(ptr, 0, mod->init_size_rw);
87144 + mod->module_init_rw = ptr;
87145 } else
87146 - mod->module_init = NULL;
87147 + mod->module_init_rw = NULL;
87148 +
87149 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
87150 + kmemleak_not_leak(ptr);
87151 + if (!ptr) {
87152 + if (mod->module_init_rw)
87153 + module_free(mod, mod->module_init_rw);
87154 + module_free(mod, mod->module_core_rw);
87155 + return -ENOMEM;
87156 + }
87157 +
87158 + pax_open_kernel();
87159 + memset(ptr, 0, mod->core_size_rx);
87160 + pax_close_kernel();
87161 + mod->module_core_rx = ptr;
87162 +
87163 + if (mod->init_size_rx) {
87164 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
87165 + kmemleak_ignore(ptr);
87166 + if (!ptr && mod->init_size_rx) {
87167 + module_free_exec(mod, mod->module_core_rx);
87168 + if (mod->module_init_rw)
87169 + module_free(mod, mod->module_init_rw);
87170 + module_free(mod, mod->module_core_rw);
87171 + return -ENOMEM;
87172 + }
87173 +
87174 + pax_open_kernel();
87175 + memset(ptr, 0, mod->init_size_rx);
87176 + pax_close_kernel();
87177 + mod->module_init_rx = ptr;
87178 + } else
87179 + mod->module_init_rx = NULL;
87180
87181 /* Transfer each section which specifies SHF_ALLOC */
87182 pr_debug("final section addresses:\n");
87183 @@ -2827,16 +2928,45 @@ static int move_module(struct module *mod, struct load_info *info)
87184 if (!(shdr->sh_flags & SHF_ALLOC))
87185 continue;
87186
87187 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
87188 - dest = mod->module_init
87189 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
87190 - else
87191 - dest = mod->module_core + shdr->sh_entsize;
87192 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
87193 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
87194 + dest = mod->module_init_rw
87195 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
87196 + else
87197 + dest = mod->module_init_rx
87198 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
87199 + } else {
87200 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
87201 + dest = mod->module_core_rw + shdr->sh_entsize;
87202 + else
87203 + dest = mod->module_core_rx + shdr->sh_entsize;
87204 + }
87205 +
87206 + if (shdr->sh_type != SHT_NOBITS) {
87207 +
87208 +#ifdef CONFIG_PAX_KERNEXEC
87209 +#ifdef CONFIG_X86_64
87210 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
87211 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
87212 +#endif
87213 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
87214 + pax_open_kernel();
87215 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
87216 + pax_close_kernel();
87217 + } else
87218 +#endif
87219
87220 - if (shdr->sh_type != SHT_NOBITS)
87221 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
87222 + }
87223 /* Update sh_addr to point to copy in image. */
87224 - shdr->sh_addr = (unsigned long)dest;
87225 +
87226 +#ifdef CONFIG_PAX_KERNEXEC
87227 + if (shdr->sh_flags & SHF_EXECINSTR)
87228 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
87229 + else
87230 +#endif
87231 +
87232 + shdr->sh_addr = (unsigned long)dest;
87233 pr_debug("\t0x%lx %s\n",
87234 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
87235 }
87236 @@ -2893,12 +3023,12 @@ static void flush_module_icache(const struct module *mod)
87237 * Do it before processing of module parameters, so the module
87238 * can provide parameter accessor functions of its own.
87239 */
87240 - if (mod->module_init)
87241 - flush_icache_range((unsigned long)mod->module_init,
87242 - (unsigned long)mod->module_init
87243 - + mod->init_size);
87244 - flush_icache_range((unsigned long)mod->module_core,
87245 - (unsigned long)mod->module_core + mod->core_size);
87246 + if (mod->module_init_rx)
87247 + flush_icache_range((unsigned long)mod->module_init_rx,
87248 + (unsigned long)mod->module_init_rx
87249 + + mod->init_size_rx);
87250 + flush_icache_range((unsigned long)mod->module_core_rx,
87251 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
87252
87253 set_fs(old_fs);
87254 }
87255 @@ -2955,8 +3085,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
87256 static void module_deallocate(struct module *mod, struct load_info *info)
87257 {
87258 percpu_modfree(mod);
87259 - module_free(mod, mod->module_init);
87260 - module_free(mod, mod->module_core);
87261 + module_free_exec(mod, mod->module_init_rx);
87262 + module_free_exec(mod, mod->module_core_rx);
87263 + module_free(mod, mod->module_init_rw);
87264 + module_free(mod, mod->module_core_rw);
87265 }
87266
87267 int __weak module_finalize(const Elf_Ehdr *hdr,
87268 @@ -2969,7 +3101,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
87269 static int post_relocation(struct module *mod, const struct load_info *info)
87270 {
87271 /* Sort exception table now relocations are done. */
87272 + pax_open_kernel();
87273 sort_extable(mod->extable, mod->extable + mod->num_exentries);
87274 + pax_close_kernel();
87275
87276 /* Copy relocated percpu area over. */
87277 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
87278 @@ -3023,16 +3157,16 @@ static int do_init_module(struct module *mod)
87279 MODULE_STATE_COMING, mod);
87280
87281 /* Set RO and NX regions for core */
87282 - set_section_ro_nx(mod->module_core,
87283 - mod->core_text_size,
87284 - mod->core_ro_size,
87285 - mod->core_size);
87286 + set_section_ro_nx(mod->module_core_rx,
87287 + mod->core_size_rx,
87288 + mod->core_size_rx,
87289 + mod->core_size_rx);
87290
87291 /* Set RO and NX regions for init */
87292 - set_section_ro_nx(mod->module_init,
87293 - mod->init_text_size,
87294 - mod->init_ro_size,
87295 - mod->init_size);
87296 + set_section_ro_nx(mod->module_init_rx,
87297 + mod->init_size_rx,
87298 + mod->init_size_rx,
87299 + mod->init_size_rx);
87300
87301 do_mod_ctors(mod);
87302 /* Start the module */
87303 @@ -3093,11 +3227,12 @@ static int do_init_module(struct module *mod)
87304 mod->strtab = mod->core_strtab;
87305 #endif
87306 unset_module_init_ro_nx(mod);
87307 - module_free(mod, mod->module_init);
87308 - mod->module_init = NULL;
87309 - mod->init_size = 0;
87310 - mod->init_ro_size = 0;
87311 - mod->init_text_size = 0;
87312 + module_free(mod, mod->module_init_rw);
87313 + module_free_exec(mod, mod->module_init_rx);
87314 + mod->module_init_rw = NULL;
87315 + mod->module_init_rx = NULL;
87316 + mod->init_size_rw = 0;
87317 + mod->init_size_rx = 0;
87318 mutex_unlock(&module_mutex);
87319 wake_up_all(&module_wq);
87320
87321 @@ -3240,9 +3375,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
87322 if (err)
87323 goto free_unload;
87324
87325 + /* Now copy in args */
87326 + mod->args = strndup_user(uargs, ~0UL >> 1);
87327 + if (IS_ERR(mod->args)) {
87328 + err = PTR_ERR(mod->args);
87329 + goto free_unload;
87330 + }
87331 +
87332 /* Set up MODINFO_ATTR fields */
87333 setup_modinfo(mod, info);
87334
87335 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
87336 + {
87337 + char *p, *p2;
87338 +
87339 + if (strstr(mod->args, "grsec_modharden_netdev")) {
87340 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
87341 + err = -EPERM;
87342 + goto free_modinfo;
87343 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
87344 + p += sizeof("grsec_modharden_normal") - 1;
87345 + p2 = strstr(p, "_");
87346 + if (p2) {
87347 + *p2 = '\0';
87348 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
87349 + *p2 = '_';
87350 + }
87351 + err = -EPERM;
87352 + goto free_modinfo;
87353 + }
87354 + }
87355 +#endif
87356 +
87357 /* Fix up syms, so that st_value is a pointer to location. */
87358 err = simplify_symbols(mod, info);
87359 if (err < 0)
87360 @@ -3258,13 +3422,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
87361
87362 flush_module_icache(mod);
87363
87364 - /* Now copy in args */
87365 - mod->args = strndup_user(uargs, ~0UL >> 1);
87366 - if (IS_ERR(mod->args)) {
87367 - err = PTR_ERR(mod->args);
87368 - goto free_arch_cleanup;
87369 - }
87370 -
87371 dynamic_debug_setup(info->debug, info->num_debug);
87372
87373 /* Finally it's fully formed, ready to start executing. */
87374 @@ -3299,11 +3456,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
87375 ddebug_cleanup:
87376 dynamic_debug_remove(info->debug);
87377 synchronize_sched();
87378 - kfree(mod->args);
87379 - free_arch_cleanup:
87380 module_arch_cleanup(mod);
87381 free_modinfo:
87382 free_modinfo(mod);
87383 + kfree(mod->args);
87384 free_unload:
87385 module_unload_free(mod);
87386 unlink_mod:
87387 @@ -3386,10 +3542,16 @@ static const char *get_ksymbol(struct module *mod,
87388 unsigned long nextval;
87389
87390 /* At worse, next value is at end of module */
87391 - if (within_module_init(addr, mod))
87392 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
87393 + if (within_module_init_rx(addr, mod))
87394 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
87395 + else if (within_module_init_rw(addr, mod))
87396 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
87397 + else if (within_module_core_rx(addr, mod))
87398 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
87399 + else if (within_module_core_rw(addr, mod))
87400 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
87401 else
87402 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
87403 + return NULL;
87404
87405 /* Scan for closest preceding symbol, and next symbol. (ELF
87406 starts real symbols at 1). */
87407 @@ -3640,7 +3802,7 @@ static int m_show(struct seq_file *m, void *p)
87408 return 0;
87409
87410 seq_printf(m, "%s %u",
87411 - mod->name, mod->init_size + mod->core_size);
87412 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
87413 print_unload_info(m, mod);
87414
87415 /* Informative for users. */
87416 @@ -3649,7 +3811,7 @@ static int m_show(struct seq_file *m, void *p)
87417 mod->state == MODULE_STATE_COMING ? "Loading":
87418 "Live");
87419 /* Used by oprofile and other similar tools. */
87420 - seq_printf(m, " 0x%pK", mod->module_core);
87421 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
87422
87423 /* Taints info */
87424 if (mod->taints)
87425 @@ -3685,7 +3847,17 @@ static const struct file_operations proc_modules_operations = {
87426
87427 static int __init proc_modules_init(void)
87428 {
87429 +#ifndef CONFIG_GRKERNSEC_HIDESYM
87430 +#ifdef CONFIG_GRKERNSEC_PROC_USER
87431 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
87432 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87433 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
87434 +#else
87435 proc_create("modules", 0, NULL, &proc_modules_operations);
87436 +#endif
87437 +#else
87438 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
87439 +#endif
87440 return 0;
87441 }
87442 module_init(proc_modules_init);
87443 @@ -3746,14 +3918,14 @@ struct module *__module_address(unsigned long addr)
87444 {
87445 struct module *mod;
87446
87447 - if (addr < module_addr_min || addr > module_addr_max)
87448 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
87449 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
87450 return NULL;
87451
87452 list_for_each_entry_rcu(mod, &modules, list) {
87453 if (mod->state == MODULE_STATE_UNFORMED)
87454 continue;
87455 - if (within_module_core(addr, mod)
87456 - || within_module_init(addr, mod))
87457 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
87458 return mod;
87459 }
87460 return NULL;
87461 @@ -3788,11 +3960,20 @@ bool is_module_text_address(unsigned long addr)
87462 */
87463 struct module *__module_text_address(unsigned long addr)
87464 {
87465 - struct module *mod = __module_address(addr);
87466 + struct module *mod;
87467 +
87468 +#ifdef CONFIG_X86_32
87469 + addr = ktla_ktva(addr);
87470 +#endif
87471 +
87472 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
87473 + return NULL;
87474 +
87475 + mod = __module_address(addr);
87476 +
87477 if (mod) {
87478 /* Make sure it's within the text section. */
87479 - if (!within(addr, mod->module_init, mod->init_text_size)
87480 - && !within(addr, mod->module_core, mod->core_text_size))
87481 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
87482 mod = NULL;
87483 }
87484 return mod;
87485 diff --git a/kernel/notifier.c b/kernel/notifier.c
87486 index 2d5cc4c..d9ea600 100644
87487 --- a/kernel/notifier.c
87488 +++ b/kernel/notifier.c
87489 @@ -5,6 +5,7 @@
87490 #include <linux/rcupdate.h>
87491 #include <linux/vmalloc.h>
87492 #include <linux/reboot.h>
87493 +#include <linux/mm.h>
87494
87495 /*
87496 * Notifier list for kernel code which wants to be called
87497 @@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
87498 while ((*nl) != NULL) {
87499 if (n->priority > (*nl)->priority)
87500 break;
87501 - nl = &((*nl)->next);
87502 + nl = (struct notifier_block **)&((*nl)->next);
87503 }
87504 - n->next = *nl;
87505 + pax_open_kernel();
87506 + *(const void **)&n->next = *nl;
87507 rcu_assign_pointer(*nl, n);
87508 + pax_close_kernel();
87509 return 0;
87510 }
87511
87512 @@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
87513 return 0;
87514 if (n->priority > (*nl)->priority)
87515 break;
87516 - nl = &((*nl)->next);
87517 + nl = (struct notifier_block **)&((*nl)->next);
87518 }
87519 - n->next = *nl;
87520 + pax_open_kernel();
87521 + *(const void **)&n->next = *nl;
87522 rcu_assign_pointer(*nl, n);
87523 + pax_close_kernel();
87524 return 0;
87525 }
87526
87527 @@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
87528 {
87529 while ((*nl) != NULL) {
87530 if ((*nl) == n) {
87531 + pax_open_kernel();
87532 rcu_assign_pointer(*nl, n->next);
87533 + pax_close_kernel();
87534 return 0;
87535 }
87536 - nl = &((*nl)->next);
87537 + nl = (struct notifier_block **)&((*nl)->next);
87538 }
87539 return -ENOENT;
87540 }
87541 diff --git a/kernel/padata.c b/kernel/padata.c
87542 index 2abd25d..02c4faa 100644
87543 --- a/kernel/padata.c
87544 +++ b/kernel/padata.c
87545 @@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
87546 * seq_nr mod. number of cpus in use.
87547 */
87548
87549 - seq_nr = atomic_inc_return(&pd->seq_nr);
87550 + seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
87551 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
87552
87553 return padata_index_to_cpu(pd, cpu_index);
87554 @@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
87555 padata_init_pqueues(pd);
87556 padata_init_squeues(pd);
87557 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
87558 - atomic_set(&pd->seq_nr, -1);
87559 + atomic_set_unchecked(&pd->seq_nr, -1);
87560 atomic_set(&pd->reorder_objects, 0);
87561 atomic_set(&pd->refcnt, 0);
87562 pd->pinst = pinst;
87563 diff --git a/kernel/panic.c b/kernel/panic.c
87564 index c00b4ce..98c7d1a 100644
87565 --- a/kernel/panic.c
87566 +++ b/kernel/panic.c
87567 @@ -52,7 +52,7 @@ EXPORT_SYMBOL(panic_blink);
87568 /*
87569 * Stop ourself in panic -- architecture code may override this
87570 */
87571 -void __weak panic_smp_self_stop(void)
87572 +void __weak __noreturn panic_smp_self_stop(void)
87573 {
87574 while (1)
87575 cpu_relax();
87576 @@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
87577 disable_trace_on_warning();
87578
87579 pr_warn("------------[ cut here ]------------\n");
87580 - pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
87581 + pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
87582 raw_smp_processor_id(), current->pid, file, line, caller);
87583
87584 if (args)
87585 @@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
87586 */
87587 void __stack_chk_fail(void)
87588 {
87589 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
87590 + dump_stack();
87591 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
87592 __builtin_return_address(0));
87593 }
87594 EXPORT_SYMBOL(__stack_chk_fail);
87595 diff --git a/kernel/pid.c b/kernel/pid.c
87596 index 9b9a266..c20ef80 100644
87597 --- a/kernel/pid.c
87598 +++ b/kernel/pid.c
87599 @@ -33,6 +33,7 @@
87600 #include <linux/rculist.h>
87601 #include <linux/bootmem.h>
87602 #include <linux/hash.h>
87603 +#include <linux/security.h>
87604 #include <linux/pid_namespace.h>
87605 #include <linux/init_task.h>
87606 #include <linux/syscalls.h>
87607 @@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
87608
87609 int pid_max = PID_MAX_DEFAULT;
87610
87611 -#define RESERVED_PIDS 300
87612 +#define RESERVED_PIDS 500
87613
87614 int pid_max_min = RESERVED_PIDS + 1;
87615 int pid_max_max = PID_MAX_LIMIT;
87616 @@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
87617 */
87618 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
87619 {
87620 + struct task_struct *task;
87621 +
87622 rcu_lockdep_assert(rcu_read_lock_held(),
87623 "find_task_by_pid_ns() needs rcu_read_lock()"
87624 " protection");
87625 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
87626 +
87627 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
87628 +
87629 + if (gr_pid_is_chrooted(task))
87630 + return NULL;
87631 +
87632 + return task;
87633 }
87634
87635 struct task_struct *find_task_by_vpid(pid_t vnr)
87636 @@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
87637 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
87638 }
87639
87640 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
87641 +{
87642 + rcu_lockdep_assert(rcu_read_lock_held(),
87643 + "find_task_by_pid_ns() needs rcu_read_lock()"
87644 + " protection");
87645 + return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
87646 +}
87647 +
87648 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
87649 {
87650 struct pid *pid;
87651 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
87652 index 06c62de..b08cc6c 100644
87653 --- a/kernel/pid_namespace.c
87654 +++ b/kernel/pid_namespace.c
87655 @@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
87656 void __user *buffer, size_t *lenp, loff_t *ppos)
87657 {
87658 struct pid_namespace *pid_ns = task_active_pid_ns(current);
87659 - struct ctl_table tmp = *table;
87660 + ctl_table_no_const tmp = *table;
87661
87662 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
87663 return -EPERM;
87664 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
87665 index c7f31aa..2b44977 100644
87666 --- a/kernel/posix-cpu-timers.c
87667 +++ b/kernel/posix-cpu-timers.c
87668 @@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
87669
87670 static __init int init_posix_cpu_timers(void)
87671 {
87672 - struct k_clock process = {
87673 + static struct k_clock process = {
87674 .clock_getres = process_cpu_clock_getres,
87675 .clock_get = process_cpu_clock_get,
87676 .timer_create = process_cpu_timer_create,
87677 .nsleep = process_cpu_nsleep,
87678 .nsleep_restart = process_cpu_nsleep_restart,
87679 };
87680 - struct k_clock thread = {
87681 + static struct k_clock thread = {
87682 .clock_getres = thread_cpu_clock_getres,
87683 .clock_get = thread_cpu_clock_get,
87684 .timer_create = thread_cpu_timer_create,
87685 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
87686 index 424c2d4..679242f 100644
87687 --- a/kernel/posix-timers.c
87688 +++ b/kernel/posix-timers.c
87689 @@ -43,6 +43,7 @@
87690 #include <linux/hash.h>
87691 #include <linux/posix-clock.h>
87692 #include <linux/posix-timers.h>
87693 +#include <linux/grsecurity.h>
87694 #include <linux/syscalls.h>
87695 #include <linux/wait.h>
87696 #include <linux/workqueue.h>
87697 @@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
87698 * which we beg off on and pass to do_sys_settimeofday().
87699 */
87700
87701 -static struct k_clock posix_clocks[MAX_CLOCKS];
87702 +static struct k_clock *posix_clocks[MAX_CLOCKS];
87703
87704 /*
87705 * These ones are defined below.
87706 @@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
87707 */
87708 static __init int init_posix_timers(void)
87709 {
87710 - struct k_clock clock_realtime = {
87711 + static struct k_clock clock_realtime = {
87712 .clock_getres = hrtimer_get_res,
87713 .clock_get = posix_clock_realtime_get,
87714 .clock_set = posix_clock_realtime_set,
87715 @@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
87716 .timer_get = common_timer_get,
87717 .timer_del = common_timer_del,
87718 };
87719 - struct k_clock clock_monotonic = {
87720 + static struct k_clock clock_monotonic = {
87721 .clock_getres = hrtimer_get_res,
87722 .clock_get = posix_ktime_get_ts,
87723 .nsleep = common_nsleep,
87724 @@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
87725 .timer_get = common_timer_get,
87726 .timer_del = common_timer_del,
87727 };
87728 - struct k_clock clock_monotonic_raw = {
87729 + static struct k_clock clock_monotonic_raw = {
87730 .clock_getres = hrtimer_get_res,
87731 .clock_get = posix_get_monotonic_raw,
87732 };
87733 - struct k_clock clock_realtime_coarse = {
87734 + static struct k_clock clock_realtime_coarse = {
87735 .clock_getres = posix_get_coarse_res,
87736 .clock_get = posix_get_realtime_coarse,
87737 };
87738 - struct k_clock clock_monotonic_coarse = {
87739 + static struct k_clock clock_monotonic_coarse = {
87740 .clock_getres = posix_get_coarse_res,
87741 .clock_get = posix_get_monotonic_coarse,
87742 };
87743 - struct k_clock clock_tai = {
87744 + static struct k_clock clock_tai = {
87745 .clock_getres = hrtimer_get_res,
87746 .clock_get = posix_get_tai,
87747 .nsleep = common_nsleep,
87748 @@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
87749 .timer_get = common_timer_get,
87750 .timer_del = common_timer_del,
87751 };
87752 - struct k_clock clock_boottime = {
87753 + static struct k_clock clock_boottime = {
87754 .clock_getres = hrtimer_get_res,
87755 .clock_get = posix_get_boottime,
87756 .nsleep = common_nsleep,
87757 @@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
87758 return;
87759 }
87760
87761 - posix_clocks[clock_id] = *new_clock;
87762 + posix_clocks[clock_id] = new_clock;
87763 }
87764 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
87765
87766 @@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
87767 return (id & CLOCKFD_MASK) == CLOCKFD ?
87768 &clock_posix_dynamic : &clock_posix_cpu;
87769
87770 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
87771 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
87772 return NULL;
87773 - return &posix_clocks[id];
87774 + return posix_clocks[id];
87775 }
87776
87777 static int common_timer_create(struct k_itimer *new_timer)
87778 @@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
87779 struct k_clock *kc = clockid_to_kclock(which_clock);
87780 struct k_itimer *new_timer;
87781 int error, new_timer_id;
87782 - sigevent_t event;
87783 + sigevent_t event = { };
87784 int it_id_set = IT_ID_NOT_SET;
87785
87786 if (!kc)
87787 @@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
87788 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
87789 return -EFAULT;
87790
87791 + /* only the CLOCK_REALTIME clock can be set, all other clocks
87792 + have their clock_set fptr set to a nosettime dummy function
87793 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
87794 + call common_clock_set, which calls do_sys_settimeofday, which
87795 + we hook
87796 + */
87797 +
87798 return kc->clock_set(which_clock, &new_tp);
87799 }
87800
87801 diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
87802 index 2fac9cc..56fef29 100644
87803 --- a/kernel/power/Kconfig
87804 +++ b/kernel/power/Kconfig
87805 @@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
87806 config HIBERNATION
87807 bool "Hibernation (aka 'suspend to disk')"
87808 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
87809 + depends on !GRKERNSEC_KMEM
87810 + depends on !PAX_MEMORY_SANITIZE
87811 select HIBERNATE_CALLBACKS
87812 select LZO_COMPRESS
87813 select LZO_DECOMPRESS
87814 diff --git a/kernel/power/process.c b/kernel/power/process.c
87815 index 06ec886..9dba35e 100644
87816 --- a/kernel/power/process.c
87817 +++ b/kernel/power/process.c
87818 @@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
87819 unsigned int elapsed_msecs;
87820 bool wakeup = false;
87821 int sleep_usecs = USEC_PER_MSEC;
87822 + bool timedout = false;
87823
87824 do_gettimeofday(&start);
87825
87826 @@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
87827
87828 while (true) {
87829 todo = 0;
87830 + if (time_after(jiffies, end_time))
87831 + timedout = true;
87832 read_lock(&tasklist_lock);
87833 do_each_thread(g, p) {
87834 if (p == current || !freeze_task(p))
87835 continue;
87836
87837 - if (!freezer_should_skip(p))
87838 + if (!freezer_should_skip(p)) {
87839 todo++;
87840 + if (timedout) {
87841 + printk(KERN_ERR "Task refusing to freeze:\n");
87842 + sched_show_task(p);
87843 + }
87844 + }
87845 } while_each_thread(g, p);
87846 read_unlock(&tasklist_lock);
87847
87848 @@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
87849 todo += wq_busy;
87850 }
87851
87852 - if (!todo || time_after(jiffies, end_time))
87853 + if (!todo || timedout)
87854 break;
87855
87856 if (pm_wakeup_pending()) {
87857 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
87858 index be7c86b..b972b27 100644
87859 --- a/kernel/printk/printk.c
87860 +++ b/kernel/printk/printk.c
87861 @@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
87862 if (from_file && type != SYSLOG_ACTION_OPEN)
87863 return 0;
87864
87865 +#ifdef CONFIG_GRKERNSEC_DMESG
87866 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
87867 + return -EPERM;
87868 +#endif
87869 +
87870 if (syslog_action_restricted(type)) {
87871 if (capable(CAP_SYSLOG))
87872 return 0;
87873 @@ -1080,7 +1085,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
87874 next_seq = log_next_seq;
87875
87876 len = 0;
87877 - prev = 0;
87878 while (len >= 0 && seq < next_seq) {
87879 struct printk_log *msg = log_from_idx(idx);
87880 int textlen;
87881 @@ -2789,7 +2793,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
87882 next_idx = idx;
87883
87884 l = 0;
87885 - prev = 0;
87886 while (seq < dumper->next_seq) {
87887 struct printk_log *msg = log_from_idx(idx);
87888
87889 diff --git a/kernel/profile.c b/kernel/profile.c
87890 index 6631e1e..310c266 100644
87891 --- a/kernel/profile.c
87892 +++ b/kernel/profile.c
87893 @@ -37,7 +37,7 @@ struct profile_hit {
87894 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
87895 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
87896
87897 -static atomic_t *prof_buffer;
87898 +static atomic_unchecked_t *prof_buffer;
87899 static unsigned long prof_len, prof_shift;
87900
87901 int prof_on __read_mostly;
87902 @@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
87903 hits[i].pc = 0;
87904 continue;
87905 }
87906 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
87907 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
87908 hits[i].hits = hits[i].pc = 0;
87909 }
87910 }
87911 @@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
87912 * Add the current hit(s) and flush the write-queue out
87913 * to the global buffer:
87914 */
87915 - atomic_add(nr_hits, &prof_buffer[pc]);
87916 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
87917 for (i = 0; i < NR_PROFILE_HIT; ++i) {
87918 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
87919 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
87920 hits[i].pc = hits[i].hits = 0;
87921 }
87922 out:
87923 @@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
87924 {
87925 unsigned long pc;
87926 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
87927 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
87928 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
87929 }
87930 #endif /* !CONFIG_SMP */
87931
87932 @@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
87933 return -EFAULT;
87934 buf++; p++; count--; read++;
87935 }
87936 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
87937 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
87938 if (copy_to_user(buf, (void *)pnt, count))
87939 return -EFAULT;
87940 read += count;
87941 @@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
87942 }
87943 #endif
87944 profile_discard_flip_buffers();
87945 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
87946 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
87947 return count;
87948 }
87949
87950 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
87951 index 1f4bcb3..99cf7ab 100644
87952 --- a/kernel/ptrace.c
87953 +++ b/kernel/ptrace.c
87954 @@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
87955 if (seize)
87956 flags |= PT_SEIZED;
87957 rcu_read_lock();
87958 - if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
87959 + if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
87960 flags |= PT_PTRACE_CAP;
87961 rcu_read_unlock();
87962 task->ptrace = flags;
87963 @@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
87964 break;
87965 return -EIO;
87966 }
87967 - if (copy_to_user(dst, buf, retval))
87968 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
87969 return -EFAULT;
87970 copied += retval;
87971 src += retval;
87972 @@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
87973 bool seized = child->ptrace & PT_SEIZED;
87974 int ret = -EIO;
87975 siginfo_t siginfo, *si;
87976 - void __user *datavp = (void __user *) data;
87977 + void __user *datavp = (__force void __user *) data;
87978 unsigned long __user *datalp = datavp;
87979 unsigned long flags;
87980
87981 @@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
87982 goto out;
87983 }
87984
87985 + if (gr_handle_ptrace(child, request)) {
87986 + ret = -EPERM;
87987 + goto out_put_task_struct;
87988 + }
87989 +
87990 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
87991 ret = ptrace_attach(child, request, addr, data);
87992 /*
87993 * Some architectures need to do book-keeping after
87994 * a ptrace attach.
87995 */
87996 - if (!ret)
87997 + if (!ret) {
87998 arch_ptrace_attach(child);
87999 + gr_audit_ptrace(child);
88000 + }
88001 goto out_put_task_struct;
88002 }
88003
88004 @@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
88005 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
88006 if (copied != sizeof(tmp))
88007 return -EIO;
88008 - return put_user(tmp, (unsigned long __user *)data);
88009 + return put_user(tmp, (__force unsigned long __user *)data);
88010 }
88011
88012 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
88013 @@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
88014 }
88015
88016 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
88017 - compat_long_t addr, compat_long_t data)
88018 + compat_ulong_t addr, compat_ulong_t data)
88019 {
88020 struct task_struct *child;
88021 long ret;
88022 @@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
88023 goto out;
88024 }
88025
88026 + if (gr_handle_ptrace(child, request)) {
88027 + ret = -EPERM;
88028 + goto out_put_task_struct;
88029 + }
88030 +
88031 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
88032 ret = ptrace_attach(child, request, addr, data);
88033 /*
88034 * Some architectures need to do book-keeping after
88035 * a ptrace attach.
88036 */
88037 - if (!ret)
88038 + if (!ret) {
88039 arch_ptrace_attach(child);
88040 + gr_audit_ptrace(child);
88041 + }
88042 goto out_put_task_struct;
88043 }
88044
88045 diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
88046 index 01d5ccb..cdcbee6 100644
88047 --- a/kernel/rcu/srcu.c
88048 +++ b/kernel/rcu/srcu.c
88049 @@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
88050
88051 idx = ACCESS_ONCE(sp->completed) & 0x1;
88052 preempt_disable();
88053 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
88054 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
88055 smp_mb(); /* B */ /* Avoid leaking the critical section. */
88056 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
88057 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
88058 preempt_enable();
88059 return idx;
88060 }
88061 diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
88062 index 1254f31..16258dc 100644
88063 --- a/kernel/rcu/tiny.c
88064 +++ b/kernel/rcu/tiny.c
88065 @@ -46,7 +46,7 @@
88066 /* Forward declarations for tiny_plugin.h. */
88067 struct rcu_ctrlblk;
88068 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
88069 -static void rcu_process_callbacks(struct softirq_action *unused);
88070 +static void rcu_process_callbacks(void);
88071 static void __call_rcu(struct rcu_head *head,
88072 void (*func)(struct rcu_head *rcu),
88073 struct rcu_ctrlblk *rcp);
88074 @@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
88075 false));
88076 }
88077
88078 -static void rcu_process_callbacks(struct softirq_action *unused)
88079 +static __latent_entropy void rcu_process_callbacks(void)
88080 {
88081 __rcu_process_callbacks(&rcu_sched_ctrlblk);
88082 __rcu_process_callbacks(&rcu_bh_ctrlblk);
88083 diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
88084 index 3929cd4..421624d 100644
88085 --- a/kernel/rcu/torture.c
88086 +++ b/kernel/rcu/torture.c
88087 @@ -176,12 +176,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
88088 { 0 };
88089 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
88090 { 0 };
88091 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
88092 -static atomic_t n_rcu_torture_alloc;
88093 -static atomic_t n_rcu_torture_alloc_fail;
88094 -static atomic_t n_rcu_torture_free;
88095 -static atomic_t n_rcu_torture_mberror;
88096 -static atomic_t n_rcu_torture_error;
88097 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
88098 +static atomic_unchecked_t n_rcu_torture_alloc;
88099 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
88100 +static atomic_unchecked_t n_rcu_torture_free;
88101 +static atomic_unchecked_t n_rcu_torture_mberror;
88102 +static atomic_unchecked_t n_rcu_torture_error;
88103 static long n_rcu_torture_barrier_error;
88104 static long n_rcu_torture_boost_ktrerror;
88105 static long n_rcu_torture_boost_rterror;
88106 @@ -299,11 +299,11 @@ rcu_torture_alloc(void)
88107
88108 spin_lock_bh(&rcu_torture_lock);
88109 if (list_empty(&rcu_torture_freelist)) {
88110 - atomic_inc(&n_rcu_torture_alloc_fail);
88111 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
88112 spin_unlock_bh(&rcu_torture_lock);
88113 return NULL;
88114 }
88115 - atomic_inc(&n_rcu_torture_alloc);
88116 + atomic_inc_unchecked(&n_rcu_torture_alloc);
88117 p = rcu_torture_freelist.next;
88118 list_del_init(p);
88119 spin_unlock_bh(&rcu_torture_lock);
88120 @@ -316,7 +316,7 @@ rcu_torture_alloc(void)
88121 static void
88122 rcu_torture_free(struct rcu_torture *p)
88123 {
88124 - atomic_inc(&n_rcu_torture_free);
88125 + atomic_inc_unchecked(&n_rcu_torture_free);
88126 spin_lock_bh(&rcu_torture_lock);
88127 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
88128 spin_unlock_bh(&rcu_torture_lock);
88129 @@ -437,7 +437,7 @@ rcu_torture_cb(struct rcu_head *p)
88130 i = rp->rtort_pipe_count;
88131 if (i > RCU_TORTURE_PIPE_LEN)
88132 i = RCU_TORTURE_PIPE_LEN;
88133 - atomic_inc(&rcu_torture_wcount[i]);
88134 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
88135 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
88136 rp->rtort_mbtest = 0;
88137 rcu_torture_free(rp);
88138 @@ -827,7 +827,7 @@ rcu_torture_writer(void *arg)
88139 i = old_rp->rtort_pipe_count;
88140 if (i > RCU_TORTURE_PIPE_LEN)
88141 i = RCU_TORTURE_PIPE_LEN;
88142 - atomic_inc(&rcu_torture_wcount[i]);
88143 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
88144 old_rp->rtort_pipe_count++;
88145 if (gp_normal == gp_exp)
88146 exp = !!(rcu_random(&rand) & 0x80);
88147 @@ -845,7 +845,7 @@ rcu_torture_writer(void *arg)
88148 i = rp->rtort_pipe_count;
88149 if (i > RCU_TORTURE_PIPE_LEN)
88150 i = RCU_TORTURE_PIPE_LEN;
88151 - atomic_inc(&rcu_torture_wcount[i]);
88152 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
88153 if (++rp->rtort_pipe_count >=
88154 RCU_TORTURE_PIPE_LEN) {
88155 rp->rtort_mbtest = 0;
88156 @@ -944,7 +944,7 @@ static void rcu_torture_timer(unsigned long unused)
88157 return;
88158 }
88159 if (p->rtort_mbtest == 0)
88160 - atomic_inc(&n_rcu_torture_mberror);
88161 + atomic_inc_unchecked(&n_rcu_torture_mberror);
88162 spin_lock(&rand_lock);
88163 cur_ops->read_delay(&rand);
88164 n_rcu_torture_timers++;
88165 @@ -1014,7 +1014,7 @@ rcu_torture_reader(void *arg)
88166 continue;
88167 }
88168 if (p->rtort_mbtest == 0)
88169 - atomic_inc(&n_rcu_torture_mberror);
88170 + atomic_inc_unchecked(&n_rcu_torture_mberror);
88171 cur_ops->read_delay(&rand);
88172 preempt_disable();
88173 pipe_count = p->rtort_pipe_count;
88174 @@ -1077,11 +1077,11 @@ rcu_torture_printk(char *page)
88175 rcu_torture_current,
88176 rcu_torture_current_version,
88177 list_empty(&rcu_torture_freelist),
88178 - atomic_read(&n_rcu_torture_alloc),
88179 - atomic_read(&n_rcu_torture_alloc_fail),
88180 - atomic_read(&n_rcu_torture_free));
88181 + atomic_read_unchecked(&n_rcu_torture_alloc),
88182 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
88183 + atomic_read_unchecked(&n_rcu_torture_free));
88184 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
88185 - atomic_read(&n_rcu_torture_mberror),
88186 + atomic_read_unchecked(&n_rcu_torture_mberror),
88187 n_rcu_torture_boost_ktrerror,
88188 n_rcu_torture_boost_rterror);
88189 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
88190 @@ -1100,14 +1100,14 @@ rcu_torture_printk(char *page)
88191 n_barrier_attempts,
88192 n_rcu_torture_barrier_error);
88193 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
88194 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
88195 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
88196 n_rcu_torture_barrier_error != 0 ||
88197 n_rcu_torture_boost_ktrerror != 0 ||
88198 n_rcu_torture_boost_rterror != 0 ||
88199 n_rcu_torture_boost_failure != 0 ||
88200 i > 1) {
88201 cnt += sprintf(&page[cnt], "!!! ");
88202 - atomic_inc(&n_rcu_torture_error);
88203 + atomic_inc_unchecked(&n_rcu_torture_error);
88204 WARN_ON_ONCE(1);
88205 }
88206 cnt += sprintf(&page[cnt], "Reader Pipe: ");
88207 @@ -1121,7 +1121,7 @@ rcu_torture_printk(char *page)
88208 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
88209 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
88210 cnt += sprintf(&page[cnt], " %d",
88211 - atomic_read(&rcu_torture_wcount[i]));
88212 + atomic_read_unchecked(&rcu_torture_wcount[i]));
88213 }
88214 cnt += sprintf(&page[cnt], "\n");
88215 if (cur_ops->stats)
88216 @@ -1836,7 +1836,7 @@ rcu_torture_cleanup(void)
88217
88218 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
88219
88220 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
88221 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
88222 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
88223 else if (n_online_successes != n_online_attempts ||
88224 n_offline_successes != n_offline_attempts)
88225 @@ -1958,18 +1958,18 @@ rcu_torture_init(void)
88226
88227 rcu_torture_current = NULL;
88228 rcu_torture_current_version = 0;
88229 - atomic_set(&n_rcu_torture_alloc, 0);
88230 - atomic_set(&n_rcu_torture_alloc_fail, 0);
88231 - atomic_set(&n_rcu_torture_free, 0);
88232 - atomic_set(&n_rcu_torture_mberror, 0);
88233 - atomic_set(&n_rcu_torture_error, 0);
88234 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
88235 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
88236 + atomic_set_unchecked(&n_rcu_torture_free, 0);
88237 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
88238 + atomic_set_unchecked(&n_rcu_torture_error, 0);
88239 n_rcu_torture_barrier_error = 0;
88240 n_rcu_torture_boost_ktrerror = 0;
88241 n_rcu_torture_boost_rterror = 0;
88242 n_rcu_torture_boost_failure = 0;
88243 n_rcu_torture_boosts = 0;
88244 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
88245 - atomic_set(&rcu_torture_wcount[i], 0);
88246 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
88247 for_each_possible_cpu(cpu) {
88248 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
88249 per_cpu(rcu_torture_count, cpu)[i] = 0;
88250 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
88251 index dd08198..5ccccbe 100644
88252 --- a/kernel/rcu/tree.c
88253 +++ b/kernel/rcu/tree.c
88254 @@ -383,9 +383,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
88255 rcu_prepare_for_idle(smp_processor_id());
88256 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
88257 smp_mb__before_atomic_inc(); /* See above. */
88258 - atomic_inc(&rdtp->dynticks);
88259 + atomic_inc_unchecked(&rdtp->dynticks);
88260 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
88261 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
88262 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
88263
88264 /*
88265 * It is illegal to enter an extended quiescent state while
88266 @@ -502,10 +502,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
88267 int user)
88268 {
88269 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
88270 - atomic_inc(&rdtp->dynticks);
88271 + atomic_inc_unchecked(&rdtp->dynticks);
88272 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
88273 smp_mb__after_atomic_inc(); /* See above. */
88274 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
88275 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
88276 rcu_cleanup_after_idle(smp_processor_id());
88277 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
88278 if (!user && !is_idle_task(current)) {
88279 @@ -625,14 +625,14 @@ void rcu_nmi_enter(void)
88280 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
88281
88282 if (rdtp->dynticks_nmi_nesting == 0 &&
88283 - (atomic_read(&rdtp->dynticks) & 0x1))
88284 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
88285 return;
88286 rdtp->dynticks_nmi_nesting++;
88287 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
88288 - atomic_inc(&rdtp->dynticks);
88289 + atomic_inc_unchecked(&rdtp->dynticks);
88290 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
88291 smp_mb__after_atomic_inc(); /* See above. */
88292 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
88293 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
88294 }
88295
88296 /**
88297 @@ -651,9 +651,9 @@ void rcu_nmi_exit(void)
88298 return;
88299 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
88300 smp_mb__before_atomic_inc(); /* See above. */
88301 - atomic_inc(&rdtp->dynticks);
88302 + atomic_inc_unchecked(&rdtp->dynticks);
88303 smp_mb__after_atomic_inc(); /* Force delay to next write. */
88304 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
88305 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
88306 }
88307
88308 /**
88309 @@ -666,7 +666,7 @@ void rcu_nmi_exit(void)
88310 */
88311 bool notrace __rcu_is_watching(void)
88312 {
88313 - return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
88314 + return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
88315 }
88316
88317 /**
88318 @@ -749,7 +749,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
88319 static int dyntick_save_progress_counter(struct rcu_data *rdp,
88320 bool *isidle, unsigned long *maxj)
88321 {
88322 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
88323 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
88324 rcu_sysidle_check_cpu(rdp, isidle, maxj);
88325 return (rdp->dynticks_snap & 0x1) == 0;
88326 }
88327 @@ -766,7 +766,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
88328 unsigned int curr;
88329 unsigned int snap;
88330
88331 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
88332 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
88333 snap = (unsigned int)rdp->dynticks_snap;
88334
88335 /*
88336 @@ -1412,9 +1412,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
88337 rdp = this_cpu_ptr(rsp->rda);
88338 rcu_preempt_check_blocked_tasks(rnp);
88339 rnp->qsmask = rnp->qsmaskinit;
88340 - ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
88341 + ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
88342 WARN_ON_ONCE(rnp->completed != rsp->completed);
88343 - ACCESS_ONCE(rnp->completed) = rsp->completed;
88344 + ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
88345 if (rnp == rdp->mynode)
88346 __note_gp_changes(rsp, rnp, rdp);
88347 rcu_preempt_boost_start_gp(rnp);
88348 @@ -1505,7 +1505,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
88349 */
88350 rcu_for_each_node_breadth_first(rsp, rnp) {
88351 raw_spin_lock_irq(&rnp->lock);
88352 - ACCESS_ONCE(rnp->completed) = rsp->gpnum;
88353 + ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
88354 rdp = this_cpu_ptr(rsp->rda);
88355 if (rnp == rdp->mynode)
88356 __note_gp_changes(rsp, rnp, rdp);
88357 @@ -1865,7 +1865,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
88358 rsp->qlen += rdp->qlen;
88359 rdp->n_cbs_orphaned += rdp->qlen;
88360 rdp->qlen_lazy = 0;
88361 - ACCESS_ONCE(rdp->qlen) = 0;
88362 + ACCESS_ONCE_RW(rdp->qlen) = 0;
88363 }
88364
88365 /*
88366 @@ -2111,7 +2111,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
88367 }
88368 smp_mb(); /* List handling before counting for rcu_barrier(). */
88369 rdp->qlen_lazy -= count_lazy;
88370 - ACCESS_ONCE(rdp->qlen) -= count;
88371 + ACCESS_ONCE_RW(rdp->qlen) -= count;
88372 rdp->n_cbs_invoked += count;
88373
88374 /* Reinstate batch limit if we have worked down the excess. */
88375 @@ -2308,7 +2308,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
88376 /*
88377 * Do RCU core processing for the current CPU.
88378 */
88379 -static void rcu_process_callbacks(struct softirq_action *unused)
88380 +static void rcu_process_callbacks(void)
88381 {
88382 struct rcu_state *rsp;
88383
88384 @@ -2415,7 +2415,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
88385 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
88386 if (debug_rcu_head_queue(head)) {
88387 /* Probable double call_rcu(), so leak the callback. */
88388 - ACCESS_ONCE(head->func) = rcu_leak_callback;
88389 + ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
88390 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
88391 return;
88392 }
88393 @@ -2443,7 +2443,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
88394 local_irq_restore(flags);
88395 return;
88396 }
88397 - ACCESS_ONCE(rdp->qlen)++;
88398 + ACCESS_ONCE_RW(rdp->qlen)++;
88399 if (lazy)
88400 rdp->qlen_lazy++;
88401 else
88402 @@ -2652,11 +2652,11 @@ void synchronize_sched_expedited(void)
88403 * counter wrap on a 32-bit system. Quite a few more CPUs would of
88404 * course be required on a 64-bit system.
88405 */
88406 - if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
88407 + if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
88408 (ulong)atomic_long_read(&rsp->expedited_done) +
88409 ULONG_MAX / 8)) {
88410 synchronize_sched();
88411 - atomic_long_inc(&rsp->expedited_wrap);
88412 + atomic_long_inc_unchecked(&rsp->expedited_wrap);
88413 return;
88414 }
88415
88416 @@ -2664,7 +2664,7 @@ void synchronize_sched_expedited(void)
88417 * Take a ticket. Note that atomic_inc_return() implies a
88418 * full memory barrier.
88419 */
88420 - snap = atomic_long_inc_return(&rsp->expedited_start);
88421 + snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
88422 firstsnap = snap;
88423 get_online_cpus();
88424 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
88425 @@ -2677,14 +2677,14 @@ void synchronize_sched_expedited(void)
88426 synchronize_sched_expedited_cpu_stop,
88427 NULL) == -EAGAIN) {
88428 put_online_cpus();
88429 - atomic_long_inc(&rsp->expedited_tryfail);
88430 + atomic_long_inc_unchecked(&rsp->expedited_tryfail);
88431
88432 /* Check to see if someone else did our work for us. */
88433 s = atomic_long_read(&rsp->expedited_done);
88434 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
88435 /* ensure test happens before caller kfree */
88436 smp_mb__before_atomic_inc(); /* ^^^ */
88437 - atomic_long_inc(&rsp->expedited_workdone1);
88438 + atomic_long_inc_unchecked(&rsp->expedited_workdone1);
88439 return;
88440 }
88441
88442 @@ -2693,7 +2693,7 @@ void synchronize_sched_expedited(void)
88443 udelay(trycount * num_online_cpus());
88444 } else {
88445 wait_rcu_gp(call_rcu_sched);
88446 - atomic_long_inc(&rsp->expedited_normal);
88447 + atomic_long_inc_unchecked(&rsp->expedited_normal);
88448 return;
88449 }
88450
88451 @@ -2702,7 +2702,7 @@ void synchronize_sched_expedited(void)
88452 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
88453 /* ensure test happens before caller kfree */
88454 smp_mb__before_atomic_inc(); /* ^^^ */
88455 - atomic_long_inc(&rsp->expedited_workdone2);
88456 + atomic_long_inc_unchecked(&rsp->expedited_workdone2);
88457 return;
88458 }
88459
88460 @@ -2714,10 +2714,10 @@ void synchronize_sched_expedited(void)
88461 * period works for us.
88462 */
88463 get_online_cpus();
88464 - snap = atomic_long_read(&rsp->expedited_start);
88465 + snap = atomic_long_read_unchecked(&rsp->expedited_start);
88466 smp_mb(); /* ensure read is before try_stop_cpus(). */
88467 }
88468 - atomic_long_inc(&rsp->expedited_stoppedcpus);
88469 + atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
88470
88471 /*
88472 * Everyone up to our most recent fetch is covered by our grace
88473 @@ -2726,16 +2726,16 @@ void synchronize_sched_expedited(void)
88474 * than we did already did their update.
88475 */
88476 do {
88477 - atomic_long_inc(&rsp->expedited_done_tries);
88478 + atomic_long_inc_unchecked(&rsp->expedited_done_tries);
88479 s = atomic_long_read(&rsp->expedited_done);
88480 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
88481 /* ensure test happens before caller kfree */
88482 smp_mb__before_atomic_inc(); /* ^^^ */
88483 - atomic_long_inc(&rsp->expedited_done_lost);
88484 + atomic_long_inc_unchecked(&rsp->expedited_done_lost);
88485 break;
88486 }
88487 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
88488 - atomic_long_inc(&rsp->expedited_done_exit);
88489 + atomic_long_inc_unchecked(&rsp->expedited_done_exit);
88490
88491 put_online_cpus();
88492 }
88493 @@ -2931,7 +2931,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
88494 * ACCESS_ONCE() to prevent the compiler from speculating
88495 * the increment to precede the early-exit check.
88496 */
88497 - ACCESS_ONCE(rsp->n_barrier_done)++;
88498 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
88499 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
88500 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
88501 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
88502 @@ -2981,7 +2981,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
88503
88504 /* Increment ->n_barrier_done to prevent duplicate work. */
88505 smp_mb(); /* Keep increment after above mechanism. */
88506 - ACCESS_ONCE(rsp->n_barrier_done)++;
88507 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
88508 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
88509 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
88510 smp_mb(); /* Keep increment before caller's subsequent code. */
88511 @@ -3026,10 +3026,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
88512 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
88513 init_callback_list(rdp);
88514 rdp->qlen_lazy = 0;
88515 - ACCESS_ONCE(rdp->qlen) = 0;
88516 + ACCESS_ONCE_RW(rdp->qlen) = 0;
88517 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
88518 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
88519 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
88520 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
88521 rdp->cpu = cpu;
88522 rdp->rsp = rsp;
88523 rcu_boot_init_nocb_percpu_data(rdp);
88524 @@ -3063,8 +3063,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
88525 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
88526 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
88527 rcu_sysidle_init_percpu_data(rdp->dynticks);
88528 - atomic_set(&rdp->dynticks->dynticks,
88529 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
88530 + atomic_set_unchecked(&rdp->dynticks->dynticks,
88531 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
88532 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
88533
88534 /* Add CPU to rcu_node bitmasks. */
88535 diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
88536 index 52be957..365ded3 100644
88537 --- a/kernel/rcu/tree.h
88538 +++ b/kernel/rcu/tree.h
88539 @@ -87,11 +87,11 @@ struct rcu_dynticks {
88540 long long dynticks_nesting; /* Track irq/process nesting level. */
88541 /* Process level is worth LLONG_MAX/2. */
88542 int dynticks_nmi_nesting; /* Track NMI nesting level. */
88543 - atomic_t dynticks; /* Even value for idle, else odd. */
88544 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
88545 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
88546 long long dynticks_idle_nesting;
88547 /* irq/process nesting level from idle. */
88548 - atomic_t dynticks_idle; /* Even value for idle, else odd. */
88549 + atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
88550 /* "Idle" excludes userspace execution. */
88551 unsigned long dynticks_idle_jiffies;
88552 /* End of last non-NMI non-idle period. */
88553 @@ -429,17 +429,17 @@ struct rcu_state {
88554 /* _rcu_barrier(). */
88555 /* End of fields guarded by barrier_mutex. */
88556
88557 - atomic_long_t expedited_start; /* Starting ticket. */
88558 - atomic_long_t expedited_done; /* Done ticket. */
88559 - atomic_long_t expedited_wrap; /* # near-wrap incidents. */
88560 - atomic_long_t expedited_tryfail; /* # acquisition failures. */
88561 - atomic_long_t expedited_workdone1; /* # done by others #1. */
88562 - atomic_long_t expedited_workdone2; /* # done by others #2. */
88563 - atomic_long_t expedited_normal; /* # fallbacks to normal. */
88564 - atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
88565 - atomic_long_t expedited_done_tries; /* # tries to update _done. */
88566 - atomic_long_t expedited_done_lost; /* # times beaten to _done. */
88567 - atomic_long_t expedited_done_exit; /* # times exited _done loop. */
88568 + atomic_long_unchecked_t expedited_start; /* Starting ticket. */
88569 + atomic_long_t expedited_done; /* Done ticket. */
88570 + atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
88571 + atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
88572 + atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
88573 + atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
88574 + atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
88575 + atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
88576 + atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
88577 + atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
88578 + atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
88579
88580 unsigned long jiffies_force_qs; /* Time at which to invoke */
88581 /* force_quiescent_state(). */
88582 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
88583 index 08a7652..3598c7e 100644
88584 --- a/kernel/rcu/tree_plugin.h
88585 +++ b/kernel/rcu/tree_plugin.h
88586 @@ -749,7 +749,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
88587 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
88588 {
88589 return !rcu_preempted_readers_exp(rnp) &&
88590 - ACCESS_ONCE(rnp->expmask) == 0;
88591 + ACCESS_ONCE_RW(rnp->expmask) == 0;
88592 }
88593
88594 /*
88595 @@ -905,7 +905,7 @@ void synchronize_rcu_expedited(void)
88596
88597 /* Clean up and exit. */
88598 smp_mb(); /* ensure expedited GP seen before counter increment. */
88599 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
88600 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
88601 unlock_mb_ret:
88602 mutex_unlock(&sync_rcu_preempt_exp_mutex);
88603 mb_ret:
88604 @@ -1479,7 +1479,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
88605 free_cpumask_var(cm);
88606 }
88607
88608 -static struct smp_hotplug_thread rcu_cpu_thread_spec = {
88609 +static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
88610 .store = &rcu_cpu_kthread_task,
88611 .thread_should_run = rcu_cpu_kthread_should_run,
88612 .thread_fn = rcu_cpu_kthread,
88613 @@ -1946,7 +1946,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
88614 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
88615 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
88616 cpu, ticks_value, ticks_title,
88617 - atomic_read(&rdtp->dynticks) & 0xfff,
88618 + atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
88619 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
88620 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
88621 fast_no_hz);
88622 @@ -2109,7 +2109,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
88623
88624 /* Enqueue the callback on the nocb list and update counts. */
88625 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
88626 - ACCESS_ONCE(*old_rhpp) = rhp;
88627 + ACCESS_ONCE_RW(*old_rhpp) = rhp;
88628 atomic_long_add(rhcount, &rdp->nocb_q_count);
88629 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
88630
88631 @@ -2272,12 +2272,12 @@ static int rcu_nocb_kthread(void *arg)
88632 * Extract queued callbacks, update counts, and wait
88633 * for a grace period to elapse.
88634 */
88635 - ACCESS_ONCE(rdp->nocb_head) = NULL;
88636 + ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
88637 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
88638 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
88639 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
88640 - ACCESS_ONCE(rdp->nocb_p_count) += c;
88641 - ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
88642 + ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
88643 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
88644 rcu_nocb_wait_gp(rdp);
88645
88646 /* Each pass through the following loop invokes a callback. */
88647 @@ -2303,8 +2303,8 @@ static int rcu_nocb_kthread(void *arg)
88648 list = next;
88649 }
88650 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
88651 - ACCESS_ONCE(rdp->nocb_p_count) -= c;
88652 - ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
88653 + ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
88654 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
88655 rdp->n_nocbs_invoked += c;
88656 }
88657 return 0;
88658 @@ -2331,7 +2331,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
88659 t = kthread_run(rcu_nocb_kthread, rdp,
88660 "rcuo%c/%d", rsp->abbr, cpu);
88661 BUG_ON(IS_ERR(t));
88662 - ACCESS_ONCE(rdp->nocb_kthread) = t;
88663 + ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
88664 }
88665 }
88666
88667 @@ -2457,11 +2457,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
88668
88669 /* Record start of fully idle period. */
88670 j = jiffies;
88671 - ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
88672 + ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
88673 smp_mb__before_atomic_inc();
88674 - atomic_inc(&rdtp->dynticks_idle);
88675 + atomic_inc_unchecked(&rdtp->dynticks_idle);
88676 smp_mb__after_atomic_inc();
88677 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
88678 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
88679 }
88680
88681 /*
88682 @@ -2526,9 +2526,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
88683
88684 /* Record end of idle period. */
88685 smp_mb__before_atomic_inc();
88686 - atomic_inc(&rdtp->dynticks_idle);
88687 + atomic_inc_unchecked(&rdtp->dynticks_idle);
88688 smp_mb__after_atomic_inc();
88689 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
88690 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
88691
88692 /*
88693 * If we are the timekeeping CPU, we are permitted to be non-idle
88694 @@ -2569,7 +2569,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
88695 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
88696
88697 /* Pick up current idle and NMI-nesting counter and check. */
88698 - cur = atomic_read(&rdtp->dynticks_idle);
88699 + cur = atomic_read_unchecked(&rdtp->dynticks_idle);
88700 if (cur & 0x1) {
88701 *isidle = false; /* We are not idle! */
88702 return;
88703 @@ -2632,7 +2632,7 @@ static void rcu_sysidle(unsigned long j)
88704 case RCU_SYSIDLE_NOT:
88705
88706 /* First time all are idle, so note a short idle period. */
88707 - ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
88708 + ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
88709 break;
88710
88711 case RCU_SYSIDLE_SHORT:
88712 @@ -2669,7 +2669,7 @@ static void rcu_sysidle(unsigned long j)
88713 static void rcu_sysidle_cancel(void)
88714 {
88715 smp_mb();
88716 - ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
88717 + ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
88718 }
88719
88720 /*
88721 @@ -2717,7 +2717,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
88722 smp_mb(); /* grace period precedes setting inuse. */
88723
88724 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
88725 - ACCESS_ONCE(rshp->inuse) = 0;
88726 + ACCESS_ONCE_RW(rshp->inuse) = 0;
88727 }
88728
88729 /*
88730 diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
88731 index 3596797..f78391c 100644
88732 --- a/kernel/rcu/tree_trace.c
88733 +++ b/kernel/rcu/tree_trace.c
88734 @@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
88735 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
88736 rdp->passed_quiesce, rdp->qs_pending);
88737 seq_printf(m, " dt=%d/%llx/%d df=%lu",
88738 - atomic_read(&rdp->dynticks->dynticks),
88739 + atomic_read_unchecked(&rdp->dynticks->dynticks),
88740 rdp->dynticks->dynticks_nesting,
88741 rdp->dynticks->dynticks_nmi_nesting,
88742 rdp->dynticks_fqs);
88743 @@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
88744 struct rcu_state *rsp = (struct rcu_state *)m->private;
88745
88746 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
88747 - atomic_long_read(&rsp->expedited_start),
88748 + atomic_long_read_unchecked(&rsp->expedited_start),
88749 atomic_long_read(&rsp->expedited_done),
88750 - atomic_long_read(&rsp->expedited_wrap),
88751 - atomic_long_read(&rsp->expedited_tryfail),
88752 - atomic_long_read(&rsp->expedited_workdone1),
88753 - atomic_long_read(&rsp->expedited_workdone2),
88754 - atomic_long_read(&rsp->expedited_normal),
88755 - atomic_long_read(&rsp->expedited_stoppedcpus),
88756 - atomic_long_read(&rsp->expedited_done_tries),
88757 - atomic_long_read(&rsp->expedited_done_lost),
88758 - atomic_long_read(&rsp->expedited_done_exit));
88759 + atomic_long_read_unchecked(&rsp->expedited_wrap),
88760 + atomic_long_read_unchecked(&rsp->expedited_tryfail),
88761 + atomic_long_read_unchecked(&rsp->expedited_workdone1),
88762 + atomic_long_read_unchecked(&rsp->expedited_workdone2),
88763 + atomic_long_read_unchecked(&rsp->expedited_normal),
88764 + atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
88765 + atomic_long_read_unchecked(&rsp->expedited_done_tries),
88766 + atomic_long_read_unchecked(&rsp->expedited_done_lost),
88767 + atomic_long_read_unchecked(&rsp->expedited_done_exit));
88768 return 0;
88769 }
88770
88771 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
88772 index 6cb3dff..dc5710f 100644
88773 --- a/kernel/rcu/update.c
88774 +++ b/kernel/rcu/update.c
88775 @@ -318,10 +318,10 @@ int rcu_jiffies_till_stall_check(void)
88776 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
88777 */
88778 if (till_stall_check < 3) {
88779 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
88780 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
88781 till_stall_check = 3;
88782 } else if (till_stall_check > 300) {
88783 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
88784 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
88785 till_stall_check = 300;
88786 }
88787 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
88788 diff --git a/kernel/resource.c b/kernel/resource.c
88789 index 3f285dc..5755f62 100644
88790 --- a/kernel/resource.c
88791 +++ b/kernel/resource.c
88792 @@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
88793
88794 static int __init ioresources_init(void)
88795 {
88796 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
88797 +#ifdef CONFIG_GRKERNSEC_PROC_USER
88798 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
88799 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
88800 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88801 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
88802 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
88803 +#endif
88804 +#else
88805 proc_create("ioports", 0, NULL, &proc_ioports_operations);
88806 proc_create("iomem", 0, NULL, &proc_iomem_operations);
88807 +#endif
88808 return 0;
88809 }
88810 __initcall(ioresources_init);
88811 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
88812 index 4a07353..66b5291 100644
88813 --- a/kernel/sched/auto_group.c
88814 +++ b/kernel/sched/auto_group.c
88815 @@ -11,7 +11,7 @@
88816
88817 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
88818 static struct autogroup autogroup_default;
88819 -static atomic_t autogroup_seq_nr;
88820 +static atomic_unchecked_t autogroup_seq_nr;
88821
88822 void __init autogroup_init(struct task_struct *init_task)
88823 {
88824 @@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
88825
88826 kref_init(&ag->kref);
88827 init_rwsem(&ag->lock);
88828 - ag->id = atomic_inc_return(&autogroup_seq_nr);
88829 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
88830 ag->tg = tg;
88831 #ifdef CONFIG_RT_GROUP_SCHED
88832 /*
88833 diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
88834 index a63f4dc..349bbb0 100644
88835 --- a/kernel/sched/completion.c
88836 +++ b/kernel/sched/completion.c
88837 @@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
88838 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
88839 * or number of jiffies left till timeout) if completed.
88840 */
88841 -long __sched
88842 +long __sched __intentional_overflow(-1)
88843 wait_for_completion_interruptible_timeout(struct completion *x,
88844 unsigned long timeout)
88845 {
88846 @@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
88847 *
88848 * Return: -ERESTARTSYS if interrupted, 0 if completed.
88849 */
88850 -int __sched wait_for_completion_killable(struct completion *x)
88851 +int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
88852 {
88853 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
88854 if (t == -ERESTARTSYS)
88855 @@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
88856 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
88857 * or number of jiffies left till timeout) if completed.
88858 */
88859 -long __sched
88860 +long __sched __intentional_overflow(-1)
88861 wait_for_completion_killable_timeout(struct completion *x,
88862 unsigned long timeout)
88863 {
88864 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
88865 index c677510..132bb14 100644
88866 --- a/kernel/sched/core.c
88867 +++ b/kernel/sched/core.c
88868 @@ -1768,7 +1768,7 @@ void set_numabalancing_state(bool enabled)
88869 int sysctl_numa_balancing(struct ctl_table *table, int write,
88870 void __user *buffer, size_t *lenp, loff_t *ppos)
88871 {
88872 - struct ctl_table t;
88873 + ctl_table_no_const t;
88874 int err;
88875 int state = numabalancing_enabled;
88876
88877 @@ -2893,6 +2893,8 @@ int can_nice(const struct task_struct *p, const int nice)
88878 /* convert nice value [19,-20] to rlimit style value [1,40] */
88879 int nice_rlim = 20 - nice;
88880
88881 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
88882 +
88883 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
88884 capable(CAP_SYS_NICE));
88885 }
88886 @@ -2926,7 +2928,8 @@ SYSCALL_DEFINE1(nice, int, increment)
88887 if (nice > 19)
88888 nice = 19;
88889
88890 - if (increment < 0 && !can_nice(current, nice))
88891 + if (increment < 0 && (!can_nice(current, nice) ||
88892 + gr_handle_chroot_nice()))
88893 return -EPERM;
88894
88895 retval = security_task_setnice(current, nice);
88896 @@ -3088,6 +3091,7 @@ recheck:
88897 unsigned long rlim_rtprio =
88898 task_rlimit(p, RLIMIT_RTPRIO);
88899
88900 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
88901 /* can't set/change the rt policy */
88902 if (policy != p->policy && !rlim_rtprio)
88903 return -EPERM;
88904 @@ -4254,7 +4258,7 @@ static void migrate_tasks(unsigned int dead_cpu)
88905
88906 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
88907
88908 -static struct ctl_table sd_ctl_dir[] = {
88909 +static ctl_table_no_const sd_ctl_dir[] __read_only = {
88910 {
88911 .procname = "sched_domain",
88912 .mode = 0555,
88913 @@ -4271,17 +4275,17 @@ static struct ctl_table sd_ctl_root[] = {
88914 {}
88915 };
88916
88917 -static struct ctl_table *sd_alloc_ctl_entry(int n)
88918 +static ctl_table_no_const *sd_alloc_ctl_entry(int n)
88919 {
88920 - struct ctl_table *entry =
88921 + ctl_table_no_const *entry =
88922 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
88923
88924 return entry;
88925 }
88926
88927 -static void sd_free_ctl_entry(struct ctl_table **tablep)
88928 +static void sd_free_ctl_entry(ctl_table_no_const *tablep)
88929 {
88930 - struct ctl_table *entry;
88931 + ctl_table_no_const *entry;
88932
88933 /*
88934 * In the intermediate directories, both the child directory and
88935 @@ -4289,22 +4293,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
88936 * will always be set. In the lowest directory the names are
88937 * static strings and all have proc handlers.
88938 */
88939 - for (entry = *tablep; entry->mode; entry++) {
88940 - if (entry->child)
88941 - sd_free_ctl_entry(&entry->child);
88942 + for (entry = tablep; entry->mode; entry++) {
88943 + if (entry->child) {
88944 + sd_free_ctl_entry(entry->child);
88945 + pax_open_kernel();
88946 + entry->child = NULL;
88947 + pax_close_kernel();
88948 + }
88949 if (entry->proc_handler == NULL)
88950 kfree(entry->procname);
88951 }
88952
88953 - kfree(*tablep);
88954 - *tablep = NULL;
88955 + kfree(tablep);
88956 }
88957
88958 static int min_load_idx = 0;
88959 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
88960
88961 static void
88962 -set_table_entry(struct ctl_table *entry,
88963 +set_table_entry(ctl_table_no_const *entry,
88964 const char *procname, void *data, int maxlen,
88965 umode_t mode, proc_handler *proc_handler,
88966 bool load_idx)
88967 @@ -4324,7 +4331,7 @@ set_table_entry(struct ctl_table *entry,
88968 static struct ctl_table *
88969 sd_alloc_ctl_domain_table(struct sched_domain *sd)
88970 {
88971 - struct ctl_table *table = sd_alloc_ctl_entry(13);
88972 + ctl_table_no_const *table = sd_alloc_ctl_entry(13);
88973
88974 if (table == NULL)
88975 return NULL;
88976 @@ -4359,9 +4366,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
88977 return table;
88978 }
88979
88980 -static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
88981 +static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
88982 {
88983 - struct ctl_table *entry, *table;
88984 + ctl_table_no_const *entry, *table;
88985 struct sched_domain *sd;
88986 int domain_num = 0, i;
88987 char buf[32];
88988 @@ -4388,11 +4395,13 @@ static struct ctl_table_header *sd_sysctl_header;
88989 static void register_sched_domain_sysctl(void)
88990 {
88991 int i, cpu_num = num_possible_cpus();
88992 - struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
88993 + ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
88994 char buf[32];
88995
88996 WARN_ON(sd_ctl_dir[0].child);
88997 + pax_open_kernel();
88998 sd_ctl_dir[0].child = entry;
88999 + pax_close_kernel();
89000
89001 if (entry == NULL)
89002 return;
89003 @@ -4415,8 +4424,12 @@ static void unregister_sched_domain_sysctl(void)
89004 if (sd_sysctl_header)
89005 unregister_sysctl_table(sd_sysctl_header);
89006 sd_sysctl_header = NULL;
89007 - if (sd_ctl_dir[0].child)
89008 - sd_free_ctl_entry(&sd_ctl_dir[0].child);
89009 + if (sd_ctl_dir[0].child) {
89010 + sd_free_ctl_entry(sd_ctl_dir[0].child);
89011 + pax_open_kernel();
89012 + sd_ctl_dir[0].child = NULL;
89013 + pax_close_kernel();
89014 + }
89015 }
89016 #else
89017 static void register_sched_domain_sysctl(void)
89018 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
89019 index ce501de..1805320 100644
89020 --- a/kernel/sched/fair.c
89021 +++ b/kernel/sched/fair.c
89022 @@ -1652,7 +1652,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
89023
89024 static void reset_ptenuma_scan(struct task_struct *p)
89025 {
89026 - ACCESS_ONCE(p->mm->numa_scan_seq)++;
89027 + ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
89028 p->mm->numa_scan_offset = 0;
89029 }
89030
89031 @@ -6863,7 +6863,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
89032 * run_rebalance_domains is triggered when needed from the scheduler tick.
89033 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
89034 */
89035 -static void run_rebalance_domains(struct softirq_action *h)
89036 +static __latent_entropy void run_rebalance_domains(void)
89037 {
89038 int this_cpu = smp_processor_id();
89039 struct rq *this_rq = cpu_rq(this_cpu);
89040 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
89041 index 88c85b2..a1dec86 100644
89042 --- a/kernel/sched/sched.h
89043 +++ b/kernel/sched/sched.h
89044 @@ -1035,7 +1035,7 @@ struct sched_class {
89045 #ifdef CONFIG_FAIR_GROUP_SCHED
89046 void (*task_move_group) (struct task_struct *p, int on_rq);
89047 #endif
89048 -};
89049 +} __do_const;
89050
89051 #define sched_class_highest (&stop_sched_class)
89052 #define for_each_class(class) \
89053 diff --git a/kernel/signal.c b/kernel/signal.c
89054 index 940b30e..7fd6041 100644
89055 --- a/kernel/signal.c
89056 +++ b/kernel/signal.c
89057 @@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
89058
89059 int print_fatal_signals __read_mostly;
89060
89061 -static void __user *sig_handler(struct task_struct *t, int sig)
89062 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
89063 {
89064 return t->sighand->action[sig - 1].sa.sa_handler;
89065 }
89066
89067 -static int sig_handler_ignored(void __user *handler, int sig)
89068 +static int sig_handler_ignored(__sighandler_t handler, int sig)
89069 {
89070 /* Is it explicitly or implicitly ignored? */
89071 return handler == SIG_IGN ||
89072 @@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
89073
89074 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
89075 {
89076 - void __user *handler;
89077 + __sighandler_t handler;
89078
89079 handler = sig_handler(t, sig);
89080
89081 @@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
89082 atomic_inc(&user->sigpending);
89083 rcu_read_unlock();
89084
89085 + if (!override_rlimit)
89086 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
89087 +
89088 if (override_rlimit ||
89089 atomic_read(&user->sigpending) <=
89090 task_rlimit(t, RLIMIT_SIGPENDING)) {
89091 @@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
89092
89093 int unhandled_signal(struct task_struct *tsk, int sig)
89094 {
89095 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
89096 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
89097 if (is_global_init(tsk))
89098 return 1;
89099 if (handler != SIG_IGN && handler != SIG_DFL)
89100 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
89101 }
89102 }
89103
89104 + /* allow glibc communication via tgkill to other threads in our
89105 + thread group */
89106 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
89107 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
89108 + && gr_handle_signal(t, sig))
89109 + return -EPERM;
89110 +
89111 return security_task_kill(t, info, sig, 0);
89112 }
89113
89114 @@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
89115 return send_signal(sig, info, p, 1);
89116 }
89117
89118 -static int
89119 +int
89120 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
89121 {
89122 return send_signal(sig, info, t, 0);
89123 @@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
89124 unsigned long int flags;
89125 int ret, blocked, ignored;
89126 struct k_sigaction *action;
89127 + int is_unhandled = 0;
89128
89129 spin_lock_irqsave(&t->sighand->siglock, flags);
89130 action = &t->sighand->action[sig-1];
89131 @@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
89132 }
89133 if (action->sa.sa_handler == SIG_DFL)
89134 t->signal->flags &= ~SIGNAL_UNKILLABLE;
89135 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
89136 + is_unhandled = 1;
89137 ret = specific_send_sig_info(sig, info, t);
89138 spin_unlock_irqrestore(&t->sighand->siglock, flags);
89139
89140 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
89141 + normal operation */
89142 + if (is_unhandled) {
89143 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
89144 + gr_handle_crash(t, sig);
89145 + }
89146 +
89147 return ret;
89148 }
89149
89150 @@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
89151 ret = check_kill_permission(sig, info, p);
89152 rcu_read_unlock();
89153
89154 - if (!ret && sig)
89155 + if (!ret && sig) {
89156 ret = do_send_sig_info(sig, info, p, true);
89157 + if (!ret)
89158 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
89159 + }
89160
89161 return ret;
89162 }
89163 @@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
89164 int error = -ESRCH;
89165
89166 rcu_read_lock();
89167 - p = find_task_by_vpid(pid);
89168 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
89169 + /* allow glibc communication via tgkill to other threads in our
89170 + thread group */
89171 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
89172 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
89173 + p = find_task_by_vpid_unrestricted(pid);
89174 + else
89175 +#endif
89176 + p = find_task_by_vpid(pid);
89177 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
89178 error = check_kill_permission(sig, info, p);
89179 /*
89180 @@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
89181 }
89182 seg = get_fs();
89183 set_fs(KERNEL_DS);
89184 - ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
89185 - (stack_t __force __user *) &uoss,
89186 + ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
89187 + (stack_t __force_user *) &uoss,
89188 compat_user_stack_pointer());
89189 set_fs(seg);
89190 if (ret >= 0 && uoss_ptr) {
89191 diff --git a/kernel/smpboot.c b/kernel/smpboot.c
89192 index eb89e18..a4e6792 100644
89193 --- a/kernel/smpboot.c
89194 +++ b/kernel/smpboot.c
89195 @@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
89196 }
89197 smpboot_unpark_thread(plug_thread, cpu);
89198 }
89199 - list_add(&plug_thread->list, &hotplug_threads);
89200 + pax_list_add(&plug_thread->list, &hotplug_threads);
89201 out:
89202 mutex_unlock(&smpboot_threads_lock);
89203 return ret;
89204 @@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
89205 {
89206 get_online_cpus();
89207 mutex_lock(&smpboot_threads_lock);
89208 - list_del(&plug_thread->list);
89209 + pax_list_del(&plug_thread->list);
89210 smpboot_destroy_threads(plug_thread);
89211 mutex_unlock(&smpboot_threads_lock);
89212 put_online_cpus();
89213 diff --git a/kernel/softirq.c b/kernel/softirq.c
89214 index 11025cc..bc0e4dc 100644
89215 --- a/kernel/softirq.c
89216 +++ b/kernel/softirq.c
89217 @@ -50,11 +50,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
89218 EXPORT_SYMBOL(irq_stat);
89219 #endif
89220
89221 -static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
89222 +static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
89223
89224 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
89225
89226 -char *softirq_to_name[NR_SOFTIRQS] = {
89227 +const char * const softirq_to_name[NR_SOFTIRQS] = {
89228 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
89229 "TASKLET", "SCHED", "HRTIMER", "RCU"
89230 };
89231 @@ -250,7 +250,7 @@ restart:
89232 kstat_incr_softirqs_this_cpu(vec_nr);
89233
89234 trace_softirq_entry(vec_nr);
89235 - h->action(h);
89236 + h->action();
89237 trace_softirq_exit(vec_nr);
89238 if (unlikely(prev_count != preempt_count())) {
89239 printk(KERN_ERR "huh, entered softirq %u %s %p"
89240 @@ -419,7 +419,7 @@ void __raise_softirq_irqoff(unsigned int nr)
89241 or_softirq_pending(1UL << nr);
89242 }
89243
89244 -void open_softirq(int nr, void (*action)(struct softirq_action *))
89245 +void __init open_softirq(int nr, void (*action)(void))
89246 {
89247 softirq_vec[nr].action = action;
89248 }
89249 @@ -475,7 +475,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
89250
89251 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
89252
89253 -static void tasklet_action(struct softirq_action *a)
89254 +static __latent_entropy void tasklet_action(void)
89255 {
89256 struct tasklet_struct *list;
89257
89258 @@ -510,7 +510,7 @@ static void tasklet_action(struct softirq_action *a)
89259 }
89260 }
89261
89262 -static void tasklet_hi_action(struct softirq_action *a)
89263 +static __latent_entropy void tasklet_hi_action(void)
89264 {
89265 struct tasklet_struct *list;
89266
89267 @@ -740,7 +740,7 @@ static struct notifier_block cpu_nfb = {
89268 .notifier_call = cpu_callback
89269 };
89270
89271 -static struct smp_hotplug_thread softirq_threads = {
89272 +static struct smp_hotplug_thread softirq_threads __read_only = {
89273 .store = &ksoftirqd,
89274 .thread_should_run = ksoftirqd_should_run,
89275 .thread_fn = run_ksoftirqd,
89276 diff --git a/kernel/sys.c b/kernel/sys.c
89277 index c723113..46bf922 100644
89278 --- a/kernel/sys.c
89279 +++ b/kernel/sys.c
89280 @@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
89281 error = -EACCES;
89282 goto out;
89283 }
89284 +
89285 + if (gr_handle_chroot_setpriority(p, niceval)) {
89286 + error = -EACCES;
89287 + goto out;
89288 + }
89289 +
89290 no_nice = security_task_setnice(p, niceval);
89291 if (no_nice) {
89292 error = no_nice;
89293 @@ -351,6 +357,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
89294 goto error;
89295 }
89296
89297 + if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
89298 + goto error;
89299 +
89300 if (rgid != (gid_t) -1 ||
89301 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
89302 new->sgid = new->egid;
89303 @@ -386,6 +395,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
89304 old = current_cred();
89305
89306 retval = -EPERM;
89307 +
89308 + if (gr_check_group_change(kgid, kgid, kgid))
89309 + goto error;
89310 +
89311 if (ns_capable(old->user_ns, CAP_SETGID))
89312 new->gid = new->egid = new->sgid = new->fsgid = kgid;
89313 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
89314 @@ -403,7 +416,7 @@ error:
89315 /*
89316 * change the user struct in a credentials set to match the new UID
89317 */
89318 -static int set_user(struct cred *new)
89319 +int set_user(struct cred *new)
89320 {
89321 struct user_struct *new_user;
89322
89323 @@ -483,6 +496,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
89324 goto error;
89325 }
89326
89327 + if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
89328 + goto error;
89329 +
89330 if (!uid_eq(new->uid, old->uid)) {
89331 retval = set_user(new);
89332 if (retval < 0)
89333 @@ -533,6 +549,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
89334 old = current_cred();
89335
89336 retval = -EPERM;
89337 +
89338 + if (gr_check_crash_uid(kuid))
89339 + goto error;
89340 + if (gr_check_user_change(kuid, kuid, kuid))
89341 + goto error;
89342 +
89343 if (ns_capable(old->user_ns, CAP_SETUID)) {
89344 new->suid = new->uid = kuid;
89345 if (!uid_eq(kuid, old->uid)) {
89346 @@ -602,6 +624,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
89347 goto error;
89348 }
89349
89350 + if (gr_check_user_change(kruid, keuid, INVALID_UID))
89351 + goto error;
89352 +
89353 if (ruid != (uid_t) -1) {
89354 new->uid = kruid;
89355 if (!uid_eq(kruid, old->uid)) {
89356 @@ -684,6 +709,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
89357 goto error;
89358 }
89359
89360 + if (gr_check_group_change(krgid, kegid, INVALID_GID))
89361 + goto error;
89362 +
89363 if (rgid != (gid_t) -1)
89364 new->gid = krgid;
89365 if (egid != (gid_t) -1)
89366 @@ -745,12 +773,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
89367 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
89368 ns_capable(old->user_ns, CAP_SETUID)) {
89369 if (!uid_eq(kuid, old->fsuid)) {
89370 + if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
89371 + goto error;
89372 +
89373 new->fsuid = kuid;
89374 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
89375 goto change_okay;
89376 }
89377 }
89378
89379 +error:
89380 abort_creds(new);
89381 return old_fsuid;
89382
89383 @@ -783,12 +815,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
89384 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
89385 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
89386 ns_capable(old->user_ns, CAP_SETGID)) {
89387 + if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
89388 + goto error;
89389 +
89390 if (!gid_eq(kgid, old->fsgid)) {
89391 new->fsgid = kgid;
89392 goto change_okay;
89393 }
89394 }
89395
89396 +error:
89397 abort_creds(new);
89398 return old_fsgid;
89399
89400 @@ -1168,19 +1204,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
89401 return -EFAULT;
89402
89403 down_read(&uts_sem);
89404 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
89405 + error = __copy_to_user(name->sysname, &utsname()->sysname,
89406 __OLD_UTS_LEN);
89407 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
89408 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
89409 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
89410 __OLD_UTS_LEN);
89411 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
89412 - error |= __copy_to_user(&name->release, &utsname()->release,
89413 + error |= __copy_to_user(name->release, &utsname()->release,
89414 __OLD_UTS_LEN);
89415 error |= __put_user(0, name->release + __OLD_UTS_LEN);
89416 - error |= __copy_to_user(&name->version, &utsname()->version,
89417 + error |= __copy_to_user(name->version, &utsname()->version,
89418 __OLD_UTS_LEN);
89419 error |= __put_user(0, name->version + __OLD_UTS_LEN);
89420 - error |= __copy_to_user(&name->machine, &utsname()->machine,
89421 + error |= __copy_to_user(name->machine, &utsname()->machine,
89422 __OLD_UTS_LEN);
89423 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
89424 up_read(&uts_sem);
89425 @@ -1382,6 +1418,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
89426 */
89427 new_rlim->rlim_cur = 1;
89428 }
89429 + /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
89430 + is changed to a lower value. Since tasks can be created by the same
89431 + user in between this limit change and an execve by this task, force
89432 + a recheck only for this task by setting PF_NPROC_EXCEEDED
89433 + */
89434 + if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
89435 + tsk->flags |= PF_NPROC_EXCEEDED;
89436 }
89437 if (!retval) {
89438 if (old_rlim)
89439 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
89440 index 06962ba..a54d45e 100644
89441 --- a/kernel/sysctl.c
89442 +++ b/kernel/sysctl.c
89443 @@ -93,7 +93,6 @@
89444
89445
89446 #if defined(CONFIG_SYSCTL)
89447 -
89448 /* External variables not in a header file. */
89449 extern int sysctl_overcommit_memory;
89450 extern int sysctl_overcommit_ratio;
89451 @@ -119,17 +118,18 @@ extern int blk_iopoll_enabled;
89452
89453 /* Constants used for minimum and maximum */
89454 #ifdef CONFIG_LOCKUP_DETECTOR
89455 -static int sixty = 60;
89456 +static int sixty __read_only = 60;
89457 #endif
89458
89459 -static int zero;
89460 -static int __maybe_unused one = 1;
89461 -static int __maybe_unused two = 2;
89462 -static int __maybe_unused three = 3;
89463 -static unsigned long one_ul = 1;
89464 -static int one_hundred = 100;
89465 +static int neg_one __read_only = -1;
89466 +static int zero __read_only = 0;
89467 +static int __maybe_unused one __read_only = 1;
89468 +static int __maybe_unused two __read_only = 2;
89469 +static int __maybe_unused three __read_only = 3;
89470 +static unsigned long one_ul __read_only = 1;
89471 +static int one_hundred __read_only = 100;
89472 #ifdef CONFIG_PRINTK
89473 -static int ten_thousand = 10000;
89474 +static int ten_thousand __read_only = 10000;
89475 #endif
89476
89477 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
89478 @@ -176,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
89479 void __user *buffer, size_t *lenp, loff_t *ppos);
89480 #endif
89481
89482 -#ifdef CONFIG_PRINTK
89483 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
89484 void __user *buffer, size_t *lenp, loff_t *ppos);
89485 -#endif
89486
89487 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
89488 void __user *buffer, size_t *lenp, loff_t *ppos);
89489 @@ -210,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
89490
89491 #endif
89492
89493 +extern struct ctl_table grsecurity_table[];
89494 +
89495 static struct ctl_table kern_table[];
89496 static struct ctl_table vm_table[];
89497 static struct ctl_table fs_table[];
89498 @@ -224,6 +224,20 @@ extern struct ctl_table epoll_table[];
89499 int sysctl_legacy_va_layout;
89500 #endif
89501
89502 +#ifdef CONFIG_PAX_SOFTMODE
89503 +static ctl_table pax_table[] = {
89504 + {
89505 + .procname = "softmode",
89506 + .data = &pax_softmode,
89507 + .maxlen = sizeof(unsigned int),
89508 + .mode = 0600,
89509 + .proc_handler = &proc_dointvec,
89510 + },
89511 +
89512 + { }
89513 +};
89514 +#endif
89515 +
89516 /* The default sysctl tables: */
89517
89518 static struct ctl_table sysctl_base_table[] = {
89519 @@ -272,6 +286,22 @@ static int max_extfrag_threshold = 1000;
89520 #endif
89521
89522 static struct ctl_table kern_table[] = {
89523 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
89524 + {
89525 + .procname = "grsecurity",
89526 + .mode = 0500,
89527 + .child = grsecurity_table,
89528 + },
89529 +#endif
89530 +
89531 +#ifdef CONFIG_PAX_SOFTMODE
89532 + {
89533 + .procname = "pax",
89534 + .mode = 0500,
89535 + .child = pax_table,
89536 + },
89537 +#endif
89538 +
89539 {
89540 .procname = "sched_child_runs_first",
89541 .data = &sysctl_sched_child_runs_first,
89542 @@ -629,7 +659,7 @@ static struct ctl_table kern_table[] = {
89543 .data = &modprobe_path,
89544 .maxlen = KMOD_PATH_LEN,
89545 .mode = 0644,
89546 - .proc_handler = proc_dostring,
89547 + .proc_handler = proc_dostring_modpriv,
89548 },
89549 {
89550 .procname = "modules_disabled",
89551 @@ -796,16 +826,20 @@ static struct ctl_table kern_table[] = {
89552 .extra1 = &zero,
89553 .extra2 = &one,
89554 },
89555 +#endif
89556 {
89557 .procname = "kptr_restrict",
89558 .data = &kptr_restrict,
89559 .maxlen = sizeof(int),
89560 .mode = 0644,
89561 .proc_handler = proc_dointvec_minmax_sysadmin,
89562 +#ifdef CONFIG_GRKERNSEC_HIDESYM
89563 + .extra1 = &two,
89564 +#else
89565 .extra1 = &zero,
89566 +#endif
89567 .extra2 = &two,
89568 },
89569 -#endif
89570 {
89571 .procname = "ngroups_max",
89572 .data = &ngroups_max,
89573 @@ -1048,10 +1082,17 @@ static struct ctl_table kern_table[] = {
89574 */
89575 {
89576 .procname = "perf_event_paranoid",
89577 - .data = &sysctl_perf_event_paranoid,
89578 - .maxlen = sizeof(sysctl_perf_event_paranoid),
89579 + .data = &sysctl_perf_event_legitimately_concerned,
89580 + .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
89581 .mode = 0644,
89582 - .proc_handler = proc_dointvec,
89583 + /* go ahead, be a hero */
89584 + .proc_handler = proc_dointvec_minmax_sysadmin,
89585 + .extra1 = &neg_one,
89586 +#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89587 + .extra2 = &three,
89588 +#else
89589 + .extra2 = &two,
89590 +#endif
89591 },
89592 {
89593 .procname = "perf_event_mlock_kb",
89594 @@ -1315,6 +1356,13 @@ static struct ctl_table vm_table[] = {
89595 .proc_handler = proc_dointvec_minmax,
89596 .extra1 = &zero,
89597 },
89598 + {
89599 + .procname = "heap_stack_gap",
89600 + .data = &sysctl_heap_stack_gap,
89601 + .maxlen = sizeof(sysctl_heap_stack_gap),
89602 + .mode = 0644,
89603 + .proc_handler = proc_doulongvec_minmax,
89604 + },
89605 #else
89606 {
89607 .procname = "nr_trim_pages",
89608 @@ -1779,6 +1827,16 @@ int proc_dostring(struct ctl_table *table, int write,
89609 buffer, lenp, ppos);
89610 }
89611
89612 +int proc_dostring_modpriv(struct ctl_table *table, int write,
89613 + void __user *buffer, size_t *lenp, loff_t *ppos)
89614 +{
89615 + if (write && !capable(CAP_SYS_MODULE))
89616 + return -EPERM;
89617 +
89618 + return _proc_do_string(table->data, table->maxlen, write,
89619 + buffer, lenp, ppos);
89620 +}
89621 +
89622 static size_t proc_skip_spaces(char **buf)
89623 {
89624 size_t ret;
89625 @@ -1884,6 +1942,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
89626 len = strlen(tmp);
89627 if (len > *size)
89628 len = *size;
89629 + if (len > sizeof(tmp))
89630 + len = sizeof(tmp);
89631 if (copy_to_user(*buf, tmp, len))
89632 return -EFAULT;
89633 *size -= len;
89634 @@ -2048,7 +2108,7 @@ int proc_dointvec(struct ctl_table *table, int write,
89635 static int proc_taint(struct ctl_table *table, int write,
89636 void __user *buffer, size_t *lenp, loff_t *ppos)
89637 {
89638 - struct ctl_table t;
89639 + ctl_table_no_const t;
89640 unsigned long tmptaint = get_taint();
89641 int err;
89642
89643 @@ -2076,7 +2136,6 @@ static int proc_taint(struct ctl_table *table, int write,
89644 return err;
89645 }
89646
89647 -#ifdef CONFIG_PRINTK
89648 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
89649 void __user *buffer, size_t *lenp, loff_t *ppos)
89650 {
89651 @@ -2085,7 +2144,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
89652
89653 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
89654 }
89655 -#endif
89656
89657 struct do_proc_dointvec_minmax_conv_param {
89658 int *min;
89659 @@ -2632,6 +2690,12 @@ int proc_dostring(struct ctl_table *table, int write,
89660 return -ENOSYS;
89661 }
89662
89663 +int proc_dostring_modpriv(struct ctl_table *table, int write,
89664 + void __user *buffer, size_t *lenp, loff_t *ppos)
89665 +{
89666 + return -ENOSYS;
89667 +}
89668 +
89669 int proc_dointvec(struct ctl_table *table, int write,
89670 void __user *buffer, size_t *lenp, loff_t *ppos)
89671 {
89672 @@ -2688,5 +2752,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
89673 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
89674 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
89675 EXPORT_SYMBOL(proc_dostring);
89676 +EXPORT_SYMBOL(proc_dostring_modpriv);
89677 EXPORT_SYMBOL(proc_doulongvec_minmax);
89678 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
89679 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
89680 index 13d2f7c..c93d0b0 100644
89681 --- a/kernel/taskstats.c
89682 +++ b/kernel/taskstats.c
89683 @@ -28,9 +28,12 @@
89684 #include <linux/fs.h>
89685 #include <linux/file.h>
89686 #include <linux/pid_namespace.h>
89687 +#include <linux/grsecurity.h>
89688 #include <net/genetlink.h>
89689 #include <linux/atomic.h>
89690
89691 +extern int gr_is_taskstats_denied(int pid);
89692 +
89693 /*
89694 * Maximum length of a cpumask that can be specified in
89695 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
89696 @@ -576,6 +579,9 @@ err:
89697
89698 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
89699 {
89700 + if (gr_is_taskstats_denied(current->pid))
89701 + return -EACCES;
89702 +
89703 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
89704 return cmd_attr_register_cpumask(info);
89705 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
89706 diff --git a/kernel/time.c b/kernel/time.c
89707 index 7c7964c..2a0d412 100644
89708 --- a/kernel/time.c
89709 +++ b/kernel/time.c
89710 @@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
89711 return error;
89712
89713 if (tz) {
89714 + /* we log in do_settimeofday called below, so don't log twice
89715 + */
89716 + if (!tv)
89717 + gr_log_timechange();
89718 +
89719 sys_tz = *tz;
89720 update_vsyscall_tz();
89721 if (firsttime) {
89722 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
89723 index 88c9c65..7497ebc 100644
89724 --- a/kernel/time/alarmtimer.c
89725 +++ b/kernel/time/alarmtimer.c
89726 @@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
89727 struct platform_device *pdev;
89728 int error = 0;
89729 int i;
89730 - struct k_clock alarm_clock = {
89731 + static struct k_clock alarm_clock = {
89732 .clock_getres = alarm_clock_getres,
89733 .clock_get = alarm_clock_get,
89734 .timer_create = alarm_timer_create,
89735 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
89736 index b415457..c26876d 100644
89737 --- a/kernel/time/timekeeping.c
89738 +++ b/kernel/time/timekeeping.c
89739 @@ -15,6 +15,7 @@
89740 #include <linux/init.h>
89741 #include <linux/mm.h>
89742 #include <linux/sched.h>
89743 +#include <linux/grsecurity.h>
89744 #include <linux/syscore_ops.h>
89745 #include <linux/clocksource.h>
89746 #include <linux/jiffies.h>
89747 @@ -500,6 +501,8 @@ int do_settimeofday(const struct timespec *tv)
89748 if (!timespec_valid_strict(tv))
89749 return -EINVAL;
89750
89751 + gr_log_timechange();
89752 +
89753 raw_spin_lock_irqsave(&timekeeper_lock, flags);
89754 write_seqcount_begin(&timekeeper_seq);
89755
89756 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
89757 index 61ed862..3b52c65 100644
89758 --- a/kernel/time/timer_list.c
89759 +++ b/kernel/time/timer_list.c
89760 @@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
89761
89762 static void print_name_offset(struct seq_file *m, void *sym)
89763 {
89764 +#ifdef CONFIG_GRKERNSEC_HIDESYM
89765 + SEQ_printf(m, "<%p>", NULL);
89766 +#else
89767 char symname[KSYM_NAME_LEN];
89768
89769 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
89770 SEQ_printf(m, "<%pK>", sym);
89771 else
89772 SEQ_printf(m, "%s", symname);
89773 +#endif
89774 }
89775
89776 static void
89777 @@ -119,7 +123,11 @@ next_one:
89778 static void
89779 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
89780 {
89781 +#ifdef CONFIG_GRKERNSEC_HIDESYM
89782 + SEQ_printf(m, " .base: %p\n", NULL);
89783 +#else
89784 SEQ_printf(m, " .base: %pK\n", base);
89785 +#endif
89786 SEQ_printf(m, " .index: %d\n",
89787 base->index);
89788 SEQ_printf(m, " .resolution: %Lu nsecs\n",
89789 @@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
89790 {
89791 struct proc_dir_entry *pe;
89792
89793 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
89794 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
89795 +#else
89796 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
89797 +#endif
89798 if (!pe)
89799 return -ENOMEM;
89800 return 0;
89801 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
89802 index 1fb08f2..ca4bb1e 100644
89803 --- a/kernel/time/timer_stats.c
89804 +++ b/kernel/time/timer_stats.c
89805 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
89806 static unsigned long nr_entries;
89807 static struct entry entries[MAX_ENTRIES];
89808
89809 -static atomic_t overflow_count;
89810 +static atomic_unchecked_t overflow_count;
89811
89812 /*
89813 * The entries are in a hash-table, for fast lookup:
89814 @@ -140,7 +140,7 @@ static void reset_entries(void)
89815 nr_entries = 0;
89816 memset(entries, 0, sizeof(entries));
89817 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
89818 - atomic_set(&overflow_count, 0);
89819 + atomic_set_unchecked(&overflow_count, 0);
89820 }
89821
89822 static struct entry *alloc_entry(void)
89823 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
89824 if (likely(entry))
89825 entry->count++;
89826 else
89827 - atomic_inc(&overflow_count);
89828 + atomic_inc_unchecked(&overflow_count);
89829
89830 out_unlock:
89831 raw_spin_unlock_irqrestore(lock, flags);
89832 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
89833
89834 static void print_name_offset(struct seq_file *m, unsigned long addr)
89835 {
89836 +#ifdef CONFIG_GRKERNSEC_HIDESYM
89837 + seq_printf(m, "<%p>", NULL);
89838 +#else
89839 char symname[KSYM_NAME_LEN];
89840
89841 if (lookup_symbol_name(addr, symname) < 0)
89842 - seq_printf(m, "<%p>", (void *)addr);
89843 + seq_printf(m, "<%pK>", (void *)addr);
89844 else
89845 seq_printf(m, "%s", symname);
89846 +#endif
89847 }
89848
89849 static int tstats_show(struct seq_file *m, void *v)
89850 @@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
89851
89852 seq_puts(m, "Timer Stats Version: v0.3\n");
89853 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
89854 - if (atomic_read(&overflow_count))
89855 - seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
89856 + if (atomic_read_unchecked(&overflow_count))
89857 + seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
89858 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
89859
89860 for (i = 0; i < nr_entries; i++) {
89861 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
89862 {
89863 struct proc_dir_entry *pe;
89864
89865 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
89866 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
89867 +#else
89868 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
89869 +#endif
89870 if (!pe)
89871 return -ENOMEM;
89872 return 0;
89873 diff --git a/kernel/timer.c b/kernel/timer.c
89874 index accfd24..e00f0c0 100644
89875 --- a/kernel/timer.c
89876 +++ b/kernel/timer.c
89877 @@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
89878 /*
89879 * This function runs timers and the timer-tq in bottom half context.
89880 */
89881 -static void run_timer_softirq(struct softirq_action *h)
89882 +static __latent_entropy void run_timer_softirq(void)
89883 {
89884 struct tvec_base *base = __this_cpu_read(tvec_bases);
89885
89886 @@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
89887 *
89888 * In all cases the return value is guaranteed to be non-negative.
89889 */
89890 -signed long __sched schedule_timeout(signed long timeout)
89891 +signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
89892 {
89893 struct timer_list timer;
89894 unsigned long expire;
89895 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
89896 index f785aef..59f1b18 100644
89897 --- a/kernel/trace/blktrace.c
89898 +++ b/kernel/trace/blktrace.c
89899 @@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
89900 struct blk_trace *bt = filp->private_data;
89901 char buf[16];
89902
89903 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
89904 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
89905
89906 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
89907 }
89908 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
89909 return 1;
89910
89911 bt = buf->chan->private_data;
89912 - atomic_inc(&bt->dropped);
89913 + atomic_inc_unchecked(&bt->dropped);
89914 return 0;
89915 }
89916
89917 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
89918
89919 bt->dir = dir;
89920 bt->dev = dev;
89921 - atomic_set(&bt->dropped, 0);
89922 + atomic_set_unchecked(&bt->dropped, 0);
89923 INIT_LIST_HEAD(&bt->running_list);
89924
89925 ret = -EIO;
89926 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
89927 index 38463d2..68abe92 100644
89928 --- a/kernel/trace/ftrace.c
89929 +++ b/kernel/trace/ftrace.c
89930 @@ -1978,12 +1978,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
89931 if (unlikely(ftrace_disabled))
89932 return 0;
89933
89934 + ret = ftrace_arch_code_modify_prepare();
89935 + FTRACE_WARN_ON(ret);
89936 + if (ret)
89937 + return 0;
89938 +
89939 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
89940 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
89941 if (ret) {
89942 ftrace_bug(ret, ip);
89943 - return 0;
89944 }
89945 - return 1;
89946 + return ret ? 0 : 1;
89947 }
89948
89949 /*
89950 @@ -4190,8 +4195,10 @@ static int ftrace_process_locs(struct module *mod,
89951 if (!count)
89952 return 0;
89953
89954 + pax_open_kernel();
89955 sort(start, count, sizeof(*start),
89956 ftrace_cmp_ips, ftrace_swap_ips);
89957 + pax_close_kernel();
89958
89959 start_pg = ftrace_allocate_pages(count);
89960 if (!start_pg)
89961 @@ -4922,8 +4929,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
89962 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
89963
89964 static int ftrace_graph_active;
89965 -static struct notifier_block ftrace_suspend_notifier;
89966 -
89967 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
89968 {
89969 return 0;
89970 @@ -5099,6 +5104,10 @@ static void update_function_graph_func(void)
89971 ftrace_graph_entry = ftrace_graph_entry_test;
89972 }
89973
89974 +static struct notifier_block ftrace_suspend_notifier = {
89975 + .notifier_call = ftrace_suspend_notifier_call
89976 +};
89977 +
89978 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
89979 trace_func_graph_ent_t entryfunc)
89980 {
89981 @@ -5112,7 +5121,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
89982 goto out;
89983 }
89984
89985 - ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
89986 register_pm_notifier(&ftrace_suspend_notifier);
89987
89988 ftrace_graph_active++;
89989 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
89990 index 0e337ee..3370631 100644
89991 --- a/kernel/trace/ring_buffer.c
89992 +++ b/kernel/trace/ring_buffer.c
89993 @@ -352,9 +352,9 @@ struct buffer_data_page {
89994 */
89995 struct buffer_page {
89996 struct list_head list; /* list of buffer pages */
89997 - local_t write; /* index for next write */
89998 + local_unchecked_t write; /* index for next write */
89999 unsigned read; /* index for next read */
90000 - local_t entries; /* entries on this page */
90001 + local_unchecked_t entries; /* entries on this page */
90002 unsigned long real_end; /* real end of data */
90003 struct buffer_data_page *page; /* Actual data page */
90004 };
90005 @@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
90006 unsigned long last_overrun;
90007 local_t entries_bytes;
90008 local_t entries;
90009 - local_t overrun;
90010 - local_t commit_overrun;
90011 + local_unchecked_t overrun;
90012 + local_unchecked_t commit_overrun;
90013 local_t dropped_events;
90014 local_t committing;
90015 local_t commits;
90016 @@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
90017 *
90018 * We add a counter to the write field to denote this.
90019 */
90020 - old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
90021 - old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
90022 + old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
90023 + old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
90024
90025 /*
90026 * Just make sure we have seen our old_write and synchronize
90027 @@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
90028 * cmpxchg to only update if an interrupt did not already
90029 * do it for us. If the cmpxchg fails, we don't care.
90030 */
90031 - (void)local_cmpxchg(&next_page->write, old_write, val);
90032 - (void)local_cmpxchg(&next_page->entries, old_entries, eval);
90033 + (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
90034 + (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
90035
90036 /*
90037 * No need to worry about races with clearing out the commit.
90038 @@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
90039
90040 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
90041 {
90042 - return local_read(&bpage->entries) & RB_WRITE_MASK;
90043 + return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
90044 }
90045
90046 static inline unsigned long rb_page_write(struct buffer_page *bpage)
90047 {
90048 - return local_read(&bpage->write) & RB_WRITE_MASK;
90049 + return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
90050 }
90051
90052 static int
90053 @@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
90054 * bytes consumed in ring buffer from here.
90055 * Increment overrun to account for the lost events.
90056 */
90057 - local_add(page_entries, &cpu_buffer->overrun);
90058 + local_add_unchecked(page_entries, &cpu_buffer->overrun);
90059 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
90060 }
90061
90062 @@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
90063 * it is our responsibility to update
90064 * the counters.
90065 */
90066 - local_add(entries, &cpu_buffer->overrun);
90067 + local_add_unchecked(entries, &cpu_buffer->overrun);
90068 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
90069
90070 /*
90071 @@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
90072 if (tail == BUF_PAGE_SIZE)
90073 tail_page->real_end = 0;
90074
90075 - local_sub(length, &tail_page->write);
90076 + local_sub_unchecked(length, &tail_page->write);
90077 return;
90078 }
90079
90080 @@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
90081 rb_event_set_padding(event);
90082
90083 /* Set the write back to the previous setting */
90084 - local_sub(length, &tail_page->write);
90085 + local_sub_unchecked(length, &tail_page->write);
90086 return;
90087 }
90088
90089 @@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
90090
90091 /* Set write to end of buffer */
90092 length = (tail + length) - BUF_PAGE_SIZE;
90093 - local_sub(length, &tail_page->write);
90094 + local_sub_unchecked(length, &tail_page->write);
90095 }
90096
90097 /*
90098 @@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
90099 * about it.
90100 */
90101 if (unlikely(next_page == commit_page)) {
90102 - local_inc(&cpu_buffer->commit_overrun);
90103 + local_inc_unchecked(&cpu_buffer->commit_overrun);
90104 goto out_reset;
90105 }
90106
90107 @@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
90108 cpu_buffer->tail_page) &&
90109 (cpu_buffer->commit_page ==
90110 cpu_buffer->reader_page))) {
90111 - local_inc(&cpu_buffer->commit_overrun);
90112 + local_inc_unchecked(&cpu_buffer->commit_overrun);
90113 goto out_reset;
90114 }
90115 }
90116 @@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
90117 length += RB_LEN_TIME_EXTEND;
90118
90119 tail_page = cpu_buffer->tail_page;
90120 - write = local_add_return(length, &tail_page->write);
90121 + write = local_add_return_unchecked(length, &tail_page->write);
90122
90123 /* set write to only the index of the write */
90124 write &= RB_WRITE_MASK;
90125 @@ -2415,7 +2415,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
90126 kmemcheck_annotate_bitfield(event, bitfield);
90127 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
90128
90129 - local_inc(&tail_page->entries);
90130 + local_inc_unchecked(&tail_page->entries);
90131
90132 /*
90133 * If this is the first commit on the page, then update
90134 @@ -2448,7 +2448,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
90135
90136 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
90137 unsigned long write_mask =
90138 - local_read(&bpage->write) & ~RB_WRITE_MASK;
90139 + local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
90140 unsigned long event_length = rb_event_length(event);
90141 /*
90142 * This is on the tail page. It is possible that
90143 @@ -2458,7 +2458,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
90144 */
90145 old_index += write_mask;
90146 new_index += write_mask;
90147 - index = local_cmpxchg(&bpage->write, old_index, new_index);
90148 + index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
90149 if (index == old_index) {
90150 /* update counters */
90151 local_sub(event_length, &cpu_buffer->entries_bytes);
90152 @@ -2850,7 +2850,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
90153
90154 /* Do the likely case first */
90155 if (likely(bpage->page == (void *)addr)) {
90156 - local_dec(&bpage->entries);
90157 + local_dec_unchecked(&bpage->entries);
90158 return;
90159 }
90160
90161 @@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
90162 start = bpage;
90163 do {
90164 if (bpage->page == (void *)addr) {
90165 - local_dec(&bpage->entries);
90166 + local_dec_unchecked(&bpage->entries);
90167 return;
90168 }
90169 rb_inc_page(cpu_buffer, &bpage);
90170 @@ -3146,7 +3146,7 @@ static inline unsigned long
90171 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
90172 {
90173 return local_read(&cpu_buffer->entries) -
90174 - (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
90175 + (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
90176 }
90177
90178 /**
90179 @@ -3235,7 +3235,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
90180 return 0;
90181
90182 cpu_buffer = buffer->buffers[cpu];
90183 - ret = local_read(&cpu_buffer->overrun);
90184 + ret = local_read_unchecked(&cpu_buffer->overrun);
90185
90186 return ret;
90187 }
90188 @@ -3258,7 +3258,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
90189 return 0;
90190
90191 cpu_buffer = buffer->buffers[cpu];
90192 - ret = local_read(&cpu_buffer->commit_overrun);
90193 + ret = local_read_unchecked(&cpu_buffer->commit_overrun);
90194
90195 return ret;
90196 }
90197 @@ -3343,7 +3343,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
90198 /* if you care about this being correct, lock the buffer */
90199 for_each_buffer_cpu(buffer, cpu) {
90200 cpu_buffer = buffer->buffers[cpu];
90201 - overruns += local_read(&cpu_buffer->overrun);
90202 + overruns += local_read_unchecked(&cpu_buffer->overrun);
90203 }
90204
90205 return overruns;
90206 @@ -3519,8 +3519,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
90207 /*
90208 * Reset the reader page to size zero.
90209 */
90210 - local_set(&cpu_buffer->reader_page->write, 0);
90211 - local_set(&cpu_buffer->reader_page->entries, 0);
90212 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
90213 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
90214 local_set(&cpu_buffer->reader_page->page->commit, 0);
90215 cpu_buffer->reader_page->real_end = 0;
90216
90217 @@ -3554,7 +3554,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
90218 * want to compare with the last_overrun.
90219 */
90220 smp_mb();
90221 - overwrite = local_read(&(cpu_buffer->overrun));
90222 + overwrite = local_read_unchecked(&(cpu_buffer->overrun));
90223
90224 /*
90225 * Here's the tricky part.
90226 @@ -4124,8 +4124,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
90227
90228 cpu_buffer->head_page
90229 = list_entry(cpu_buffer->pages, struct buffer_page, list);
90230 - local_set(&cpu_buffer->head_page->write, 0);
90231 - local_set(&cpu_buffer->head_page->entries, 0);
90232 + local_set_unchecked(&cpu_buffer->head_page->write, 0);
90233 + local_set_unchecked(&cpu_buffer->head_page->entries, 0);
90234 local_set(&cpu_buffer->head_page->page->commit, 0);
90235
90236 cpu_buffer->head_page->read = 0;
90237 @@ -4135,14 +4135,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
90238
90239 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
90240 INIT_LIST_HEAD(&cpu_buffer->new_pages);
90241 - local_set(&cpu_buffer->reader_page->write, 0);
90242 - local_set(&cpu_buffer->reader_page->entries, 0);
90243 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
90244 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
90245 local_set(&cpu_buffer->reader_page->page->commit, 0);
90246 cpu_buffer->reader_page->read = 0;
90247
90248 local_set(&cpu_buffer->entries_bytes, 0);
90249 - local_set(&cpu_buffer->overrun, 0);
90250 - local_set(&cpu_buffer->commit_overrun, 0);
90251 + local_set_unchecked(&cpu_buffer->overrun, 0);
90252 + local_set_unchecked(&cpu_buffer->commit_overrun, 0);
90253 local_set(&cpu_buffer->dropped_events, 0);
90254 local_set(&cpu_buffer->entries, 0);
90255 local_set(&cpu_buffer->committing, 0);
90256 @@ -4547,8 +4547,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
90257 rb_init_page(bpage);
90258 bpage = reader->page;
90259 reader->page = *data_page;
90260 - local_set(&reader->write, 0);
90261 - local_set(&reader->entries, 0);
90262 + local_set_unchecked(&reader->write, 0);
90263 + local_set_unchecked(&reader->entries, 0);
90264 reader->read = 0;
90265 *data_page = bpage;
90266
90267 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
90268 index 0a360ce..7bd800e 100644
90269 --- a/kernel/trace/trace.c
90270 +++ b/kernel/trace/trace.c
90271 @@ -3352,7 +3352,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
90272 return 0;
90273 }
90274
90275 -int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
90276 +int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
90277 {
90278 /* do nothing if flag is already set */
90279 if (!!(trace_flags & mask) == !!enabled)
90280 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
90281 index ea189e0..a5b48c4 100644
90282 --- a/kernel/trace/trace.h
90283 +++ b/kernel/trace/trace.h
90284 @@ -1040,7 +1040,7 @@ extern const char *__stop___tracepoint_str[];
90285 void trace_printk_init_buffers(void);
90286 void trace_printk_start_comm(void);
90287 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
90288 -int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
90289 +int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
90290
90291 /*
90292 * Normal trace_printk() and friends allocates special buffers
90293 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
90294 index 26dc348..8708ca7 100644
90295 --- a/kernel/trace/trace_clock.c
90296 +++ b/kernel/trace/trace_clock.c
90297 @@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
90298 return now;
90299 }
90300
90301 -static atomic64_t trace_counter;
90302 +static atomic64_unchecked_t trace_counter;
90303
90304 /*
90305 * trace_clock_counter(): simply an atomic counter.
90306 @@ -132,5 +132,5 @@ static atomic64_t trace_counter;
90307 */
90308 u64 notrace trace_clock_counter(void)
90309 {
90310 - return atomic64_add_return(1, &trace_counter);
90311 + return atomic64_inc_return_unchecked(&trace_counter);
90312 }
90313 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
90314 index 2e58196..fdd3d61 100644
90315 --- a/kernel/trace/trace_events.c
90316 +++ b/kernel/trace/trace_events.c
90317 @@ -1681,7 +1681,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
90318 return 0;
90319 }
90320
90321 -struct ftrace_module_file_ops;
90322 static void __add_event_to_tracers(struct ftrace_event_call *call);
90323
90324 /* Add an additional event_call dynamically */
90325 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
90326 index 0abd9b8..6a663a2 100644
90327 --- a/kernel/trace/trace_mmiotrace.c
90328 +++ b/kernel/trace/trace_mmiotrace.c
90329 @@ -24,7 +24,7 @@ struct header_iter {
90330 static struct trace_array *mmio_trace_array;
90331 static bool overrun_detected;
90332 static unsigned long prev_overruns;
90333 -static atomic_t dropped_count;
90334 +static atomic_unchecked_t dropped_count;
90335
90336 static void mmio_reset_data(struct trace_array *tr)
90337 {
90338 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
90339
90340 static unsigned long count_overruns(struct trace_iterator *iter)
90341 {
90342 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
90343 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
90344 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
90345
90346 if (over > prev_overruns)
90347 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
90348 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
90349 sizeof(*entry), 0, pc);
90350 if (!event) {
90351 - atomic_inc(&dropped_count);
90352 + atomic_inc_unchecked(&dropped_count);
90353 return;
90354 }
90355 entry = ring_buffer_event_data(event);
90356 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
90357 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
90358 sizeof(*entry), 0, pc);
90359 if (!event) {
90360 - atomic_inc(&dropped_count);
90361 + atomic_inc_unchecked(&dropped_count);
90362 return;
90363 }
90364 entry = ring_buffer_event_data(event);
90365 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
90366 index ed32284..884d6c3 100644
90367 --- a/kernel/trace/trace_output.c
90368 +++ b/kernel/trace/trace_output.c
90369 @@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
90370
90371 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
90372 if (!IS_ERR(p)) {
90373 - p = mangle_path(s->buffer + s->len, p, "\n");
90374 + p = mangle_path(s->buffer + s->len, p, "\n\\");
90375 if (p) {
90376 s->len = p - s->buffer;
90377 return 1;
90378 @@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_event *event)
90379 goto out;
90380 }
90381
90382 + pax_open_kernel();
90383 if (event->funcs->trace == NULL)
90384 - event->funcs->trace = trace_nop_print;
90385 + *(void **)&event->funcs->trace = trace_nop_print;
90386 if (event->funcs->raw == NULL)
90387 - event->funcs->raw = trace_nop_print;
90388 + *(void **)&event->funcs->raw = trace_nop_print;
90389 if (event->funcs->hex == NULL)
90390 - event->funcs->hex = trace_nop_print;
90391 + *(void **)&event->funcs->hex = trace_nop_print;
90392 if (event->funcs->binary == NULL)
90393 - event->funcs->binary = trace_nop_print;
90394 + *(void **)&event->funcs->binary = trace_nop_print;
90395 + pax_close_kernel();
90396
90397 key = event->type & (EVENT_HASHSIZE - 1);
90398
90399 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
90400 index b20428c..4845a10 100644
90401 --- a/kernel/trace/trace_stack.c
90402 +++ b/kernel/trace/trace_stack.c
90403 @@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
90404 return;
90405
90406 /* we do not handle interrupt stacks yet */
90407 - if (!object_is_on_stack(stack))
90408 + if (!object_starts_on_stack(stack))
90409 return;
90410
90411 local_irq_save(flags);
90412 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
90413 index 240fb62..583473e 100644
90414 --- a/kernel/user_namespace.c
90415 +++ b/kernel/user_namespace.c
90416 @@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
90417 !kgid_has_mapping(parent_ns, group))
90418 return -EPERM;
90419
90420 +#ifdef CONFIG_GRKERNSEC
90421 + /*
90422 + * This doesn't really inspire confidence:
90423 + * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
90424 + * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
90425 + * Increases kernel attack surface in areas developers
90426 + * previously cared little about ("low importance due
90427 + * to requiring "root" capability")
90428 + * To be removed when this code receives *proper* review
90429 + */
90430 + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
90431 + !capable(CAP_SETGID))
90432 + return -EPERM;
90433 +#endif
90434 +
90435 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
90436 if (!ns)
90437 return -ENOMEM;
90438 @@ -866,7 +881,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
90439 if (atomic_read(&current->mm->mm_users) > 1)
90440 return -EINVAL;
90441
90442 - if (current->fs->users != 1)
90443 + if (atomic_read(&current->fs->users) != 1)
90444 return -EINVAL;
90445
90446 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
90447 diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
90448 index 4f69f9a..7c6f8f8 100644
90449 --- a/kernel/utsname_sysctl.c
90450 +++ b/kernel/utsname_sysctl.c
90451 @@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
90452 static int proc_do_uts_string(ctl_table *table, int write,
90453 void __user *buffer, size_t *lenp, loff_t *ppos)
90454 {
90455 - struct ctl_table uts_table;
90456 + ctl_table_no_const uts_table;
90457 int r;
90458 memcpy(&uts_table, table, sizeof(uts_table));
90459 uts_table.data = get_uts(table, write);
90460 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
90461 index 4431610..4265616 100644
90462 --- a/kernel/watchdog.c
90463 +++ b/kernel/watchdog.c
90464 @@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
90465 static void watchdog_nmi_disable(unsigned int cpu) { return; }
90466 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
90467
90468 -static struct smp_hotplug_thread watchdog_threads = {
90469 +static struct smp_hotplug_thread watchdog_threads __read_only = {
90470 .store = &softlockup_watchdog,
90471 .thread_should_run = watchdog_should_run,
90472 .thread_fn = watchdog,
90473 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
90474 index a8381cf..1ce1331 100644
90475 --- a/kernel/workqueue.c
90476 +++ b/kernel/workqueue.c
90477 @@ -4678,7 +4678,7 @@ static void rebind_workers(struct worker_pool *pool)
90478 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
90479 worker_flags |= WORKER_REBOUND;
90480 worker_flags &= ~WORKER_UNBOUND;
90481 - ACCESS_ONCE(worker->flags) = worker_flags;
90482 + ACCESS_ONCE_RW(worker->flags) = worker_flags;
90483 }
90484
90485 spin_unlock_irq(&pool->lock);
90486 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
90487 index db25707..8b16430 100644
90488 --- a/lib/Kconfig.debug
90489 +++ b/lib/Kconfig.debug
90490 @@ -845,7 +845,7 @@ config DEBUG_MUTEXES
90491
90492 config DEBUG_WW_MUTEX_SLOWPATH
90493 bool "Wait/wound mutex debugging: Slowpath testing"
90494 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90495 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90496 select DEBUG_LOCK_ALLOC
90497 select DEBUG_SPINLOCK
90498 select DEBUG_MUTEXES
90499 @@ -858,7 +858,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
90500
90501 config DEBUG_LOCK_ALLOC
90502 bool "Lock debugging: detect incorrect freeing of live locks"
90503 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90504 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90505 select DEBUG_SPINLOCK
90506 select DEBUG_MUTEXES
90507 select LOCKDEP
90508 @@ -872,7 +872,7 @@ config DEBUG_LOCK_ALLOC
90509
90510 config PROVE_LOCKING
90511 bool "Lock debugging: prove locking correctness"
90512 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90513 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90514 select LOCKDEP
90515 select DEBUG_SPINLOCK
90516 select DEBUG_MUTEXES
90517 @@ -923,7 +923,7 @@ config LOCKDEP
90518
90519 config LOCK_STAT
90520 bool "Lock usage statistics"
90521 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90522 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90523 select LOCKDEP
90524 select DEBUG_SPINLOCK
90525 select DEBUG_MUTEXES
90526 @@ -1385,6 +1385,7 @@ config LATENCYTOP
90527 depends on DEBUG_KERNEL
90528 depends on STACKTRACE_SUPPORT
90529 depends on PROC_FS
90530 + depends on !GRKERNSEC_HIDESYM
90531 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
90532 select KALLSYMS
90533 select KALLSYMS_ALL
90534 @@ -1401,7 +1402,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
90535 config DEBUG_STRICT_USER_COPY_CHECKS
90536 bool "Strict user copy size checks"
90537 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
90538 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
90539 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
90540 help
90541 Enabling this option turns a certain set of sanity checks for user
90542 copy operations into compile time failures.
90543 @@ -1520,7 +1521,7 @@ endmenu # runtime tests
90544
90545 config PROVIDE_OHCI1394_DMA_INIT
90546 bool "Remote debugging over FireWire early on boot"
90547 - depends on PCI && X86
90548 + depends on PCI && X86 && !GRKERNSEC
90549 help
90550 If you want to debug problems which hang or crash the kernel early
90551 on boot and the crashing machine has a FireWire port, you can use
90552 @@ -1549,7 +1550,7 @@ config PROVIDE_OHCI1394_DMA_INIT
90553
90554 config FIREWIRE_OHCI_REMOTE_DMA
90555 bool "Remote debugging over FireWire with firewire-ohci"
90556 - depends on FIREWIRE_OHCI
90557 + depends on FIREWIRE_OHCI && !GRKERNSEC
90558 help
90559 This option lets you use the FireWire bus for remote debugging
90560 with help of the firewire-ohci driver. It enables unfiltered
90561 diff --git a/lib/Makefile b/lib/Makefile
90562 index 04944e9..f43eabe 100644
90563 --- a/lib/Makefile
90564 +++ b/lib/Makefile
90565 @@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
90566 obj-$(CONFIG_BTREE) += btree.o
90567 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
90568 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
90569 -obj-$(CONFIG_DEBUG_LIST) += list_debug.o
90570 +obj-y += list_debug.o
90571 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
90572
90573 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
90574 diff --git a/lib/bitmap.c b/lib/bitmap.c
90575 index 06f7e4f..f3cf2b0 100644
90576 --- a/lib/bitmap.c
90577 +++ b/lib/bitmap.c
90578 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
90579 {
90580 int c, old_c, totaldigits, ndigits, nchunks, nbits;
90581 u32 chunk;
90582 - const char __user __force *ubuf = (const char __user __force *)buf;
90583 + const char __user *ubuf = (const char __force_user *)buf;
90584
90585 bitmap_zero(maskp, nmaskbits);
90586
90587 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
90588 {
90589 if (!access_ok(VERIFY_READ, ubuf, ulen))
90590 return -EFAULT;
90591 - return __bitmap_parse((const char __force *)ubuf,
90592 + return __bitmap_parse((const char __force_kernel *)ubuf,
90593 ulen, 1, maskp, nmaskbits);
90594
90595 }
90596 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
90597 {
90598 unsigned a, b;
90599 int c, old_c, totaldigits;
90600 - const char __user __force *ubuf = (const char __user __force *)buf;
90601 + const char __user *ubuf = (const char __force_user *)buf;
90602 int exp_digit, in_range;
90603
90604 totaldigits = c = 0;
90605 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
90606 {
90607 if (!access_ok(VERIFY_READ, ubuf, ulen))
90608 return -EFAULT;
90609 - return __bitmap_parselist((const char __force *)ubuf,
90610 + return __bitmap_parselist((const char __force_kernel *)ubuf,
90611 ulen, 1, maskp, nmaskbits);
90612 }
90613 EXPORT_SYMBOL(bitmap_parselist_user);
90614 diff --git a/lib/bug.c b/lib/bug.c
90615 index 1686034..a9c00c8 100644
90616 --- a/lib/bug.c
90617 +++ b/lib/bug.c
90618 @@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
90619 return BUG_TRAP_TYPE_NONE;
90620
90621 bug = find_bug(bugaddr);
90622 + if (!bug)
90623 + return BUG_TRAP_TYPE_NONE;
90624
90625 file = NULL;
90626 line = 0;
90627 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
90628 index e0731c3..ad66444 100644
90629 --- a/lib/debugobjects.c
90630 +++ b/lib/debugobjects.c
90631 @@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
90632 if (limit > 4)
90633 return;
90634
90635 - is_on_stack = object_is_on_stack(addr);
90636 + is_on_stack = object_starts_on_stack(addr);
90637 if (is_on_stack == onstack)
90638 return;
90639
90640 diff --git a/lib/devres.c b/lib/devres.c
90641 index 8235331..5881053 100644
90642 --- a/lib/devres.c
90643 +++ b/lib/devres.c
90644 @@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
90645 void devm_iounmap(struct device *dev, void __iomem *addr)
90646 {
90647 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
90648 - (void *)addr));
90649 + (void __force *)addr));
90650 iounmap(addr);
90651 }
90652 EXPORT_SYMBOL(devm_iounmap);
90653 @@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
90654 {
90655 ioport_unmap(addr);
90656 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
90657 - devm_ioport_map_match, (void *)addr));
90658 + devm_ioport_map_match, (void __force *)addr));
90659 }
90660 EXPORT_SYMBOL(devm_ioport_unmap);
90661 #endif /* CONFIG_HAS_IOPORT */
90662 diff --git a/lib/div64.c b/lib/div64.c
90663 index 4382ad7..08aa558 100644
90664 --- a/lib/div64.c
90665 +++ b/lib/div64.c
90666 @@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
90667 EXPORT_SYMBOL(__div64_32);
90668
90669 #ifndef div_s64_rem
90670 -s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
90671 +s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
90672 {
90673 u64 quotient;
90674
90675 @@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
90676 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
90677 */
90678 #ifndef div64_u64
90679 -u64 div64_u64(u64 dividend, u64 divisor)
90680 +u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
90681 {
90682 u32 high = divisor >> 32;
90683 u64 quot;
90684 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
90685 index d87a17a..ac0d79a 100644
90686 --- a/lib/dma-debug.c
90687 +++ b/lib/dma-debug.c
90688 @@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
90689
90690 void dma_debug_add_bus(struct bus_type *bus)
90691 {
90692 - struct notifier_block *nb;
90693 + notifier_block_no_const *nb;
90694
90695 if (global_disable)
90696 return;
90697 @@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
90698
90699 static void check_for_stack(struct device *dev, void *addr)
90700 {
90701 - if (object_is_on_stack(addr))
90702 + if (object_starts_on_stack(addr))
90703 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
90704 "stack [addr=%p]\n", addr);
90705 }
90706 diff --git a/lib/inflate.c b/lib/inflate.c
90707 index 013a761..c28f3fc 100644
90708 --- a/lib/inflate.c
90709 +++ b/lib/inflate.c
90710 @@ -269,7 +269,7 @@ static void free(void *where)
90711 malloc_ptr = free_mem_ptr;
90712 }
90713 #else
90714 -#define malloc(a) kmalloc(a, GFP_KERNEL)
90715 +#define malloc(a) kmalloc((a), GFP_KERNEL)
90716 #define free(a) kfree(a)
90717 #endif
90718
90719 diff --git a/lib/ioremap.c b/lib/ioremap.c
90720 index 0c9216c..863bd89 100644
90721 --- a/lib/ioremap.c
90722 +++ b/lib/ioremap.c
90723 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
90724 unsigned long next;
90725
90726 phys_addr -= addr;
90727 - pmd = pmd_alloc(&init_mm, pud, addr);
90728 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
90729 if (!pmd)
90730 return -ENOMEM;
90731 do {
90732 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
90733 unsigned long next;
90734
90735 phys_addr -= addr;
90736 - pud = pud_alloc(&init_mm, pgd, addr);
90737 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
90738 if (!pud)
90739 return -ENOMEM;
90740 do {
90741 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
90742 index bd2bea9..6b3c95e 100644
90743 --- a/lib/is_single_threaded.c
90744 +++ b/lib/is_single_threaded.c
90745 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
90746 struct task_struct *p, *t;
90747 bool ret;
90748
90749 + if (!mm)
90750 + return true;
90751 +
90752 if (atomic_read(&task->signal->live) != 1)
90753 return false;
90754
90755 diff --git a/lib/kobject.c b/lib/kobject.c
90756 index 5b4b888..c2950f7 100644
90757 --- a/lib/kobject.c
90758 +++ b/lib/kobject.c
90759 @@ -957,9 +957,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
90760
90761
90762 static DEFINE_SPINLOCK(kobj_ns_type_lock);
90763 -static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
90764 +static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
90765
90766 -int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
90767 +int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
90768 {
90769 enum kobj_ns_type type = ops->type;
90770 int error;
90771 diff --git a/lib/list_debug.c b/lib/list_debug.c
90772 index c24c2f7..f0296f4 100644
90773 --- a/lib/list_debug.c
90774 +++ b/lib/list_debug.c
90775 @@ -11,7 +11,9 @@
90776 #include <linux/bug.h>
90777 #include <linux/kernel.h>
90778 #include <linux/rculist.h>
90779 +#include <linux/mm.h>
90780
90781 +#ifdef CONFIG_DEBUG_LIST
90782 /*
90783 * Insert a new entry between two known consecutive entries.
90784 *
90785 @@ -19,21 +21,40 @@
90786 * the prev/next entries already!
90787 */
90788
90789 +static bool __list_add_debug(struct list_head *new,
90790 + struct list_head *prev,
90791 + struct list_head *next)
90792 +{
90793 + if (unlikely(next->prev != prev)) {
90794 + printk(KERN_ERR "list_add corruption. next->prev should be "
90795 + "prev (%p), but was %p. (next=%p).\n",
90796 + prev, next->prev, next);
90797 + BUG();
90798 + return false;
90799 + }
90800 + if (unlikely(prev->next != next)) {
90801 + printk(KERN_ERR "list_add corruption. prev->next should be "
90802 + "next (%p), but was %p. (prev=%p).\n",
90803 + next, prev->next, prev);
90804 + BUG();
90805 + return false;
90806 + }
90807 + if (unlikely(new == prev || new == next)) {
90808 + printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
90809 + new, prev, next);
90810 + BUG();
90811 + return false;
90812 + }
90813 + return true;
90814 +}
90815 +
90816 void __list_add(struct list_head *new,
90817 - struct list_head *prev,
90818 - struct list_head *next)
90819 + struct list_head *prev,
90820 + struct list_head *next)
90821 {
90822 - WARN(next->prev != prev,
90823 - "list_add corruption. next->prev should be "
90824 - "prev (%p), but was %p. (next=%p).\n",
90825 - prev, next->prev, next);
90826 - WARN(prev->next != next,
90827 - "list_add corruption. prev->next should be "
90828 - "next (%p), but was %p. (prev=%p).\n",
90829 - next, prev->next, prev);
90830 - WARN(new == prev || new == next,
90831 - "list_add double add: new=%p, prev=%p, next=%p.\n",
90832 - new, prev, next);
90833 + if (!__list_add_debug(new, prev, next))
90834 + return;
90835 +
90836 next->prev = new;
90837 new->next = next;
90838 new->prev = prev;
90839 @@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
90840 }
90841 EXPORT_SYMBOL(__list_add);
90842
90843 -void __list_del_entry(struct list_head *entry)
90844 +static bool __list_del_entry_debug(struct list_head *entry)
90845 {
90846 struct list_head *prev, *next;
90847
90848 prev = entry->prev;
90849 next = entry->next;
90850
90851 - if (WARN(next == LIST_POISON1,
90852 - "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
90853 - entry, LIST_POISON1) ||
90854 - WARN(prev == LIST_POISON2,
90855 - "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
90856 - entry, LIST_POISON2) ||
90857 - WARN(prev->next != entry,
90858 - "list_del corruption. prev->next should be %p, "
90859 - "but was %p\n", entry, prev->next) ||
90860 - WARN(next->prev != entry,
90861 - "list_del corruption. next->prev should be %p, "
90862 - "but was %p\n", entry, next->prev))
90863 + if (unlikely(next == LIST_POISON1)) {
90864 + printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
90865 + entry, LIST_POISON1);
90866 + BUG();
90867 + return false;
90868 + }
90869 + if (unlikely(prev == LIST_POISON2)) {
90870 + printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
90871 + entry, LIST_POISON2);
90872 + BUG();
90873 + return false;
90874 + }
90875 + if (unlikely(entry->prev->next != entry)) {
90876 + printk(KERN_ERR "list_del corruption. prev->next should be %p, "
90877 + "but was %p\n", entry, prev->next);
90878 + BUG();
90879 + return false;
90880 + }
90881 + if (unlikely(entry->next->prev != entry)) {
90882 + printk(KERN_ERR "list_del corruption. next->prev should be %p, "
90883 + "but was %p\n", entry, next->prev);
90884 + BUG();
90885 + return false;
90886 + }
90887 + return true;
90888 +}
90889 +
90890 +void __list_del_entry(struct list_head *entry)
90891 +{
90892 + if (!__list_del_entry_debug(entry))
90893 return;
90894
90895 - __list_del(prev, next);
90896 + __list_del(entry->prev, entry->next);
90897 }
90898 EXPORT_SYMBOL(__list_del_entry);
90899
90900 @@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
90901 void __list_add_rcu(struct list_head *new,
90902 struct list_head *prev, struct list_head *next)
90903 {
90904 - WARN(next->prev != prev,
90905 - "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
90906 - prev, next->prev, next);
90907 - WARN(prev->next != next,
90908 - "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
90909 - next, prev->next, prev);
90910 + if (!__list_add_debug(new, prev, next))
90911 + return;
90912 +
90913 new->next = next;
90914 new->prev = prev;
90915 rcu_assign_pointer(list_next_rcu(prev), new);
90916 next->prev = new;
90917 }
90918 EXPORT_SYMBOL(__list_add_rcu);
90919 +#endif
90920 +
90921 +void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
90922 +{
90923 +#ifdef CONFIG_DEBUG_LIST
90924 + if (!__list_add_debug(new, prev, next))
90925 + return;
90926 +#endif
90927 +
90928 + pax_open_kernel();
90929 + next->prev = new;
90930 + new->next = next;
90931 + new->prev = prev;
90932 + prev->next = new;
90933 + pax_close_kernel();
90934 +}
90935 +EXPORT_SYMBOL(__pax_list_add);
90936 +
90937 +void pax_list_del(struct list_head *entry)
90938 +{
90939 +#ifdef CONFIG_DEBUG_LIST
90940 + if (!__list_del_entry_debug(entry))
90941 + return;
90942 +#endif
90943 +
90944 + pax_open_kernel();
90945 + __list_del(entry->prev, entry->next);
90946 + entry->next = LIST_POISON1;
90947 + entry->prev = LIST_POISON2;
90948 + pax_close_kernel();
90949 +}
90950 +EXPORT_SYMBOL(pax_list_del);
90951 +
90952 +void pax_list_del_init(struct list_head *entry)
90953 +{
90954 + pax_open_kernel();
90955 + __list_del(entry->prev, entry->next);
90956 + INIT_LIST_HEAD(entry);
90957 + pax_close_kernel();
90958 +}
90959 +EXPORT_SYMBOL(pax_list_del_init);
90960 +
90961 +void __pax_list_add_rcu(struct list_head *new,
90962 + struct list_head *prev, struct list_head *next)
90963 +{
90964 +#ifdef CONFIG_DEBUG_LIST
90965 + if (!__list_add_debug(new, prev, next))
90966 + return;
90967 +#endif
90968 +
90969 + pax_open_kernel();
90970 + new->next = next;
90971 + new->prev = prev;
90972 + rcu_assign_pointer(list_next_rcu(prev), new);
90973 + next->prev = new;
90974 + pax_close_kernel();
90975 +}
90976 +EXPORT_SYMBOL(__pax_list_add_rcu);
90977 +
90978 +void pax_list_del_rcu(struct list_head *entry)
90979 +{
90980 +#ifdef CONFIG_DEBUG_LIST
90981 + if (!__list_del_entry_debug(entry))
90982 + return;
90983 +#endif
90984 +
90985 + pax_open_kernel();
90986 + __list_del(entry->prev, entry->next);
90987 + entry->next = LIST_POISON1;
90988 + entry->prev = LIST_POISON2;
90989 + pax_close_kernel();
90990 +}
90991 +EXPORT_SYMBOL(pax_list_del_rcu);
90992 diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
90993 index 1a53d49..ace934c 100644
90994 --- a/lib/percpu-refcount.c
90995 +++ b/lib/percpu-refcount.c
90996 @@ -29,7 +29,7 @@
90997 * can't hit 0 before we've added up all the percpu refs.
90998 */
90999
91000 -#define PCPU_COUNT_BIAS (1U << 31)
91001 +#define PCPU_COUNT_BIAS (1U << 30)
91002
91003 /**
91004 * percpu_ref_init - initialize a percpu refcount
91005 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
91006 index 7811ed3..f80ca19 100644
91007 --- a/lib/radix-tree.c
91008 +++ b/lib/radix-tree.c
91009 @@ -93,7 +93,7 @@ struct radix_tree_preload {
91010 int nr;
91011 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
91012 };
91013 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
91014 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
91015
91016 static inline void *ptr_to_indirect(void *ptr)
91017 {
91018 diff --git a/lib/random32.c b/lib/random32.c
91019 index 1e5b2df..fb616c7 100644
91020 --- a/lib/random32.c
91021 +++ b/lib/random32.c
91022 @@ -44,7 +44,7 @@
91023 static void __init prandom_state_selftest(void);
91024 #endif
91025
91026 -static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
91027 +static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
91028
91029 /**
91030 * prandom_u32_state - seeded pseudo-random number generator.
91031 diff --git a/lib/rbtree.c b/lib/rbtree.c
91032 index 65f4eff..2cfa167 100644
91033 --- a/lib/rbtree.c
91034 +++ b/lib/rbtree.c
91035 @@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
91036 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
91037
91038 static const struct rb_augment_callbacks dummy_callbacks = {
91039 - dummy_propagate, dummy_copy, dummy_rotate
91040 + .propagate = dummy_propagate,
91041 + .copy = dummy_copy,
91042 + .rotate = dummy_rotate
91043 };
91044
91045 void rb_insert_color(struct rb_node *node, struct rb_root *root)
91046 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
91047 index bb2b201..46abaf9 100644
91048 --- a/lib/strncpy_from_user.c
91049 +++ b/lib/strncpy_from_user.c
91050 @@ -21,7 +21,7 @@
91051 */
91052 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
91053 {
91054 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
91055 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
91056 long res = 0;
91057
91058 /*
91059 diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
91060 index a28df52..3d55877 100644
91061 --- a/lib/strnlen_user.c
91062 +++ b/lib/strnlen_user.c
91063 @@ -26,7 +26,7 @@
91064 */
91065 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
91066 {
91067 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
91068 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
91069 long align, res = 0;
91070 unsigned long c;
91071
91072 diff --git a/lib/swiotlb.c b/lib/swiotlb.c
91073 index e4399fa..5e8b214 100644
91074 --- a/lib/swiotlb.c
91075 +++ b/lib/swiotlb.c
91076 @@ -668,7 +668,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
91077
91078 void
91079 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
91080 - dma_addr_t dev_addr)
91081 + dma_addr_t dev_addr, struct dma_attrs *attrs)
91082 {
91083 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
91084
91085 diff --git a/lib/usercopy.c b/lib/usercopy.c
91086 index 4f5b1dd..7cab418 100644
91087 --- a/lib/usercopy.c
91088 +++ b/lib/usercopy.c
91089 @@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
91090 WARN(1, "Buffer overflow detected!\n");
91091 }
91092 EXPORT_SYMBOL(copy_from_user_overflow);
91093 +
91094 +void copy_to_user_overflow(void)
91095 +{
91096 + WARN(1, "Buffer overflow detected!\n");
91097 +}
91098 +EXPORT_SYMBOL(copy_to_user_overflow);
91099 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
91100 index 10909c5..653e1b8 100644
91101 --- a/lib/vsprintf.c
91102 +++ b/lib/vsprintf.c
91103 @@ -16,6 +16,9 @@
91104 * - scnprintf and vscnprintf
91105 */
91106
91107 +#ifdef CONFIG_GRKERNSEC_HIDESYM
91108 +#define __INCLUDED_BY_HIDESYM 1
91109 +#endif
91110 #include <stdarg.h>
91111 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
91112 #include <linux/types.h>
91113 @@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
91114 return number(buf, end, *(const netdev_features_t *)addr, spec);
91115 }
91116
91117 +#ifdef CONFIG_GRKERNSEC_HIDESYM
91118 +int kptr_restrict __read_mostly = 2;
91119 +#else
91120 int kptr_restrict __read_mostly;
91121 +#endif
91122
91123 /*
91124 * Show a '%p' thing. A kernel extension is that the '%p' is followed
91125 @@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
91126 * - 'f' For simple symbolic function names without offset
91127 * - 'S' For symbolic direct pointers with offset
91128 * - 's' For symbolic direct pointers without offset
91129 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
91130 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
91131 * - 'B' For backtraced symbolic direct pointers with offset
91132 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
91133 @@ -1234,12 +1242,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
91134
91135 if (!ptr && *fmt != 'K') {
91136 /*
91137 - * Print (null) with the same width as a pointer so it makes
91138 + * Print (nil) with the same width as a pointer so it makes
91139 * tabular output look nice.
91140 */
91141 if (spec.field_width == -1)
91142 spec.field_width = default_width;
91143 - return string(buf, end, "(null)", spec);
91144 + return string(buf, end, "(nil)", spec);
91145 }
91146
91147 switch (*fmt) {
91148 @@ -1249,6 +1257,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
91149 /* Fallthrough */
91150 case 'S':
91151 case 's':
91152 +#ifdef CONFIG_GRKERNSEC_HIDESYM
91153 + break;
91154 +#else
91155 + return symbol_string(buf, end, ptr, spec, fmt);
91156 +#endif
91157 + case 'A':
91158 case 'B':
91159 return symbol_string(buf, end, ptr, spec, fmt);
91160 case 'R':
91161 @@ -1304,6 +1318,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
91162 va_end(va);
91163 return buf;
91164 }
91165 + case 'P':
91166 + break;
91167 case 'K':
91168 /*
91169 * %pK cannot be used in IRQ context because its test
91170 @@ -1365,6 +1381,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
91171 ((const struct file *)ptr)->f_path.dentry,
91172 spec, fmt);
91173 }
91174 +
91175 +#ifdef CONFIG_GRKERNSEC_HIDESYM
91176 + /* 'P' = approved pointers to copy to userland,
91177 + as in the /proc/kallsyms case, as we make it display nothing
91178 + for non-root users, and the real contents for root users
91179 + Also ignore 'K' pointers, since we force their NULLing for non-root users
91180 + above
91181 + */
91182 + if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
91183 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
91184 + dump_stack();
91185 + ptr = NULL;
91186 + }
91187 +#endif
91188 +
91189 spec.flags |= SMALL;
91190 if (spec.field_width == -1) {
91191 spec.field_width = default_width;
91192 @@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
91193 typeof(type) value; \
91194 if (sizeof(type) == 8) { \
91195 args = PTR_ALIGN(args, sizeof(u32)); \
91196 - *(u32 *)&value = *(u32 *)args; \
91197 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
91198 + *(u32 *)&value = *(const u32 *)args; \
91199 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
91200 } else { \
91201 args = PTR_ALIGN(args, sizeof(type)); \
91202 - value = *(typeof(type) *)args; \
91203 + value = *(const typeof(type) *)args; \
91204 } \
91205 args += sizeof(type); \
91206 value; \
91207 @@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
91208 case FORMAT_TYPE_STR: {
91209 const char *str_arg = args;
91210 args += strlen(str_arg) + 1;
91211 - str = string(str, end, (char *)str_arg, spec);
91212 + str = string(str, end, str_arg, spec);
91213 break;
91214 }
91215
91216 diff --git a/localversion-grsec b/localversion-grsec
91217 new file mode 100644
91218 index 0000000..7cd6065
91219 --- /dev/null
91220 +++ b/localversion-grsec
91221 @@ -0,0 +1 @@
91222 +-grsec
91223 diff --git a/mm/Kconfig b/mm/Kconfig
91224 index 723bbe0..ea624b1 100644
91225 --- a/mm/Kconfig
91226 +++ b/mm/Kconfig
91227 @@ -326,10 +326,11 @@ config KSM
91228 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
91229
91230 config DEFAULT_MMAP_MIN_ADDR
91231 - int "Low address space to protect from user allocation"
91232 + int "Low address space to protect from user allocation"
91233 depends on MMU
91234 - default 4096
91235 - help
91236 + default 32768 if ALPHA || ARM || PARISC || SPARC32
91237 + default 65536
91238 + help
91239 This is the portion of low virtual memory which should be protected
91240 from userspace allocation. Keeping a user from writing to low pages
91241 can help reduce the impact of kernel NULL pointer bugs.
91242 @@ -360,7 +361,7 @@ config MEMORY_FAILURE
91243
91244 config HWPOISON_INJECT
91245 tristate "HWPoison pages injector"
91246 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
91247 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
91248 select PROC_PAGE_MONITOR
91249
91250 config NOMMU_INITIAL_TRIM_EXCESS
91251 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
91252 index ce682f7..1fb54f9 100644
91253 --- a/mm/backing-dev.c
91254 +++ b/mm/backing-dev.c
91255 @@ -12,7 +12,7 @@
91256 #include <linux/device.h>
91257 #include <trace/events/writeback.h>
91258
91259 -static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
91260 +static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
91261
91262 struct backing_dev_info default_backing_dev_info = {
91263 .name = "default",
91264 @@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
91265 return err;
91266
91267 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
91268 - atomic_long_inc_return(&bdi_seq));
91269 + atomic_long_inc_return_unchecked(&bdi_seq));
91270 if (err) {
91271 bdi_destroy(bdi);
91272 return err;
91273 diff --git a/mm/filemap.c b/mm/filemap.c
91274 index b7749a9..50d1123 100644
91275 --- a/mm/filemap.c
91276 +++ b/mm/filemap.c
91277 @@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
91278 struct address_space *mapping = file->f_mapping;
91279
91280 if (!mapping->a_ops->readpage)
91281 - return -ENOEXEC;
91282 + return -ENODEV;
91283 file_accessed(file);
91284 vma->vm_ops = &generic_file_vm_ops;
91285 return 0;
91286 @@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
91287
91288 while (bytes) {
91289 char __user *buf = iov->iov_base + base;
91290 - int copy = min(bytes, iov->iov_len - base);
91291 + size_t copy = min(bytes, iov->iov_len - base);
91292
91293 base = 0;
91294 left = __copy_from_user_inatomic(vaddr, buf, copy);
91295 @@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
91296 BUG_ON(!in_atomic());
91297 kaddr = kmap_atomic(page);
91298 if (likely(i->nr_segs == 1)) {
91299 - int left;
91300 + size_t left;
91301 char __user *buf = i->iov->iov_base + i->iov_offset;
91302 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
91303 copied = bytes - left;
91304 @@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
91305
91306 kaddr = kmap(page);
91307 if (likely(i->nr_segs == 1)) {
91308 - int left;
91309 + size_t left;
91310 char __user *buf = i->iov->iov_base + i->iov_offset;
91311 left = __copy_from_user(kaddr + offset, buf, bytes);
91312 copied = bytes - left;
91313 @@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
91314 * zero-length segments (without overruning the iovec).
91315 */
91316 while (bytes || unlikely(i->count && !iov->iov_len)) {
91317 - int copy;
91318 + size_t copy;
91319
91320 copy = min(bytes, iov->iov_len - base);
91321 BUG_ON(!i->count || i->count < copy);
91322 @@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
91323 *pos = i_size_read(inode);
91324
91325 if (limit != RLIM_INFINITY) {
91326 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
91327 if (*pos >= limit) {
91328 send_sig(SIGXFSZ, current, 0);
91329 return -EFBIG;
91330 diff --git a/mm/fremap.c b/mm/fremap.c
91331 index bbc4d66..117b798 100644
91332 --- a/mm/fremap.c
91333 +++ b/mm/fremap.c
91334 @@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
91335 retry:
91336 vma = find_vma(mm, start);
91337
91338 +#ifdef CONFIG_PAX_SEGMEXEC
91339 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
91340 + goto out;
91341 +#endif
91342 +
91343 /*
91344 * Make sure the vma is shared, that it supports prefaulting,
91345 * and that the remapped range is valid and fully within
91346 diff --git a/mm/highmem.c b/mm/highmem.c
91347 index b32b70c..e512eb0 100644
91348 --- a/mm/highmem.c
91349 +++ b/mm/highmem.c
91350 @@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
91351 * So no dangers, even with speculative execution.
91352 */
91353 page = pte_page(pkmap_page_table[i]);
91354 + pax_open_kernel();
91355 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
91356 -
91357 + pax_close_kernel();
91358 set_page_address(page, NULL);
91359 need_flush = 1;
91360 }
91361 @@ -198,9 +199,11 @@ start:
91362 }
91363 }
91364 vaddr = PKMAP_ADDR(last_pkmap_nr);
91365 +
91366 + pax_open_kernel();
91367 set_pte_at(&init_mm, vaddr,
91368 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
91369 -
91370 + pax_close_kernel();
91371 pkmap_count[last_pkmap_nr] = 1;
91372 set_page_address(page, (void *)vaddr);
91373
91374 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
91375 index dee6cf4..52b94f7 100644
91376 --- a/mm/hugetlb.c
91377 +++ b/mm/hugetlb.c
91378 @@ -2077,15 +2077,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
91379 struct hstate *h = &default_hstate;
91380 unsigned long tmp;
91381 int ret;
91382 + ctl_table_no_const hugetlb_table;
91383
91384 tmp = h->max_huge_pages;
91385
91386 if (write && h->order >= MAX_ORDER)
91387 return -EINVAL;
91388
91389 - table->data = &tmp;
91390 - table->maxlen = sizeof(unsigned long);
91391 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
91392 + hugetlb_table = *table;
91393 + hugetlb_table.data = &tmp;
91394 + hugetlb_table.maxlen = sizeof(unsigned long);
91395 + ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
91396 if (ret)
91397 goto out;
91398
91399 @@ -2130,15 +2132,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
91400 struct hstate *h = &default_hstate;
91401 unsigned long tmp;
91402 int ret;
91403 + ctl_table_no_const hugetlb_table;
91404
91405 tmp = h->nr_overcommit_huge_pages;
91406
91407 if (write && h->order >= MAX_ORDER)
91408 return -EINVAL;
91409
91410 - table->data = &tmp;
91411 - table->maxlen = sizeof(unsigned long);
91412 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
91413 + hugetlb_table = *table;
91414 + hugetlb_table.data = &tmp;
91415 + hugetlb_table.maxlen = sizeof(unsigned long);
91416 + ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
91417 if (ret)
91418 goto out;
91419
91420 @@ -2596,6 +2600,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
91421 return 1;
91422 }
91423
91424 +#ifdef CONFIG_PAX_SEGMEXEC
91425 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
91426 +{
91427 + struct mm_struct *mm = vma->vm_mm;
91428 + struct vm_area_struct *vma_m;
91429 + unsigned long address_m;
91430 + pte_t *ptep_m;
91431 +
91432 + vma_m = pax_find_mirror_vma(vma);
91433 + if (!vma_m)
91434 + return;
91435 +
91436 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91437 + address_m = address + SEGMEXEC_TASK_SIZE;
91438 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
91439 + get_page(page_m);
91440 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
91441 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
91442 +}
91443 +#endif
91444 +
91445 /*
91446 * Hugetlb_cow() should be called with page lock of the original hugepage held.
91447 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
91448 @@ -2712,6 +2737,11 @@ retry_avoidcopy:
91449 make_huge_pte(vma, new_page, 1));
91450 page_remove_rmap(old_page);
91451 hugepage_add_new_anon_rmap(new_page, vma, address);
91452 +
91453 +#ifdef CONFIG_PAX_SEGMEXEC
91454 + pax_mirror_huge_pte(vma, address, new_page);
91455 +#endif
91456 +
91457 /* Make the old page be freed below */
91458 new_page = old_page;
91459 }
91460 @@ -2876,6 +2906,10 @@ retry:
91461 && (vma->vm_flags & VM_SHARED)));
91462 set_huge_pte_at(mm, address, ptep, new_pte);
91463
91464 +#ifdef CONFIG_PAX_SEGMEXEC
91465 + pax_mirror_huge_pte(vma, address, page);
91466 +#endif
91467 +
91468 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
91469 /* Optimization, do the COW without a second fault */
91470 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
91471 @@ -2906,6 +2940,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91472 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
91473 struct hstate *h = hstate_vma(vma);
91474
91475 +#ifdef CONFIG_PAX_SEGMEXEC
91476 + struct vm_area_struct *vma_m;
91477 +#endif
91478 +
91479 address &= huge_page_mask(h);
91480
91481 ptep = huge_pte_offset(mm, address);
91482 @@ -2919,6 +2957,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91483 VM_FAULT_SET_HINDEX(hstate_index(h));
91484 }
91485
91486 +#ifdef CONFIG_PAX_SEGMEXEC
91487 + vma_m = pax_find_mirror_vma(vma);
91488 + if (vma_m) {
91489 + unsigned long address_m;
91490 +
91491 + if (vma->vm_start > vma_m->vm_start) {
91492 + address_m = address;
91493 + address -= SEGMEXEC_TASK_SIZE;
91494 + vma = vma_m;
91495 + h = hstate_vma(vma);
91496 + } else
91497 + address_m = address + SEGMEXEC_TASK_SIZE;
91498 +
91499 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
91500 + return VM_FAULT_OOM;
91501 + address_m &= HPAGE_MASK;
91502 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
91503 + }
91504 +#endif
91505 +
91506 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
91507 if (!ptep)
91508 return VM_FAULT_OOM;
91509 diff --git a/mm/internal.h b/mm/internal.h
91510 index 8b6cfd6..ec809a6 100644
91511 --- a/mm/internal.h
91512 +++ b/mm/internal.h
91513 @@ -96,6 +96,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
91514 * in mm/page_alloc.c
91515 */
91516 extern void __free_pages_bootmem(struct page *page, unsigned int order);
91517 +extern void free_compound_page(struct page *page);
91518 extern void prep_compound_page(struct page *page, unsigned long order);
91519 #ifdef CONFIG_MEMORY_FAILURE
91520 extern bool is_free_buddy_page(struct page *page);
91521 @@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
91522
91523 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
91524 unsigned long, unsigned long,
91525 - unsigned long, unsigned long);
91526 + unsigned long, unsigned long) __intentional_overflow(-1);
91527
91528 extern void set_pageblock_order(void);
91529 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
91530 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
91531 index 31f01c5..7015178 100644
91532 --- a/mm/kmemleak.c
91533 +++ b/mm/kmemleak.c
91534 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
91535
91536 for (i = 0; i < object->trace_len; i++) {
91537 void *ptr = (void *)object->trace[i];
91538 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
91539 + seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
91540 }
91541 }
91542
91543 @@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
91544 return -ENOMEM;
91545 }
91546
91547 - dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
91548 + dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
91549 &kmemleak_fops);
91550 if (!dentry)
91551 pr_warning("Failed to create the debugfs kmemleak file\n");
91552 diff --git a/mm/maccess.c b/mm/maccess.c
91553 index d53adf9..03a24bf 100644
91554 --- a/mm/maccess.c
91555 +++ b/mm/maccess.c
91556 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
91557 set_fs(KERNEL_DS);
91558 pagefault_disable();
91559 ret = __copy_from_user_inatomic(dst,
91560 - (__force const void __user *)src, size);
91561 + (const void __force_user *)src, size);
91562 pagefault_enable();
91563 set_fs(old_fs);
91564
91565 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
91566
91567 set_fs(KERNEL_DS);
91568 pagefault_disable();
91569 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
91570 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
91571 pagefault_enable();
91572 set_fs(old_fs);
91573
91574 diff --git a/mm/madvise.c b/mm/madvise.c
91575 index 539eeb9..e24a987 100644
91576 --- a/mm/madvise.c
91577 +++ b/mm/madvise.c
91578 @@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
91579 pgoff_t pgoff;
91580 unsigned long new_flags = vma->vm_flags;
91581
91582 +#ifdef CONFIG_PAX_SEGMEXEC
91583 + struct vm_area_struct *vma_m;
91584 +#endif
91585 +
91586 switch (behavior) {
91587 case MADV_NORMAL:
91588 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
91589 @@ -126,6 +130,13 @@ success:
91590 /*
91591 * vm_flags is protected by the mmap_sem held in write mode.
91592 */
91593 +
91594 +#ifdef CONFIG_PAX_SEGMEXEC
91595 + vma_m = pax_find_mirror_vma(vma);
91596 + if (vma_m)
91597 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
91598 +#endif
91599 +
91600 vma->vm_flags = new_flags;
91601
91602 out:
91603 @@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
91604 struct vm_area_struct **prev,
91605 unsigned long start, unsigned long end)
91606 {
91607 +
91608 +#ifdef CONFIG_PAX_SEGMEXEC
91609 + struct vm_area_struct *vma_m;
91610 +#endif
91611 +
91612 *prev = vma;
91613 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
91614 return -EINVAL;
91615 @@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
91616 zap_page_range(vma, start, end - start, &details);
91617 } else
91618 zap_page_range(vma, start, end - start, NULL);
91619 +
91620 +#ifdef CONFIG_PAX_SEGMEXEC
91621 + vma_m = pax_find_mirror_vma(vma);
91622 + if (vma_m) {
91623 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
91624 + struct zap_details details = {
91625 + .nonlinear_vma = vma_m,
91626 + .last_index = ULONG_MAX,
91627 + };
91628 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
91629 + } else
91630 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
91631 + }
91632 +#endif
91633 +
91634 return 0;
91635 }
91636
91637 @@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
91638 if (end < start)
91639 return error;
91640
91641 +#ifdef CONFIG_PAX_SEGMEXEC
91642 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
91643 + if (end > SEGMEXEC_TASK_SIZE)
91644 + return error;
91645 + } else
91646 +#endif
91647 +
91648 + if (end > TASK_SIZE)
91649 + return error;
91650 +
91651 error = 0;
91652 if (end == start)
91653 return error;
91654 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
91655 index 90977ac..487ab84 100644
91656 --- a/mm/memory-failure.c
91657 +++ b/mm/memory-failure.c
91658 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
91659
91660 int sysctl_memory_failure_recovery __read_mostly = 1;
91661
91662 -atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
91663 +atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
91664
91665 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
91666
91667 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
91668 pfn, t->comm, t->pid);
91669 si.si_signo = SIGBUS;
91670 si.si_errno = 0;
91671 - si.si_addr = (void *)addr;
91672 + si.si_addr = (void __user *)addr;
91673 #ifdef __ARCH_SI_TRAPNO
91674 si.si_trapno = trapno;
91675 #endif
91676 @@ -762,7 +762,7 @@ static struct page_state {
91677 unsigned long res;
91678 char *msg;
91679 int (*action)(struct page *p, unsigned long pfn);
91680 -} error_states[] = {
91681 +} __do_const error_states[] = {
91682 { reserved, reserved, "reserved kernel", me_kernel },
91683 /*
91684 * free pages are specially detected outside this table:
91685 @@ -1062,7 +1062,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
91686 nr_pages = 1 << compound_order(hpage);
91687 else /* normal page or thp */
91688 nr_pages = 1;
91689 - atomic_long_add(nr_pages, &num_poisoned_pages);
91690 + atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
91691
91692 /*
91693 * We need/can do nothing about count=0 pages.
91694 @@ -1092,7 +1092,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
91695 if (!PageHWPoison(hpage)
91696 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
91697 || (p != hpage && TestSetPageHWPoison(hpage))) {
91698 - atomic_long_sub(nr_pages, &num_poisoned_pages);
91699 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
91700 return 0;
91701 }
91702 set_page_hwpoison_huge_page(hpage);
91703 @@ -1161,7 +1161,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
91704 }
91705 if (hwpoison_filter(p)) {
91706 if (TestClearPageHWPoison(p))
91707 - atomic_long_sub(nr_pages, &num_poisoned_pages);
91708 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
91709 unlock_page(hpage);
91710 put_page(hpage);
91711 return 0;
91712 @@ -1383,7 +1383,7 @@ int unpoison_memory(unsigned long pfn)
91713 return 0;
91714 }
91715 if (TestClearPageHWPoison(p))
91716 - atomic_long_dec(&num_poisoned_pages);
91717 + atomic_long_dec_unchecked(&num_poisoned_pages);
91718 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
91719 return 0;
91720 }
91721 @@ -1397,7 +1397,7 @@ int unpoison_memory(unsigned long pfn)
91722 */
91723 if (TestClearPageHWPoison(page)) {
91724 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
91725 - atomic_long_sub(nr_pages, &num_poisoned_pages);
91726 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
91727 freeit = 1;
91728 if (PageHuge(page))
91729 clear_page_hwpoison_huge_page(page);
91730 @@ -1522,11 +1522,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
91731 if (PageHuge(page)) {
91732 set_page_hwpoison_huge_page(hpage);
91733 dequeue_hwpoisoned_huge_page(hpage);
91734 - atomic_long_add(1 << compound_order(hpage),
91735 + atomic_long_add_unchecked(1 << compound_order(hpage),
91736 &num_poisoned_pages);
91737 } else {
91738 SetPageHWPoison(page);
91739 - atomic_long_inc(&num_poisoned_pages);
91740 + atomic_long_inc_unchecked(&num_poisoned_pages);
91741 }
91742 }
91743 return ret;
91744 @@ -1565,7 +1565,7 @@ static int __soft_offline_page(struct page *page, int flags)
91745 put_page(page);
91746 pr_info("soft_offline: %#lx: invalidated\n", pfn);
91747 SetPageHWPoison(page);
91748 - atomic_long_inc(&num_poisoned_pages);
91749 + atomic_long_inc_unchecked(&num_poisoned_pages);
91750 return 0;
91751 }
91752
91753 @@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
91754 if (!is_free_buddy_page(page))
91755 pr_info("soft offline: %#lx: page leaked\n",
91756 pfn);
91757 - atomic_long_inc(&num_poisoned_pages);
91758 + atomic_long_inc_unchecked(&num_poisoned_pages);
91759 }
91760 } else {
91761 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
91762 @@ -1684,11 +1684,11 @@ int soft_offline_page(struct page *page, int flags)
91763 if (PageHuge(page)) {
91764 set_page_hwpoison_huge_page(hpage);
91765 dequeue_hwpoisoned_huge_page(hpage);
91766 - atomic_long_add(1 << compound_order(hpage),
91767 + atomic_long_add_unchecked(1 << compound_order(hpage),
91768 &num_poisoned_pages);
91769 } else {
91770 SetPageHWPoison(page);
91771 - atomic_long_inc(&num_poisoned_pages);
91772 + atomic_long_inc_unchecked(&num_poisoned_pages);
91773 }
91774 }
91775 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
91776 diff --git a/mm/memory.c b/mm/memory.c
91777 index dda27b9..c56b9d6 100644
91778 --- a/mm/memory.c
91779 +++ b/mm/memory.c
91780 @@ -402,6 +402,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
91781 free_pte_range(tlb, pmd, addr);
91782 } while (pmd++, addr = next, addr != end);
91783
91784 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
91785 start &= PUD_MASK;
91786 if (start < floor)
91787 return;
91788 @@ -416,6 +417,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
91789 pmd = pmd_offset(pud, start);
91790 pud_clear(pud);
91791 pmd_free_tlb(tlb, pmd, start);
91792 +#endif
91793 +
91794 }
91795
91796 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
91797 @@ -435,6 +438,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
91798 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
91799 } while (pud++, addr = next, addr != end);
91800
91801 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
91802 start &= PGDIR_MASK;
91803 if (start < floor)
91804 return;
91805 @@ -449,6 +453,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
91806 pud = pud_offset(pgd, start);
91807 pgd_clear(pgd);
91808 pud_free_tlb(tlb, pud, start);
91809 +#endif
91810 +
91811 }
91812
91813 /*
91814 @@ -1635,12 +1641,6 @@ no_page_table:
91815 return page;
91816 }
91817
91818 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
91819 -{
91820 - return stack_guard_page_start(vma, addr) ||
91821 - stack_guard_page_end(vma, addr+PAGE_SIZE);
91822 -}
91823 -
91824 /**
91825 * __get_user_pages() - pin user pages in memory
91826 * @tsk: task_struct of target task
91827 @@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91828
91829 i = 0;
91830
91831 - do {
91832 + while (nr_pages) {
91833 struct vm_area_struct *vma;
91834
91835 - vma = find_extend_vma(mm, start);
91836 + vma = find_vma(mm, start);
91837 if (!vma && in_gate_area(mm, start)) {
91838 unsigned long pg = start & PAGE_MASK;
91839 pgd_t *pgd;
91840 @@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91841 goto next_page;
91842 }
91843
91844 - if (!vma ||
91845 + if (!vma || start < vma->vm_start ||
91846 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
91847 !(vm_flags & vma->vm_flags))
91848 return i ? : -EFAULT;
91849 @@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91850 int ret;
91851 unsigned int fault_flags = 0;
91852
91853 - /* For mlock, just skip the stack guard page. */
91854 - if (foll_flags & FOLL_MLOCK) {
91855 - if (stack_guard_page(vma, start))
91856 - goto next_page;
91857 - }
91858 if (foll_flags & FOLL_WRITE)
91859 fault_flags |= FAULT_FLAG_WRITE;
91860 if (nonblocking)
91861 @@ -1892,7 +1887,7 @@ next_page:
91862 start += page_increm * PAGE_SIZE;
91863 nr_pages -= page_increm;
91864 } while (nr_pages && start < vma->vm_end);
91865 - } while (nr_pages);
91866 + }
91867 return i;
91868 }
91869 EXPORT_SYMBOL(__get_user_pages);
91870 @@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
91871 page_add_file_rmap(page);
91872 set_pte_at(mm, addr, pte, mk_pte(page, prot));
91873
91874 +#ifdef CONFIG_PAX_SEGMEXEC
91875 + pax_mirror_file_pte(vma, addr, page, ptl);
91876 +#endif
91877 +
91878 retval = 0;
91879 pte_unmap_unlock(pte, ptl);
91880 return retval;
91881 @@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
91882 if (!page_count(page))
91883 return -EINVAL;
91884 if (!(vma->vm_flags & VM_MIXEDMAP)) {
91885 +
91886 +#ifdef CONFIG_PAX_SEGMEXEC
91887 + struct vm_area_struct *vma_m;
91888 +#endif
91889 +
91890 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
91891 BUG_ON(vma->vm_flags & VM_PFNMAP);
91892 vma->vm_flags |= VM_MIXEDMAP;
91893 +
91894 +#ifdef CONFIG_PAX_SEGMEXEC
91895 + vma_m = pax_find_mirror_vma(vma);
91896 + if (vma_m)
91897 + vma_m->vm_flags |= VM_MIXEDMAP;
91898 +#endif
91899 +
91900 }
91901 return insert_page(vma, addr, page, vma->vm_page_prot);
91902 }
91903 @@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
91904 unsigned long pfn)
91905 {
91906 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
91907 + BUG_ON(vma->vm_mirror);
91908
91909 if (addr < vma->vm_start || addr >= vma->vm_end)
91910 return -EFAULT;
91911 @@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
91912
91913 BUG_ON(pud_huge(*pud));
91914
91915 - pmd = pmd_alloc(mm, pud, addr);
91916 + pmd = (mm == &init_mm) ?
91917 + pmd_alloc_kernel(mm, pud, addr) :
91918 + pmd_alloc(mm, pud, addr);
91919 if (!pmd)
91920 return -ENOMEM;
91921 do {
91922 @@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
91923 unsigned long next;
91924 int err;
91925
91926 - pud = pud_alloc(mm, pgd, addr);
91927 + pud = (mm == &init_mm) ?
91928 + pud_alloc_kernel(mm, pgd, addr) :
91929 + pud_alloc(mm, pgd, addr);
91930 if (!pud)
91931 return -ENOMEM;
91932 do {
91933 @@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
91934 copy_user_highpage(dst, src, va, vma);
91935 }
91936
91937 +#ifdef CONFIG_PAX_SEGMEXEC
91938 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
91939 +{
91940 + struct mm_struct *mm = vma->vm_mm;
91941 + spinlock_t *ptl;
91942 + pte_t *pte, entry;
91943 +
91944 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
91945 + entry = *pte;
91946 + if (!pte_present(entry)) {
91947 + if (!pte_none(entry)) {
91948 + BUG_ON(pte_file(entry));
91949 + free_swap_and_cache(pte_to_swp_entry(entry));
91950 + pte_clear_not_present_full(mm, address, pte, 0);
91951 + }
91952 + } else {
91953 + struct page *page;
91954 +
91955 + flush_cache_page(vma, address, pte_pfn(entry));
91956 + entry = ptep_clear_flush(vma, address, pte);
91957 + BUG_ON(pte_dirty(entry));
91958 + page = vm_normal_page(vma, address, entry);
91959 + if (page) {
91960 + update_hiwater_rss(mm);
91961 + if (PageAnon(page))
91962 + dec_mm_counter_fast(mm, MM_ANONPAGES);
91963 + else
91964 + dec_mm_counter_fast(mm, MM_FILEPAGES);
91965 + page_remove_rmap(page);
91966 + page_cache_release(page);
91967 + }
91968 + }
91969 + pte_unmap_unlock(pte, ptl);
91970 +}
91971 +
91972 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
91973 + *
91974 + * the ptl of the lower mapped page is held on entry and is not released on exit
91975 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
91976 + */
91977 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
91978 +{
91979 + struct mm_struct *mm = vma->vm_mm;
91980 + unsigned long address_m;
91981 + spinlock_t *ptl_m;
91982 + struct vm_area_struct *vma_m;
91983 + pmd_t *pmd_m;
91984 + pte_t *pte_m, entry_m;
91985 +
91986 + BUG_ON(!page_m || !PageAnon(page_m));
91987 +
91988 + vma_m = pax_find_mirror_vma(vma);
91989 + if (!vma_m)
91990 + return;
91991 +
91992 + BUG_ON(!PageLocked(page_m));
91993 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91994 + address_m = address + SEGMEXEC_TASK_SIZE;
91995 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91996 + pte_m = pte_offset_map(pmd_m, address_m);
91997 + ptl_m = pte_lockptr(mm, pmd_m);
91998 + if (ptl != ptl_m) {
91999 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
92000 + if (!pte_none(*pte_m))
92001 + goto out;
92002 + }
92003 +
92004 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
92005 + page_cache_get(page_m);
92006 + page_add_anon_rmap(page_m, vma_m, address_m);
92007 + inc_mm_counter_fast(mm, MM_ANONPAGES);
92008 + set_pte_at(mm, address_m, pte_m, entry_m);
92009 + update_mmu_cache(vma_m, address_m, pte_m);
92010 +out:
92011 + if (ptl != ptl_m)
92012 + spin_unlock(ptl_m);
92013 + pte_unmap(pte_m);
92014 + unlock_page(page_m);
92015 +}
92016 +
92017 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
92018 +{
92019 + struct mm_struct *mm = vma->vm_mm;
92020 + unsigned long address_m;
92021 + spinlock_t *ptl_m;
92022 + struct vm_area_struct *vma_m;
92023 + pmd_t *pmd_m;
92024 + pte_t *pte_m, entry_m;
92025 +
92026 + BUG_ON(!page_m || PageAnon(page_m));
92027 +
92028 + vma_m = pax_find_mirror_vma(vma);
92029 + if (!vma_m)
92030 + return;
92031 +
92032 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
92033 + address_m = address + SEGMEXEC_TASK_SIZE;
92034 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
92035 + pte_m = pte_offset_map(pmd_m, address_m);
92036 + ptl_m = pte_lockptr(mm, pmd_m);
92037 + if (ptl != ptl_m) {
92038 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
92039 + if (!pte_none(*pte_m))
92040 + goto out;
92041 + }
92042 +
92043 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
92044 + page_cache_get(page_m);
92045 + page_add_file_rmap(page_m);
92046 + inc_mm_counter_fast(mm, MM_FILEPAGES);
92047 + set_pte_at(mm, address_m, pte_m, entry_m);
92048 + update_mmu_cache(vma_m, address_m, pte_m);
92049 +out:
92050 + if (ptl != ptl_m)
92051 + spin_unlock(ptl_m);
92052 + pte_unmap(pte_m);
92053 +}
92054 +
92055 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
92056 +{
92057 + struct mm_struct *mm = vma->vm_mm;
92058 + unsigned long address_m;
92059 + spinlock_t *ptl_m;
92060 + struct vm_area_struct *vma_m;
92061 + pmd_t *pmd_m;
92062 + pte_t *pte_m, entry_m;
92063 +
92064 + vma_m = pax_find_mirror_vma(vma);
92065 + if (!vma_m)
92066 + return;
92067 +
92068 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
92069 + address_m = address + SEGMEXEC_TASK_SIZE;
92070 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
92071 + pte_m = pte_offset_map(pmd_m, address_m);
92072 + ptl_m = pte_lockptr(mm, pmd_m);
92073 + if (ptl != ptl_m) {
92074 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
92075 + if (!pte_none(*pte_m))
92076 + goto out;
92077 + }
92078 +
92079 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
92080 + set_pte_at(mm, address_m, pte_m, entry_m);
92081 +out:
92082 + if (ptl != ptl_m)
92083 + spin_unlock(ptl_m);
92084 + pte_unmap(pte_m);
92085 +}
92086 +
92087 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
92088 +{
92089 + struct page *page_m;
92090 + pte_t entry;
92091 +
92092 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
92093 + goto out;
92094 +
92095 + entry = *pte;
92096 + page_m = vm_normal_page(vma, address, entry);
92097 + if (!page_m)
92098 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
92099 + else if (PageAnon(page_m)) {
92100 + if (pax_find_mirror_vma(vma)) {
92101 + pte_unmap_unlock(pte, ptl);
92102 + lock_page(page_m);
92103 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
92104 + if (pte_same(entry, *pte))
92105 + pax_mirror_anon_pte(vma, address, page_m, ptl);
92106 + else
92107 + unlock_page(page_m);
92108 + }
92109 + } else
92110 + pax_mirror_file_pte(vma, address, page_m, ptl);
92111 +
92112 +out:
92113 + pte_unmap_unlock(pte, ptl);
92114 +}
92115 +#endif
92116 +
92117 /*
92118 * This routine handles present pages, when users try to write
92119 * to a shared page. It is done by copying the page to a new address
92120 @@ -2807,6 +3003,12 @@ gotten:
92121 */
92122 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
92123 if (likely(pte_same(*page_table, orig_pte))) {
92124 +
92125 +#ifdef CONFIG_PAX_SEGMEXEC
92126 + if (pax_find_mirror_vma(vma))
92127 + BUG_ON(!trylock_page(new_page));
92128 +#endif
92129 +
92130 if (old_page) {
92131 if (!PageAnon(old_page)) {
92132 dec_mm_counter_fast(mm, MM_FILEPAGES);
92133 @@ -2858,6 +3060,10 @@ gotten:
92134 page_remove_rmap(old_page);
92135 }
92136
92137 +#ifdef CONFIG_PAX_SEGMEXEC
92138 + pax_mirror_anon_pte(vma, address, new_page, ptl);
92139 +#endif
92140 +
92141 /* Free the old page.. */
92142 new_page = old_page;
92143 ret |= VM_FAULT_WRITE;
92144 @@ -3135,6 +3341,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
92145 swap_free(entry);
92146 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
92147 try_to_free_swap(page);
92148 +
92149 +#ifdef CONFIG_PAX_SEGMEXEC
92150 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
92151 +#endif
92152 +
92153 unlock_page(page);
92154 if (page != swapcache) {
92155 /*
92156 @@ -3158,6 +3369,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
92157
92158 /* No need to invalidate - it was non-present before */
92159 update_mmu_cache(vma, address, page_table);
92160 +
92161 +#ifdef CONFIG_PAX_SEGMEXEC
92162 + pax_mirror_anon_pte(vma, address, page, ptl);
92163 +#endif
92164 +
92165 unlock:
92166 pte_unmap_unlock(page_table, ptl);
92167 out:
92168 @@ -3177,40 +3393,6 @@ out_release:
92169 }
92170
92171 /*
92172 - * This is like a special single-page "expand_{down|up}wards()",
92173 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
92174 - * doesn't hit another vma.
92175 - */
92176 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
92177 -{
92178 - address &= PAGE_MASK;
92179 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
92180 - struct vm_area_struct *prev = vma->vm_prev;
92181 -
92182 - /*
92183 - * Is there a mapping abutting this one below?
92184 - *
92185 - * That's only ok if it's the same stack mapping
92186 - * that has gotten split..
92187 - */
92188 - if (prev && prev->vm_end == address)
92189 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
92190 -
92191 - expand_downwards(vma, address - PAGE_SIZE);
92192 - }
92193 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
92194 - struct vm_area_struct *next = vma->vm_next;
92195 -
92196 - /* As VM_GROWSDOWN but s/below/above/ */
92197 - if (next && next->vm_start == address + PAGE_SIZE)
92198 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
92199 -
92200 - expand_upwards(vma, address + PAGE_SIZE);
92201 - }
92202 - return 0;
92203 -}
92204 -
92205 -/*
92206 * We enter with non-exclusive mmap_sem (to exclude vma changes,
92207 * but allow concurrent faults), and pte mapped but not yet locked.
92208 * We return with mmap_sem still held, but pte unmapped and unlocked.
92209 @@ -3219,27 +3401,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
92210 unsigned long address, pte_t *page_table, pmd_t *pmd,
92211 unsigned int flags)
92212 {
92213 - struct page *page;
92214 + struct page *page = NULL;
92215 spinlock_t *ptl;
92216 pte_t entry;
92217
92218 - pte_unmap(page_table);
92219 -
92220 - /* Check if we need to add a guard page to the stack */
92221 - if (check_stack_guard_page(vma, address) < 0)
92222 - return VM_FAULT_SIGBUS;
92223 -
92224 - /* Use the zero-page for reads */
92225 if (!(flags & FAULT_FLAG_WRITE)) {
92226 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
92227 vma->vm_page_prot));
92228 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
92229 + ptl = pte_lockptr(mm, pmd);
92230 + spin_lock(ptl);
92231 if (!pte_none(*page_table))
92232 goto unlock;
92233 goto setpte;
92234 }
92235
92236 /* Allocate our own private page. */
92237 + pte_unmap(page_table);
92238 +
92239 if (unlikely(anon_vma_prepare(vma)))
92240 goto oom;
92241 page = alloc_zeroed_user_highpage_movable(vma, address);
92242 @@ -3263,6 +3441,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
92243 if (!pte_none(*page_table))
92244 goto release;
92245
92246 +#ifdef CONFIG_PAX_SEGMEXEC
92247 + if (pax_find_mirror_vma(vma))
92248 + BUG_ON(!trylock_page(page));
92249 +#endif
92250 +
92251 inc_mm_counter_fast(mm, MM_ANONPAGES);
92252 page_add_new_anon_rmap(page, vma, address);
92253 setpte:
92254 @@ -3270,6 +3453,12 @@ setpte:
92255
92256 /* No need to invalidate - it was non-present before */
92257 update_mmu_cache(vma, address, page_table);
92258 +
92259 +#ifdef CONFIG_PAX_SEGMEXEC
92260 + if (page)
92261 + pax_mirror_anon_pte(vma, address, page, ptl);
92262 +#endif
92263 +
92264 unlock:
92265 pte_unmap_unlock(page_table, ptl);
92266 return 0;
92267 @@ -3413,6 +3602,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92268 */
92269 /* Only go through if we didn't race with anybody else... */
92270 if (likely(pte_same(*page_table, orig_pte))) {
92271 +
92272 +#ifdef CONFIG_PAX_SEGMEXEC
92273 + if (anon && pax_find_mirror_vma(vma))
92274 + BUG_ON(!trylock_page(page));
92275 +#endif
92276 +
92277 flush_icache_page(vma, page);
92278 entry = mk_pte(page, vma->vm_page_prot);
92279 if (flags & FAULT_FLAG_WRITE)
92280 @@ -3434,6 +3629,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92281
92282 /* no need to invalidate: a not-present page won't be cached */
92283 update_mmu_cache(vma, address, page_table);
92284 +
92285 +#ifdef CONFIG_PAX_SEGMEXEC
92286 + if (anon)
92287 + pax_mirror_anon_pte(vma, address, page, ptl);
92288 + else
92289 + pax_mirror_file_pte(vma, address, page, ptl);
92290 +#endif
92291 +
92292 } else {
92293 if (cow_page)
92294 mem_cgroup_uncharge_page(cow_page);
92295 @@ -3681,6 +3884,12 @@ static int handle_pte_fault(struct mm_struct *mm,
92296 if (flags & FAULT_FLAG_WRITE)
92297 flush_tlb_fix_spurious_fault(vma, address);
92298 }
92299 +
92300 +#ifdef CONFIG_PAX_SEGMEXEC
92301 + pax_mirror_pte(vma, address, pte, pmd, ptl);
92302 + return 0;
92303 +#endif
92304 +
92305 unlock:
92306 pte_unmap_unlock(pte, ptl);
92307 return 0;
92308 @@ -3697,9 +3906,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92309 pmd_t *pmd;
92310 pte_t *pte;
92311
92312 +#ifdef CONFIG_PAX_SEGMEXEC
92313 + struct vm_area_struct *vma_m;
92314 +#endif
92315 +
92316 if (unlikely(is_vm_hugetlb_page(vma)))
92317 return hugetlb_fault(mm, vma, address, flags);
92318
92319 +#ifdef CONFIG_PAX_SEGMEXEC
92320 + vma_m = pax_find_mirror_vma(vma);
92321 + if (vma_m) {
92322 + unsigned long address_m;
92323 + pgd_t *pgd_m;
92324 + pud_t *pud_m;
92325 + pmd_t *pmd_m;
92326 +
92327 + if (vma->vm_start > vma_m->vm_start) {
92328 + address_m = address;
92329 + address -= SEGMEXEC_TASK_SIZE;
92330 + vma = vma_m;
92331 + } else
92332 + address_m = address + SEGMEXEC_TASK_SIZE;
92333 +
92334 + pgd_m = pgd_offset(mm, address_m);
92335 + pud_m = pud_alloc(mm, pgd_m, address_m);
92336 + if (!pud_m)
92337 + return VM_FAULT_OOM;
92338 + pmd_m = pmd_alloc(mm, pud_m, address_m);
92339 + if (!pmd_m)
92340 + return VM_FAULT_OOM;
92341 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
92342 + return VM_FAULT_OOM;
92343 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
92344 + }
92345 +#endif
92346 +
92347 pgd = pgd_offset(mm, address);
92348 pud = pud_alloc(mm, pgd, address);
92349 if (!pud)
92350 @@ -3830,6 +4071,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
92351 spin_unlock(&mm->page_table_lock);
92352 return 0;
92353 }
92354 +
92355 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
92356 +{
92357 + pud_t *new = pud_alloc_one(mm, address);
92358 + if (!new)
92359 + return -ENOMEM;
92360 +
92361 + smp_wmb(); /* See comment in __pte_alloc */
92362 +
92363 + spin_lock(&mm->page_table_lock);
92364 + if (pgd_present(*pgd)) /* Another has populated it */
92365 + pud_free(mm, new);
92366 + else
92367 + pgd_populate_kernel(mm, pgd, new);
92368 + spin_unlock(&mm->page_table_lock);
92369 + return 0;
92370 +}
92371 #endif /* __PAGETABLE_PUD_FOLDED */
92372
92373 #ifndef __PAGETABLE_PMD_FOLDED
92374 @@ -3860,6 +4118,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
92375 spin_unlock(&mm->page_table_lock);
92376 return 0;
92377 }
92378 +
92379 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
92380 +{
92381 + pmd_t *new = pmd_alloc_one(mm, address);
92382 + if (!new)
92383 + return -ENOMEM;
92384 +
92385 + smp_wmb(); /* See comment in __pte_alloc */
92386 +
92387 + spin_lock(&mm->page_table_lock);
92388 +#ifndef __ARCH_HAS_4LEVEL_HACK
92389 + if (pud_present(*pud)) /* Another has populated it */
92390 + pmd_free(mm, new);
92391 + else
92392 + pud_populate_kernel(mm, pud, new);
92393 +#else
92394 + if (pgd_present(*pud)) /* Another has populated it */
92395 + pmd_free(mm, new);
92396 + else
92397 + pgd_populate_kernel(mm, pud, new);
92398 +#endif /* __ARCH_HAS_4LEVEL_HACK */
92399 + spin_unlock(&mm->page_table_lock);
92400 + return 0;
92401 +}
92402 #endif /* __PAGETABLE_PMD_FOLDED */
92403
92404 #if !defined(__HAVE_ARCH_GATE_AREA)
92405 @@ -3873,7 +4155,7 @@ static int __init gate_vma_init(void)
92406 gate_vma.vm_start = FIXADDR_USER_START;
92407 gate_vma.vm_end = FIXADDR_USER_END;
92408 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
92409 - gate_vma.vm_page_prot = __P101;
92410 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
92411
92412 return 0;
92413 }
92414 @@ -4007,8 +4289,8 @@ out:
92415 return ret;
92416 }
92417
92418 -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
92419 - void *buf, int len, int write)
92420 +ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
92421 + void *buf, size_t len, int write)
92422 {
92423 resource_size_t phys_addr;
92424 unsigned long prot = 0;
92425 @@ -4034,8 +4316,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
92426 * Access another process' address space as given in mm. If non-NULL, use the
92427 * given task for page fault accounting.
92428 */
92429 -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92430 - unsigned long addr, void *buf, int len, int write)
92431 +static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92432 + unsigned long addr, void *buf, size_t len, int write)
92433 {
92434 struct vm_area_struct *vma;
92435 void *old_buf = buf;
92436 @@ -4043,7 +4325,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92437 down_read(&mm->mmap_sem);
92438 /* ignore errors, just check how much was successfully transferred */
92439 while (len) {
92440 - int bytes, ret, offset;
92441 + ssize_t bytes, ret, offset;
92442 void *maddr;
92443 struct page *page = NULL;
92444
92445 @@ -4102,8 +4384,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92446 *
92447 * The caller must hold a reference on @mm.
92448 */
92449 -int access_remote_vm(struct mm_struct *mm, unsigned long addr,
92450 - void *buf, int len, int write)
92451 +ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
92452 + void *buf, size_t len, int write)
92453 {
92454 return __access_remote_vm(NULL, mm, addr, buf, len, write);
92455 }
92456 @@ -4113,11 +4395,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
92457 * Source/target buffer must be kernel space,
92458 * Do not walk the page table directly, use get_user_pages
92459 */
92460 -int access_process_vm(struct task_struct *tsk, unsigned long addr,
92461 - void *buf, int len, int write)
92462 +ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
92463 + void *buf, size_t len, int write)
92464 {
92465 struct mm_struct *mm;
92466 - int ret;
92467 + ssize_t ret;
92468
92469 mm = get_task_mm(tsk);
92470 if (!mm)
92471 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
92472 index cb2f3dd..fb80468 100644
92473 --- a/mm/mempolicy.c
92474 +++ b/mm/mempolicy.c
92475 @@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
92476 unsigned long vmstart;
92477 unsigned long vmend;
92478
92479 +#ifdef CONFIG_PAX_SEGMEXEC
92480 + struct vm_area_struct *vma_m;
92481 +#endif
92482 +
92483 vma = find_vma(mm, start);
92484 if (!vma || vma->vm_start > start)
92485 return -EFAULT;
92486 @@ -790,6 +794,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
92487 err = vma_replace_policy(vma, new_pol);
92488 if (err)
92489 goto out;
92490 +
92491 +#ifdef CONFIG_PAX_SEGMEXEC
92492 + vma_m = pax_find_mirror_vma(vma);
92493 + if (vma_m) {
92494 + err = vma_replace_policy(vma_m, new_pol);
92495 + if (err)
92496 + goto out;
92497 + }
92498 +#endif
92499 +
92500 }
92501
92502 out:
92503 @@ -1255,6 +1269,17 @@ static long do_mbind(unsigned long start, unsigned long len,
92504
92505 if (end < start)
92506 return -EINVAL;
92507 +
92508 +#ifdef CONFIG_PAX_SEGMEXEC
92509 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
92510 + if (end > SEGMEXEC_TASK_SIZE)
92511 + return -EINVAL;
92512 + } else
92513 +#endif
92514 +
92515 + if (end > TASK_SIZE)
92516 + return -EINVAL;
92517 +
92518 if (end == start)
92519 return 0;
92520
92521 @@ -1483,8 +1508,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
92522 */
92523 tcred = __task_cred(task);
92524 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
92525 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
92526 - !capable(CAP_SYS_NICE)) {
92527 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
92528 rcu_read_unlock();
92529 err = -EPERM;
92530 goto out_put;
92531 @@ -1515,6 +1539,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
92532 goto out;
92533 }
92534
92535 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
92536 + if (mm != current->mm &&
92537 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
92538 + mmput(mm);
92539 + err = -EPERM;
92540 + goto out;
92541 + }
92542 +#endif
92543 +
92544 err = do_migrate_pages(mm, old, new,
92545 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
92546
92547 diff --git a/mm/migrate.c b/mm/migrate.c
92548 index 9194375..75c81e2 100644
92549 --- a/mm/migrate.c
92550 +++ b/mm/migrate.c
92551 @@ -1464,8 +1464,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
92552 */
92553 tcred = __task_cred(task);
92554 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
92555 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
92556 - !capable(CAP_SYS_NICE)) {
92557 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
92558 rcu_read_unlock();
92559 err = -EPERM;
92560 goto out;
92561 diff --git a/mm/mlock.c b/mm/mlock.c
92562 index 192e6ee..b044449 100644
92563 --- a/mm/mlock.c
92564 +++ b/mm/mlock.c
92565 @@ -14,6 +14,7 @@
92566 #include <linux/pagevec.h>
92567 #include <linux/mempolicy.h>
92568 #include <linux/syscalls.h>
92569 +#include <linux/security.h>
92570 #include <linux/sched.h>
92571 #include <linux/export.h>
92572 #include <linux/rmap.h>
92573 @@ -588,7 +589,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
92574 {
92575 unsigned long nstart, end, tmp;
92576 struct vm_area_struct * vma, * prev;
92577 - int error;
92578 + int error = 0;
92579
92580 VM_BUG_ON(start & ~PAGE_MASK);
92581 VM_BUG_ON(len != PAGE_ALIGN(len));
92582 @@ -597,6 +598,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
92583 return -EINVAL;
92584 if (end == start)
92585 return 0;
92586 + if (end > TASK_SIZE)
92587 + return -EINVAL;
92588 +
92589 vma = find_vma(current->mm, start);
92590 if (!vma || vma->vm_start > start)
92591 return -ENOMEM;
92592 @@ -608,6 +612,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
92593 for (nstart = start ; ; ) {
92594 vm_flags_t newflags;
92595
92596 +#ifdef CONFIG_PAX_SEGMEXEC
92597 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
92598 + break;
92599 +#endif
92600 +
92601 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
92602
92603 newflags = vma->vm_flags & ~VM_LOCKED;
92604 @@ -720,6 +729,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
92605 lock_limit >>= PAGE_SHIFT;
92606
92607 /* check against resource limits */
92608 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
92609 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
92610 error = do_mlock(start, len, 1);
92611 up_write(&current->mm->mmap_sem);
92612 @@ -754,6 +764,11 @@ static int do_mlockall(int flags)
92613 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
92614 vm_flags_t newflags;
92615
92616 +#ifdef CONFIG_PAX_SEGMEXEC
92617 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
92618 + break;
92619 +#endif
92620 +
92621 newflags = vma->vm_flags & ~VM_LOCKED;
92622 if (flags & MCL_CURRENT)
92623 newflags |= VM_LOCKED;
92624 @@ -787,6 +802,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
92625 lock_limit >>= PAGE_SHIFT;
92626
92627 ret = -ENOMEM;
92628 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
92629 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
92630 capable(CAP_IPC_LOCK))
92631 ret = do_mlockall(flags);
92632 diff --git a/mm/mmap.c b/mm/mmap.c
92633 index 546db74..650d1b9 100644
92634 --- a/mm/mmap.c
92635 +++ b/mm/mmap.c
92636 @@ -36,6 +36,7 @@
92637 #include <linux/sched/sysctl.h>
92638 #include <linux/notifier.h>
92639 #include <linux/memory.h>
92640 +#include <linux/random.h>
92641
92642 #include <asm/uaccess.h>
92643 #include <asm/cacheflush.h>
92644 @@ -52,6 +53,16 @@
92645 #define arch_rebalance_pgtables(addr, len) (addr)
92646 #endif
92647
92648 +static inline void verify_mm_writelocked(struct mm_struct *mm)
92649 +{
92650 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
92651 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
92652 + up_read(&mm->mmap_sem);
92653 + BUG();
92654 + }
92655 +#endif
92656 +}
92657 +
92658 static void unmap_region(struct mm_struct *mm,
92659 struct vm_area_struct *vma, struct vm_area_struct *prev,
92660 unsigned long start, unsigned long end);
92661 @@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
92662 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
92663 *
92664 */
92665 -pgprot_t protection_map[16] = {
92666 +pgprot_t protection_map[16] __read_only = {
92667 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
92668 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
92669 };
92670
92671 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
92672 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
92673 {
92674 - return __pgprot(pgprot_val(protection_map[vm_flags &
92675 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
92676 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
92677 pgprot_val(arch_vm_get_page_prot(vm_flags)));
92678 +
92679 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
92680 + if (!(__supported_pte_mask & _PAGE_NX) &&
92681 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
92682 + (vm_flags & (VM_READ | VM_WRITE)))
92683 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
92684 +#endif
92685 +
92686 + return prot;
92687 }
92688 EXPORT_SYMBOL(vm_get_page_prot);
92689
92690 @@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
92691 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
92692 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
92693 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
92694 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
92695 /*
92696 * Make sure vm_committed_as in one cacheline and not cacheline shared with
92697 * other variables. It can be updated by several CPUs frequently.
92698 @@ -245,6 +266,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
92699 struct vm_area_struct *next = vma->vm_next;
92700
92701 might_sleep();
92702 + BUG_ON(vma->vm_mirror);
92703 if (vma->vm_ops && vma->vm_ops->close)
92704 vma->vm_ops->close(vma);
92705 if (vma->vm_file)
92706 @@ -289,6 +311,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
92707 * not page aligned -Ram Gupta
92708 */
92709 rlim = rlimit(RLIMIT_DATA);
92710 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
92711 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
92712 (mm->end_data - mm->start_data) > rlim)
92713 goto out;
92714 @@ -939,6 +962,12 @@ static int
92715 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
92716 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
92717 {
92718 +
92719 +#ifdef CONFIG_PAX_SEGMEXEC
92720 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
92721 + return 0;
92722 +#endif
92723 +
92724 if (is_mergeable_vma(vma, file, vm_flags) &&
92725 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
92726 if (vma->vm_pgoff == vm_pgoff)
92727 @@ -958,6 +987,12 @@ static int
92728 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
92729 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
92730 {
92731 +
92732 +#ifdef CONFIG_PAX_SEGMEXEC
92733 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
92734 + return 0;
92735 +#endif
92736 +
92737 if (is_mergeable_vma(vma, file, vm_flags) &&
92738 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
92739 pgoff_t vm_pglen;
92740 @@ -1000,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
92741 struct vm_area_struct *vma_merge(struct mm_struct *mm,
92742 struct vm_area_struct *prev, unsigned long addr,
92743 unsigned long end, unsigned long vm_flags,
92744 - struct anon_vma *anon_vma, struct file *file,
92745 + struct anon_vma *anon_vma, struct file *file,
92746 pgoff_t pgoff, struct mempolicy *policy)
92747 {
92748 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
92749 struct vm_area_struct *area, *next;
92750 int err;
92751
92752 +#ifdef CONFIG_PAX_SEGMEXEC
92753 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
92754 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
92755 +
92756 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
92757 +#endif
92758 +
92759 /*
92760 * We later require that vma->vm_flags == vm_flags,
92761 * so this tests vma->vm_flags & VM_SPECIAL, too.
92762 @@ -1022,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
92763 if (next && next->vm_end == end) /* cases 6, 7, 8 */
92764 next = next->vm_next;
92765
92766 +#ifdef CONFIG_PAX_SEGMEXEC
92767 + if (prev)
92768 + prev_m = pax_find_mirror_vma(prev);
92769 + if (area)
92770 + area_m = pax_find_mirror_vma(area);
92771 + if (next)
92772 + next_m = pax_find_mirror_vma(next);
92773 +#endif
92774 +
92775 /*
92776 * Can it merge with the predecessor?
92777 */
92778 @@ -1041,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
92779 /* cases 1, 6 */
92780 err = vma_adjust(prev, prev->vm_start,
92781 next->vm_end, prev->vm_pgoff, NULL);
92782 - } else /* cases 2, 5, 7 */
92783 +
92784 +#ifdef CONFIG_PAX_SEGMEXEC
92785 + if (!err && prev_m)
92786 + err = vma_adjust(prev_m, prev_m->vm_start,
92787 + next_m->vm_end, prev_m->vm_pgoff, NULL);
92788 +#endif
92789 +
92790 + } else { /* cases 2, 5, 7 */
92791 err = vma_adjust(prev, prev->vm_start,
92792 end, prev->vm_pgoff, NULL);
92793 +
92794 +#ifdef CONFIG_PAX_SEGMEXEC
92795 + if (!err && prev_m)
92796 + err = vma_adjust(prev_m, prev_m->vm_start,
92797 + end_m, prev_m->vm_pgoff, NULL);
92798 +#endif
92799 +
92800 + }
92801 if (err)
92802 return NULL;
92803 khugepaged_enter_vma_merge(prev);
92804 @@ -1057,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
92805 mpol_equal(policy, vma_policy(next)) &&
92806 can_vma_merge_before(next, vm_flags,
92807 anon_vma, file, pgoff+pglen)) {
92808 - if (prev && addr < prev->vm_end) /* case 4 */
92809 + if (prev && addr < prev->vm_end) { /* case 4 */
92810 err = vma_adjust(prev, prev->vm_start,
92811 addr, prev->vm_pgoff, NULL);
92812 - else /* cases 3, 8 */
92813 +
92814 +#ifdef CONFIG_PAX_SEGMEXEC
92815 + if (!err && prev_m)
92816 + err = vma_adjust(prev_m, prev_m->vm_start,
92817 + addr_m, prev_m->vm_pgoff, NULL);
92818 +#endif
92819 +
92820 + } else { /* cases 3, 8 */
92821 err = vma_adjust(area, addr, next->vm_end,
92822 next->vm_pgoff - pglen, NULL);
92823 +
92824 +#ifdef CONFIG_PAX_SEGMEXEC
92825 + if (!err && area_m)
92826 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
92827 + next_m->vm_pgoff - pglen, NULL);
92828 +#endif
92829 +
92830 + }
92831 if (err)
92832 return NULL;
92833 khugepaged_enter_vma_merge(area);
92834 @@ -1171,8 +1252,10 @@ none:
92835 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
92836 struct file *file, long pages)
92837 {
92838 - const unsigned long stack_flags
92839 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
92840 +
92841 +#ifdef CONFIG_PAX_RANDMMAP
92842 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
92843 +#endif
92844
92845 mm->total_vm += pages;
92846
92847 @@ -1180,7 +1263,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
92848 mm->shared_vm += pages;
92849 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
92850 mm->exec_vm += pages;
92851 - } else if (flags & stack_flags)
92852 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
92853 mm->stack_vm += pages;
92854 }
92855 #endif /* CONFIG_PROC_FS */
92856 @@ -1218,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92857 * (the exception is when the underlying filesystem is noexec
92858 * mounted, in which case we dont add PROT_EXEC.)
92859 */
92860 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
92861 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
92862 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
92863 prot |= PROT_EXEC;
92864
92865 @@ -1244,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92866 /* Obtain the address to map to. we verify (or select) it and ensure
92867 * that it represents a valid section of the address space.
92868 */
92869 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
92870 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
92871 if (addr & ~PAGE_MASK)
92872 return addr;
92873
92874 @@ -1255,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92875 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
92876 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
92877
92878 +#ifdef CONFIG_PAX_MPROTECT
92879 + if (mm->pax_flags & MF_PAX_MPROTECT) {
92880 +
92881 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
92882 + if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
92883 + mm->binfmt->handle_mmap)
92884 + mm->binfmt->handle_mmap(file);
92885 +#endif
92886 +
92887 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
92888 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
92889 + gr_log_rwxmmap(file);
92890 +
92891 +#ifdef CONFIG_PAX_EMUPLT
92892 + vm_flags &= ~VM_EXEC;
92893 +#else
92894 + return -EPERM;
92895 +#endif
92896 +
92897 + }
92898 +
92899 + if (!(vm_flags & VM_EXEC))
92900 + vm_flags &= ~VM_MAYEXEC;
92901 +#else
92902 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
92903 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
92904 +#endif
92905 + else
92906 + vm_flags &= ~VM_MAYWRITE;
92907 + }
92908 +#endif
92909 +
92910 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
92911 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
92912 + vm_flags &= ~VM_PAGEEXEC;
92913 +#endif
92914 +
92915 if (flags & MAP_LOCKED)
92916 if (!can_do_mlock())
92917 return -EPERM;
92918 @@ -1266,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92919 locked += mm->locked_vm;
92920 lock_limit = rlimit(RLIMIT_MEMLOCK);
92921 lock_limit >>= PAGE_SHIFT;
92922 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
92923 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
92924 return -EAGAIN;
92925 }
92926 @@ -1350,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92927 vm_flags |= VM_NORESERVE;
92928 }
92929
92930 + if (!gr_acl_handle_mmap(file, prot))
92931 + return -EACCES;
92932 +
92933 addr = mmap_region(file, addr, len, vm_flags, pgoff);
92934 if (!IS_ERR_VALUE(addr) &&
92935 ((vm_flags & VM_LOCKED) ||
92936 @@ -1443,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
92937 vm_flags_t vm_flags = vma->vm_flags;
92938
92939 /* If it was private or non-writable, the write bit is already clear */
92940 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
92941 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
92942 return 0;
92943
92944 /* The backer wishes to know when pages are first written to? */
92945 @@ -1489,7 +1613,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
92946 struct rb_node **rb_link, *rb_parent;
92947 unsigned long charged = 0;
92948
92949 +#ifdef CONFIG_PAX_SEGMEXEC
92950 + struct vm_area_struct *vma_m = NULL;
92951 +#endif
92952 +
92953 + /*
92954 + * mm->mmap_sem is required to protect against another thread
92955 + * changing the mappings in case we sleep.
92956 + */
92957 + verify_mm_writelocked(mm);
92958 +
92959 /* Check against address space limit. */
92960 +
92961 +#ifdef CONFIG_PAX_RANDMMAP
92962 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
92963 +#endif
92964 +
92965 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
92966 unsigned long nr_pages;
92967
92968 @@ -1508,11 +1647,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
92969
92970 /* Clear old maps */
92971 error = -ENOMEM;
92972 -munmap_back:
92973 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
92974 if (do_munmap(mm, addr, len))
92975 return -ENOMEM;
92976 - goto munmap_back;
92977 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
92978 }
92979
92980 /*
92981 @@ -1543,6 +1681,16 @@ munmap_back:
92982 goto unacct_error;
92983 }
92984
92985 +#ifdef CONFIG_PAX_SEGMEXEC
92986 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
92987 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92988 + if (!vma_m) {
92989 + error = -ENOMEM;
92990 + goto free_vma;
92991 + }
92992 + }
92993 +#endif
92994 +
92995 vma->vm_mm = mm;
92996 vma->vm_start = addr;
92997 vma->vm_end = addr + len;
92998 @@ -1562,6 +1710,13 @@ munmap_back:
92999 if (error)
93000 goto unmap_and_free_vma;
93001
93002 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
93003 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
93004 + vma->vm_flags |= VM_PAGEEXEC;
93005 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
93006 + }
93007 +#endif
93008 +
93009 /* Can addr have changed??
93010 *
93011 * Answer: Yes, several device drivers can do it in their
93012 @@ -1595,6 +1750,12 @@ munmap_back:
93013 }
93014
93015 vma_link(mm, vma, prev, rb_link, rb_parent);
93016 +
93017 +#ifdef CONFIG_PAX_SEGMEXEC
93018 + if (vma_m)
93019 + BUG_ON(pax_mirror_vma(vma_m, vma));
93020 +#endif
93021 +
93022 /* Once vma denies write, undo our temporary denial count */
93023 if (vm_flags & VM_DENYWRITE)
93024 allow_write_access(file);
93025 @@ -1603,6 +1764,7 @@ out:
93026 perf_event_mmap(vma);
93027
93028 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
93029 + track_exec_limit(mm, addr, addr + len, vm_flags);
93030 if (vm_flags & VM_LOCKED) {
93031 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
93032 vma == get_gate_vma(current->mm)))
93033 @@ -1635,6 +1797,12 @@ unmap_and_free_vma:
93034 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
93035 charged = 0;
93036 free_vma:
93037 +
93038 +#ifdef CONFIG_PAX_SEGMEXEC
93039 + if (vma_m)
93040 + kmem_cache_free(vm_area_cachep, vma_m);
93041 +#endif
93042 +
93043 kmem_cache_free(vm_area_cachep, vma);
93044 unacct_error:
93045 if (charged)
93046 @@ -1642,7 +1810,63 @@ unacct_error:
93047 return error;
93048 }
93049
93050 -unsigned long unmapped_area(struct vm_unmapped_area_info *info)
93051 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
93052 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
93053 +{
93054 + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
93055 + return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
93056 +
93057 + return 0;
93058 +}
93059 +#endif
93060 +
93061 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
93062 +{
93063 + if (!vma) {
93064 +#ifdef CONFIG_STACK_GROWSUP
93065 + if (addr > sysctl_heap_stack_gap)
93066 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
93067 + else
93068 + vma = find_vma(current->mm, 0);
93069 + if (vma && (vma->vm_flags & VM_GROWSUP))
93070 + return false;
93071 +#endif
93072 + return true;
93073 + }
93074 +
93075 + if (addr + len > vma->vm_start)
93076 + return false;
93077 +
93078 + if (vma->vm_flags & VM_GROWSDOWN)
93079 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
93080 +#ifdef CONFIG_STACK_GROWSUP
93081 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
93082 + return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
93083 +#endif
93084 + else if (offset)
93085 + return offset <= vma->vm_start - addr - len;
93086 +
93087 + return true;
93088 +}
93089 +
93090 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
93091 +{
93092 + if (vma->vm_start < len)
93093 + return -ENOMEM;
93094 +
93095 + if (!(vma->vm_flags & VM_GROWSDOWN)) {
93096 + if (offset <= vma->vm_start - len)
93097 + return vma->vm_start - len - offset;
93098 + else
93099 + return -ENOMEM;
93100 + }
93101 +
93102 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
93103 + return vma->vm_start - len - sysctl_heap_stack_gap;
93104 + return -ENOMEM;
93105 +}
93106 +
93107 +unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
93108 {
93109 /*
93110 * We implement the search by looking for an rbtree node that
93111 @@ -1690,11 +1914,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
93112 }
93113 }
93114
93115 - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
93116 + gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
93117 check_current:
93118 /* Check if current node has a suitable gap */
93119 if (gap_start > high_limit)
93120 return -ENOMEM;
93121 +
93122 + if (gap_end - gap_start > info->threadstack_offset)
93123 + gap_start += info->threadstack_offset;
93124 + else
93125 + gap_start = gap_end;
93126 +
93127 + if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
93128 + if (gap_end - gap_start > sysctl_heap_stack_gap)
93129 + gap_start += sysctl_heap_stack_gap;
93130 + else
93131 + gap_start = gap_end;
93132 + }
93133 + if (vma->vm_flags & VM_GROWSDOWN) {
93134 + if (gap_end - gap_start > sysctl_heap_stack_gap)
93135 + gap_end -= sysctl_heap_stack_gap;
93136 + else
93137 + gap_end = gap_start;
93138 + }
93139 if (gap_end >= low_limit && gap_end - gap_start >= length)
93140 goto found;
93141
93142 @@ -1744,7 +1986,7 @@ found:
93143 return gap_start;
93144 }
93145
93146 -unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
93147 +unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
93148 {
93149 struct mm_struct *mm = current->mm;
93150 struct vm_area_struct *vma;
93151 @@ -1798,6 +2040,24 @@ check_current:
93152 gap_end = vma->vm_start;
93153 if (gap_end < low_limit)
93154 return -ENOMEM;
93155 +
93156 + if (gap_end - gap_start > info->threadstack_offset)
93157 + gap_end -= info->threadstack_offset;
93158 + else
93159 + gap_end = gap_start;
93160 +
93161 + if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
93162 + if (gap_end - gap_start > sysctl_heap_stack_gap)
93163 + gap_start += sysctl_heap_stack_gap;
93164 + else
93165 + gap_start = gap_end;
93166 + }
93167 + if (vma->vm_flags & VM_GROWSDOWN) {
93168 + if (gap_end - gap_start > sysctl_heap_stack_gap)
93169 + gap_end -= sysctl_heap_stack_gap;
93170 + else
93171 + gap_end = gap_start;
93172 + }
93173 if (gap_start <= high_limit && gap_end - gap_start >= length)
93174 goto found;
93175
93176 @@ -1861,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
93177 struct mm_struct *mm = current->mm;
93178 struct vm_area_struct *vma;
93179 struct vm_unmapped_area_info info;
93180 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
93181
93182 if (len > TASK_SIZE - mmap_min_addr)
93183 return -ENOMEM;
93184 @@ -1868,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
93185 if (flags & MAP_FIXED)
93186 return addr;
93187
93188 +#ifdef CONFIG_PAX_RANDMMAP
93189 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
93190 +#endif
93191 +
93192 if (addr) {
93193 addr = PAGE_ALIGN(addr);
93194 vma = find_vma(mm, addr);
93195 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
93196 - (!vma || addr + len <= vma->vm_start))
93197 + check_heap_stack_gap(vma, addr, len, offset))
93198 return addr;
93199 }
93200
93201 @@ -1881,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
93202 info.low_limit = mm->mmap_base;
93203 info.high_limit = TASK_SIZE;
93204 info.align_mask = 0;
93205 + info.threadstack_offset = offset;
93206 return vm_unmapped_area(&info);
93207 }
93208 #endif
93209 @@ -1899,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93210 struct mm_struct *mm = current->mm;
93211 unsigned long addr = addr0;
93212 struct vm_unmapped_area_info info;
93213 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
93214
93215 /* requested length too big for entire address space */
93216 if (len > TASK_SIZE - mmap_min_addr)
93217 @@ -1907,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93218 if (flags & MAP_FIXED)
93219 return addr;
93220
93221 +#ifdef CONFIG_PAX_RANDMMAP
93222 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
93223 +#endif
93224 +
93225 /* requesting a specific address */
93226 if (addr) {
93227 addr = PAGE_ALIGN(addr);
93228 vma = find_vma(mm, addr);
93229 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
93230 - (!vma || addr + len <= vma->vm_start))
93231 + check_heap_stack_gap(vma, addr, len, offset))
93232 return addr;
93233 }
93234
93235 @@ -1921,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93236 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
93237 info.high_limit = mm->mmap_base;
93238 info.align_mask = 0;
93239 + info.threadstack_offset = offset;
93240 addr = vm_unmapped_area(&info);
93241
93242 /*
93243 @@ -1933,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93244 VM_BUG_ON(addr != -ENOMEM);
93245 info.flags = 0;
93246 info.low_limit = TASK_UNMAPPED_BASE;
93247 +
93248 +#ifdef CONFIG_PAX_RANDMMAP
93249 + if (mm->pax_flags & MF_PAX_RANDMMAP)
93250 + info.low_limit += mm->delta_mmap;
93251 +#endif
93252 +
93253 info.high_limit = TASK_SIZE;
93254 addr = vm_unmapped_area(&info);
93255 }
93256 @@ -2034,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
93257 return vma;
93258 }
93259
93260 +#ifdef CONFIG_PAX_SEGMEXEC
93261 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
93262 +{
93263 + struct vm_area_struct *vma_m;
93264 +
93265 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
93266 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
93267 + BUG_ON(vma->vm_mirror);
93268 + return NULL;
93269 + }
93270 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
93271 + vma_m = vma->vm_mirror;
93272 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
93273 + BUG_ON(vma->vm_file != vma_m->vm_file);
93274 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
93275 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
93276 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
93277 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
93278 + return vma_m;
93279 +}
93280 +#endif
93281 +
93282 /*
93283 * Verify that the stack growth is acceptable and
93284 * update accounting. This is shared with both the
93285 @@ -2050,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
93286 return -ENOMEM;
93287
93288 /* Stack limit test */
93289 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
93290 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
93291 return -ENOMEM;
93292
93293 @@ -2060,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
93294 locked = mm->locked_vm + grow;
93295 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
93296 limit >>= PAGE_SHIFT;
93297 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
93298 if (locked > limit && !capable(CAP_IPC_LOCK))
93299 return -ENOMEM;
93300 }
93301 @@ -2089,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
93302 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
93303 * vma is the last one with address > vma->vm_end. Have to extend vma.
93304 */
93305 +#ifndef CONFIG_IA64
93306 +static
93307 +#endif
93308 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
93309 {
93310 int error;
93311 + bool locknext;
93312
93313 if (!(vma->vm_flags & VM_GROWSUP))
93314 return -EFAULT;
93315
93316 + /* Also guard against wrapping around to address 0. */
93317 + if (address < PAGE_ALIGN(address+1))
93318 + address = PAGE_ALIGN(address+1);
93319 + else
93320 + return -ENOMEM;
93321 +
93322 /*
93323 * We must make sure the anon_vma is allocated
93324 * so that the anon_vma locking is not a noop.
93325 */
93326 if (unlikely(anon_vma_prepare(vma)))
93327 return -ENOMEM;
93328 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
93329 + if (locknext && anon_vma_prepare(vma->vm_next))
93330 + return -ENOMEM;
93331 vma_lock_anon_vma(vma);
93332 + if (locknext)
93333 + vma_lock_anon_vma(vma->vm_next);
93334
93335 /*
93336 * vma->vm_start/vm_end cannot change under us because the caller
93337 * is required to hold the mmap_sem in read mode. We need the
93338 - * anon_vma lock to serialize against concurrent expand_stacks.
93339 - * Also guard against wrapping around to address 0.
93340 + * anon_vma locks to serialize against concurrent expand_stacks
93341 + * and expand_upwards.
93342 */
93343 - if (address < PAGE_ALIGN(address+4))
93344 - address = PAGE_ALIGN(address+4);
93345 - else {
93346 - vma_unlock_anon_vma(vma);
93347 - return -ENOMEM;
93348 - }
93349 error = 0;
93350
93351 /* Somebody else might have raced and expanded it already */
93352 - if (address > vma->vm_end) {
93353 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
93354 + error = -ENOMEM;
93355 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
93356 unsigned long size, grow;
93357
93358 size = address - vma->vm_start;
93359 @@ -2154,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
93360 }
93361 }
93362 }
93363 + if (locknext)
93364 + vma_unlock_anon_vma(vma->vm_next);
93365 vma_unlock_anon_vma(vma);
93366 khugepaged_enter_vma_merge(vma);
93367 validate_mm(vma->vm_mm);
93368 @@ -2168,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
93369 unsigned long address)
93370 {
93371 int error;
93372 + bool lockprev = false;
93373 + struct vm_area_struct *prev;
93374
93375 /*
93376 * We must make sure the anon_vma is allocated
93377 @@ -2181,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
93378 if (error)
93379 return error;
93380
93381 + prev = vma->vm_prev;
93382 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
93383 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
93384 +#endif
93385 + if (lockprev && anon_vma_prepare(prev))
93386 + return -ENOMEM;
93387 + if (lockprev)
93388 + vma_lock_anon_vma(prev);
93389 +
93390 vma_lock_anon_vma(vma);
93391
93392 /*
93393 @@ -2190,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
93394 */
93395
93396 /* Somebody else might have raced and expanded it already */
93397 - if (address < vma->vm_start) {
93398 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
93399 + error = -ENOMEM;
93400 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
93401 unsigned long size, grow;
93402
93403 +#ifdef CONFIG_PAX_SEGMEXEC
93404 + struct vm_area_struct *vma_m;
93405 +
93406 + vma_m = pax_find_mirror_vma(vma);
93407 +#endif
93408 +
93409 size = vma->vm_end - address;
93410 grow = (vma->vm_start - address) >> PAGE_SHIFT;
93411
93412 @@ -2217,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
93413 vma->vm_pgoff -= grow;
93414 anon_vma_interval_tree_post_update_vma(vma);
93415 vma_gap_update(vma);
93416 +
93417 +#ifdef CONFIG_PAX_SEGMEXEC
93418 + if (vma_m) {
93419 + anon_vma_interval_tree_pre_update_vma(vma_m);
93420 + vma_m->vm_start -= grow << PAGE_SHIFT;
93421 + vma_m->vm_pgoff -= grow;
93422 + anon_vma_interval_tree_post_update_vma(vma_m);
93423 + vma_gap_update(vma_m);
93424 + }
93425 +#endif
93426 +
93427 spin_unlock(&vma->vm_mm->page_table_lock);
93428
93429 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
93430 perf_event_mmap(vma);
93431 }
93432 }
93433 }
93434 vma_unlock_anon_vma(vma);
93435 + if (lockprev)
93436 + vma_unlock_anon_vma(prev);
93437 khugepaged_enter_vma_merge(vma);
93438 validate_mm(vma->vm_mm);
93439 return error;
93440 @@ -2321,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
93441 do {
93442 long nrpages = vma_pages(vma);
93443
93444 +#ifdef CONFIG_PAX_SEGMEXEC
93445 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
93446 + vma = remove_vma(vma);
93447 + continue;
93448 + }
93449 +#endif
93450 +
93451 if (vma->vm_flags & VM_ACCOUNT)
93452 nr_accounted += nrpages;
93453 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
93454 @@ -2365,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
93455 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
93456 vma->vm_prev = NULL;
93457 do {
93458 +
93459 +#ifdef CONFIG_PAX_SEGMEXEC
93460 + if (vma->vm_mirror) {
93461 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
93462 + vma->vm_mirror->vm_mirror = NULL;
93463 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
93464 + vma->vm_mirror = NULL;
93465 + }
93466 +#endif
93467 +
93468 vma_rb_erase(vma, &mm->mm_rb);
93469 mm->map_count--;
93470 tail_vma = vma;
93471 @@ -2390,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93472 struct vm_area_struct *new;
93473 int err = -ENOMEM;
93474
93475 +#ifdef CONFIG_PAX_SEGMEXEC
93476 + struct vm_area_struct *vma_m, *new_m = NULL;
93477 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
93478 +#endif
93479 +
93480 if (is_vm_hugetlb_page(vma) && (addr &
93481 ~(huge_page_mask(hstate_vma(vma)))))
93482 return -EINVAL;
93483
93484 +#ifdef CONFIG_PAX_SEGMEXEC
93485 + vma_m = pax_find_mirror_vma(vma);
93486 +#endif
93487 +
93488 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
93489 if (!new)
93490 goto out_err;
93491
93492 +#ifdef CONFIG_PAX_SEGMEXEC
93493 + if (vma_m) {
93494 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
93495 + if (!new_m) {
93496 + kmem_cache_free(vm_area_cachep, new);
93497 + goto out_err;
93498 + }
93499 + }
93500 +#endif
93501 +
93502 /* most fields are the same, copy all, and then fixup */
93503 *new = *vma;
93504
93505 @@ -2410,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93506 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
93507 }
93508
93509 +#ifdef CONFIG_PAX_SEGMEXEC
93510 + if (vma_m) {
93511 + *new_m = *vma_m;
93512 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
93513 + new_m->vm_mirror = new;
93514 + new->vm_mirror = new_m;
93515 +
93516 + if (new_below)
93517 + new_m->vm_end = addr_m;
93518 + else {
93519 + new_m->vm_start = addr_m;
93520 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
93521 + }
93522 + }
93523 +#endif
93524 +
93525 err = vma_dup_policy(vma, new);
93526 if (err)
93527 goto out_free_vma;
93528 @@ -2429,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93529 else
93530 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
93531
93532 +#ifdef CONFIG_PAX_SEGMEXEC
93533 + if (!err && vma_m) {
93534 + struct mempolicy *pol = vma_policy(new);
93535 +
93536 + if (anon_vma_clone(new_m, vma_m))
93537 + goto out_free_mpol;
93538 +
93539 + mpol_get(pol);
93540 + set_vma_policy(new_m, pol);
93541 +
93542 + if (new_m->vm_file)
93543 + get_file(new_m->vm_file);
93544 +
93545 + if (new_m->vm_ops && new_m->vm_ops->open)
93546 + new_m->vm_ops->open(new_m);
93547 +
93548 + if (new_below)
93549 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
93550 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
93551 + else
93552 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
93553 +
93554 + if (err) {
93555 + if (new_m->vm_ops && new_m->vm_ops->close)
93556 + new_m->vm_ops->close(new_m);
93557 + if (new_m->vm_file)
93558 + fput(new_m->vm_file);
93559 + mpol_put(pol);
93560 + }
93561 + }
93562 +#endif
93563 +
93564 /* Success. */
93565 if (!err)
93566 return 0;
93567 @@ -2438,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93568 new->vm_ops->close(new);
93569 if (new->vm_file)
93570 fput(new->vm_file);
93571 - unlink_anon_vmas(new);
93572 out_free_mpol:
93573 mpol_put(vma_policy(new));
93574 out_free_vma:
93575 +
93576 +#ifdef CONFIG_PAX_SEGMEXEC
93577 + if (new_m) {
93578 + unlink_anon_vmas(new_m);
93579 + kmem_cache_free(vm_area_cachep, new_m);
93580 + }
93581 +#endif
93582 +
93583 + unlink_anon_vmas(new);
93584 kmem_cache_free(vm_area_cachep, new);
93585 out_err:
93586 return err;
93587 @@ -2454,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93588 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
93589 unsigned long addr, int new_below)
93590 {
93591 +
93592 +#ifdef CONFIG_PAX_SEGMEXEC
93593 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
93594 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
93595 + if (mm->map_count >= sysctl_max_map_count-1)
93596 + return -ENOMEM;
93597 + } else
93598 +#endif
93599 +
93600 if (mm->map_count >= sysctl_max_map_count)
93601 return -ENOMEM;
93602
93603 @@ -2465,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
93604 * work. This now handles partial unmappings.
93605 * Jeremy Fitzhardinge <jeremy@goop.org>
93606 */
93607 +#ifdef CONFIG_PAX_SEGMEXEC
93608 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93609 {
93610 + int ret = __do_munmap(mm, start, len);
93611 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
93612 + return ret;
93613 +
93614 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
93615 +}
93616 +
93617 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93618 +#else
93619 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93620 +#endif
93621 +{
93622 unsigned long end;
93623 struct vm_area_struct *vma, *prev, *last;
93624
93625 + /*
93626 + * mm->mmap_sem is required to protect against another thread
93627 + * changing the mappings in case we sleep.
93628 + */
93629 + verify_mm_writelocked(mm);
93630 +
93631 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
93632 return -EINVAL;
93633
93634 @@ -2544,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93635 /* Fix up all other VM information */
93636 remove_vma_list(mm, vma);
93637
93638 + track_exec_limit(mm, start, end, 0UL);
93639 +
93640 return 0;
93641 }
93642
93643 @@ -2552,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
93644 int ret;
93645 struct mm_struct *mm = current->mm;
93646
93647 +
93648 +#ifdef CONFIG_PAX_SEGMEXEC
93649 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
93650 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
93651 + return -EINVAL;
93652 +#endif
93653 +
93654 down_write(&mm->mmap_sem);
93655 ret = do_munmap(mm, start, len);
93656 up_write(&mm->mmap_sem);
93657 @@ -2565,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
93658 return vm_munmap(addr, len);
93659 }
93660
93661 -static inline void verify_mm_writelocked(struct mm_struct *mm)
93662 -{
93663 -#ifdef CONFIG_DEBUG_VM
93664 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
93665 - WARN_ON(1);
93666 - up_read(&mm->mmap_sem);
93667 - }
93668 -#endif
93669 -}
93670 -
93671 /*
93672 * this is really a simplified "do_mmap". it only handles
93673 * anonymous maps. eventually we may be able to do some
93674 @@ -2588,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93675 struct rb_node ** rb_link, * rb_parent;
93676 pgoff_t pgoff = addr >> PAGE_SHIFT;
93677 int error;
93678 + unsigned long charged;
93679
93680 len = PAGE_ALIGN(len);
93681 if (!len)
93682 @@ -2595,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93683
93684 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
93685
93686 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
93687 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
93688 + flags &= ~VM_EXEC;
93689 +
93690 +#ifdef CONFIG_PAX_MPROTECT
93691 + if (mm->pax_flags & MF_PAX_MPROTECT)
93692 + flags &= ~VM_MAYEXEC;
93693 +#endif
93694 +
93695 + }
93696 +#endif
93697 +
93698 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
93699 if (error & ~PAGE_MASK)
93700 return error;
93701
93702 + charged = len >> PAGE_SHIFT;
93703 +
93704 /*
93705 * mlock MCL_FUTURE?
93706 */
93707 if (mm->def_flags & VM_LOCKED) {
93708 unsigned long locked, lock_limit;
93709 - locked = len >> PAGE_SHIFT;
93710 + locked = charged;
93711 locked += mm->locked_vm;
93712 lock_limit = rlimit(RLIMIT_MEMLOCK);
93713 lock_limit >>= PAGE_SHIFT;
93714 @@ -2621,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93715 /*
93716 * Clear old maps. this also does some error checking for us
93717 */
93718 - munmap_back:
93719 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
93720 if (do_munmap(mm, addr, len))
93721 return -ENOMEM;
93722 - goto munmap_back;
93723 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
93724 }
93725
93726 /* Check against address space limits *after* clearing old maps... */
93727 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
93728 + if (!may_expand_vm(mm, charged))
93729 return -ENOMEM;
93730
93731 if (mm->map_count > sysctl_max_map_count)
93732 return -ENOMEM;
93733
93734 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
93735 + if (security_vm_enough_memory_mm(mm, charged))
93736 return -ENOMEM;
93737
93738 /* Can we just expand an old private anonymous mapping? */
93739 @@ -2649,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93740 */
93741 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
93742 if (!vma) {
93743 - vm_unacct_memory(len >> PAGE_SHIFT);
93744 + vm_unacct_memory(charged);
93745 return -ENOMEM;
93746 }
93747
93748 @@ -2663,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93749 vma_link(mm, vma, prev, rb_link, rb_parent);
93750 out:
93751 perf_event_mmap(vma);
93752 - mm->total_vm += len >> PAGE_SHIFT;
93753 + mm->total_vm += charged;
93754 if (flags & VM_LOCKED)
93755 - mm->locked_vm += (len >> PAGE_SHIFT);
93756 + mm->locked_vm += charged;
93757 vma->vm_flags |= VM_SOFTDIRTY;
93758 + track_exec_limit(mm, addr, addr + len, flags);
93759 return addr;
93760 }
93761
93762 @@ -2728,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
93763 while (vma) {
93764 if (vma->vm_flags & VM_ACCOUNT)
93765 nr_accounted += vma_pages(vma);
93766 + vma->vm_mirror = NULL;
93767 vma = remove_vma(vma);
93768 }
93769 vm_unacct_memory(nr_accounted);
93770 @@ -2745,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
93771 struct vm_area_struct *prev;
93772 struct rb_node **rb_link, *rb_parent;
93773
93774 +#ifdef CONFIG_PAX_SEGMEXEC
93775 + struct vm_area_struct *vma_m = NULL;
93776 +#endif
93777 +
93778 + if (security_mmap_addr(vma->vm_start))
93779 + return -EPERM;
93780 +
93781 /*
93782 * The vm_pgoff of a purely anonymous vma should be irrelevant
93783 * until its first write fault, when page's anon_vma and index
93784 @@ -2768,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
93785 security_vm_enough_memory_mm(mm, vma_pages(vma)))
93786 return -ENOMEM;
93787
93788 +#ifdef CONFIG_PAX_SEGMEXEC
93789 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
93790 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
93791 + if (!vma_m)
93792 + return -ENOMEM;
93793 + }
93794 +#endif
93795 +
93796 vma_link(mm, vma, prev, rb_link, rb_parent);
93797 +
93798 +#ifdef CONFIG_PAX_SEGMEXEC
93799 + if (vma_m)
93800 + BUG_ON(pax_mirror_vma(vma_m, vma));
93801 +#endif
93802 +
93803 return 0;
93804 }
93805
93806 @@ -2787,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
93807 struct rb_node **rb_link, *rb_parent;
93808 bool faulted_in_anon_vma = true;
93809
93810 + BUG_ON(vma->vm_mirror);
93811 +
93812 /*
93813 * If anonymous vma has not yet been faulted, update new pgoff
93814 * to match new location, to increase its chance of merging.
93815 @@ -2851,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
93816 return NULL;
93817 }
93818
93819 +#ifdef CONFIG_PAX_SEGMEXEC
93820 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
93821 +{
93822 + struct vm_area_struct *prev_m;
93823 + struct rb_node **rb_link_m, *rb_parent_m;
93824 + struct mempolicy *pol_m;
93825 +
93826 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
93827 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
93828 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
93829 + *vma_m = *vma;
93830 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
93831 + if (anon_vma_clone(vma_m, vma))
93832 + return -ENOMEM;
93833 + pol_m = vma_policy(vma_m);
93834 + mpol_get(pol_m);
93835 + set_vma_policy(vma_m, pol_m);
93836 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
93837 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
93838 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
93839 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
93840 + if (vma_m->vm_file)
93841 + get_file(vma_m->vm_file);
93842 + if (vma_m->vm_ops && vma_m->vm_ops->open)
93843 + vma_m->vm_ops->open(vma_m);
93844 + BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
93845 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
93846 + vma_m->vm_mirror = vma;
93847 + vma->vm_mirror = vma_m;
93848 + return 0;
93849 +}
93850 +#endif
93851 +
93852 /*
93853 * Return true if the calling process may expand its vm space by the passed
93854 * number of pages
93855 @@ -2862,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
93856
93857 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
93858
93859 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
93860 if (cur + npages > lim)
93861 return 0;
93862 return 1;
93863 @@ -2932,6 +3472,22 @@ int install_special_mapping(struct mm_struct *mm,
93864 vma->vm_start = addr;
93865 vma->vm_end = addr + len;
93866
93867 +#ifdef CONFIG_PAX_MPROTECT
93868 + if (mm->pax_flags & MF_PAX_MPROTECT) {
93869 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
93870 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
93871 + return -EPERM;
93872 + if (!(vm_flags & VM_EXEC))
93873 + vm_flags &= ~VM_MAYEXEC;
93874 +#else
93875 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
93876 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
93877 +#endif
93878 + else
93879 + vm_flags &= ~VM_MAYWRITE;
93880 + }
93881 +#endif
93882 +
93883 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
93884 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
93885
93886 diff --git a/mm/mprotect.c b/mm/mprotect.c
93887 index bb53a65..249c052 100644
93888 --- a/mm/mprotect.c
93889 +++ b/mm/mprotect.c
93890 @@ -23,10 +23,18 @@
93891 #include <linux/mmu_notifier.h>
93892 #include <linux/migrate.h>
93893 #include <linux/perf_event.h>
93894 +#include <linux/sched/sysctl.h>
93895 +
93896 +#ifdef CONFIG_PAX_MPROTECT
93897 +#include <linux/elf.h>
93898 +#include <linux/binfmts.h>
93899 +#endif
93900 +
93901 #include <asm/uaccess.h>
93902 #include <asm/pgtable.h>
93903 #include <asm/cacheflush.h>
93904 #include <asm/tlbflush.h>
93905 +#include <asm/mmu_context.h>
93906
93907 #ifndef pgprot_modify
93908 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
93909 @@ -222,6 +230,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
93910 return pages;
93911 }
93912
93913 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
93914 +/* called while holding the mmap semaphor for writing except stack expansion */
93915 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
93916 +{
93917 + unsigned long oldlimit, newlimit = 0UL;
93918 +
93919 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
93920 + return;
93921 +
93922 + spin_lock(&mm->page_table_lock);
93923 + oldlimit = mm->context.user_cs_limit;
93924 + if ((prot & VM_EXEC) && oldlimit < end)
93925 + /* USER_CS limit moved up */
93926 + newlimit = end;
93927 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
93928 + /* USER_CS limit moved down */
93929 + newlimit = start;
93930 +
93931 + if (newlimit) {
93932 + mm->context.user_cs_limit = newlimit;
93933 +
93934 +#ifdef CONFIG_SMP
93935 + wmb();
93936 + cpus_clear(mm->context.cpu_user_cs_mask);
93937 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
93938 +#endif
93939 +
93940 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
93941 + }
93942 + spin_unlock(&mm->page_table_lock);
93943 + if (newlimit == end) {
93944 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
93945 +
93946 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
93947 + if (is_vm_hugetlb_page(vma))
93948 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
93949 + else
93950 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
93951 + }
93952 +}
93953 +#endif
93954 +
93955 int
93956 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93957 unsigned long start, unsigned long end, unsigned long newflags)
93958 @@ -234,11 +284,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93959 int error;
93960 int dirty_accountable = 0;
93961
93962 +#ifdef CONFIG_PAX_SEGMEXEC
93963 + struct vm_area_struct *vma_m = NULL;
93964 + unsigned long start_m, end_m;
93965 +
93966 + start_m = start + SEGMEXEC_TASK_SIZE;
93967 + end_m = end + SEGMEXEC_TASK_SIZE;
93968 +#endif
93969 +
93970 if (newflags == oldflags) {
93971 *pprev = vma;
93972 return 0;
93973 }
93974
93975 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
93976 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
93977 +
93978 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
93979 + return -ENOMEM;
93980 +
93981 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
93982 + return -ENOMEM;
93983 + }
93984 +
93985 /*
93986 * If we make a private mapping writable we increase our commit;
93987 * but (without finer accounting) cannot reduce our commit if we
93988 @@ -255,6 +323,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93989 }
93990 }
93991
93992 +#ifdef CONFIG_PAX_SEGMEXEC
93993 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
93994 + if (start != vma->vm_start) {
93995 + error = split_vma(mm, vma, start, 1);
93996 + if (error)
93997 + goto fail;
93998 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
93999 + *pprev = (*pprev)->vm_next;
94000 + }
94001 +
94002 + if (end != vma->vm_end) {
94003 + error = split_vma(mm, vma, end, 0);
94004 + if (error)
94005 + goto fail;
94006 + }
94007 +
94008 + if (pax_find_mirror_vma(vma)) {
94009 + error = __do_munmap(mm, start_m, end_m - start_m);
94010 + if (error)
94011 + goto fail;
94012 + } else {
94013 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
94014 + if (!vma_m) {
94015 + error = -ENOMEM;
94016 + goto fail;
94017 + }
94018 + vma->vm_flags = newflags;
94019 + error = pax_mirror_vma(vma_m, vma);
94020 + if (error) {
94021 + vma->vm_flags = oldflags;
94022 + goto fail;
94023 + }
94024 + }
94025 + }
94026 +#endif
94027 +
94028 /*
94029 * First try to merge with previous and/or next vma.
94030 */
94031 @@ -285,9 +389,21 @@ success:
94032 * vm_flags and vm_page_prot are protected by the mmap_sem
94033 * held in write mode.
94034 */
94035 +
94036 +#ifdef CONFIG_PAX_SEGMEXEC
94037 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
94038 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
94039 +#endif
94040 +
94041 vma->vm_flags = newflags;
94042 +
94043 +#ifdef CONFIG_PAX_MPROTECT
94044 + if (mm->binfmt && mm->binfmt->handle_mprotect)
94045 + mm->binfmt->handle_mprotect(vma, newflags);
94046 +#endif
94047 +
94048 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
94049 - vm_get_page_prot(newflags));
94050 + vm_get_page_prot(vma->vm_flags));
94051
94052 if (vma_wants_writenotify(vma)) {
94053 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
94054 @@ -326,6 +442,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
94055 end = start + len;
94056 if (end <= start)
94057 return -ENOMEM;
94058 +
94059 +#ifdef CONFIG_PAX_SEGMEXEC
94060 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
94061 + if (end > SEGMEXEC_TASK_SIZE)
94062 + return -EINVAL;
94063 + } else
94064 +#endif
94065 +
94066 + if (end > TASK_SIZE)
94067 + return -EINVAL;
94068 +
94069 if (!arch_validate_prot(prot))
94070 return -EINVAL;
94071
94072 @@ -333,7 +460,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
94073 /*
94074 * Does the application expect PROT_READ to imply PROT_EXEC:
94075 */
94076 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
94077 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
94078 prot |= PROT_EXEC;
94079
94080 vm_flags = calc_vm_prot_bits(prot);
94081 @@ -365,6 +492,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
94082 if (start > vma->vm_start)
94083 prev = vma;
94084
94085 +#ifdef CONFIG_PAX_MPROTECT
94086 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
94087 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
94088 +#endif
94089 +
94090 for (nstart = start ; ; ) {
94091 unsigned long newflags;
94092
94093 @@ -375,6 +507,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
94094
94095 /* newflags >> 4 shift VM_MAY% in place of VM_% */
94096 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
94097 + if (prot & (PROT_WRITE | PROT_EXEC))
94098 + gr_log_rwxmprotect(vma);
94099 +
94100 + error = -EACCES;
94101 + goto out;
94102 + }
94103 +
94104 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
94105 error = -EACCES;
94106 goto out;
94107 }
94108 @@ -389,6 +529,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
94109 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
94110 if (error)
94111 goto out;
94112 +
94113 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
94114 +
94115 nstart = tmp;
94116
94117 if (nstart < prev->vm_end)
94118 diff --git a/mm/mremap.c b/mm/mremap.c
94119 index 0843feb..4f5b2e6 100644
94120 --- a/mm/mremap.c
94121 +++ b/mm/mremap.c
94122 @@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
94123 continue;
94124 pte = ptep_get_and_clear(mm, old_addr, old_pte);
94125 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
94126 +
94127 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
94128 + if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
94129 + pte = pte_exprotect(pte);
94130 +#endif
94131 +
94132 pte = move_soft_dirty_pte(pte);
94133 set_pte_at(mm, new_addr, new_pte, pte);
94134 }
94135 @@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
94136 if (is_vm_hugetlb_page(vma))
94137 goto Einval;
94138
94139 +#ifdef CONFIG_PAX_SEGMEXEC
94140 + if (pax_find_mirror_vma(vma))
94141 + goto Einval;
94142 +#endif
94143 +
94144 /* We can't remap across vm area boundaries */
94145 if (old_len > vma->vm_end - addr)
94146 goto Efault;
94147 @@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
94148 unsigned long ret = -EINVAL;
94149 unsigned long charged = 0;
94150 unsigned long map_flags;
94151 + unsigned long pax_task_size = TASK_SIZE;
94152
94153 if (new_addr & ~PAGE_MASK)
94154 goto out;
94155
94156 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
94157 +#ifdef CONFIG_PAX_SEGMEXEC
94158 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
94159 + pax_task_size = SEGMEXEC_TASK_SIZE;
94160 +#endif
94161 +
94162 + pax_task_size -= PAGE_SIZE;
94163 +
94164 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
94165 goto out;
94166
94167 /* Check if the location we're moving into overlaps the
94168 * old location at all, and fail if it does.
94169 */
94170 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
94171 - goto out;
94172 -
94173 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
94174 + if (addr + old_len > new_addr && new_addr + new_len > addr)
94175 goto out;
94176
94177 ret = do_munmap(mm, new_addr, new_len);
94178 @@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
94179 unsigned long ret = -EINVAL;
94180 unsigned long charged = 0;
94181 bool locked = false;
94182 + unsigned long pax_task_size = TASK_SIZE;
94183
94184 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
94185 return ret;
94186 @@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
94187 if (!new_len)
94188 return ret;
94189
94190 +#ifdef CONFIG_PAX_SEGMEXEC
94191 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
94192 + pax_task_size = SEGMEXEC_TASK_SIZE;
94193 +#endif
94194 +
94195 + pax_task_size -= PAGE_SIZE;
94196 +
94197 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
94198 + old_len > pax_task_size || addr > pax_task_size-old_len)
94199 + return ret;
94200 +
94201 down_write(&current->mm->mmap_sem);
94202
94203 if (flags & MREMAP_FIXED) {
94204 @@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
94205 new_addr = addr;
94206 }
94207 ret = addr;
94208 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
94209 goto out;
94210 }
94211 }
94212 @@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
94213 goto out;
94214 }
94215
94216 + map_flags = vma->vm_flags;
94217 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
94218 + if (!(ret & ~PAGE_MASK)) {
94219 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
94220 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
94221 + }
94222 }
94223 out:
94224 if (ret & ~PAGE_MASK)
94225 diff --git a/mm/nommu.c b/mm/nommu.c
94226 index fec093a..8162f74 100644
94227 --- a/mm/nommu.c
94228 +++ b/mm/nommu.c
94229 @@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
94230 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
94231 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
94232 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
94233 -int heap_stack_gap = 0;
94234
94235 atomic_long_t mmap_pages_allocated;
94236
94237 @@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
94238 EXPORT_SYMBOL(find_vma);
94239
94240 /*
94241 - * find a VMA
94242 - * - we don't extend stack VMAs under NOMMU conditions
94243 - */
94244 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
94245 -{
94246 - return find_vma(mm, addr);
94247 -}
94248 -
94249 -/*
94250 * expand a stack to a given address
94251 * - not supported under NOMMU conditions
94252 */
94253 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
94254
94255 /* most fields are the same, copy all, and then fixup */
94256 *new = *vma;
94257 + INIT_LIST_HEAD(&new->anon_vma_chain);
94258 *region = *vma->vm_region;
94259 new->vm_region = region;
94260
94261 @@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
94262 }
94263 EXPORT_SYMBOL(generic_file_remap_pages);
94264
94265 -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
94266 - unsigned long addr, void *buf, int len, int write)
94267 +static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
94268 + unsigned long addr, void *buf, size_t len, int write)
94269 {
94270 struct vm_area_struct *vma;
94271
94272 @@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
94273 *
94274 * The caller must hold a reference on @mm.
94275 */
94276 -int access_remote_vm(struct mm_struct *mm, unsigned long addr,
94277 - void *buf, int len, int write)
94278 +ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
94279 + void *buf, size_t len, int write)
94280 {
94281 return __access_remote_vm(NULL, mm, addr, buf, len, write);
94282 }
94283 @@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
94284 * Access another process' address space.
94285 * - source/target buffer must be kernel space
94286 */
94287 -int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
94288 +ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
94289 {
94290 struct mm_struct *mm;
94291
94292 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
94293 index 7106cb1..0805f48 100644
94294 --- a/mm/page-writeback.c
94295 +++ b/mm/page-writeback.c
94296 @@ -685,7 +685,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
94297 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
94298 * - the bdi dirty thresh drops quickly due to change of JBOD workload
94299 */
94300 -static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
94301 +static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
94302 unsigned long thresh,
94303 unsigned long bg_thresh,
94304 unsigned long dirty,
94305 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
94306 index 56f268d..4d35ec4 100644
94307 --- a/mm/page_alloc.c
94308 +++ b/mm/page_alloc.c
94309 @@ -61,6 +61,7 @@
94310 #include <linux/page-debug-flags.h>
94311 #include <linux/hugetlb.h>
94312 #include <linux/sched/rt.h>
94313 +#include <linux/random.h>
94314
94315 #include <asm/sections.h>
94316 #include <asm/tlbflush.h>
94317 @@ -354,7 +355,7 @@ out:
94318 * This usage means that zero-order pages may not be compound.
94319 */
94320
94321 -static void free_compound_page(struct page *page)
94322 +void free_compound_page(struct page *page)
94323 {
94324 __free_pages_ok(page, compound_order(page));
94325 }
94326 @@ -712,6 +713,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
94327 int i;
94328 int bad = 0;
94329
94330 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
94331 + unsigned long index = 1UL << order;
94332 +#endif
94333 +
94334 trace_mm_page_free(page, order);
94335 kmemcheck_free_shadow(page, order);
94336
94337 @@ -728,6 +733,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
94338 debug_check_no_obj_freed(page_address(page),
94339 PAGE_SIZE << order);
94340 }
94341 +
94342 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
94343 + for (; index; --index)
94344 + sanitize_highpage(page + index - 1);
94345 +#endif
94346 +
94347 arch_free_page(page, order);
94348 kernel_map_pages(page, 1 << order, 0);
94349
94350 @@ -750,6 +761,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
94351 local_irq_restore(flags);
94352 }
94353
94354 +#ifdef CONFIG_PAX_LATENT_ENTROPY
94355 +bool __meminitdata extra_latent_entropy;
94356 +
94357 +static int __init setup_pax_extra_latent_entropy(char *str)
94358 +{
94359 + extra_latent_entropy = true;
94360 + return 0;
94361 +}
94362 +early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
94363 +
94364 +volatile u64 latent_entropy __latent_entropy;
94365 +EXPORT_SYMBOL(latent_entropy);
94366 +#endif
94367 +
94368 void __init __free_pages_bootmem(struct page *page, unsigned int order)
94369 {
94370 unsigned int nr_pages = 1 << order;
94371 @@ -765,6 +790,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
94372 __ClearPageReserved(p);
94373 set_page_count(p, 0);
94374
94375 +#ifdef CONFIG_PAX_LATENT_ENTROPY
94376 + if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
94377 + u64 hash = 0;
94378 + size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
94379 + const u64 *data = lowmem_page_address(page);
94380 +
94381 + for (index = 0; index < end; index++)
94382 + hash ^= hash + data[index];
94383 + latent_entropy ^= hash;
94384 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
94385 + }
94386 +#endif
94387 +
94388 page_zone(page)->managed_pages += nr_pages;
94389 set_page_refcounted(page);
94390 __free_pages(page, order);
94391 @@ -870,8 +908,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
94392 arch_alloc_page(page, order);
94393 kernel_map_pages(page, 1 << order, 1);
94394
94395 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
94396 if (gfp_flags & __GFP_ZERO)
94397 prep_zero_page(page, order, gfp_flags);
94398 +#endif
94399
94400 if (order && (gfp_flags & __GFP_COMP))
94401 prep_compound_page(page, order);
94402 diff --git a/mm/page_io.c b/mm/page_io.c
94403 index 8c79a47..a689e0d 100644
94404 --- a/mm/page_io.c
94405 +++ b/mm/page_io.c
94406 @@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
94407 struct file *swap_file = sis->swap_file;
94408 struct address_space *mapping = swap_file->f_mapping;
94409 struct iovec iov = {
94410 - .iov_base = kmap(page),
94411 + .iov_base = (void __force_user *)kmap(page),
94412 .iov_len = PAGE_SIZE,
94413 };
94414
94415 diff --git a/mm/percpu.c b/mm/percpu.c
94416 index 0d10def..6dc822d 100644
94417 --- a/mm/percpu.c
94418 +++ b/mm/percpu.c
94419 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
94420 static unsigned int pcpu_high_unit_cpu __read_mostly;
94421
94422 /* the address of the first chunk which starts with the kernel static area */
94423 -void *pcpu_base_addr __read_mostly;
94424 +void *pcpu_base_addr __read_only;
94425 EXPORT_SYMBOL_GPL(pcpu_base_addr);
94426
94427 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
94428 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
94429 index fd26d04..0cea1b0 100644
94430 --- a/mm/process_vm_access.c
94431 +++ b/mm/process_vm_access.c
94432 @@ -13,6 +13,7 @@
94433 #include <linux/uio.h>
94434 #include <linux/sched.h>
94435 #include <linux/highmem.h>
94436 +#include <linux/security.h>
94437 #include <linux/ptrace.h>
94438 #include <linux/slab.h>
94439 #include <linux/syscalls.h>
94440 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
94441 size_t iov_l_curr_offset = 0;
94442 ssize_t iov_len;
94443
94444 + return -ENOSYS; // PaX: until properly audited
94445 +
94446 /*
94447 * Work out how many pages of struct pages we're going to need
94448 * when eventually calling get_user_pages
94449 */
94450 for (i = 0; i < riovcnt; i++) {
94451 iov_len = rvec[i].iov_len;
94452 - if (iov_len > 0) {
94453 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
94454 - + iov_len)
94455 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
94456 - / PAGE_SIZE + 1;
94457 - nr_pages = max(nr_pages, nr_pages_iov);
94458 - }
94459 + if (iov_len <= 0)
94460 + continue;
94461 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
94462 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
94463 + nr_pages = max(nr_pages, nr_pages_iov);
94464 }
94465
94466 if (nr_pages == 0)
94467 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
94468 goto free_proc_pages;
94469 }
94470
94471 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
94472 + rc = -EPERM;
94473 + goto put_task_struct;
94474 + }
94475 +
94476 mm = mm_access(task, PTRACE_MODE_ATTACH);
94477 if (!mm || IS_ERR(mm)) {
94478 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
94479 diff --git a/mm/rmap.c b/mm/rmap.c
94480 index 068522d..f539f21 100644
94481 --- a/mm/rmap.c
94482 +++ b/mm/rmap.c
94483 @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94484 struct anon_vma *anon_vma = vma->anon_vma;
94485 struct anon_vma_chain *avc;
94486
94487 +#ifdef CONFIG_PAX_SEGMEXEC
94488 + struct anon_vma_chain *avc_m = NULL;
94489 +#endif
94490 +
94491 might_sleep();
94492 if (unlikely(!anon_vma)) {
94493 struct mm_struct *mm = vma->vm_mm;
94494 @@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94495 if (!avc)
94496 goto out_enomem;
94497
94498 +#ifdef CONFIG_PAX_SEGMEXEC
94499 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
94500 + if (!avc_m)
94501 + goto out_enomem_free_avc;
94502 +#endif
94503 +
94504 anon_vma = find_mergeable_anon_vma(vma);
94505 allocated = NULL;
94506 if (!anon_vma) {
94507 @@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94508 /* page_table_lock to protect against threads */
94509 spin_lock(&mm->page_table_lock);
94510 if (likely(!vma->anon_vma)) {
94511 +
94512 +#ifdef CONFIG_PAX_SEGMEXEC
94513 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
94514 +
94515 + if (vma_m) {
94516 + BUG_ON(vma_m->anon_vma);
94517 + vma_m->anon_vma = anon_vma;
94518 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
94519 + avc_m = NULL;
94520 + }
94521 +#endif
94522 +
94523 vma->anon_vma = anon_vma;
94524 anon_vma_chain_link(vma, avc, anon_vma);
94525 allocated = NULL;
94526 @@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94527
94528 if (unlikely(allocated))
94529 put_anon_vma(allocated);
94530 +
94531 +#ifdef CONFIG_PAX_SEGMEXEC
94532 + if (unlikely(avc_m))
94533 + anon_vma_chain_free(avc_m);
94534 +#endif
94535 +
94536 if (unlikely(avc))
94537 anon_vma_chain_free(avc);
94538 }
94539 return 0;
94540
94541 out_enomem_free_avc:
94542 +
94543 +#ifdef CONFIG_PAX_SEGMEXEC
94544 + if (avc_m)
94545 + anon_vma_chain_free(avc_m);
94546 +#endif
94547 +
94548 anon_vma_chain_free(avc);
94549 out_enomem:
94550 return -ENOMEM;
94551 @@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
94552 * Attach the anon_vmas from src to dst.
94553 * Returns 0 on success, -ENOMEM on failure.
94554 */
94555 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
94556 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
94557 {
94558 struct anon_vma_chain *avc, *pavc;
94559 struct anon_vma *root = NULL;
94560 @@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
94561 * the corresponding VMA in the parent process is attached to.
94562 * Returns 0 on success, non-zero on failure.
94563 */
94564 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
94565 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
94566 {
94567 struct anon_vma_chain *avc;
94568 struct anon_vma *anon_vma;
94569 @@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
94570 void __init anon_vma_init(void)
94571 {
94572 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
94573 - 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
94574 - anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
94575 + 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
94576 + anon_vma_ctor);
94577 + anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
94578 + SLAB_PANIC|SLAB_NO_SANITIZE);
94579 }
94580
94581 /*
94582 diff --git a/mm/shmem.c b/mm/shmem.c
94583 index 902a148..58f9d59 100644
94584 --- a/mm/shmem.c
94585 +++ b/mm/shmem.c
94586 @@ -33,7 +33,7 @@
94587 #include <linux/swap.h>
94588 #include <linux/aio.h>
94589
94590 -static struct vfsmount *shm_mnt;
94591 +struct vfsmount *shm_mnt;
94592
94593 #ifdef CONFIG_SHMEM
94594 /*
94595 @@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
94596 #define BOGO_DIRENT_SIZE 20
94597
94598 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
94599 -#define SHORT_SYMLINK_LEN 128
94600 +#define SHORT_SYMLINK_LEN 64
94601
94602 /*
94603 * shmem_fallocate and shmem_writepage communicate via inode->i_private
94604 @@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
94605 static int shmem_xattr_validate(const char *name)
94606 {
94607 struct { const char *prefix; size_t len; } arr[] = {
94608 +
94609 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94610 + { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
94611 +#endif
94612 +
94613 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
94614 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
94615 };
94616 @@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
94617 if (err)
94618 return err;
94619
94620 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94621 + if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
94622 + if (strcmp(name, XATTR_NAME_PAX_FLAGS))
94623 + return -EOPNOTSUPP;
94624 + if (size > 8)
94625 + return -EINVAL;
94626 + }
94627 +#endif
94628 +
94629 return simple_xattr_set(&info->xattrs, name, value, size, flags);
94630 }
94631
94632 @@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
94633 int err = -ENOMEM;
94634
94635 /* Round up to L1_CACHE_BYTES to resist false sharing */
94636 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
94637 - L1_CACHE_BYTES), GFP_KERNEL);
94638 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
94639 if (!sbinfo)
94640 return -ENOMEM;
94641
94642 diff --git a/mm/slab.c b/mm/slab.c
94643 index eb043bf..d82f5a8 100644
94644 --- a/mm/slab.c
94645 +++ b/mm/slab.c
94646 @@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
94647 if ((x)->max_freeable < i) \
94648 (x)->max_freeable = i; \
94649 } while (0)
94650 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
94651 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
94652 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
94653 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
94654 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
94655 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
94656 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
94657 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
94658 +#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
94659 +#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
94660 #else
94661 #define STATS_INC_ACTIVE(x) do { } while (0)
94662 #define STATS_DEC_ACTIVE(x) do { } while (0)
94663 @@ -320,6 +322,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
94664 #define STATS_INC_ALLOCMISS(x) do { } while (0)
94665 #define STATS_INC_FREEHIT(x) do { } while (0)
94666 #define STATS_INC_FREEMISS(x) do { } while (0)
94667 +#define STATS_INC_SANITIZED(x) do { } while (0)
94668 +#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
94669 #endif
94670
94671 #if DEBUG
94672 @@ -403,7 +407,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
94673 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
94674 */
94675 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
94676 - const struct page *page, void *obj)
94677 + const struct page *page, const void *obj)
94678 {
94679 u32 offset = (obj - page->s_mem);
94680 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
94681 @@ -1489,12 +1493,12 @@ void __init kmem_cache_init(void)
94682 */
94683
94684 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
94685 - kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
94686 + kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
94687
94688 if (INDEX_AC != INDEX_NODE)
94689 kmalloc_caches[INDEX_NODE] =
94690 create_kmalloc_cache("kmalloc-node",
94691 - kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
94692 + kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
94693
94694 slab_early_init = 0;
94695
94696 @@ -3428,6 +3432,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
94697 struct array_cache *ac = cpu_cache_get(cachep);
94698
94699 check_irq_off();
94700 +
94701 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
94702 + if (pax_sanitize_slab) {
94703 + if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
94704 + memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
94705 +
94706 + if (cachep->ctor)
94707 + cachep->ctor(objp);
94708 +
94709 + STATS_INC_SANITIZED(cachep);
94710 + } else
94711 + STATS_INC_NOT_SANITIZED(cachep);
94712 + }
94713 +#endif
94714 +
94715 kmemleak_free_recursive(objp, cachep->flags);
94716 objp = cache_free_debugcheck(cachep, objp, caller);
94717
94718 @@ -3656,6 +3675,7 @@ void kfree(const void *objp)
94719
94720 if (unlikely(ZERO_OR_NULL_PTR(objp)))
94721 return;
94722 + VM_BUG_ON(!virt_addr_valid(objp));
94723 local_irq_save(flags);
94724 kfree_debugcheck(objp);
94725 c = virt_to_cache(objp);
94726 @@ -4097,14 +4117,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
94727 }
94728 /* cpu stats */
94729 {
94730 - unsigned long allochit = atomic_read(&cachep->allochit);
94731 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
94732 - unsigned long freehit = atomic_read(&cachep->freehit);
94733 - unsigned long freemiss = atomic_read(&cachep->freemiss);
94734 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
94735 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
94736 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
94737 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
94738
94739 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
94740 allochit, allocmiss, freehit, freemiss);
94741 }
94742 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
94743 + {
94744 + unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
94745 + unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
94746 +
94747 + seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
94748 + }
94749 +#endif
94750 #endif
94751 }
94752
94753 @@ -4334,13 +4362,69 @@ static const struct file_operations proc_slabstats_operations = {
94754 static int __init slab_proc_init(void)
94755 {
94756 #ifdef CONFIG_DEBUG_SLAB_LEAK
94757 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
94758 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
94759 #endif
94760 return 0;
94761 }
94762 module_init(slab_proc_init);
94763 #endif
94764
94765 +bool is_usercopy_object(const void *ptr)
94766 +{
94767 + struct page *page;
94768 + struct kmem_cache *cachep;
94769 +
94770 + if (ZERO_OR_NULL_PTR(ptr))
94771 + return false;
94772 +
94773 + if (!slab_is_available())
94774 + return false;
94775 +
94776 + if (!virt_addr_valid(ptr))
94777 + return false;
94778 +
94779 + page = virt_to_head_page(ptr);
94780 +
94781 + if (!PageSlab(page))
94782 + return false;
94783 +
94784 + cachep = page->slab_cache;
94785 + return cachep->flags & SLAB_USERCOPY;
94786 +}
94787 +
94788 +#ifdef CONFIG_PAX_USERCOPY
94789 +const char *check_heap_object(const void *ptr, unsigned long n)
94790 +{
94791 + struct page *page;
94792 + struct kmem_cache *cachep;
94793 + unsigned int objnr;
94794 + unsigned long offset;
94795 +
94796 + if (ZERO_OR_NULL_PTR(ptr))
94797 + return "<null>";
94798 +
94799 + if (!virt_addr_valid(ptr))
94800 + return NULL;
94801 +
94802 + page = virt_to_head_page(ptr);
94803 +
94804 + if (!PageSlab(page))
94805 + return NULL;
94806 +
94807 + cachep = page->slab_cache;
94808 + if (!(cachep->flags & SLAB_USERCOPY))
94809 + return cachep->name;
94810 +
94811 + objnr = obj_to_index(cachep, page, ptr);
94812 + BUG_ON(objnr >= cachep->num);
94813 + offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
94814 + if (offset <= cachep->object_size && n <= cachep->object_size - offset)
94815 + return NULL;
94816 +
94817 + return cachep->name;
94818 +}
94819 +#endif
94820 +
94821 /**
94822 * ksize - get the actual amount of memory allocated for a given object
94823 * @objp: Pointer to the object
94824 diff --git a/mm/slab.h b/mm/slab.h
94825 index 0859c42..2f7b737 100644
94826 --- a/mm/slab.h
94827 +++ b/mm/slab.h
94828 @@ -32,6 +32,15 @@ extern struct list_head slab_caches;
94829 /* The slab cache that manages slab cache information */
94830 extern struct kmem_cache *kmem_cache;
94831
94832 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
94833 +#ifdef CONFIG_X86_64
94834 +#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
94835 +#else
94836 +#define PAX_MEMORY_SANITIZE_VALUE '\xff'
94837 +#endif
94838 +extern bool pax_sanitize_slab;
94839 +#endif
94840 +
94841 unsigned long calculate_alignment(unsigned long flags,
94842 unsigned long align, unsigned long size);
94843
94844 @@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
94845
94846 /* Legal flag mask for kmem_cache_create(), for various configurations */
94847 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
94848 - SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
94849 + SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
94850 + SLAB_USERCOPY | SLAB_NO_SANITIZE)
94851
94852 #if defined(CONFIG_DEBUG_SLAB)
94853 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
94854 @@ -233,6 +243,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
94855 return s;
94856
94857 page = virt_to_head_page(x);
94858 +
94859 + BUG_ON(!PageSlab(page));
94860 +
94861 cachep = page->slab_cache;
94862 if (slab_equal_or_root(cachep, s))
94863 return cachep;
94864 diff --git a/mm/slab_common.c b/mm/slab_common.c
94865 index 0b7bb39..334c328 100644
94866 --- a/mm/slab_common.c
94867 +++ b/mm/slab_common.c
94868 @@ -23,11 +23,22 @@
94869
94870 #include "slab.h"
94871
94872 -enum slab_state slab_state;
94873 +enum slab_state slab_state __read_only;
94874 LIST_HEAD(slab_caches);
94875 DEFINE_MUTEX(slab_mutex);
94876 struct kmem_cache *kmem_cache;
94877
94878 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
94879 +bool pax_sanitize_slab __read_only = true;
94880 +static int __init pax_sanitize_slab_setup(char *str)
94881 +{
94882 + pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
94883 + printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
94884 + return 1;
94885 +}
94886 +__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
94887 +#endif
94888 +
94889 #ifdef CONFIG_DEBUG_VM
94890 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
94891 size_t size)
94892 @@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
94893
94894 err = __kmem_cache_create(s, flags);
94895 if (!err) {
94896 - s->refcount = 1;
94897 + atomic_set(&s->refcount, 1);
94898 list_add(&s->list, &slab_caches);
94899 memcg_cache_list_add(memcg, s);
94900 } else {
94901 @@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
94902
94903 get_online_cpus();
94904 mutex_lock(&slab_mutex);
94905 - s->refcount--;
94906 - if (!s->refcount) {
94907 + if (atomic_dec_and_test(&s->refcount)) {
94908 list_del(&s->list);
94909
94910 if (!__kmem_cache_shutdown(s)) {
94911 @@ -305,7 +315,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
94912 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
94913 name, size, err);
94914
94915 - s->refcount = -1; /* Exempt from merging for now */
94916 + atomic_set(&s->refcount, -1); /* Exempt from merging for now */
94917 }
94918
94919 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
94920 @@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
94921
94922 create_boot_cache(s, name, size, flags);
94923 list_add(&s->list, &slab_caches);
94924 - s->refcount = 1;
94925 + atomic_set(&s->refcount, 1);
94926 return s;
94927 }
94928
94929 @@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
94930 EXPORT_SYMBOL(kmalloc_dma_caches);
94931 #endif
94932
94933 +#ifdef CONFIG_PAX_USERCOPY_SLABS
94934 +struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
94935 +EXPORT_SYMBOL(kmalloc_usercopy_caches);
94936 +#endif
94937 +
94938 /*
94939 * Conversion table for small slabs sizes / 8 to the index in the
94940 * kmalloc array. This is necessary for slabs < 192 since we have non power
94941 @@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
94942 return kmalloc_dma_caches[index];
94943
94944 #endif
94945 +
94946 +#ifdef CONFIG_PAX_USERCOPY_SLABS
94947 + if (unlikely((flags & GFP_USERCOPY)))
94948 + return kmalloc_usercopy_caches[index];
94949 +
94950 +#endif
94951 +
94952 return kmalloc_caches[index];
94953 }
94954
94955 @@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsigned long flags)
94956 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
94957 if (!kmalloc_caches[i]) {
94958 kmalloc_caches[i] = create_kmalloc_cache(NULL,
94959 - 1 << i, flags);
94960 + 1 << i, SLAB_USERCOPY | flags);
94961 }
94962
94963 /*
94964 @@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsigned long flags)
94965 * earlier power of two caches
94966 */
94967 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
94968 - kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
94969 + kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
94970
94971 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
94972 - kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
94973 + kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
94974 }
94975
94976 /* Kmalloc array is now usable */
94977 @@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsigned long flags)
94978 }
94979 }
94980 #endif
94981 +
94982 +#ifdef CONFIG_PAX_USERCOPY_SLABS
94983 + for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
94984 + struct kmem_cache *s = kmalloc_caches[i];
94985 +
94986 + if (s) {
94987 + int size = kmalloc_size(i);
94988 + char *n = kasprintf(GFP_NOWAIT,
94989 + "usercopy-kmalloc-%d", size);
94990 +
94991 + BUG_ON(!n);
94992 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
94993 + size, SLAB_USERCOPY | flags);
94994 + }
94995 + }
94996 +#endif
94997 +
94998 }
94999 #endif /* !CONFIG_SLOB */
95000
95001 @@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_file *m)
95002 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
95003 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
95004 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
95005 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
95006 + seq_puts(m, " : pax <sanitized> <not_sanitized>");
95007 +#endif
95008 #endif
95009 seq_putc(m, '\n');
95010 }
95011 diff --git a/mm/slob.c b/mm/slob.c
95012 index 4bf8809..98a6914 100644
95013 --- a/mm/slob.c
95014 +++ b/mm/slob.c
95015 @@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
95016 /*
95017 * Return the size of a slob block.
95018 */
95019 -static slobidx_t slob_units(slob_t *s)
95020 +static slobidx_t slob_units(const slob_t *s)
95021 {
95022 if (s->units > 0)
95023 return s->units;
95024 @@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
95025 /*
95026 * Return the next free slob block pointer after this one.
95027 */
95028 -static slob_t *slob_next(slob_t *s)
95029 +static slob_t *slob_next(const slob_t *s)
95030 {
95031 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
95032 slobidx_t next;
95033 @@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
95034 /*
95035 * Returns true if s is the last free block in its page.
95036 */
95037 -static int slob_last(slob_t *s)
95038 +static int slob_last(const slob_t *s)
95039 {
95040 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
95041 }
95042
95043 -static void *slob_new_pages(gfp_t gfp, int order, int node)
95044 +static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
95045 {
95046 - void *page;
95047 + struct page *page;
95048
95049 #ifdef CONFIG_NUMA
95050 if (node != NUMA_NO_NODE)
95051 @@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
95052 if (!page)
95053 return NULL;
95054
95055 - return page_address(page);
95056 + __SetPageSlab(page);
95057 + return page;
95058 }
95059
95060 -static void slob_free_pages(void *b, int order)
95061 +static void slob_free_pages(struct page *sp, int order)
95062 {
95063 if (current->reclaim_state)
95064 current->reclaim_state->reclaimed_slab += 1 << order;
95065 - free_pages((unsigned long)b, order);
95066 + __ClearPageSlab(sp);
95067 + page_mapcount_reset(sp);
95068 + sp->private = 0;
95069 + __free_pages(sp, order);
95070 }
95071
95072 /*
95073 @@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
95074
95075 /* Not enough space: must allocate a new page */
95076 if (!b) {
95077 - b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
95078 - if (!b)
95079 + sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
95080 + if (!sp)
95081 return NULL;
95082 - sp = virt_to_page(b);
95083 - __SetPageSlab(sp);
95084 + b = page_address(sp);
95085
95086 spin_lock_irqsave(&slob_lock, flags);
95087 sp->units = SLOB_UNITS(PAGE_SIZE);
95088 sp->freelist = b;
95089 + sp->private = 0;
95090 INIT_LIST_HEAD(&sp->list);
95091 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
95092 set_slob_page_free(sp, slob_list);
95093 @@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
95094 if (slob_page_free(sp))
95095 clear_slob_page_free(sp);
95096 spin_unlock_irqrestore(&slob_lock, flags);
95097 - __ClearPageSlab(sp);
95098 - page_mapcount_reset(sp);
95099 - slob_free_pages(b, 0);
95100 + slob_free_pages(sp, 0);
95101 return;
95102 }
95103
95104 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
95105 + if (pax_sanitize_slab)
95106 + memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
95107 +#endif
95108 +
95109 if (!slob_page_free(sp)) {
95110 /* This slob page is about to become partially free. Easy! */
95111 sp->units = units;
95112 @@ -424,11 +431,10 @@ out:
95113 */
95114
95115 static __always_inline void *
95116 -__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
95117 +__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
95118 {
95119 - unsigned int *m;
95120 - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
95121 - void *ret;
95122 + slob_t *m;
95123 + void *ret = NULL;
95124
95125 gfp &= gfp_allowed_mask;
95126
95127 @@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
95128
95129 if (!m)
95130 return NULL;
95131 - *m = size;
95132 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
95133 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
95134 + m[0].units = size;
95135 + m[1].units = align;
95136 ret = (void *)m + align;
95137
95138 trace_kmalloc_node(caller, ret,
95139 size, size + align, gfp, node);
95140 } else {
95141 unsigned int order = get_order(size);
95142 + struct page *page;
95143
95144 if (likely(order))
95145 gfp |= __GFP_COMP;
95146 - ret = slob_new_pages(gfp, order, node);
95147 + page = slob_new_pages(gfp, order, node);
95148 + if (page) {
95149 + ret = page_address(page);
95150 + page->private = size;
95151 + }
95152
95153 trace_kmalloc_node(caller, ret,
95154 size, PAGE_SIZE << order, gfp, node);
95155 }
95156
95157 - kmemleak_alloc(ret, size, 1, gfp);
95158 + return ret;
95159 +}
95160 +
95161 +static __always_inline void *
95162 +__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
95163 +{
95164 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
95165 + void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
95166 +
95167 + if (!ZERO_OR_NULL_PTR(ret))
95168 + kmemleak_alloc(ret, size, 1, gfp);
95169 return ret;
95170 }
95171
95172 @@ -493,34 +517,112 @@ void kfree(const void *block)
95173 return;
95174 kmemleak_free(block);
95175
95176 + VM_BUG_ON(!virt_addr_valid(block));
95177 sp = virt_to_page(block);
95178 - if (PageSlab(sp)) {
95179 + VM_BUG_ON(!PageSlab(sp));
95180 + if (!sp->private) {
95181 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
95182 - unsigned int *m = (unsigned int *)(block - align);
95183 - slob_free(m, *m + align);
95184 - } else
95185 + slob_t *m = (slob_t *)(block - align);
95186 + slob_free(m, m[0].units + align);
95187 + } else {
95188 + __ClearPageSlab(sp);
95189 + page_mapcount_reset(sp);
95190 + sp->private = 0;
95191 __free_pages(sp, compound_order(sp));
95192 + }
95193 }
95194 EXPORT_SYMBOL(kfree);
95195
95196 +bool is_usercopy_object(const void *ptr)
95197 +{
95198 + if (!slab_is_available())
95199 + return false;
95200 +
95201 + // PAX: TODO
95202 +
95203 + return false;
95204 +}
95205 +
95206 +#ifdef CONFIG_PAX_USERCOPY
95207 +const char *check_heap_object(const void *ptr, unsigned long n)
95208 +{
95209 + struct page *page;
95210 + const slob_t *free;
95211 + const void *base;
95212 + unsigned long flags;
95213 +
95214 + if (ZERO_OR_NULL_PTR(ptr))
95215 + return "<null>";
95216 +
95217 + if (!virt_addr_valid(ptr))
95218 + return NULL;
95219 +
95220 + page = virt_to_head_page(ptr);
95221 + if (!PageSlab(page))
95222 + return NULL;
95223 +
95224 + if (page->private) {
95225 + base = page;
95226 + if (base <= ptr && n <= page->private - (ptr - base))
95227 + return NULL;
95228 + return "<slob>";
95229 + }
95230 +
95231 + /* some tricky double walking to find the chunk */
95232 + spin_lock_irqsave(&slob_lock, flags);
95233 + base = (void *)((unsigned long)ptr & PAGE_MASK);
95234 + free = page->freelist;
95235 +
95236 + while (!slob_last(free) && (void *)free <= ptr) {
95237 + base = free + slob_units(free);
95238 + free = slob_next(free);
95239 + }
95240 +
95241 + while (base < (void *)free) {
95242 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
95243 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
95244 + int offset;
95245 +
95246 + if (ptr < base + align)
95247 + break;
95248 +
95249 + offset = ptr - base - align;
95250 + if (offset >= m) {
95251 + base += size;
95252 + continue;
95253 + }
95254 +
95255 + if (n > m - offset)
95256 + break;
95257 +
95258 + spin_unlock_irqrestore(&slob_lock, flags);
95259 + return NULL;
95260 + }
95261 +
95262 + spin_unlock_irqrestore(&slob_lock, flags);
95263 + return "<slob>";
95264 +}
95265 +#endif
95266 +
95267 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
95268 size_t ksize(const void *block)
95269 {
95270 struct page *sp;
95271 int align;
95272 - unsigned int *m;
95273 + slob_t *m;
95274
95275 BUG_ON(!block);
95276 if (unlikely(block == ZERO_SIZE_PTR))
95277 return 0;
95278
95279 sp = virt_to_page(block);
95280 - if (unlikely(!PageSlab(sp)))
95281 - return PAGE_SIZE << compound_order(sp);
95282 + VM_BUG_ON(!PageSlab(sp));
95283 + if (sp->private)
95284 + return sp->private;
95285
95286 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
95287 - m = (unsigned int *)(block - align);
95288 - return SLOB_UNITS(*m) * SLOB_UNIT;
95289 + m = (slob_t *)(block - align);
95290 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
95291 }
95292 EXPORT_SYMBOL(ksize);
95293
95294 @@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
95295
95296 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
95297 {
95298 - void *b;
95299 + void *b = NULL;
95300
95301 flags &= gfp_allowed_mask;
95302
95303 lockdep_trace_alloc(flags);
95304
95305 +#ifdef CONFIG_PAX_USERCOPY_SLABS
95306 + b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
95307 +#else
95308 if (c->size < PAGE_SIZE) {
95309 b = slob_alloc(c->size, flags, c->align, node);
95310 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
95311 SLOB_UNITS(c->size) * SLOB_UNIT,
95312 flags, node);
95313 } else {
95314 - b = slob_new_pages(flags, get_order(c->size), node);
95315 + struct page *sp;
95316 +
95317 + sp = slob_new_pages(flags, get_order(c->size), node);
95318 + if (sp) {
95319 + b = page_address(sp);
95320 + sp->private = c->size;
95321 + }
95322 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
95323 PAGE_SIZE << get_order(c->size),
95324 flags, node);
95325 }
95326 +#endif
95327
95328 if (b && c->ctor)
95329 c->ctor(b);
95330 @@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
95331
95332 static void __kmem_cache_free(void *b, int size)
95333 {
95334 - if (size < PAGE_SIZE)
95335 + struct page *sp;
95336 +
95337 + sp = virt_to_page(b);
95338 + BUG_ON(!PageSlab(sp));
95339 + if (!sp->private)
95340 slob_free(b, size);
95341 else
95342 - slob_free_pages(b, get_order(size));
95343 + slob_free_pages(sp, get_order(size));
95344 }
95345
95346 static void kmem_rcu_free(struct rcu_head *head)
95347 @@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
95348
95349 void kmem_cache_free(struct kmem_cache *c, void *b)
95350 {
95351 + int size = c->size;
95352 +
95353 +#ifdef CONFIG_PAX_USERCOPY_SLABS
95354 + if (size + c->align < PAGE_SIZE) {
95355 + size += c->align;
95356 + b -= c->align;
95357 + }
95358 +#endif
95359 +
95360 kmemleak_free_recursive(b, c->flags);
95361 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
95362 struct slob_rcu *slob_rcu;
95363 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
95364 - slob_rcu->size = c->size;
95365 + slob_rcu = b + (size - sizeof(struct slob_rcu));
95366 + slob_rcu->size = size;
95367 call_rcu(&slob_rcu->head, kmem_rcu_free);
95368 } else {
95369 - __kmem_cache_free(b, c->size);
95370 + __kmem_cache_free(b, size);
95371 }
95372
95373 +#ifdef CONFIG_PAX_USERCOPY_SLABS
95374 + trace_kfree(_RET_IP_, b);
95375 +#else
95376 trace_kmem_cache_free(_RET_IP_, b);
95377 +#endif
95378 +
95379 }
95380 EXPORT_SYMBOL(kmem_cache_free);
95381
95382 diff --git a/mm/slub.c b/mm/slub.c
95383 index 89490d9..c7b226a 100644
95384 --- a/mm/slub.c
95385 +++ b/mm/slub.c
95386 @@ -207,7 +207,7 @@ struct track {
95387
95388 enum track_item { TRACK_ALLOC, TRACK_FREE };
95389
95390 -#ifdef CONFIG_SYSFS
95391 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95392 static int sysfs_slab_add(struct kmem_cache *);
95393 static int sysfs_slab_alias(struct kmem_cache *, const char *);
95394 static void sysfs_slab_remove(struct kmem_cache *);
95395 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
95396 if (!t->addr)
95397 return;
95398
95399 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
95400 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
95401 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
95402 #ifdef CONFIG_STACKTRACE
95403 {
95404 @@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
95405
95406 slab_free_hook(s, x);
95407
95408 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
95409 + if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
95410 + memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
95411 + if (s->ctor)
95412 + s->ctor(x);
95413 + }
95414 +#endif
95415 +
95416 redo:
95417 /*
95418 * Determine the currently cpus per cpu slab.
95419 @@ -2710,7 +2718,7 @@ static int slub_min_objects;
95420 * Merge control. If this is set then no merging of slab caches will occur.
95421 * (Could be removed. This was introduced to pacify the merge skeptics.)
95422 */
95423 -static int slub_nomerge;
95424 +static int slub_nomerge = 1;
95425
95426 /*
95427 * Calculate the order of allocation given an slab object size.
95428 @@ -2987,6 +2995,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
95429 s->inuse = size;
95430
95431 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
95432 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
95433 + (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
95434 +#endif
95435 s->ctor)) {
95436 /*
95437 * Relocate free pointer after the object if it is not
95438 @@ -3332,6 +3343,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
95439 EXPORT_SYMBOL(__kmalloc_node);
95440 #endif
95441
95442 +bool is_usercopy_object(const void *ptr)
95443 +{
95444 + struct page *page;
95445 + struct kmem_cache *s;
95446 +
95447 + if (ZERO_OR_NULL_PTR(ptr))
95448 + return false;
95449 +
95450 + if (!slab_is_available())
95451 + return false;
95452 +
95453 + if (!virt_addr_valid(ptr))
95454 + return false;
95455 +
95456 + page = virt_to_head_page(ptr);
95457 +
95458 + if (!PageSlab(page))
95459 + return false;
95460 +
95461 + s = page->slab_cache;
95462 + return s->flags & SLAB_USERCOPY;
95463 +}
95464 +
95465 +#ifdef CONFIG_PAX_USERCOPY
95466 +const char *check_heap_object(const void *ptr, unsigned long n)
95467 +{
95468 + struct page *page;
95469 + struct kmem_cache *s;
95470 + unsigned long offset;
95471 +
95472 + if (ZERO_OR_NULL_PTR(ptr))
95473 + return "<null>";
95474 +
95475 + if (!virt_addr_valid(ptr))
95476 + return NULL;
95477 +
95478 + page = virt_to_head_page(ptr);
95479 +
95480 + if (!PageSlab(page))
95481 + return NULL;
95482 +
95483 + s = page->slab_cache;
95484 + if (!(s->flags & SLAB_USERCOPY))
95485 + return s->name;
95486 +
95487 + offset = (ptr - page_address(page)) % s->size;
95488 + if (offset <= s->object_size && n <= s->object_size - offset)
95489 + return NULL;
95490 +
95491 + return s->name;
95492 +}
95493 +#endif
95494 +
95495 size_t ksize(const void *object)
95496 {
95497 struct page *page;
95498 @@ -3360,6 +3424,7 @@ void kfree(const void *x)
95499 if (unlikely(ZERO_OR_NULL_PTR(x)))
95500 return;
95501
95502 + VM_BUG_ON(!virt_addr_valid(x));
95503 page = virt_to_head_page(x);
95504 if (unlikely(!PageSlab(page))) {
95505 BUG_ON(!PageCompound(page));
95506 @@ -3665,7 +3730,7 @@ static int slab_unmergeable(struct kmem_cache *s)
95507 /*
95508 * We may have set a slab to be unmergeable during bootstrap.
95509 */
95510 - if (s->refcount < 0)
95511 + if (atomic_read(&s->refcount) < 0)
95512 return 1;
95513
95514 return 0;
95515 @@ -3723,7 +3788,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
95516
95517 s = find_mergeable(memcg, size, align, flags, name, ctor);
95518 if (s) {
95519 - s->refcount++;
95520 + atomic_inc(&s->refcount);
95521 /*
95522 * Adjust the object sizes so that we clear
95523 * the complete object on kzalloc.
95524 @@ -3732,7 +3797,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
95525 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
95526
95527 if (sysfs_slab_alias(s, name)) {
95528 - s->refcount--;
95529 + atomic_dec(&s->refcount);
95530 s = NULL;
95531 }
95532 }
95533 @@ -3852,7 +3917,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
95534 }
95535 #endif
95536
95537 -#ifdef CONFIG_SYSFS
95538 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95539 static int count_inuse(struct page *page)
95540 {
95541 return page->inuse;
95542 @@ -4241,12 +4306,12 @@ static void resiliency_test(void)
95543 validate_slab_cache(kmalloc_caches[9]);
95544 }
95545 #else
95546 -#ifdef CONFIG_SYSFS
95547 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95548 static void resiliency_test(void) {};
95549 #endif
95550 #endif
95551
95552 -#ifdef CONFIG_SYSFS
95553 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95554 enum slab_stat_type {
95555 SL_ALL, /* All slabs */
95556 SL_PARTIAL, /* Only partially allocated slabs */
95557 @@ -4492,7 +4557,7 @@ SLAB_ATTR_RO(ctor);
95558
95559 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
95560 {
95561 - return sprintf(buf, "%d\n", s->refcount - 1);
95562 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
95563 }
95564 SLAB_ATTR_RO(aliases);
95565
95566 @@ -4580,6 +4645,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
95567 SLAB_ATTR_RO(cache_dma);
95568 #endif
95569
95570 +#ifdef CONFIG_PAX_USERCOPY_SLABS
95571 +static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
95572 +{
95573 + return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
95574 +}
95575 +SLAB_ATTR_RO(usercopy);
95576 +#endif
95577 +
95578 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
95579 {
95580 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
95581 @@ -4914,6 +4987,9 @@ static struct attribute *slab_attrs[] = {
95582 #ifdef CONFIG_ZONE_DMA
95583 &cache_dma_attr.attr,
95584 #endif
95585 +#ifdef CONFIG_PAX_USERCOPY_SLABS
95586 + &usercopy_attr.attr,
95587 +#endif
95588 #ifdef CONFIG_NUMA
95589 &remote_node_defrag_ratio_attr.attr,
95590 #endif
95591 @@ -5146,6 +5222,7 @@ static char *create_unique_id(struct kmem_cache *s)
95592 return name;
95593 }
95594
95595 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95596 static int sysfs_slab_add(struct kmem_cache *s)
95597 {
95598 int err;
95599 @@ -5169,7 +5246,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
95600 }
95601
95602 s->kobj.kset = slab_kset;
95603 - err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
95604 + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
95605 if (err) {
95606 kobject_put(&s->kobj);
95607 return err;
95608 @@ -5203,6 +5280,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
95609 kobject_del(&s->kobj);
95610 kobject_put(&s->kobj);
95611 }
95612 +#endif
95613
95614 /*
95615 * Need to buffer aliases during bootup until sysfs becomes
95616 @@ -5216,6 +5294,7 @@ struct saved_alias {
95617
95618 static struct saved_alias *alias_list;
95619
95620 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95621 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
95622 {
95623 struct saved_alias *al;
95624 @@ -5238,6 +5317,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
95625 alias_list = al;
95626 return 0;
95627 }
95628 +#endif
95629
95630 static int __init slab_sysfs_init(void)
95631 {
95632 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
95633 index 27eeab3..7c3f7f2 100644
95634 --- a/mm/sparse-vmemmap.c
95635 +++ b/mm/sparse-vmemmap.c
95636 @@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
95637 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
95638 if (!p)
95639 return NULL;
95640 - pud_populate(&init_mm, pud, p);
95641 + pud_populate_kernel(&init_mm, pud, p);
95642 }
95643 return pud;
95644 }
95645 @@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
95646 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
95647 if (!p)
95648 return NULL;
95649 - pgd_populate(&init_mm, pgd, p);
95650 + pgd_populate_kernel(&init_mm, pgd, p);
95651 }
95652 return pgd;
95653 }
95654 diff --git a/mm/sparse.c b/mm/sparse.c
95655 index 8cc7be0..d0f7d7a 100644
95656 --- a/mm/sparse.c
95657 +++ b/mm/sparse.c
95658 @@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
95659
95660 for (i = 0; i < PAGES_PER_SECTION; i++) {
95661 if (PageHWPoison(&memmap[i])) {
95662 - atomic_long_sub(1, &num_poisoned_pages);
95663 + atomic_long_sub_unchecked(1, &num_poisoned_pages);
95664 ClearPageHWPoison(&memmap[i]);
95665 }
95666 }
95667 diff --git a/mm/swap.c b/mm/swap.c
95668 index 84b26aa..ce39899 100644
95669 --- a/mm/swap.c
95670 +++ b/mm/swap.c
95671 @@ -77,6 +77,8 @@ static void __put_compound_page(struct page *page)
95672
95673 __page_cache_release(page);
95674 dtor = get_compound_page_dtor(page);
95675 + if (!PageHuge(page))
95676 + BUG_ON(dtor != free_compound_page);
95677 (*dtor)(page);
95678 }
95679
95680 diff --git a/mm/swapfile.c b/mm/swapfile.c
95681 index 461fce2..363ae44 100644
95682 --- a/mm/swapfile.c
95683 +++ b/mm/swapfile.c
95684 @@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
95685
95686 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
95687 /* Activity counter to indicate that a swapon or swapoff has occurred */
95688 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
95689 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
95690
95691 static inline unsigned char swap_count(unsigned char ent)
95692 {
95693 @@ -1958,7 +1958,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
95694 spin_unlock(&swap_lock);
95695
95696 err = 0;
95697 - atomic_inc(&proc_poll_event);
95698 + atomic_inc_unchecked(&proc_poll_event);
95699 wake_up_interruptible(&proc_poll_wait);
95700
95701 out_dput:
95702 @@ -1975,8 +1975,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
95703
95704 poll_wait(file, &proc_poll_wait, wait);
95705
95706 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
95707 - seq->poll_event = atomic_read(&proc_poll_event);
95708 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
95709 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
95710 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
95711 }
95712
95713 @@ -2074,7 +2074,7 @@ static int swaps_open(struct inode *inode, struct file *file)
95714 return ret;
95715
95716 seq = file->private_data;
95717 - seq->poll_event = atomic_read(&proc_poll_event);
95718 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
95719 return 0;
95720 }
95721
95722 @@ -2533,7 +2533,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
95723 (frontswap_map) ? "FS" : "");
95724
95725 mutex_unlock(&swapon_mutex);
95726 - atomic_inc(&proc_poll_event);
95727 + atomic_inc_unchecked(&proc_poll_event);
95728 wake_up_interruptible(&proc_poll_wait);
95729
95730 if (S_ISREG(inode->i_mode))
95731 diff --git a/mm/util.c b/mm/util.c
95732 index 808f375..e4764b5 100644
95733 --- a/mm/util.c
95734 +++ b/mm/util.c
95735 @@ -297,6 +297,12 @@ done:
95736 void arch_pick_mmap_layout(struct mm_struct *mm)
95737 {
95738 mm->mmap_base = TASK_UNMAPPED_BASE;
95739 +
95740 +#ifdef CONFIG_PAX_RANDMMAP
95741 + if (mm->pax_flags & MF_PAX_RANDMMAP)
95742 + mm->mmap_base += mm->delta_mmap;
95743 +#endif
95744 +
95745 mm->get_unmapped_area = arch_get_unmapped_area;
95746 }
95747 #endif
95748 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
95749 index 0fdf968..d6686e8 100644
95750 --- a/mm/vmalloc.c
95751 +++ b/mm/vmalloc.c
95752 @@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
95753
95754 pte = pte_offset_kernel(pmd, addr);
95755 do {
95756 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
95757 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
95758 +
95759 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
95760 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
95761 + BUG_ON(!pte_exec(*pte));
95762 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
95763 + continue;
95764 + }
95765 +#endif
95766 +
95767 + {
95768 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
95769 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
95770 + }
95771 } while (pte++, addr += PAGE_SIZE, addr != end);
95772 }
95773
95774 @@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
95775 pte = pte_alloc_kernel(pmd, addr);
95776 if (!pte)
95777 return -ENOMEM;
95778 +
95779 + pax_open_kernel();
95780 do {
95781 struct page *page = pages[*nr];
95782
95783 - if (WARN_ON(!pte_none(*pte)))
95784 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
95785 + if (pgprot_val(prot) & _PAGE_NX)
95786 +#endif
95787 +
95788 + if (!pte_none(*pte)) {
95789 + pax_close_kernel();
95790 + WARN_ON(1);
95791 return -EBUSY;
95792 - if (WARN_ON(!page))
95793 + }
95794 + if (!page) {
95795 + pax_close_kernel();
95796 + WARN_ON(1);
95797 return -ENOMEM;
95798 + }
95799 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
95800 (*nr)++;
95801 } while (pte++, addr += PAGE_SIZE, addr != end);
95802 + pax_close_kernel();
95803 return 0;
95804 }
95805
95806 @@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
95807 pmd_t *pmd;
95808 unsigned long next;
95809
95810 - pmd = pmd_alloc(&init_mm, pud, addr);
95811 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95812 if (!pmd)
95813 return -ENOMEM;
95814 do {
95815 @@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
95816 pud_t *pud;
95817 unsigned long next;
95818
95819 - pud = pud_alloc(&init_mm, pgd, addr);
95820 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
95821 if (!pud)
95822 return -ENOMEM;
95823 do {
95824 @@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
95825 if (addr >= MODULES_VADDR && addr < MODULES_END)
95826 return 1;
95827 #endif
95828 +
95829 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
95830 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
95831 + return 1;
95832 +#endif
95833 +
95834 return is_vmalloc_addr(x);
95835 }
95836
95837 @@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
95838
95839 if (!pgd_none(*pgd)) {
95840 pud_t *pud = pud_offset(pgd, addr);
95841 +#ifdef CONFIG_X86
95842 + if (!pud_large(*pud))
95843 +#endif
95844 if (!pud_none(*pud)) {
95845 pmd_t *pmd = pmd_offset(pud, addr);
95846 +#ifdef CONFIG_X86
95847 + if (!pmd_large(*pmd))
95848 +#endif
95849 if (!pmd_none(*pmd)) {
95850 pte_t *ptep, pte;
95851
95852 @@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
95853 struct vm_struct *area;
95854
95855 BUG_ON(in_interrupt());
95856 +
95857 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
95858 + if (flags & VM_KERNEXEC) {
95859 + if (start != VMALLOC_START || end != VMALLOC_END)
95860 + return NULL;
95861 + start = (unsigned long)MODULES_EXEC_VADDR;
95862 + end = (unsigned long)MODULES_EXEC_END;
95863 + }
95864 +#endif
95865 +
95866 if (flags & VM_IOREMAP)
95867 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
95868
95869 @@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count,
95870 if (count > totalram_pages)
95871 return NULL;
95872
95873 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
95874 + if (!(pgprot_val(prot) & _PAGE_NX))
95875 + flags |= VM_KERNEXEC;
95876 +#endif
95877 +
95878 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
95879 __builtin_return_address(0));
95880 if (!area)
95881 @@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
95882 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
95883 goto fail;
95884
95885 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
95886 + if (!(pgprot_val(prot) & _PAGE_NX))
95887 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
95888 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
95889 + else
95890 +#endif
95891 +
95892 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
95893 start, end, node, gfp_mask, caller);
95894 if (!area)
95895 @@ -1810,10 +1868,9 @@ EXPORT_SYMBOL(vzalloc_node);
95896 * For tight control over page level allocator and protection flags
95897 * use __vmalloc() instead.
95898 */
95899 -
95900 void *vmalloc_exec(unsigned long size)
95901 {
95902 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
95903 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
95904 NUMA_NO_NODE, __builtin_return_address(0));
95905 }
95906
95907 @@ -2120,6 +2177,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
95908 {
95909 struct vm_struct *area;
95910
95911 + BUG_ON(vma->vm_mirror);
95912 +
95913 size = PAGE_ALIGN(size);
95914
95915 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
95916 @@ -2602,7 +2661,11 @@ static int s_show(struct seq_file *m, void *p)
95917 v->addr, v->addr + v->size, v->size);
95918
95919 if (v->caller)
95920 +#ifdef CONFIG_GRKERNSEC_HIDESYM
95921 + seq_printf(m, " %pK", v->caller);
95922 +#else
95923 seq_printf(m, " %pS", v->caller);
95924 +#endif
95925
95926 if (v->nr_pages)
95927 seq_printf(m, " pages=%d", v->nr_pages);
95928 diff --git a/mm/vmstat.c b/mm/vmstat.c
95929 index 7249614..2639fc7 100644
95930 --- a/mm/vmstat.c
95931 +++ b/mm/vmstat.c
95932 @@ -20,6 +20,7 @@
95933 #include <linux/writeback.h>
95934 #include <linux/compaction.h>
95935 #include <linux/mm_inline.h>
95936 +#include <linux/grsecurity.h>
95937
95938 #include "internal.h"
95939
95940 @@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
95941 *
95942 * vm_stat contains the global counters
95943 */
95944 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
95945 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
95946 EXPORT_SYMBOL(vm_stat);
95947
95948 #ifdef CONFIG_SMP
95949 @@ -423,7 +424,7 @@ static inline void fold_diff(int *diff)
95950
95951 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
95952 if (diff[i])
95953 - atomic_long_add(diff[i], &vm_stat[i]);
95954 + atomic_long_add_unchecked(diff[i], &vm_stat[i]);
95955 }
95956
95957 /*
95958 @@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void)
95959 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
95960 if (v) {
95961
95962 - atomic_long_add(v, &zone->vm_stat[i]);
95963 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95964 global_diff[i] += v;
95965 #ifdef CONFIG_NUMA
95966 /* 3 seconds idle till flush */
95967 @@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu)
95968
95969 v = p->vm_stat_diff[i];
95970 p->vm_stat_diff[i] = 0;
95971 - atomic_long_add(v, &zone->vm_stat[i]);
95972 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95973 global_diff[i] += v;
95974 }
95975 }
95976 @@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
95977 if (pset->vm_stat_diff[i]) {
95978 int v = pset->vm_stat_diff[i];
95979 pset->vm_stat_diff[i] = 0;
95980 - atomic_long_add(v, &zone->vm_stat[i]);
95981 - atomic_long_add(v, &vm_stat[i]);
95982 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95983 + atomic_long_add_unchecked(v, &vm_stat[i]);
95984 }
95985 }
95986 #endif
95987 @@ -1148,10 +1149,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
95988 stat_items_size += sizeof(struct vm_event_state);
95989 #endif
95990
95991 - v = kmalloc(stat_items_size, GFP_KERNEL);
95992 + v = kzalloc(stat_items_size, GFP_KERNEL);
95993 m->private = v;
95994 if (!v)
95995 return ERR_PTR(-ENOMEM);
95996 +
95997 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
95998 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
95999 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
96000 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
96001 + && !in_group_p(grsec_proc_gid)
96002 +#endif
96003 + )
96004 + return (unsigned long *)m->private + *pos;
96005 +#endif
96006 +#endif
96007 +
96008 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
96009 v[i] = global_page_state(i);
96010 v += NR_VM_ZONE_STAT_ITEMS;
96011 @@ -1300,10 +1313,16 @@ static int __init setup_vmstat(void)
96012 put_online_cpus();
96013 #endif
96014 #ifdef CONFIG_PROC_FS
96015 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
96016 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
96017 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
96018 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
96019 + {
96020 + mode_t gr_mode = S_IRUGO;
96021 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
96022 + gr_mode = S_IRUSR;
96023 +#endif
96024 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
96025 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
96026 + proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
96027 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
96028 + }
96029 #endif
96030 return 0;
96031 }
96032 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
96033 index b3d17d1..e8e4cdd 100644
96034 --- a/net/8021q/vlan.c
96035 +++ b/net/8021q/vlan.c
96036 @@ -472,7 +472,7 @@ out:
96037 return NOTIFY_DONE;
96038 }
96039
96040 -static struct notifier_block vlan_notifier_block __read_mostly = {
96041 +static struct notifier_block vlan_notifier_block = {
96042 .notifier_call = vlan_device_event,
96043 };
96044
96045 @@ -547,8 +547,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
96046 err = -EPERM;
96047 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
96048 break;
96049 - if ((args.u.name_type >= 0) &&
96050 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
96051 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
96052 struct vlan_net *vn;
96053
96054 vn = net_generic(net, vlan_net_id);
96055 diff --git a/net/9p/client.c b/net/9p/client.c
96056 index ee8fd6b..0469d50 100644
96057 --- a/net/9p/client.c
96058 +++ b/net/9p/client.c
96059 @@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
96060 len - inline_len);
96061 } else {
96062 err = copy_from_user(ename + inline_len,
96063 - uidata, len - inline_len);
96064 + (char __force_user *)uidata, len - inline_len);
96065 if (err) {
96066 err = -EFAULT;
96067 goto out_err;
96068 @@ -1563,7 +1563,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
96069 kernel_buf = 1;
96070 indata = data;
96071 } else
96072 - indata = (__force char *)udata;
96073 + indata = (__force_kernel char *)udata;
96074 /*
96075 * response header len is 11
96076 * PDU Header(7) + IO Size (4)
96077 @@ -1638,7 +1638,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
96078 kernel_buf = 1;
96079 odata = data;
96080 } else
96081 - odata = (char *)udata;
96082 + odata = (char __force_kernel *)udata;
96083 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
96084 P9_ZC_HDR_SZ, kernel_buf, "dqd",
96085 fid->fid, offset, rsize);
96086 diff --git a/net/9p/mod.c b/net/9p/mod.c
96087 index 6ab36ae..6f1841b 100644
96088 --- a/net/9p/mod.c
96089 +++ b/net/9p/mod.c
96090 @@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
96091 void v9fs_register_trans(struct p9_trans_module *m)
96092 {
96093 spin_lock(&v9fs_trans_lock);
96094 - list_add_tail(&m->list, &v9fs_trans_list);
96095 + pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
96096 spin_unlock(&v9fs_trans_lock);
96097 }
96098 EXPORT_SYMBOL(v9fs_register_trans);
96099 @@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
96100 void v9fs_unregister_trans(struct p9_trans_module *m)
96101 {
96102 spin_lock(&v9fs_trans_lock);
96103 - list_del_init(&m->list);
96104 + pax_list_del_init((struct list_head *)&m->list);
96105 spin_unlock(&v9fs_trans_lock);
96106 }
96107 EXPORT_SYMBOL(v9fs_unregister_trans);
96108 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
96109 index 9321a77..ed2f256 100644
96110 --- a/net/9p/trans_fd.c
96111 +++ b/net/9p/trans_fd.c
96112 @@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
96113 oldfs = get_fs();
96114 set_fs(get_ds());
96115 /* The cast to a user pointer is valid due to the set_fs() */
96116 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
96117 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
96118 set_fs(oldfs);
96119
96120 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
96121 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
96122 index 876fbe8..8bbea9f 100644
96123 --- a/net/atm/atm_misc.c
96124 +++ b/net/atm/atm_misc.c
96125 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
96126 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
96127 return 1;
96128 atm_return(vcc, truesize);
96129 - atomic_inc(&vcc->stats->rx_drop);
96130 + atomic_inc_unchecked(&vcc->stats->rx_drop);
96131 return 0;
96132 }
96133 EXPORT_SYMBOL(atm_charge);
96134 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
96135 }
96136 }
96137 atm_return(vcc, guess);
96138 - atomic_inc(&vcc->stats->rx_drop);
96139 + atomic_inc_unchecked(&vcc->stats->rx_drop);
96140 return NULL;
96141 }
96142 EXPORT_SYMBOL(atm_alloc_charge);
96143 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
96144
96145 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
96146 {
96147 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
96148 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
96149 __SONET_ITEMS
96150 #undef __HANDLE_ITEM
96151 }
96152 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
96153
96154 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
96155 {
96156 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
96157 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
96158 __SONET_ITEMS
96159 #undef __HANDLE_ITEM
96160 }
96161 diff --git a/net/atm/lec.c b/net/atm/lec.c
96162 index f23916b..dd4d26b 100644
96163 --- a/net/atm/lec.c
96164 +++ b/net/atm/lec.c
96165 @@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
96166 }
96167
96168 static struct lane2_ops lane2_ops = {
96169 - lane2_resolve, /* resolve, spec 3.1.3 */
96170 - lane2_associate_req, /* associate_req, spec 3.1.4 */
96171 - NULL /* associate indicator, spec 3.1.5 */
96172 + .resolve = lane2_resolve,
96173 + .associate_req = lane2_associate_req,
96174 + .associate_indicator = NULL
96175 };
96176
96177 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
96178 diff --git a/net/atm/lec.h b/net/atm/lec.h
96179 index 4149db1..f2ab682 100644
96180 --- a/net/atm/lec.h
96181 +++ b/net/atm/lec.h
96182 @@ -48,7 +48,7 @@ struct lane2_ops {
96183 const u8 *tlvs, u32 sizeoftlvs);
96184 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
96185 const u8 *tlvs, u32 sizeoftlvs);
96186 -};
96187 +} __no_const;
96188
96189 /*
96190 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
96191 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
96192 index d1b2d9a..d549f7f 100644
96193 --- a/net/atm/mpoa_caches.c
96194 +++ b/net/atm/mpoa_caches.c
96195 @@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
96196
96197
96198 static struct in_cache_ops ingress_ops = {
96199 - in_cache_add_entry, /* add_entry */
96200 - in_cache_get, /* get */
96201 - in_cache_get_with_mask, /* get_with_mask */
96202 - in_cache_get_by_vcc, /* get_by_vcc */
96203 - in_cache_put, /* put */
96204 - in_cache_remove_entry, /* remove_entry */
96205 - cache_hit, /* cache_hit */
96206 - clear_count_and_expired, /* clear_count */
96207 - check_resolving_entries, /* check_resolving */
96208 - refresh_entries, /* refresh */
96209 - in_destroy_cache /* destroy_cache */
96210 + .add_entry = in_cache_add_entry,
96211 + .get = in_cache_get,
96212 + .get_with_mask = in_cache_get_with_mask,
96213 + .get_by_vcc = in_cache_get_by_vcc,
96214 + .put = in_cache_put,
96215 + .remove_entry = in_cache_remove_entry,
96216 + .cache_hit = cache_hit,
96217 + .clear_count = clear_count_and_expired,
96218 + .check_resolving = check_resolving_entries,
96219 + .refresh = refresh_entries,
96220 + .destroy_cache = in_destroy_cache
96221 };
96222
96223 static struct eg_cache_ops egress_ops = {
96224 - eg_cache_add_entry, /* add_entry */
96225 - eg_cache_get_by_cache_id, /* get_by_cache_id */
96226 - eg_cache_get_by_tag, /* get_by_tag */
96227 - eg_cache_get_by_vcc, /* get_by_vcc */
96228 - eg_cache_get_by_src_ip, /* get_by_src_ip */
96229 - eg_cache_put, /* put */
96230 - eg_cache_remove_entry, /* remove_entry */
96231 - update_eg_cache_entry, /* update */
96232 - clear_expired, /* clear_expired */
96233 - eg_destroy_cache /* destroy_cache */
96234 + .add_entry = eg_cache_add_entry,
96235 + .get_by_cache_id = eg_cache_get_by_cache_id,
96236 + .get_by_tag = eg_cache_get_by_tag,
96237 + .get_by_vcc = eg_cache_get_by_vcc,
96238 + .get_by_src_ip = eg_cache_get_by_src_ip,
96239 + .put = eg_cache_put,
96240 + .remove_entry = eg_cache_remove_entry,
96241 + .update = update_eg_cache_entry,
96242 + .clear_expired = clear_expired,
96243 + .destroy_cache = eg_destroy_cache
96244 };
96245
96246
96247 diff --git a/net/atm/proc.c b/net/atm/proc.c
96248 index bbb6461..cf04016 100644
96249 --- a/net/atm/proc.c
96250 +++ b/net/atm/proc.c
96251 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
96252 const struct k_atm_aal_stats *stats)
96253 {
96254 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
96255 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
96256 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
96257 - atomic_read(&stats->rx_drop));
96258 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
96259 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
96260 + atomic_read_unchecked(&stats->rx_drop));
96261 }
96262
96263 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
96264 diff --git a/net/atm/resources.c b/net/atm/resources.c
96265 index 0447d5d..3cf4728 100644
96266 --- a/net/atm/resources.c
96267 +++ b/net/atm/resources.c
96268 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
96269 static void copy_aal_stats(struct k_atm_aal_stats *from,
96270 struct atm_aal_stats *to)
96271 {
96272 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
96273 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
96274 __AAL_STAT_ITEMS
96275 #undef __HANDLE_ITEM
96276 }
96277 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
96278 static void subtract_aal_stats(struct k_atm_aal_stats *from,
96279 struct atm_aal_stats *to)
96280 {
96281 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
96282 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
96283 __AAL_STAT_ITEMS
96284 #undef __HANDLE_ITEM
96285 }
96286 diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
96287 index 919a5ce..cc6b444 100644
96288 --- a/net/ax25/sysctl_net_ax25.c
96289 +++ b/net/ax25/sysctl_net_ax25.c
96290 @@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
96291 {
96292 char path[sizeof("net/ax25/") + IFNAMSIZ];
96293 int k;
96294 - struct ctl_table *table;
96295 + ctl_table_no_const *table;
96296
96297 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
96298 if (!table)
96299 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
96300 index f7270b9..cd0d879 100644
96301 --- a/net/batman-adv/bat_iv_ogm.c
96302 +++ b/net/batman-adv/bat_iv_ogm.c
96303 @@ -307,7 +307,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
96304
96305 /* randomize initial seqno to avoid collision */
96306 get_random_bytes(&random_seqno, sizeof(random_seqno));
96307 - atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
96308 + atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
96309
96310 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
96311 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
96312 @@ -894,9 +894,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
96313 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
96314
96315 /* change sequence number to network order */
96316 - seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
96317 + seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
96318 batadv_ogm_packet->seqno = htonl(seqno);
96319 - atomic_inc(&hard_iface->bat_iv.ogm_seqno);
96320 + atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
96321
96322 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
96323 batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
96324 @@ -1261,7 +1261,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
96325 return;
96326
96327 /* could be changed by schedule_own_packet() */
96328 - if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
96329 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
96330
96331 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
96332 has_directlink_flag = 1;
96333 diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
96334 index 6ddb614..ca7e886 100644
96335 --- a/net/batman-adv/fragmentation.c
96336 +++ b/net/batman-adv/fragmentation.c
96337 @@ -447,7 +447,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
96338 frag_header.packet_type = BATADV_UNICAST_FRAG;
96339 frag_header.version = BATADV_COMPAT_VERSION;
96340 frag_header.ttl = BATADV_TTL;
96341 - frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
96342 + frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
96343 frag_header.reserved = 0;
96344 frag_header.no = 0;
96345 frag_header.total_size = htons(skb->len);
96346 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
96347 index a8f99d1..11797ef 100644
96348 --- a/net/batman-adv/soft-interface.c
96349 +++ b/net/batman-adv/soft-interface.c
96350 @@ -278,7 +278,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
96351 primary_if->net_dev->dev_addr, ETH_ALEN);
96352
96353 /* set broadcast sequence number */
96354 - seqno = atomic_inc_return(&bat_priv->bcast_seqno);
96355 + seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
96356 bcast_packet->seqno = htonl(seqno);
96357
96358 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
96359 @@ -688,7 +688,7 @@ static int batadv_softif_init_late(struct net_device *dev)
96360 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
96361
96362 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
96363 - atomic_set(&bat_priv->bcast_seqno, 1);
96364 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
96365 atomic_set(&bat_priv->tt.vn, 0);
96366 atomic_set(&bat_priv->tt.local_changes, 0);
96367 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
96368 @@ -700,7 +700,7 @@ static int batadv_softif_init_late(struct net_device *dev)
96369
96370 /* randomize initial seqno to avoid collision */
96371 get_random_bytes(&random_seqno, sizeof(random_seqno));
96372 - atomic_set(&bat_priv->frag_seqno, random_seqno);
96373 + atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
96374
96375 bat_priv->primary_if = NULL;
96376 bat_priv->num_ifaces = 0;
96377 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
96378 index 91dd369..9c25750 100644
96379 --- a/net/batman-adv/types.h
96380 +++ b/net/batman-adv/types.h
96381 @@ -56,7 +56,7 @@
96382 struct batadv_hard_iface_bat_iv {
96383 unsigned char *ogm_buff;
96384 int ogm_buff_len;
96385 - atomic_t ogm_seqno;
96386 + atomic_unchecked_t ogm_seqno;
96387 };
96388
96389 /**
96390 @@ -673,7 +673,7 @@ struct batadv_priv {
96391 atomic_t bonding;
96392 atomic_t fragmentation;
96393 atomic_t packet_size_max;
96394 - atomic_t frag_seqno;
96395 + atomic_unchecked_t frag_seqno;
96396 #ifdef CONFIG_BATMAN_ADV_BLA
96397 atomic_t bridge_loop_avoidance;
96398 #endif
96399 @@ -687,7 +687,7 @@ struct batadv_priv {
96400 #ifdef CONFIG_BATMAN_ADV_DEBUG
96401 atomic_t log_level;
96402 #endif
96403 - atomic_t bcast_seqno;
96404 + atomic_unchecked_t bcast_seqno;
96405 atomic_t bcast_queue_left;
96406 atomic_t batman_queue_left;
96407 char num_ifaces;
96408 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
96409 index 7552f9e..074ce29 100644
96410 --- a/net/bluetooth/hci_sock.c
96411 +++ b/net/bluetooth/hci_sock.c
96412 @@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
96413 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
96414 }
96415
96416 - len = min_t(unsigned int, len, sizeof(uf));
96417 + len = min((size_t)len, sizeof(uf));
96418 if (copy_from_user(&uf, optval, len)) {
96419 err = -EFAULT;
96420 break;
96421 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
96422 index 4af3821..f2ba46c 100644
96423 --- a/net/bluetooth/l2cap_core.c
96424 +++ b/net/bluetooth/l2cap_core.c
96425 @@ -3500,8 +3500,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
96426 break;
96427
96428 case L2CAP_CONF_RFC:
96429 - if (olen == sizeof(rfc))
96430 - memcpy(&rfc, (void *)val, olen);
96431 + if (olen != sizeof(rfc))
96432 + break;
96433 +
96434 + memcpy(&rfc, (void *)val, olen);
96435
96436 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
96437 rfc.mode != chan->mode)
96438 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
96439 index 7cc24d2..e83f531 100644
96440 --- a/net/bluetooth/l2cap_sock.c
96441 +++ b/net/bluetooth/l2cap_sock.c
96442 @@ -545,7 +545,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
96443 struct sock *sk = sock->sk;
96444 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
96445 struct l2cap_options opts;
96446 - int len, err = 0;
96447 + int err = 0;
96448 + size_t len = optlen;
96449 u32 opt;
96450
96451 BT_DBG("sk %p", sk);
96452 @@ -567,7 +568,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
96453 opts.max_tx = chan->max_tx;
96454 opts.txwin_size = chan->tx_win;
96455
96456 - len = min_t(unsigned int, sizeof(opts), optlen);
96457 + len = min(sizeof(opts), len);
96458 if (copy_from_user((char *) &opts, optval, len)) {
96459 err = -EFAULT;
96460 break;
96461 @@ -647,7 +648,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
96462 struct bt_security sec;
96463 struct bt_power pwr;
96464 struct l2cap_conn *conn;
96465 - int len, err = 0;
96466 + int err = 0;
96467 + size_t len = optlen;
96468 u32 opt;
96469
96470 BT_DBG("sk %p", sk);
96471 @@ -670,7 +672,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
96472
96473 sec.level = BT_SECURITY_LOW;
96474
96475 - len = min_t(unsigned int, sizeof(sec), optlen);
96476 + len = min(sizeof(sec), len);
96477 if (copy_from_user((char *) &sec, optval, len)) {
96478 err = -EFAULT;
96479 break;
96480 @@ -770,7 +772,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
96481
96482 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
96483
96484 - len = min_t(unsigned int, sizeof(pwr), optlen);
96485 + len = min(sizeof(pwr), len);
96486 if (copy_from_user((char *) &pwr, optval, len)) {
96487 err = -EFAULT;
96488 break;
96489 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
96490 index 3c2d3e4..884855a 100644
96491 --- a/net/bluetooth/rfcomm/sock.c
96492 +++ b/net/bluetooth/rfcomm/sock.c
96493 @@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
96494 struct sock *sk = sock->sk;
96495 struct bt_security sec;
96496 int err = 0;
96497 - size_t len;
96498 + size_t len = optlen;
96499 u32 opt;
96500
96501 BT_DBG("sk %p", sk);
96502 @@ -694,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
96503
96504 sec.level = BT_SECURITY_LOW;
96505
96506 - len = min_t(unsigned int, sizeof(sec), optlen);
96507 + len = min(sizeof(sec), len);
96508 if (copy_from_user((char *) &sec, optval, len)) {
96509 err = -EFAULT;
96510 break;
96511 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
96512 index 84fcf9f..e389b27 100644
96513 --- a/net/bluetooth/rfcomm/tty.c
96514 +++ b/net/bluetooth/rfcomm/tty.c
96515 @@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
96516 BT_DBG("tty %p id %d", tty, tty->index);
96517
96518 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
96519 - dev->channel, dev->port.count);
96520 + dev->channel, atomic_read(&dev->port.count));
96521
96522 err = tty_port_open(&dev->port, tty, filp);
96523 if (err)
96524 @@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
96525 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
96526
96527 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
96528 - dev->port.count);
96529 + atomic_read(&dev->port.count));
96530
96531 tty_port_close(&dev->port, tty, filp);
96532 }
96533 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
96534 index ac78024..161a80c 100644
96535 --- a/net/bridge/netfilter/ebtables.c
96536 +++ b/net/bridge/netfilter/ebtables.c
96537 @@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
96538 tmp.valid_hooks = t->table->valid_hooks;
96539 }
96540 mutex_unlock(&ebt_mutex);
96541 - if (copy_to_user(user, &tmp, *len) != 0){
96542 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
96543 BUGPRINT("c2u Didn't work\n");
96544 ret = -EFAULT;
96545 break;
96546 @@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
96547 goto out;
96548 tmp.valid_hooks = t->valid_hooks;
96549
96550 - if (copy_to_user(user, &tmp, *len) != 0) {
96551 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
96552 ret = -EFAULT;
96553 break;
96554 }
96555 @@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
96556 tmp.entries_size = t->table->entries_size;
96557 tmp.valid_hooks = t->table->valid_hooks;
96558
96559 - if (copy_to_user(user, &tmp, *len) != 0) {
96560 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
96561 ret = -EFAULT;
96562 break;
96563 }
96564 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
96565 index 0f45522..dab651f 100644
96566 --- a/net/caif/cfctrl.c
96567 +++ b/net/caif/cfctrl.c
96568 @@ -10,6 +10,7 @@
96569 #include <linux/spinlock.h>
96570 #include <linux/slab.h>
96571 #include <linux/pkt_sched.h>
96572 +#include <linux/sched.h>
96573 #include <net/caif/caif_layer.h>
96574 #include <net/caif/cfpkt.h>
96575 #include <net/caif/cfctrl.h>
96576 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
96577 memset(&dev_info, 0, sizeof(dev_info));
96578 dev_info.id = 0xff;
96579 cfsrvl_init(&this->serv, 0, &dev_info, false);
96580 - atomic_set(&this->req_seq_no, 1);
96581 - atomic_set(&this->rsp_seq_no, 1);
96582 + atomic_set_unchecked(&this->req_seq_no, 1);
96583 + atomic_set_unchecked(&this->rsp_seq_no, 1);
96584 this->serv.layer.receive = cfctrl_recv;
96585 sprintf(this->serv.layer.name, "ctrl");
96586 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
96587 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
96588 struct cfctrl_request_info *req)
96589 {
96590 spin_lock_bh(&ctrl->info_list_lock);
96591 - atomic_inc(&ctrl->req_seq_no);
96592 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
96593 + atomic_inc_unchecked(&ctrl->req_seq_no);
96594 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
96595 list_add_tail(&req->list, &ctrl->list);
96596 spin_unlock_bh(&ctrl->info_list_lock);
96597 }
96598 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
96599 if (p != first)
96600 pr_warn("Requests are not received in order\n");
96601
96602 - atomic_set(&ctrl->rsp_seq_no,
96603 + atomic_set_unchecked(&ctrl->rsp_seq_no,
96604 p->sequence_no);
96605 list_del(&p->list);
96606 goto out;
96607 diff --git a/net/can/af_can.c b/net/can/af_can.c
96608 index a27f8aa..67174a3 100644
96609 --- a/net/can/af_can.c
96610 +++ b/net/can/af_can.c
96611 @@ -863,7 +863,7 @@ static const struct net_proto_family can_family_ops = {
96612 };
96613
96614 /* notifier block for netdevice event */
96615 -static struct notifier_block can_netdev_notifier __read_mostly = {
96616 +static struct notifier_block can_netdev_notifier = {
96617 .notifier_call = can_notifier,
96618 };
96619
96620 diff --git a/net/can/gw.c b/net/can/gw.c
96621 index 3f9b0f3..fc6d4fa 100644
96622 --- a/net/can/gw.c
96623 +++ b/net/can/gw.c
96624 @@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
96625 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
96626
96627 static HLIST_HEAD(cgw_list);
96628 -static struct notifier_block notifier;
96629
96630 static struct kmem_cache *cgw_cache __read_mostly;
96631
96632 @@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
96633 return err;
96634 }
96635
96636 +static struct notifier_block notifier = {
96637 + .notifier_call = cgw_notifier
96638 +};
96639 +
96640 static __init int cgw_module_init(void)
96641 {
96642 /* sanitize given module parameter */
96643 @@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
96644 return -ENOMEM;
96645
96646 /* set notifier */
96647 - notifier.notifier_call = cgw_notifier;
96648 register_netdevice_notifier(&notifier);
96649
96650 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
96651 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
96652 index 4a5df7b..9ad1f1d 100644
96653 --- a/net/ceph/messenger.c
96654 +++ b/net/ceph/messenger.c
96655 @@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
96656 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
96657
96658 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
96659 -static atomic_t addr_str_seq = ATOMIC_INIT(0);
96660 +static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
96661
96662 static struct page *zero_page; /* used in certain error cases */
96663
96664 @@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
96665 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
96666 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
96667
96668 - i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
96669 + i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
96670 s = addr_str[i];
96671
96672 switch (ss->ss_family) {
96673 diff --git a/net/compat.c b/net/compat.c
96674 index f50161f..94fa415 100644
96675 --- a/net/compat.c
96676 +++ b/net/compat.c
96677 @@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
96678 return -EFAULT;
96679 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
96680 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
96681 - kmsg->msg_name = compat_ptr(tmp1);
96682 - kmsg->msg_iov = compat_ptr(tmp2);
96683 - kmsg->msg_control = compat_ptr(tmp3);
96684 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
96685 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
96686 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
96687 return 0;
96688 }
96689
96690 @@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
96691
96692 if (kern_msg->msg_namelen) {
96693 if (mode == VERIFY_READ) {
96694 - int err = move_addr_to_kernel(kern_msg->msg_name,
96695 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
96696 kern_msg->msg_namelen,
96697 kern_address);
96698 if (err < 0)
96699 @@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
96700 kern_msg->msg_name = NULL;
96701
96702 tot_len = iov_from_user_compat_to_kern(kern_iov,
96703 - (struct compat_iovec __user *)kern_msg->msg_iov,
96704 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
96705 kern_msg->msg_iovlen);
96706 if (tot_len >= 0)
96707 kern_msg->msg_iov = kern_iov;
96708 @@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
96709
96710 #define CMSG_COMPAT_FIRSTHDR(msg) \
96711 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
96712 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
96713 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
96714 (struct compat_cmsghdr __user *)NULL)
96715
96716 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
96717 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
96718 (ucmlen) <= (unsigned long) \
96719 ((mhdr)->msg_controllen - \
96720 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
96721 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
96722
96723 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
96724 struct compat_cmsghdr __user *cmsg, int cmsg_len)
96725 {
96726 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
96727 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
96728 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
96729 msg->msg_controllen)
96730 return NULL;
96731 return (struct compat_cmsghdr __user *)ptr;
96732 @@ -222,7 +222,7 @@ Efault:
96733
96734 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
96735 {
96736 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
96737 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
96738 struct compat_cmsghdr cmhdr;
96739 struct compat_timeval ctv;
96740 struct compat_timespec cts[3];
96741 @@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
96742
96743 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
96744 {
96745 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
96746 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
96747 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
96748 int fdnum = scm->fp->count;
96749 struct file **fp = scm->fp->fp;
96750 @@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
96751 return -EFAULT;
96752 old_fs = get_fs();
96753 set_fs(KERNEL_DS);
96754 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
96755 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
96756 set_fs(old_fs);
96757
96758 return err;
96759 @@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
96760 len = sizeof(ktime);
96761 old_fs = get_fs();
96762 set_fs(KERNEL_DS);
96763 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
96764 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
96765 set_fs(old_fs);
96766
96767 if (!err) {
96768 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
96769 case MCAST_JOIN_GROUP:
96770 case MCAST_LEAVE_GROUP:
96771 {
96772 - struct compat_group_req __user *gr32 = (void *)optval;
96773 + struct compat_group_req __user *gr32 = (void __user *)optval;
96774 struct group_req __user *kgr =
96775 compat_alloc_user_space(sizeof(struct group_req));
96776 u32 interface;
96777 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
96778 case MCAST_BLOCK_SOURCE:
96779 case MCAST_UNBLOCK_SOURCE:
96780 {
96781 - struct compat_group_source_req __user *gsr32 = (void *)optval;
96782 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
96783 struct group_source_req __user *kgsr = compat_alloc_user_space(
96784 sizeof(struct group_source_req));
96785 u32 interface;
96786 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
96787 }
96788 case MCAST_MSFILTER:
96789 {
96790 - struct compat_group_filter __user *gf32 = (void *)optval;
96791 + struct compat_group_filter __user *gf32 = (void __user *)optval;
96792 struct group_filter __user *kgf;
96793 u32 interface, fmode, numsrc;
96794
96795 @@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
96796 char __user *optval, int __user *optlen,
96797 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
96798 {
96799 - struct compat_group_filter __user *gf32 = (void *)optval;
96800 + struct compat_group_filter __user *gf32 = (void __user *)optval;
96801 struct group_filter __user *kgf;
96802 int __user *koptlen;
96803 u32 interface, fmode, numsrc;
96804 @@ -803,7 +803,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
96805
96806 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
96807 return -EINVAL;
96808 - if (copy_from_user(a, args, nas[call]))
96809 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
96810 return -EFAULT;
96811 a0 = a[0];
96812 a1 = a[1];
96813 diff --git a/net/core/datagram.c b/net/core/datagram.c
96814 index a16ed7b..eb44d17 100644
96815 --- a/net/core/datagram.c
96816 +++ b/net/core/datagram.c
96817 @@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
96818 }
96819
96820 kfree_skb(skb);
96821 - atomic_inc(&sk->sk_drops);
96822 + atomic_inc_unchecked(&sk->sk_drops);
96823 sk_mem_reclaim_partial(sk);
96824
96825 return err;
96826 diff --git a/net/core/dev.c b/net/core/dev.c
96827 index 616eccf..31832d38 100644
96828 --- a/net/core/dev.c
96829 +++ b/net/core/dev.c
96830 @@ -1684,14 +1684,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
96831 {
96832 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
96833 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
96834 - atomic_long_inc(&dev->rx_dropped);
96835 + atomic_long_inc_unchecked(&dev->rx_dropped);
96836 kfree_skb(skb);
96837 return NET_RX_DROP;
96838 }
96839 }
96840
96841 if (unlikely(!is_skb_forwardable(dev, skb))) {
96842 - atomic_long_inc(&dev->rx_dropped);
96843 + atomic_long_inc_unchecked(&dev->rx_dropped);
96844 kfree_skb(skb);
96845 return NET_RX_DROP;
96846 }
96847 @@ -2434,7 +2434,7 @@ static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
96848
96849 struct dev_gso_cb {
96850 void (*destructor)(struct sk_buff *skb);
96851 -};
96852 +} __no_const;
96853
96854 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
96855
96856 @@ -3224,7 +3224,7 @@ enqueue:
96857
96858 local_irq_restore(flags);
96859
96860 - atomic_long_inc(&skb->dev->rx_dropped);
96861 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
96862 kfree_skb(skb);
96863 return NET_RX_DROP;
96864 }
96865 @@ -3296,7 +3296,7 @@ int netif_rx_ni(struct sk_buff *skb)
96866 }
96867 EXPORT_SYMBOL(netif_rx_ni);
96868
96869 -static void net_tx_action(struct softirq_action *h)
96870 +static __latent_entropy void net_tx_action(void)
96871 {
96872 struct softnet_data *sd = &__get_cpu_var(softnet_data);
96873
96874 @@ -3630,7 +3630,7 @@ ncls:
96875 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
96876 } else {
96877 drop:
96878 - atomic_long_inc(&skb->dev->rx_dropped);
96879 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
96880 kfree_skb(skb);
96881 /* Jamal, now you will not able to escape explaining
96882 * me how you were going to use this. :-)
96883 @@ -4290,7 +4290,7 @@ void netif_napi_del(struct napi_struct *napi)
96884 }
96885 EXPORT_SYMBOL(netif_napi_del);
96886
96887 -static void net_rx_action(struct softirq_action *h)
96888 +static __latent_entropy void net_rx_action(void)
96889 {
96890 struct softnet_data *sd = &__get_cpu_var(softnet_data);
96891 unsigned long time_limit = jiffies + 2;
96892 @@ -6179,7 +6179,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
96893 } else {
96894 netdev_stats_to_stats64(storage, &dev->stats);
96895 }
96896 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
96897 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
96898 return storage;
96899 }
96900 EXPORT_SYMBOL(dev_get_stats);
96901 diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
96902 index 5b7d0e1..cb960fc 100644
96903 --- a/net/core/dev_ioctl.c
96904 +++ b/net/core/dev_ioctl.c
96905 @@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
96906 if (no_module && capable(CAP_NET_ADMIN))
96907 no_module = request_module("netdev-%s", name);
96908 if (no_module && capable(CAP_SYS_MODULE)) {
96909 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
96910 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
96911 +#else
96912 if (!request_module("%s", name))
96913 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
96914 name);
96915 +#endif
96916 }
96917 }
96918 EXPORT_SYMBOL(dev_load);
96919 diff --git a/net/core/filter.c b/net/core/filter.c
96920 index ad30d62..c2757df 100644
96921 --- a/net/core/filter.c
96922 +++ b/net/core/filter.c
96923 @@ -679,7 +679,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
96924 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
96925 if (!fp)
96926 return -ENOMEM;
96927 - memcpy(fp->insns, fprog->filter, fsize);
96928 + memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
96929
96930 atomic_set(&fp->refcnt, 1);
96931 fp->len = fprog->len;
96932 diff --git a/net/core/flow.c b/net/core/flow.c
96933 index dfa602c..3103d88 100644
96934 --- a/net/core/flow.c
96935 +++ b/net/core/flow.c
96936 @@ -61,7 +61,7 @@ struct flow_cache {
96937 struct timer_list rnd_timer;
96938 };
96939
96940 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
96941 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
96942 EXPORT_SYMBOL(flow_cache_genid);
96943 static struct flow_cache flow_cache_global;
96944 static struct kmem_cache *flow_cachep __read_mostly;
96945 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
96946
96947 static int flow_entry_valid(struct flow_cache_entry *fle)
96948 {
96949 - if (atomic_read(&flow_cache_genid) != fle->genid)
96950 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
96951 return 0;
96952 if (fle->object && !fle->object->ops->check(fle->object))
96953 return 0;
96954 @@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
96955 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
96956 fcp->hash_count++;
96957 }
96958 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
96959 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
96960 flo = fle->object;
96961 if (!flo)
96962 goto ret_object;
96963 @@ -279,7 +279,7 @@ nocache:
96964 }
96965 flo = resolver(net, key, family, dir, flo, ctx);
96966 if (fle) {
96967 - fle->genid = atomic_read(&flow_cache_genid);
96968 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
96969 if (!IS_ERR(flo))
96970 fle->object = flo;
96971 else
96972 diff --git a/net/core/iovec.c b/net/core/iovec.c
96973 index b618694..192bbba 100644
96974 --- a/net/core/iovec.c
96975 +++ b/net/core/iovec.c
96976 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
96977 if (m->msg_namelen) {
96978 if (mode == VERIFY_READ) {
96979 void __user *namep;
96980 - namep = (void __user __force *) m->msg_name;
96981 + namep = (void __force_user *) m->msg_name;
96982 err = move_addr_to_kernel(namep, m->msg_namelen,
96983 address);
96984 if (err < 0)
96985 @@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
96986 }
96987
96988 size = m->msg_iovlen * sizeof(struct iovec);
96989 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
96990 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
96991 return -EFAULT;
96992
96993 m->msg_iov = iov;
96994 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
96995 index 43128dd..e4d4311 100644
96996 --- a/net/core/neighbour.c
96997 +++ b/net/core/neighbour.c
96998 @@ -2775,7 +2775,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
96999 void __user *buffer, size_t *lenp, loff_t *ppos)
97000 {
97001 int size, ret;
97002 - struct ctl_table tmp = *ctl;
97003 + ctl_table_no_const tmp = *ctl;
97004
97005 tmp.extra1 = &zero;
97006 tmp.extra2 = &unres_qlen_max;
97007 @@ -2983,11 +2983,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
97008 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
97009 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
97010 } else {
97011 + struct neigh_table *ntable = container_of(p, struct neigh_table, parms);
97012 dev_name_source = "default";
97013 - t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
97014 - t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
97015 - t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
97016 - t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
97017 + t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &ntable->gc_interval;
97018 + t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &ntable->gc_thresh1;
97019 + t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &ntable->gc_thresh2;
97020 + t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &ntable->gc_thresh3;
97021 }
97022
97023
97024 diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
97025 index 2bf8329..7960607 100644
97026 --- a/net/core/net-procfs.c
97027 +++ b/net/core/net-procfs.c
97028 @@ -283,8 +283,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
97029 else
97030 seq_printf(seq, "%04x", ntohs(pt->type));
97031
97032 +#ifdef CONFIG_GRKERNSEC_HIDESYM
97033 + seq_printf(seq, " %-8s %pf\n",
97034 + pt->dev ? pt->dev->name : "", NULL);
97035 +#else
97036 seq_printf(seq, " %-8s %pf\n",
97037 pt->dev ? pt->dev->name : "", pt->func);
97038 +#endif
97039 }
97040
97041 return 0;
97042 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
97043 index f3edf96..3cd8b40 100644
97044 --- a/net/core/net-sysfs.c
97045 +++ b/net/core/net-sysfs.c
97046 @@ -1358,7 +1358,7 @@ void netdev_class_remove_file_ns(struct class_attribute *class_attr,
97047 }
97048 EXPORT_SYMBOL(netdev_class_remove_file_ns);
97049
97050 -int netdev_kobject_init(void)
97051 +int __init netdev_kobject_init(void)
97052 {
97053 kobj_ns_type_register(&net_ns_type_operations);
97054 return class_register(&net_class);
97055 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
97056 index 81d3a9a..a0bd7a8 100644
97057 --- a/net/core/net_namespace.c
97058 +++ b/net/core/net_namespace.c
97059 @@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
97060 int error;
97061 LIST_HEAD(net_exit_list);
97062
97063 - list_add_tail(&ops->list, list);
97064 + pax_list_add_tail((struct list_head *)&ops->list, list);
97065 if (ops->init || (ops->id && ops->size)) {
97066 for_each_net(net) {
97067 error = ops_init(ops, net);
97068 @@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
97069
97070 out_undo:
97071 /* If I have an error cleanup all namespaces I initialized */
97072 - list_del(&ops->list);
97073 + pax_list_del((struct list_head *)&ops->list);
97074 ops_exit_list(ops, &net_exit_list);
97075 ops_free_list(ops, &net_exit_list);
97076 return error;
97077 @@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
97078 struct net *net;
97079 LIST_HEAD(net_exit_list);
97080
97081 - list_del(&ops->list);
97082 + pax_list_del((struct list_head *)&ops->list);
97083 for_each_net(net)
97084 list_add_tail(&net->exit_list, &net_exit_list);
97085 ops_exit_list(ops, &net_exit_list);
97086 @@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
97087 mutex_lock(&net_mutex);
97088 error = register_pernet_operations(&pernet_list, ops);
97089 if (!error && (first_device == &pernet_list))
97090 - first_device = &ops->list;
97091 + first_device = (struct list_head *)&ops->list;
97092 mutex_unlock(&net_mutex);
97093 return error;
97094 }
97095 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
97096 index 81975f2..9ef3531 100644
97097 --- a/net/core/netpoll.c
97098 +++ b/net/core/netpoll.c
97099 @@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
97100 struct udphdr *udph;
97101 struct iphdr *iph;
97102 struct ethhdr *eth;
97103 - static atomic_t ip_ident;
97104 + static atomic_unchecked_t ip_ident;
97105 struct ipv6hdr *ip6h;
97106
97107 udp_len = len + sizeof(*udph);
97108 @@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
97109 put_unaligned(0x45, (unsigned char *)iph);
97110 iph->tos = 0;
97111 put_unaligned(htons(ip_len), &(iph->tot_len));
97112 - iph->id = htons(atomic_inc_return(&ip_ident));
97113 + iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
97114 iph->frag_off = 0;
97115 iph->ttl = 64;
97116 iph->protocol = IPPROTO_UDP;
97117 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
97118 index cf67144..12bf94c 100644
97119 --- a/net/core/rtnetlink.c
97120 +++ b/net/core/rtnetlink.c
97121 @@ -58,7 +58,7 @@ struct rtnl_link {
97122 rtnl_doit_func doit;
97123 rtnl_dumpit_func dumpit;
97124 rtnl_calcit_func calcit;
97125 -};
97126 +} __no_const;
97127
97128 static DEFINE_MUTEX(rtnl_mutex);
97129
97130 @@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
97131 if (rtnl_link_ops_get(ops->kind))
97132 return -EEXIST;
97133
97134 - if (!ops->dellink)
97135 - ops->dellink = unregister_netdevice_queue;
97136 + if (!ops->dellink) {
97137 + pax_open_kernel();
97138 + *(void **)&ops->dellink = unregister_netdevice_queue;
97139 + pax_close_kernel();
97140 + }
97141
97142 - list_add_tail(&ops->list, &link_ops);
97143 + pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
97144 return 0;
97145 }
97146 EXPORT_SYMBOL_GPL(__rtnl_link_register);
97147 @@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
97148 for_each_net(net) {
97149 __rtnl_kill_links(net, ops);
97150 }
97151 - list_del(&ops->list);
97152 + pax_list_del((struct list_head *)&ops->list);
97153 }
97154 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
97155
97156 diff --git a/net/core/scm.c b/net/core/scm.c
97157 index b442e7e..6f5b5a2 100644
97158 --- a/net/core/scm.c
97159 +++ b/net/core/scm.c
97160 @@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
97161 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
97162 {
97163 struct cmsghdr __user *cm
97164 - = (__force struct cmsghdr __user *)msg->msg_control;
97165 + = (struct cmsghdr __force_user *)msg->msg_control;
97166 struct cmsghdr cmhdr;
97167 int cmlen = CMSG_LEN(len);
97168 int err;
97169 @@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
97170 err = -EFAULT;
97171 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
97172 goto out;
97173 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
97174 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
97175 goto out;
97176 cmlen = CMSG_SPACE(len);
97177 if (msg->msg_controllen < cmlen)
97178 @@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
97179 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
97180 {
97181 struct cmsghdr __user *cm
97182 - = (__force struct cmsghdr __user*)msg->msg_control;
97183 + = (struct cmsghdr __force_user *)msg->msg_control;
97184
97185 int fdmax = 0;
97186 int fdnum = scm->fp->count;
97187 @@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
97188 if (fdnum < fdmax)
97189 fdmax = fdnum;
97190
97191 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
97192 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
97193 i++, cmfptr++)
97194 {
97195 struct socket *sock;
97196 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
97197 index deffb37..213db0a 100644
97198 --- a/net/core/skbuff.c
97199 +++ b/net/core/skbuff.c
97200 @@ -2006,7 +2006,7 @@ EXPORT_SYMBOL(__skb_checksum);
97201 __wsum skb_checksum(const struct sk_buff *skb, int offset,
97202 int len, __wsum csum)
97203 {
97204 - const struct skb_checksum_ops ops = {
97205 + static const struct skb_checksum_ops ops = {
97206 .update = csum_partial_ext,
97207 .combine = csum_block_add_ext,
97208 };
97209 @@ -3119,13 +3119,15 @@ void __init skb_init(void)
97210 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
97211 sizeof(struct sk_buff),
97212 0,
97213 - SLAB_HWCACHE_ALIGN|SLAB_PANIC,
97214 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|
97215 + SLAB_NO_SANITIZE,
97216 NULL);
97217 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
97218 (2*sizeof(struct sk_buff)) +
97219 sizeof(atomic_t),
97220 0,
97221 - SLAB_HWCACHE_ALIGN|SLAB_PANIC,
97222 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|
97223 + SLAB_NO_SANITIZE,
97224 NULL);
97225 }
97226
97227 diff --git a/net/core/sock.c b/net/core/sock.c
97228 index fbc5cfb..6d7e8c3 100644
97229 --- a/net/core/sock.c
97230 +++ b/net/core/sock.c
97231 @@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
97232 struct sk_buff_head *list = &sk->sk_receive_queue;
97233
97234 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
97235 - atomic_inc(&sk->sk_drops);
97236 + atomic_inc_unchecked(&sk->sk_drops);
97237 trace_sock_rcvqueue_full(sk, skb);
97238 return -ENOMEM;
97239 }
97240 @@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
97241 return err;
97242
97243 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
97244 - atomic_inc(&sk->sk_drops);
97245 + atomic_inc_unchecked(&sk->sk_drops);
97246 return -ENOBUFS;
97247 }
97248
97249 @@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
97250 skb_dst_force(skb);
97251
97252 spin_lock_irqsave(&list->lock, flags);
97253 - skb->dropcount = atomic_read(&sk->sk_drops);
97254 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
97255 __skb_queue_tail(list, skb);
97256 spin_unlock_irqrestore(&list->lock, flags);
97257
97258 @@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
97259 skb->dev = NULL;
97260
97261 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
97262 - atomic_inc(&sk->sk_drops);
97263 + atomic_inc_unchecked(&sk->sk_drops);
97264 goto discard_and_relse;
97265 }
97266 if (nested)
97267 @@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
97268 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
97269 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
97270 bh_unlock_sock(sk);
97271 - atomic_inc(&sk->sk_drops);
97272 + atomic_inc_unchecked(&sk->sk_drops);
97273 goto discard_and_relse;
97274 }
97275
97276 @@ -950,12 +950,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
97277 struct timeval tm;
97278 } v;
97279
97280 - int lv = sizeof(int);
97281 - int len;
97282 + unsigned int lv = sizeof(int);
97283 + unsigned int len;
97284
97285 if (get_user(len, optlen))
97286 return -EFAULT;
97287 - if (len < 0)
97288 + if (len > INT_MAX)
97289 return -EINVAL;
97290
97291 memset(&v, 0, sizeof(v));
97292 @@ -1107,11 +1107,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
97293
97294 case SO_PEERNAME:
97295 {
97296 - char address[128];
97297 + char address[_K_SS_MAXSIZE];
97298
97299 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
97300 return -ENOTCONN;
97301 - if (lv < len)
97302 + if (lv < len || sizeof address < len)
97303 return -EINVAL;
97304 if (copy_to_user(optval, address, len))
97305 return -EFAULT;
97306 @@ -1188,7 +1188,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
97307
97308 if (len > lv)
97309 len = lv;
97310 - if (copy_to_user(optval, &v, len))
97311 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
97312 return -EFAULT;
97313 lenout:
97314 if (put_user(len, optlen))
97315 @@ -2353,7 +2353,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
97316 */
97317 smp_wmb();
97318 atomic_set(&sk->sk_refcnt, 1);
97319 - atomic_set(&sk->sk_drops, 0);
97320 + atomic_set_unchecked(&sk->sk_drops, 0);
97321 }
97322 EXPORT_SYMBOL(sock_init_data);
97323
97324 @@ -2478,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
97325 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
97326 int level, int type)
97327 {
97328 + struct sock_extended_err ee;
97329 struct sock_exterr_skb *serr;
97330 struct sk_buff *skb, *skb2;
97331 int copied, err;
97332 @@ -2499,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
97333 sock_recv_timestamp(msg, sk, skb);
97334
97335 serr = SKB_EXT_ERR(skb);
97336 - put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
97337 + ee = serr->ee;
97338 + put_cmsg(msg, level, type, sizeof ee, &ee);
97339
97340 msg->msg_flags |= MSG_ERRQUEUE;
97341 err = copied;
97342 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
97343 index a0e9cf6..ef7f9ed 100644
97344 --- a/net/core/sock_diag.c
97345 +++ b/net/core/sock_diag.c
97346 @@ -9,26 +9,33 @@
97347 #include <linux/inet_diag.h>
97348 #include <linux/sock_diag.h>
97349
97350 -static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
97351 +static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
97352 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
97353 static DEFINE_MUTEX(sock_diag_table_mutex);
97354
97355 int sock_diag_check_cookie(void *sk, __u32 *cookie)
97356 {
97357 +#ifndef CONFIG_GRKERNSEC_HIDESYM
97358 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
97359 cookie[1] != INET_DIAG_NOCOOKIE) &&
97360 ((u32)(unsigned long)sk != cookie[0] ||
97361 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
97362 return -ESTALE;
97363 else
97364 +#endif
97365 return 0;
97366 }
97367 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
97368
97369 void sock_diag_save_cookie(void *sk, __u32 *cookie)
97370 {
97371 +#ifdef CONFIG_GRKERNSEC_HIDESYM
97372 + cookie[0] = 0;
97373 + cookie[1] = 0;
97374 +#else
97375 cookie[0] = (u32)(unsigned long)sk;
97376 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
97377 +#endif
97378 }
97379 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
97380
97381 @@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
97382 mutex_lock(&sock_diag_table_mutex);
97383 if (sock_diag_handlers[hndl->family])
97384 err = -EBUSY;
97385 - else
97386 + else {
97387 + pax_open_kernel();
97388 sock_diag_handlers[hndl->family] = hndl;
97389 + pax_close_kernel();
97390 + }
97391 mutex_unlock(&sock_diag_table_mutex);
97392
97393 return err;
97394 @@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
97395
97396 mutex_lock(&sock_diag_table_mutex);
97397 BUG_ON(sock_diag_handlers[family] != hnld);
97398 + pax_open_kernel();
97399 sock_diag_handlers[family] = NULL;
97400 + pax_close_kernel();
97401 mutex_unlock(&sock_diag_table_mutex);
97402 }
97403 EXPORT_SYMBOL_GPL(sock_diag_unregister);
97404 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
97405 index cca4441..5e616de 100644
97406 --- a/net/core/sysctl_net_core.c
97407 +++ b/net/core/sysctl_net_core.c
97408 @@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
97409 {
97410 unsigned int orig_size, size;
97411 int ret, i;
97412 - struct ctl_table tmp = {
97413 + ctl_table_no_const tmp = {
97414 .data = &size,
97415 .maxlen = sizeof(size),
97416 .mode = table->mode
97417 @@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
97418 void __user *buffer, size_t *lenp, loff_t *ppos)
97419 {
97420 char id[IFNAMSIZ];
97421 - struct ctl_table tbl = {
97422 + ctl_table_no_const tbl = {
97423 .data = id,
97424 .maxlen = IFNAMSIZ,
97425 };
97426 @@ -378,13 +378,12 @@ static struct ctl_table netns_core_table[] = {
97427
97428 static __net_init int sysctl_core_net_init(struct net *net)
97429 {
97430 - struct ctl_table *tbl;
97431 + ctl_table_no_const *tbl = NULL;
97432
97433 net->core.sysctl_somaxconn = SOMAXCONN;
97434
97435 - tbl = netns_core_table;
97436 if (!net_eq(net, &init_net)) {
97437 - tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
97438 + tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
97439 if (tbl == NULL)
97440 goto err_dup;
97441
97442 @@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
97443 if (net->user_ns != &init_user_ns) {
97444 tbl[0].procname = NULL;
97445 }
97446 - }
97447 -
97448 - net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
97449 + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
97450 + } else
97451 + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
97452 if (net->core.sysctl_hdr == NULL)
97453 goto err_reg;
97454
97455 return 0;
97456
97457 err_reg:
97458 - if (tbl != netns_core_table)
97459 - kfree(tbl);
97460 + kfree(tbl);
97461 err_dup:
97462 return -ENOMEM;
97463 }
97464 @@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
97465 kfree(tbl);
97466 }
97467
97468 -static __net_initdata struct pernet_operations sysctl_core_ops = {
97469 +static __net_initconst struct pernet_operations sysctl_core_ops = {
97470 .init = sysctl_core_net_init,
97471 .exit = sysctl_core_net_exit,
97472 };
97473 diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
97474 index dd4d506..fb2fb87 100644
97475 --- a/net/decnet/af_decnet.c
97476 +++ b/net/decnet/af_decnet.c
97477 @@ -465,6 +465,7 @@ static struct proto dn_proto = {
97478 .sysctl_rmem = sysctl_decnet_rmem,
97479 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
97480 .obj_size = sizeof(struct dn_sock),
97481 + .slab_flags = SLAB_USERCOPY,
97482 };
97483
97484 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
97485 diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
97486 index dd0dfb2..fdbc764 100644
97487 --- a/net/decnet/dn_dev.c
97488 +++ b/net/decnet/dn_dev.c
97489 @@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
97490 .extra1 = &min_t3,
97491 .extra2 = &max_t3
97492 },
97493 - {0}
97494 + { }
97495 },
97496 };
97497
97498 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
97499 index 5325b54..a0d4d69 100644
97500 --- a/net/decnet/sysctl_net_decnet.c
97501 +++ b/net/decnet/sysctl_net_decnet.c
97502 @@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
97503
97504 if (len > *lenp) len = *lenp;
97505
97506 - if (copy_to_user(buffer, addr, len))
97507 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
97508 return -EFAULT;
97509
97510 *lenp = len;
97511 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
97512
97513 if (len > *lenp) len = *lenp;
97514
97515 - if (copy_to_user(buffer, devname, len))
97516 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
97517 return -EFAULT;
97518
97519 *lenp = len;
97520 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
97521 index 1865fdf..581a595 100644
97522 --- a/net/ieee802154/dgram.c
97523 +++ b/net/ieee802154/dgram.c
97524 @@ -315,8 +315,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
97525 if (saddr) {
97526 saddr->family = AF_IEEE802154;
97527 saddr->addr = mac_cb(skb)->sa;
97528 + }
97529 + if (addr_len)
97530 *addr_len = sizeof(*saddr);
97531 - }
97532
97533 if (flags & MSG_TRUNC)
97534 copied = skb->len;
97535 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
97536 index e4d96d4..e1651da 100644
97537 --- a/net/ipv4/af_inet.c
97538 +++ b/net/ipv4/af_inet.c
97539 @@ -1686,13 +1686,9 @@ static int __init inet_init(void)
97540
97541 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
97542
97543 - sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
97544 - if (!sysctl_local_reserved_ports)
97545 - goto out;
97546 -
97547 rc = proto_register(&tcp_prot, 1);
97548 if (rc)
97549 - goto out_free_reserved_ports;
97550 + goto out;
97551
97552 rc = proto_register(&udp_prot, 1);
97553 if (rc)
97554 @@ -1799,8 +1795,6 @@ out_unregister_udp_proto:
97555 proto_unregister(&udp_prot);
97556 out_unregister_tcp_proto:
97557 proto_unregister(&tcp_prot);
97558 -out_free_reserved_ports:
97559 - kfree(sysctl_local_reserved_ports);
97560 goto out;
97561 }
97562
97563 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
97564 index f4b34d8..c54a163 100644
97565 --- a/net/ipv4/devinet.c
97566 +++ b/net/ipv4/devinet.c
97567 @@ -1534,7 +1534,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
97568 idx = 0;
97569 head = &net->dev_index_head[h];
97570 rcu_read_lock();
97571 - cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
97572 + cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
97573 net->dev_base_seq;
97574 hlist_for_each_entry_rcu(dev, head, index_hlist) {
97575 if (idx < s_idx)
97576 @@ -1845,7 +1845,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
97577 idx = 0;
97578 head = &net->dev_index_head[h];
97579 rcu_read_lock();
97580 - cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
97581 + cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
97582 net->dev_base_seq;
97583 hlist_for_each_entry_rcu(dev, head, index_hlist) {
97584 if (idx < s_idx)
97585 @@ -2070,7 +2070,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
97586 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
97587 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
97588
97589 -static struct devinet_sysctl_table {
97590 +static const struct devinet_sysctl_table {
97591 struct ctl_table_header *sysctl_header;
97592 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
97593 } devinet_sysctl = {
97594 @@ -2192,7 +2192,7 @@ static __net_init int devinet_init_net(struct net *net)
97595 int err;
97596 struct ipv4_devconf *all, *dflt;
97597 #ifdef CONFIG_SYSCTL
97598 - struct ctl_table *tbl = ctl_forward_entry;
97599 + ctl_table_no_const *tbl = NULL;
97600 struct ctl_table_header *forw_hdr;
97601 #endif
97602
97603 @@ -2210,7 +2210,7 @@ static __net_init int devinet_init_net(struct net *net)
97604 goto err_alloc_dflt;
97605
97606 #ifdef CONFIG_SYSCTL
97607 - tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
97608 + tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
97609 if (tbl == NULL)
97610 goto err_alloc_ctl;
97611
97612 @@ -2230,7 +2230,10 @@ static __net_init int devinet_init_net(struct net *net)
97613 goto err_reg_dflt;
97614
97615 err = -ENOMEM;
97616 - forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
97617 + if (!net_eq(net, &init_net))
97618 + forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
97619 + else
97620 + forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
97621 if (forw_hdr == NULL)
97622 goto err_reg_ctl;
97623 net->ipv4.forw_hdr = forw_hdr;
97624 @@ -2246,8 +2249,7 @@ err_reg_ctl:
97625 err_reg_dflt:
97626 __devinet_sysctl_unregister(all);
97627 err_reg_all:
97628 - if (tbl != ctl_forward_entry)
97629 - kfree(tbl);
97630 + kfree(tbl);
97631 err_alloc_ctl:
97632 #endif
97633 if (dflt != &ipv4_devconf_dflt)
97634 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
97635 index c7539e2..b455e51 100644
97636 --- a/net/ipv4/fib_frontend.c
97637 +++ b/net/ipv4/fib_frontend.c
97638 @@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
97639 #ifdef CONFIG_IP_ROUTE_MULTIPATH
97640 fib_sync_up(dev);
97641 #endif
97642 - atomic_inc(&net->ipv4.dev_addr_genid);
97643 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
97644 rt_cache_flush(dev_net(dev));
97645 break;
97646 case NETDEV_DOWN:
97647 fib_del_ifaddr(ifa, NULL);
97648 - atomic_inc(&net->ipv4.dev_addr_genid);
97649 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
97650 if (ifa->ifa_dev->ifa_list == NULL) {
97651 /* Last address was deleted from this interface.
97652 * Disable IP.
97653 @@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
97654 #ifdef CONFIG_IP_ROUTE_MULTIPATH
97655 fib_sync_up(dev);
97656 #endif
97657 - atomic_inc(&net->ipv4.dev_addr_genid);
97658 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
97659 rt_cache_flush(net);
97660 break;
97661 case NETDEV_DOWN:
97662 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
97663 index e63f47a..e5c531d 100644
97664 --- a/net/ipv4/fib_semantics.c
97665 +++ b/net/ipv4/fib_semantics.c
97666 @@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
97667 nh->nh_saddr = inet_select_addr(nh->nh_dev,
97668 nh->nh_gw,
97669 nh->nh_parent->fib_scope);
97670 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
97671 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
97672
97673 return nh->nh_saddr;
97674 }
97675 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
97676 index fc0e649..febfa65 100644
97677 --- a/net/ipv4/inet_connection_sock.c
97678 +++ b/net/ipv4/inet_connection_sock.c
97679 @@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
97680 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
97681 #endif
97682
97683 -unsigned long *sysctl_local_reserved_ports;
97684 +unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
97685 EXPORT_SYMBOL(sysctl_local_reserved_ports);
97686
97687 void inet_get_local_port_range(struct net *net, int *low, int *high)
97688 diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
97689 index bb075fc..322dceb 100644
97690 --- a/net/ipv4/inet_fragment.c
97691 +++ b/net/ipv4/inet_fragment.c
97692 @@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
97693
97694 atomic_inc(&qp->refcnt);
97695 hlist_add_head(&qp->list, &hb->chain);
97696 - spin_unlock(&hb->chain_lock);
97697 - read_unlock(&f->lock);
97698 inet_frag_lru_add(nf, qp);
97699 + spin_unlock(&hb->chain_lock);
97700 + read_unlock(&f->lock);
97701 +
97702 return qp;
97703 }
97704
97705 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
97706 index 8b9cf27..0d8d592 100644
97707 --- a/net/ipv4/inet_hashtables.c
97708 +++ b/net/ipv4/inet_hashtables.c
97709 @@ -18,6 +18,7 @@
97710 #include <linux/sched.h>
97711 #include <linux/slab.h>
97712 #include <linux/wait.h>
97713 +#include <linux/security.h>
97714
97715 #include <net/inet_connection_sock.h>
97716 #include <net/inet_hashtables.h>
97717 @@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
97718 return inet_ehashfn(net, laddr, lport, faddr, fport);
97719 }
97720
97721 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
97722 +
97723 /*
97724 * Allocate and initialize a new local port bind bucket.
97725 * The bindhash mutex for snum's hash chain must be held here.
97726 @@ -554,6 +557,8 @@ ok:
97727 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
97728 spin_unlock(&head->lock);
97729
97730 + gr_update_task_in_ip_table(current, inet_sk(sk));
97731 +
97732 if (tw) {
97733 inet_twsk_deschedule(tw, death_row);
97734 while (twrefcnt) {
97735 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
97736 index 33d5537..da337a4 100644
97737 --- a/net/ipv4/inetpeer.c
97738 +++ b/net/ipv4/inetpeer.c
97739 @@ -503,8 +503,8 @@ relookup:
97740 if (p) {
97741 p->daddr = *daddr;
97742 atomic_set(&p->refcnt, 1);
97743 - atomic_set(&p->rid, 0);
97744 - atomic_set(&p->ip_id_count,
97745 + atomic_set_unchecked(&p->rid, 0);
97746 + atomic_set_unchecked(&p->ip_id_count,
97747 (daddr->family == AF_INET) ?
97748 secure_ip_id(daddr->addr.a4) :
97749 secure_ipv6_id(daddr->addr.a6));
97750 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
97751 index 2481993..2d9a7a7 100644
97752 --- a/net/ipv4/ip_fragment.c
97753 +++ b/net/ipv4/ip_fragment.c
97754 @@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
97755 return 0;
97756
97757 start = qp->rid;
97758 - end = atomic_inc_return(&peer->rid);
97759 + end = atomic_inc_return_unchecked(&peer->rid);
97760 qp->rid = end;
97761
97762 rc = qp->q.fragments && (end - start) > max;
97763 @@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
97764
97765 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
97766 {
97767 - struct ctl_table *table;
97768 + ctl_table_no_const *table = NULL;
97769 struct ctl_table_header *hdr;
97770
97771 - table = ip4_frags_ns_ctl_table;
97772 if (!net_eq(net, &init_net)) {
97773 - table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
97774 + table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
97775 if (table == NULL)
97776 goto err_alloc;
97777
97778 @@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
97779 /* Don't export sysctls to unprivileged users */
97780 if (net->user_ns != &init_user_ns)
97781 table[0].procname = NULL;
97782 - }
97783 + hdr = register_net_sysctl(net, "net/ipv4", table);
97784 + } else
97785 + hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
97786
97787 - hdr = register_net_sysctl(net, "net/ipv4", table);
97788 if (hdr == NULL)
97789 goto err_reg;
97790
97791 @@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
97792 return 0;
97793
97794 err_reg:
97795 - if (!net_eq(net, &init_net))
97796 - kfree(table);
97797 + kfree(table);
97798 err_alloc:
97799 return -ENOMEM;
97800 }
97801 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
97802 index d306360..1c1a1f1 100644
97803 --- a/net/ipv4/ip_gre.c
97804 +++ b/net/ipv4/ip_gre.c
97805 @@ -115,7 +115,7 @@ static bool log_ecn_error = true;
97806 module_param(log_ecn_error, bool, 0644);
97807 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
97808
97809 -static struct rtnl_link_ops ipgre_link_ops __read_mostly;
97810 +static struct rtnl_link_ops ipgre_link_ops;
97811 static int ipgre_tunnel_init(struct net_device *dev);
97812
97813 static int ipgre_net_id __read_mostly;
97814 @@ -732,7 +732,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
97815 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
97816 };
97817
97818 -static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
97819 +static struct rtnl_link_ops ipgre_link_ops = {
97820 .kind = "gre",
97821 .maxtype = IFLA_GRE_MAX,
97822 .policy = ipgre_policy,
97823 @@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
97824 .fill_info = ipgre_fill_info,
97825 };
97826
97827 -static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
97828 +static struct rtnl_link_ops ipgre_tap_ops = {
97829 .kind = "gretap",
97830 .maxtype = IFLA_GRE_MAX,
97831 .policy = ipgre_policy,
97832 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
97833 index ddf32a6..3fdeea9 100644
97834 --- a/net/ipv4/ip_sockglue.c
97835 +++ b/net/ipv4/ip_sockglue.c
97836 @@ -1172,7 +1172,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
97837 len = min_t(unsigned int, len, opt->optlen);
97838 if (put_user(len, optlen))
97839 return -EFAULT;
97840 - if (copy_to_user(optval, opt->__data, len))
97841 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
97842 + copy_to_user(optval, opt->__data, len))
97843 return -EFAULT;
97844 return 0;
97845 }
97846 @@ -1303,7 +1304,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
97847 if (sk->sk_type != SOCK_STREAM)
97848 return -ENOPROTOOPT;
97849
97850 - msg.msg_control = optval;
97851 + msg.msg_control = (void __force_kernel *)optval;
97852 msg.msg_controllen = len;
97853 msg.msg_flags = flags;
97854
97855 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
97856 index 52b802a..b725179 100644
97857 --- a/net/ipv4/ip_vti.c
97858 +++ b/net/ipv4/ip_vti.c
97859 @@ -44,7 +44,7 @@
97860 #include <net/net_namespace.h>
97861 #include <net/netns/generic.h>
97862
97863 -static struct rtnl_link_ops vti_link_ops __read_mostly;
97864 +static struct rtnl_link_ops vti_link_ops;
97865
97866 static int vti_net_id __read_mostly;
97867 static int vti_tunnel_init(struct net_device *dev);
97868 @@ -360,7 +360,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
97869 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
97870 };
97871
97872 -static struct rtnl_link_ops vti_link_ops __read_mostly = {
97873 +static struct rtnl_link_ops vti_link_ops = {
97874 .kind = "vti",
97875 .maxtype = IFLA_VTI_MAX,
97876 .policy = vti_policy,
97877 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
97878 index efa1138..20dbba0 100644
97879 --- a/net/ipv4/ipconfig.c
97880 +++ b/net/ipv4/ipconfig.c
97881 @@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
97882
97883 mm_segment_t oldfs = get_fs();
97884 set_fs(get_ds());
97885 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
97886 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
97887 set_fs(oldfs);
97888 return res;
97889 }
97890 @@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
97891
97892 mm_segment_t oldfs = get_fs();
97893 set_fs(get_ds());
97894 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
97895 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
97896 set_fs(oldfs);
97897 return res;
97898 }
97899 @@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
97900
97901 mm_segment_t oldfs = get_fs();
97902 set_fs(get_ds());
97903 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
97904 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
97905 set_fs(oldfs);
97906 return res;
97907 }
97908 diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
97909 index fe3e9f7..4956990 100644
97910 --- a/net/ipv4/ipip.c
97911 +++ b/net/ipv4/ipip.c
97912 @@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
97913 static int ipip_net_id __read_mostly;
97914
97915 static int ipip_tunnel_init(struct net_device *dev);
97916 -static struct rtnl_link_ops ipip_link_ops __read_mostly;
97917 +static struct rtnl_link_ops ipip_link_ops;
97918
97919 static int ipip_err(struct sk_buff *skb, u32 info)
97920 {
97921 @@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
97922 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
97923 };
97924
97925 -static struct rtnl_link_ops ipip_link_ops __read_mostly = {
97926 +static struct rtnl_link_ops ipip_link_ops = {
97927 .kind = "ipip",
97928 .maxtype = IFLA_IPTUN_MAX,
97929 .policy = ipip_policy,
97930 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
97931 index 59da7cd..e318de1 100644
97932 --- a/net/ipv4/netfilter/arp_tables.c
97933 +++ b/net/ipv4/netfilter/arp_tables.c
97934 @@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
97935 #endif
97936
97937 static int get_info(struct net *net, void __user *user,
97938 - const int *len, int compat)
97939 + int len, int compat)
97940 {
97941 char name[XT_TABLE_MAXNAMELEN];
97942 struct xt_table *t;
97943 int ret;
97944
97945 - if (*len != sizeof(struct arpt_getinfo)) {
97946 - duprintf("length %u != %Zu\n", *len,
97947 + if (len != sizeof(struct arpt_getinfo)) {
97948 + duprintf("length %u != %Zu\n", len,
97949 sizeof(struct arpt_getinfo));
97950 return -EINVAL;
97951 }
97952 @@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
97953 info.size = private->size;
97954 strcpy(info.name, name);
97955
97956 - if (copy_to_user(user, &info, *len) != 0)
97957 + if (copy_to_user(user, &info, len) != 0)
97958 ret = -EFAULT;
97959 else
97960 ret = 0;
97961 @@ -1688,7 +1688,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
97962
97963 switch (cmd) {
97964 case ARPT_SO_GET_INFO:
97965 - ret = get_info(sock_net(sk), user, len, 1);
97966 + ret = get_info(sock_net(sk), user, *len, 1);
97967 break;
97968 case ARPT_SO_GET_ENTRIES:
97969 ret = compat_get_entries(sock_net(sk), user, len);
97970 @@ -1733,7 +1733,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
97971
97972 switch (cmd) {
97973 case ARPT_SO_GET_INFO:
97974 - ret = get_info(sock_net(sk), user, len, 0);
97975 + ret = get_info(sock_net(sk), user, *len, 0);
97976 break;
97977
97978 case ARPT_SO_GET_ENTRIES:
97979 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
97980 index 718dfbd..cef4152 100644
97981 --- a/net/ipv4/netfilter/ip_tables.c
97982 +++ b/net/ipv4/netfilter/ip_tables.c
97983 @@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
97984 #endif
97985
97986 static int get_info(struct net *net, void __user *user,
97987 - const int *len, int compat)
97988 + int len, int compat)
97989 {
97990 char name[XT_TABLE_MAXNAMELEN];
97991 struct xt_table *t;
97992 int ret;
97993
97994 - if (*len != sizeof(struct ipt_getinfo)) {
97995 - duprintf("length %u != %zu\n", *len,
97996 + if (len != sizeof(struct ipt_getinfo)) {
97997 + duprintf("length %u != %zu\n", len,
97998 sizeof(struct ipt_getinfo));
97999 return -EINVAL;
98000 }
98001 @@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
98002 info.size = private->size;
98003 strcpy(info.name, name);
98004
98005 - if (copy_to_user(user, &info, *len) != 0)
98006 + if (copy_to_user(user, &info, len) != 0)
98007 ret = -EFAULT;
98008 else
98009 ret = 0;
98010 @@ -1971,7 +1971,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
98011
98012 switch (cmd) {
98013 case IPT_SO_GET_INFO:
98014 - ret = get_info(sock_net(sk), user, len, 1);
98015 + ret = get_info(sock_net(sk), user, *len, 1);
98016 break;
98017 case IPT_SO_GET_ENTRIES:
98018 ret = compat_get_entries(sock_net(sk), user, len);
98019 @@ -2018,7 +2018,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
98020
98021 switch (cmd) {
98022 case IPT_SO_GET_INFO:
98023 - ret = get_info(sock_net(sk), user, len, 0);
98024 + ret = get_info(sock_net(sk), user, *len, 0);
98025 break;
98026
98027 case IPT_SO_GET_ENTRIES:
98028 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
98029 index 242e7f4..a084e95 100644
98030 --- a/net/ipv4/ping.c
98031 +++ b/net/ipv4/ping.c
98032 @@ -55,7 +55,7 @@
98033
98034
98035 struct ping_table ping_table;
98036 -struct pingv6_ops pingv6_ops;
98037 +struct pingv6_ops *pingv6_ops;
98038 EXPORT_SYMBOL_GPL(pingv6_ops);
98039
98040 static u16 ping_port_rover;
98041 @@ -334,7 +334,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
98042 return -ENODEV;
98043 }
98044 }
98045 - has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
98046 + has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
98047 scoped);
98048 rcu_read_unlock();
98049
98050 @@ -542,7 +542,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
98051 }
98052 #if IS_ENABLED(CONFIG_IPV6)
98053 } else if (skb->protocol == htons(ETH_P_IPV6)) {
98054 - harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
98055 + harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
98056 #endif
98057 }
98058
98059 @@ -560,7 +560,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
98060 info, (u8 *)icmph);
98061 #if IS_ENABLED(CONFIG_IPV6)
98062 } else if (family == AF_INET6) {
98063 - pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
98064 + pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
98065 info, (u8 *)icmph);
98066 #endif
98067 }
98068 @@ -830,6 +830,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98069 {
98070 struct inet_sock *isk = inet_sk(sk);
98071 int family = sk->sk_family;
98072 + struct sockaddr_in *sin;
98073 + struct sockaddr_in6 *sin6;
98074 struct sk_buff *skb;
98075 int copied, err;
98076
98077 @@ -839,12 +841,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98078 if (flags & MSG_OOB)
98079 goto out;
98080
98081 + if (addr_len) {
98082 + if (family == AF_INET)
98083 + *addr_len = sizeof(*sin);
98084 + else if (family == AF_INET6 && addr_len)
98085 + *addr_len = sizeof(*sin6);
98086 + }
98087 +
98088 if (flags & MSG_ERRQUEUE) {
98089 if (family == AF_INET) {
98090 return ip_recv_error(sk, msg, len, addr_len);
98091 #if IS_ENABLED(CONFIG_IPV6)
98092 } else if (family == AF_INET6) {
98093 - return pingv6_ops.ipv6_recv_error(sk, msg, len,
98094 + return pingv6_ops->ipv6_recv_error(sk, msg, len,
98095 addr_len);
98096 #endif
98097 }
98098 @@ -876,7 +885,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98099 sin->sin_port = 0 /* skb->h.uh->source */;
98100 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
98101 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
98102 - *addr_len = sizeof(*sin);
98103 }
98104
98105 if (isk->cmsg_flags)
98106 @@ -899,11 +907,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98107 sin6->sin6_scope_id =
98108 ipv6_iface_scope_id(&sin6->sin6_addr,
98109 IP6CB(skb)->iif);
98110 - *addr_len = sizeof(*sin6);
98111 }
98112
98113 if (inet6_sk(sk)->rxopt.all)
98114 - pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
98115 + pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
98116 #endif
98117 } else {
98118 BUG();
98119 @@ -1093,7 +1100,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
98120 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
98121 0, sock_i_ino(sp),
98122 atomic_read(&sp->sk_refcnt), sp,
98123 - atomic_read(&sp->sk_drops));
98124 + atomic_read_unchecked(&sp->sk_drops));
98125 }
98126
98127 static int ping_v4_seq_show(struct seq_file *seq, void *v)
98128 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
98129 index 23c3e5b..cdb8b36 100644
98130 --- a/net/ipv4/raw.c
98131 +++ b/net/ipv4/raw.c
98132 @@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
98133 int raw_rcv(struct sock *sk, struct sk_buff *skb)
98134 {
98135 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
98136 - atomic_inc(&sk->sk_drops);
98137 + atomic_inc_unchecked(&sk->sk_drops);
98138 kfree_skb(skb);
98139 return NET_RX_DROP;
98140 }
98141 @@ -696,6 +696,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98142 if (flags & MSG_OOB)
98143 goto out;
98144
98145 + if (addr_len)
98146 + *addr_len = sizeof(*sin);
98147 +
98148 if (flags & MSG_ERRQUEUE) {
98149 err = ip_recv_error(sk, msg, len, addr_len);
98150 goto out;
98151 @@ -723,7 +726,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98152 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
98153 sin->sin_port = 0;
98154 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
98155 - *addr_len = sizeof(*sin);
98156 }
98157 if (inet->cmsg_flags)
98158 ip_cmsg_recv(msg, skb);
98159 @@ -748,16 +750,20 @@ static int raw_init(struct sock *sk)
98160
98161 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
98162 {
98163 + struct icmp_filter filter;
98164 +
98165 if (optlen > sizeof(struct icmp_filter))
98166 optlen = sizeof(struct icmp_filter);
98167 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
98168 + if (copy_from_user(&filter, optval, optlen))
98169 return -EFAULT;
98170 + raw_sk(sk)->filter = filter;
98171 return 0;
98172 }
98173
98174 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
98175 {
98176 int len, ret = -EFAULT;
98177 + struct icmp_filter filter;
98178
98179 if (get_user(len, optlen))
98180 goto out;
98181 @@ -767,8 +773,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
98182 if (len > sizeof(struct icmp_filter))
98183 len = sizeof(struct icmp_filter);
98184 ret = -EFAULT;
98185 - if (put_user(len, optlen) ||
98186 - copy_to_user(optval, &raw_sk(sk)->filter, len))
98187 + filter = raw_sk(sk)->filter;
98188 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
98189 goto out;
98190 ret = 0;
98191 out: return ret;
98192 @@ -997,7 +1003,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
98193 0, 0L, 0,
98194 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
98195 0, sock_i_ino(sp),
98196 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
98197 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
98198 }
98199
98200 static int raw_seq_show(struct seq_file *seq, void *v)
98201 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
98202 index e611651f..0c17263 100644
98203 --- a/net/ipv4/route.c
98204 +++ b/net/ipv4/route.c
98205 @@ -2621,34 +2621,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
98206 .maxlen = sizeof(int),
98207 .mode = 0200,
98208 .proc_handler = ipv4_sysctl_rtcache_flush,
98209 + .extra1 = &init_net,
98210 },
98211 { },
98212 };
98213
98214 static __net_init int sysctl_route_net_init(struct net *net)
98215 {
98216 - struct ctl_table *tbl;
98217 + ctl_table_no_const *tbl = NULL;
98218
98219 - tbl = ipv4_route_flush_table;
98220 if (!net_eq(net, &init_net)) {
98221 - tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
98222 + tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
98223 if (tbl == NULL)
98224 goto err_dup;
98225
98226 /* Don't export sysctls to unprivileged users */
98227 if (net->user_ns != &init_user_ns)
98228 tbl[0].procname = NULL;
98229 - }
98230 - tbl[0].extra1 = net;
98231 + tbl[0].extra1 = net;
98232 + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
98233 + } else
98234 + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
98235
98236 - net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
98237 if (net->ipv4.route_hdr == NULL)
98238 goto err_reg;
98239 return 0;
98240
98241 err_reg:
98242 - if (tbl != ipv4_route_flush_table)
98243 - kfree(tbl);
98244 + kfree(tbl);
98245 err_dup:
98246 return -ENOMEM;
98247 }
98248 @@ -2671,8 +2671,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
98249
98250 static __net_init int rt_genid_init(struct net *net)
98251 {
98252 - atomic_set(&net->ipv4.rt_genid, 0);
98253 - atomic_set(&net->fnhe_genid, 0);
98254 + atomic_set_unchecked(&net->ipv4.rt_genid, 0);
98255 + atomic_set_unchecked(&net->fnhe_genid, 0);
98256 get_random_bytes(&net->ipv4.dev_addr_genid,
98257 sizeof(net->ipv4.dev_addr_genid));
98258 return 0;
98259 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
98260 index 3d69ec8..57207b4 100644
98261 --- a/net/ipv4/sysctl_net_ipv4.c
98262 +++ b/net/ipv4/sysctl_net_ipv4.c
98263 @@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
98264 container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
98265 int ret;
98266 int range[2];
98267 - struct ctl_table tmp = {
98268 + ctl_table_no_const tmp = {
98269 .data = &range,
98270 .maxlen = sizeof(range),
98271 .mode = table->mode,
98272 @@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
98273 int ret;
98274 gid_t urange[2];
98275 kgid_t low, high;
98276 - struct ctl_table tmp = {
98277 + ctl_table_no_const tmp = {
98278 .data = &urange,
98279 .maxlen = sizeof(urange),
98280 .mode = table->mode,
98281 @@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
98282 void __user *buffer, size_t *lenp, loff_t *ppos)
98283 {
98284 char val[TCP_CA_NAME_MAX];
98285 - struct ctl_table tbl = {
98286 + ctl_table_no_const tbl = {
98287 .data = val,
98288 .maxlen = TCP_CA_NAME_MAX,
98289 };
98290 @@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
98291 void __user *buffer, size_t *lenp,
98292 loff_t *ppos)
98293 {
98294 - struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
98295 + ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
98296 int ret;
98297
98298 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
98299 @@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
98300 void __user *buffer, size_t *lenp,
98301 loff_t *ppos)
98302 {
98303 - struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
98304 + ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
98305 int ret;
98306
98307 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
98308 @@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
98309 void __user *buffer, size_t *lenp,
98310 loff_t *ppos)
98311 {
98312 - struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
98313 + ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
98314 struct tcp_fastopen_context *ctxt;
98315 int ret;
98316 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
98317 @@ -445,7 +445,7 @@ static struct ctl_table ipv4_table[] = {
98318 },
98319 {
98320 .procname = "ip_local_reserved_ports",
98321 - .data = NULL, /* initialized in sysctl_ipv4_init */
98322 + .data = sysctl_local_reserved_ports,
98323 .maxlen = 65536,
98324 .mode = 0644,
98325 .proc_handler = proc_do_large_bitmap,
98326 @@ -827,13 +827,12 @@ static struct ctl_table ipv4_net_table[] = {
98327
98328 static __net_init int ipv4_sysctl_init_net(struct net *net)
98329 {
98330 - struct ctl_table *table;
98331 + ctl_table_no_const *table = NULL;
98332
98333 - table = ipv4_net_table;
98334 if (!net_eq(net, &init_net)) {
98335 int i;
98336
98337 - table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
98338 + table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
98339 if (table == NULL)
98340 goto err_alloc;
98341
98342 @@ -856,15 +855,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
98343 net->ipv4.sysctl_local_ports.range[0] = 32768;
98344 net->ipv4.sysctl_local_ports.range[1] = 61000;
98345
98346 - net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
98347 + if (!net_eq(net, &init_net))
98348 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
98349 + else
98350 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
98351 if (net->ipv4.ipv4_hdr == NULL)
98352 goto err_reg;
98353
98354 return 0;
98355
98356 err_reg:
98357 - if (!net_eq(net, &init_net))
98358 - kfree(table);
98359 + kfree(table);
98360 err_alloc:
98361 return -ENOMEM;
98362 }
98363 @@ -886,16 +887,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
98364 static __init int sysctl_ipv4_init(void)
98365 {
98366 struct ctl_table_header *hdr;
98367 - struct ctl_table *i;
98368 -
98369 - for (i = ipv4_table; i->procname; i++) {
98370 - if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
98371 - i->data = sysctl_local_reserved_ports;
98372 - break;
98373 - }
98374 - }
98375 - if (!i->procname)
98376 - return -EINVAL;
98377
98378 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
98379 if (hdr == NULL)
98380 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
98381 index c53b7f3..a89aadd 100644
98382 --- a/net/ipv4/tcp_input.c
98383 +++ b/net/ipv4/tcp_input.c
98384 @@ -759,7 +759,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
98385 * without any lock. We want to make sure compiler wont store
98386 * intermediate values in this location.
98387 */
98388 - ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
98389 + ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
98390 sk->sk_max_pacing_rate);
98391 }
98392
98393 @@ -4482,7 +4482,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
98394 * simplifies code)
98395 */
98396 static void
98397 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
98398 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
98399 struct sk_buff *head, struct sk_buff *tail,
98400 u32 start, u32 end)
98401 {
98402 @@ -5559,6 +5559,7 @@ discard:
98403 tcp_paws_reject(&tp->rx_opt, 0))
98404 goto discard_and_undo;
98405
98406 +#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
98407 if (th->syn) {
98408 /* We see SYN without ACK. It is attempt of
98409 * simultaneous connect with crossed SYNs.
98410 @@ -5609,6 +5610,7 @@ discard:
98411 goto discard;
98412 #endif
98413 }
98414 +#endif
98415 /* "fifth, if neither of the SYN or RST bits is set then
98416 * drop the segment and return."
98417 */
98418 @@ -5655,7 +5657,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
98419 goto discard;
98420
98421 if (th->syn) {
98422 - if (th->fin)
98423 + if (th->fin || th->urg || th->psh)
98424 goto discard;
98425 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
98426 return 1;
98427 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
98428 index 0672139..cacc17d 100644
98429 --- a/net/ipv4/tcp_ipv4.c
98430 +++ b/net/ipv4/tcp_ipv4.c
98431 @@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
98432 EXPORT_SYMBOL(sysctl_tcp_low_latency);
98433
98434
98435 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98436 +extern int grsec_enable_blackhole;
98437 +#endif
98438 +
98439 #ifdef CONFIG_TCP_MD5SIG
98440 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
98441 __be32 daddr, __be32 saddr, const struct tcphdr *th);
98442 @@ -1830,6 +1834,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
98443 return 0;
98444
98445 reset:
98446 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98447 + if (!grsec_enable_blackhole)
98448 +#endif
98449 tcp_v4_send_reset(rsk, skb);
98450 discard:
98451 kfree_skb(skb);
98452 @@ -1975,12 +1982,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
98453 TCP_SKB_CB(skb)->sacked = 0;
98454
98455 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
98456 - if (!sk)
98457 + if (!sk) {
98458 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98459 + ret = 1;
98460 +#endif
98461 goto no_tcp_socket;
98462 -
98463 + }
98464 process:
98465 - if (sk->sk_state == TCP_TIME_WAIT)
98466 + if (sk->sk_state == TCP_TIME_WAIT) {
98467 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98468 + ret = 2;
98469 +#endif
98470 goto do_time_wait;
98471 + }
98472
98473 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
98474 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
98475 @@ -2034,6 +2048,10 @@ csum_error:
98476 bad_packet:
98477 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
98478 } else {
98479 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98480 + if (!grsec_enable_blackhole || (ret == 1 &&
98481 + (skb->dev->flags & IFF_LOOPBACK)))
98482 +#endif
98483 tcp_v4_send_reset(NULL, skb);
98484 }
98485
98486 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
98487 index 97b6841..0893357 100644
98488 --- a/net/ipv4/tcp_minisocks.c
98489 +++ b/net/ipv4/tcp_minisocks.c
98490 @@ -27,6 +27,10 @@
98491 #include <net/inet_common.h>
98492 #include <net/xfrm.h>
98493
98494 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98495 +extern int grsec_enable_blackhole;
98496 +#endif
98497 +
98498 int sysctl_tcp_syncookies __read_mostly = 1;
98499 EXPORT_SYMBOL(sysctl_tcp_syncookies);
98500
98501 @@ -708,7 +712,10 @@ embryonic_reset:
98502 * avoid becoming vulnerable to outside attack aiming at
98503 * resetting legit local connections.
98504 */
98505 - req->rsk_ops->send_reset(sk, skb);
98506 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98507 + if (!grsec_enable_blackhole)
98508 +#endif
98509 + req->rsk_ops->send_reset(sk, skb);
98510 } else if (fastopen) { /* received a valid RST pkt */
98511 reqsk_fastopen_remove(sk, req, true);
98512 tcp_reset(sk);
98513 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
98514 index 8b97d71..9d7ccf5 100644
98515 --- a/net/ipv4/tcp_probe.c
98516 +++ b/net/ipv4/tcp_probe.c
98517 @@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
98518 if (cnt + width >= len)
98519 break;
98520
98521 - if (copy_to_user(buf + cnt, tbuf, width))
98522 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
98523 return -EFAULT;
98524 cnt += width;
98525 }
98526 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
98527 index 64f0354..a81b39d 100644
98528 --- a/net/ipv4/tcp_timer.c
98529 +++ b/net/ipv4/tcp_timer.c
98530 @@ -22,6 +22,10 @@
98531 #include <linux/gfp.h>
98532 #include <net/tcp.h>
98533
98534 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98535 +extern int grsec_lastack_retries;
98536 +#endif
98537 +
98538 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
98539 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
98540 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
98541 @@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock *sk)
98542 }
98543 }
98544
98545 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98546 + if ((sk->sk_state == TCP_LAST_ACK) &&
98547 + (grsec_lastack_retries > 0) &&
98548 + (grsec_lastack_retries < retry_until))
98549 + retry_until = grsec_lastack_retries;
98550 +#endif
98551 +
98552 if (retransmits_timed_out(sk, retry_until,
98553 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
98554 /* Has it gone just too far? */
98555 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
98556 index a7e4729..2758946 100644
98557 --- a/net/ipv4/udp.c
98558 +++ b/net/ipv4/udp.c
98559 @@ -87,6 +87,7 @@
98560 #include <linux/types.h>
98561 #include <linux/fcntl.h>
98562 #include <linux/module.h>
98563 +#include <linux/security.h>
98564 #include <linux/socket.h>
98565 #include <linux/sockios.h>
98566 #include <linux/igmp.h>
98567 @@ -113,6 +114,10 @@
98568 #include <net/busy_poll.h>
98569 #include "udp_impl.h"
98570
98571 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98572 +extern int grsec_enable_blackhole;
98573 +#endif
98574 +
98575 struct udp_table udp_table __read_mostly;
98576 EXPORT_SYMBOL(udp_table);
98577
98578 @@ -615,6 +620,9 @@ found:
98579 return s;
98580 }
98581
98582 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
98583 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
98584 +
98585 /*
98586 * This routine is called by the ICMP module when it gets some
98587 * sort of error condition. If err < 0 then the socket should
98588 @@ -914,9 +922,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98589 dport = usin->sin_port;
98590 if (dport == 0)
98591 return -EINVAL;
98592 +
98593 + err = gr_search_udp_sendmsg(sk, usin);
98594 + if (err)
98595 + return err;
98596 } else {
98597 if (sk->sk_state != TCP_ESTABLISHED)
98598 return -EDESTADDRREQ;
98599 +
98600 + err = gr_search_udp_sendmsg(sk, NULL);
98601 + if (err)
98602 + return err;
98603 +
98604 daddr = inet->inet_daddr;
98605 dport = inet->inet_dport;
98606 /* Open fast path for connected socket.
98607 @@ -1163,7 +1180,7 @@ static unsigned int first_packet_length(struct sock *sk)
98608 IS_UDPLITE(sk));
98609 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
98610 IS_UDPLITE(sk));
98611 - atomic_inc(&sk->sk_drops);
98612 + atomic_inc_unchecked(&sk->sk_drops);
98613 __skb_unlink(skb, rcvq);
98614 __skb_queue_tail(&list_kill, skb);
98615 }
98616 @@ -1234,6 +1251,12 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98617 int is_udplite = IS_UDPLITE(sk);
98618 bool slow;
98619
98620 + /*
98621 + * Check any passed addresses
98622 + */
98623 + if (addr_len)
98624 + *addr_len = sizeof(*sin);
98625 +
98626 if (flags & MSG_ERRQUEUE)
98627 return ip_recv_error(sk, msg, len, addr_len);
98628
98629 @@ -1243,6 +1266,10 @@ try_again:
98630 if (!skb)
98631 goto out;
98632
98633 + err = gr_search_udp_recvmsg(sk, skb);
98634 + if (err)
98635 + goto out_free;
98636 +
98637 ulen = skb->len - sizeof(struct udphdr);
98638 copied = len;
98639 if (copied > ulen)
98640 @@ -1276,7 +1303,7 @@ try_again:
98641 if (unlikely(err)) {
98642 trace_kfree_skb(skb, udp_recvmsg);
98643 if (!peeked) {
98644 - atomic_inc(&sk->sk_drops);
98645 + atomic_inc_unchecked(&sk->sk_drops);
98646 UDP_INC_STATS_USER(sock_net(sk),
98647 UDP_MIB_INERRORS, is_udplite);
98648 }
98649 @@ -1295,7 +1322,6 @@ try_again:
98650 sin->sin_port = udp_hdr(skb)->source;
98651 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
98652 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
98653 - *addr_len = sizeof(*sin);
98654 }
98655 if (inet->cmsg_flags)
98656 ip_cmsg_recv(msg, skb);
98657 @@ -1566,7 +1592,7 @@ csum_error:
98658 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
98659 drop:
98660 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
98661 - atomic_inc(&sk->sk_drops);
98662 + atomic_inc_unchecked(&sk->sk_drops);
98663 kfree_skb(skb);
98664 return -1;
98665 }
98666 @@ -1585,7 +1611,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
98667 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
98668
98669 if (!skb1) {
98670 - atomic_inc(&sk->sk_drops);
98671 + atomic_inc_unchecked(&sk->sk_drops);
98672 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
98673 IS_UDPLITE(sk));
98674 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
98675 @@ -1786,6 +1812,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
98676 goto csum_error;
98677
98678 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
98679 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98680 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
98681 +#endif
98682 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
98683
98684 /*
98685 @@ -2350,7 +2379,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
98686 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
98687 0, sock_i_ino(sp),
98688 atomic_read(&sp->sk_refcnt), sp,
98689 - atomic_read(&sp->sk_drops));
98690 + atomic_read_unchecked(&sp->sk_drops));
98691 }
98692
98693 int udp4_seq_show(struct seq_file *seq, void *v)
98694 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
98695 index e1a6393..f634ce5 100644
98696 --- a/net/ipv4/xfrm4_policy.c
98697 +++ b/net/ipv4/xfrm4_policy.c
98698 @@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
98699 fl4->flowi4_tos = iph->tos;
98700 }
98701
98702 -static inline int xfrm4_garbage_collect(struct dst_ops *ops)
98703 +static int xfrm4_garbage_collect(struct dst_ops *ops)
98704 {
98705 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
98706
98707 - xfrm4_policy_afinfo.garbage_collect(net);
98708 + xfrm_garbage_collect_deferred(net);
98709 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
98710 }
98711
98712 @@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
98713
98714 static int __net_init xfrm4_net_init(struct net *net)
98715 {
98716 - struct ctl_table *table;
98717 + ctl_table_no_const *table = NULL;
98718 struct ctl_table_header *hdr;
98719
98720 - table = xfrm4_policy_table;
98721 if (!net_eq(net, &init_net)) {
98722 - table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
98723 + table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
98724 if (!table)
98725 goto err_alloc;
98726
98727 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
98728 - }
98729 -
98730 - hdr = register_net_sysctl(net, "net/ipv4", table);
98731 + hdr = register_net_sysctl(net, "net/ipv4", table);
98732 + } else
98733 + hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
98734 if (!hdr)
98735 goto err_reg;
98736
98737 @@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
98738 return 0;
98739
98740 err_reg:
98741 - if (!net_eq(net, &init_net))
98742 - kfree(table);
98743 + kfree(table);
98744 err_alloc:
98745 return -ENOMEM;
98746 }
98747 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
98748 index 9c05d77..9cfa714 100644
98749 --- a/net/ipv6/addrconf.c
98750 +++ b/net/ipv6/addrconf.c
98751 @@ -589,7 +589,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
98752 idx = 0;
98753 head = &net->dev_index_head[h];
98754 rcu_read_lock();
98755 - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
98756 + cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
98757 net->dev_base_seq;
98758 hlist_for_each_entry_rcu(dev, head, index_hlist) {
98759 if (idx < s_idx)
98760 @@ -2334,7 +2334,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
98761 p.iph.ihl = 5;
98762 p.iph.protocol = IPPROTO_IPV6;
98763 p.iph.ttl = 64;
98764 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
98765 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
98766
98767 if (ops->ndo_do_ioctl) {
98768 mm_segment_t oldfs = get_fs();
98769 @@ -3964,7 +3964,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
98770 s_ip_idx = ip_idx = cb->args[2];
98771
98772 rcu_read_lock();
98773 - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
98774 + cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
98775 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
98776 idx = 0;
98777 head = &net->dev_index_head[h];
98778 @@ -4571,7 +4571,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
98779 dst_free(&ifp->rt->dst);
98780 break;
98781 }
98782 - atomic_inc(&net->ipv6.dev_addr_genid);
98783 + atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
98784 rt_genid_bump_ipv6(net);
98785 }
98786
98787 @@ -4592,7 +4592,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
98788 int *valp = ctl->data;
98789 int val = *valp;
98790 loff_t pos = *ppos;
98791 - struct ctl_table lctl;
98792 + ctl_table_no_const lctl;
98793 int ret;
98794
98795 /*
98796 @@ -4677,7 +4677,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
98797 int *valp = ctl->data;
98798 int val = *valp;
98799 loff_t pos = *ppos;
98800 - struct ctl_table lctl;
98801 + ctl_table_no_const lctl;
98802 int ret;
98803
98804 /*
98805 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
98806 index 4fbdb70..f6411f2 100644
98807 --- a/net/ipv6/af_inet6.c
98808 +++ b/net/ipv6/af_inet6.c
98809 @@ -776,7 +776,7 @@ static int __net_init inet6_net_init(struct net *net)
98810
98811 net->ipv6.sysctl.bindv6only = 0;
98812 net->ipv6.sysctl.icmpv6_time = 1*HZ;
98813 - atomic_set(&net->ipv6.rt_genid, 0);
98814 + atomic_set_unchecked(&net->ipv6.rt_genid, 0);
98815
98816 err = ipv6_init_mibs(net);
98817 if (err)
98818 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
98819 index 93b1aa3..e902855 100644
98820 --- a/net/ipv6/datagram.c
98821 +++ b/net/ipv6/datagram.c
98822 @@ -906,5 +906,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
98823 0,
98824 sock_i_ino(sp),
98825 atomic_read(&sp->sk_refcnt), sp,
98826 - atomic_read(&sp->sk_drops));
98827 + atomic_read_unchecked(&sp->sk_drops));
98828 }
98829 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
98830 index eef8d94..cfa1852 100644
98831 --- a/net/ipv6/icmp.c
98832 +++ b/net/ipv6/icmp.c
98833 @@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_template[] = {
98834
98835 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
98836 {
98837 - struct ctl_table *table;
98838 + ctl_table_no_const *table;
98839
98840 table = kmemdup(ipv6_icmp_table_template,
98841 sizeof(ipv6_icmp_table_template),
98842 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
98843 index 8acb286..840dd06 100644
98844 --- a/net/ipv6/ip6_gre.c
98845 +++ b/net/ipv6/ip6_gre.c
98846 @@ -74,7 +74,7 @@ struct ip6gre_net {
98847 struct net_device *fb_tunnel_dev;
98848 };
98849
98850 -static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
98851 +static struct rtnl_link_ops ip6gre_link_ops;
98852 static int ip6gre_tunnel_init(struct net_device *dev);
98853 static void ip6gre_tunnel_setup(struct net_device *dev);
98854 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
98855 @@ -1294,7 +1294,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
98856 }
98857
98858
98859 -static struct inet6_protocol ip6gre_protocol __read_mostly = {
98860 +static struct inet6_protocol ip6gre_protocol = {
98861 .handler = ip6gre_rcv,
98862 .err_handler = ip6gre_err,
98863 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
98864 @@ -1637,7 +1637,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
98865 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
98866 };
98867
98868 -static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
98869 +static struct rtnl_link_ops ip6gre_link_ops = {
98870 .kind = "ip6gre",
98871 .maxtype = IFLA_GRE_MAX,
98872 .policy = ip6gre_policy,
98873 @@ -1650,7 +1650,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
98874 .fill_info = ip6gre_fill_info,
98875 };
98876
98877 -static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
98878 +static struct rtnl_link_ops ip6gre_tap_ops = {
98879 .kind = "ip6gretap",
98880 .maxtype = IFLA_GRE_MAX,
98881 .policy = ip6gre_policy,
98882 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
98883 index 7881965..9cf62c4 100644
98884 --- a/net/ipv6/ip6_tunnel.c
98885 +++ b/net/ipv6/ip6_tunnel.c
98886 @@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
98887
98888 static int ip6_tnl_dev_init(struct net_device *dev);
98889 static void ip6_tnl_dev_setup(struct net_device *dev);
98890 -static struct rtnl_link_ops ip6_link_ops __read_mostly;
98891 +static struct rtnl_link_ops ip6_link_ops;
98892
98893 static int ip6_tnl_net_id __read_mostly;
98894 struct ip6_tnl_net {
98895 @@ -1717,7 +1717,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
98896 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
98897 };
98898
98899 -static struct rtnl_link_ops ip6_link_ops __read_mostly = {
98900 +static struct rtnl_link_ops ip6_link_ops = {
98901 .kind = "ip6tnl",
98902 .maxtype = IFLA_IPTUN_MAX,
98903 .policy = ip6_tnl_policy,
98904 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
98905 index 7b42d5e..1eff693 100644
98906 --- a/net/ipv6/ip6_vti.c
98907 +++ b/net/ipv6/ip6_vti.c
98908 @@ -63,7 +63,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
98909
98910 static int vti6_dev_init(struct net_device *dev);
98911 static void vti6_dev_setup(struct net_device *dev);
98912 -static struct rtnl_link_ops vti6_link_ops __read_mostly;
98913 +static struct rtnl_link_ops vti6_link_ops;
98914
98915 static int vti6_net_id __read_mostly;
98916 struct vti6_net {
98917 @@ -902,7 +902,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
98918 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
98919 };
98920
98921 -static struct rtnl_link_ops vti6_link_ops __read_mostly = {
98922 +static struct rtnl_link_ops vti6_link_ops = {
98923 .kind = "vti6",
98924 .maxtype = IFLA_VTI_MAX,
98925 .policy = vti6_policy,
98926 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
98927 index 1c6ce31..299e566 100644
98928 --- a/net/ipv6/ipv6_sockglue.c
98929 +++ b/net/ipv6/ipv6_sockglue.c
98930 @@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
98931 if (sk->sk_type != SOCK_STREAM)
98932 return -ENOPROTOOPT;
98933
98934 - msg.msg_control = optval;
98935 + msg.msg_control = (void __force_kernel *)optval;
98936 msg.msg_controllen = len;
98937 msg.msg_flags = flags;
98938
98939 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
98940 index 710238f..0fd1816 100644
98941 --- a/net/ipv6/netfilter/ip6_tables.c
98942 +++ b/net/ipv6/netfilter/ip6_tables.c
98943 @@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
98944 #endif
98945
98946 static int get_info(struct net *net, void __user *user,
98947 - const int *len, int compat)
98948 + int len, int compat)
98949 {
98950 char name[XT_TABLE_MAXNAMELEN];
98951 struct xt_table *t;
98952 int ret;
98953
98954 - if (*len != sizeof(struct ip6t_getinfo)) {
98955 - duprintf("length %u != %zu\n", *len,
98956 + if (len != sizeof(struct ip6t_getinfo)) {
98957 + duprintf("length %u != %zu\n", len,
98958 sizeof(struct ip6t_getinfo));
98959 return -EINVAL;
98960 }
98961 @@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
98962 info.size = private->size;
98963 strcpy(info.name, name);
98964
98965 - if (copy_to_user(user, &info, *len) != 0)
98966 + if (copy_to_user(user, &info, len) != 0)
98967 ret = -EFAULT;
98968 else
98969 ret = 0;
98970 @@ -1981,7 +1981,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
98971
98972 switch (cmd) {
98973 case IP6T_SO_GET_INFO:
98974 - ret = get_info(sock_net(sk), user, len, 1);
98975 + ret = get_info(sock_net(sk), user, *len, 1);
98976 break;
98977 case IP6T_SO_GET_ENTRIES:
98978 ret = compat_get_entries(sock_net(sk), user, len);
98979 @@ -2028,7 +2028,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
98980
98981 switch (cmd) {
98982 case IP6T_SO_GET_INFO:
98983 - ret = get_info(sock_net(sk), user, len, 0);
98984 + ret = get_info(sock_net(sk), user, *len, 0);
98985 break;
98986
98987 case IP6T_SO_GET_ENTRIES:
98988 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
98989 index 767ab8d..c5ec70a 100644
98990 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
98991 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
98992 @@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
98993
98994 static int nf_ct_frag6_sysctl_register(struct net *net)
98995 {
98996 - struct ctl_table *table;
98997 + ctl_table_no_const *table = NULL;
98998 struct ctl_table_header *hdr;
98999
99000 - table = nf_ct_frag6_sysctl_table;
99001 if (!net_eq(net, &init_net)) {
99002 - table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
99003 + table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
99004 GFP_KERNEL);
99005 if (table == NULL)
99006 goto err_alloc;
99007 @@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
99008 table[0].data = &net->nf_frag.frags.timeout;
99009 table[1].data = &net->nf_frag.frags.low_thresh;
99010 table[2].data = &net->nf_frag.frags.high_thresh;
99011 - }
99012 -
99013 - hdr = register_net_sysctl(net, "net/netfilter", table);
99014 + hdr = register_net_sysctl(net, "net/netfilter", table);
99015 + } else
99016 + hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
99017 if (hdr == NULL)
99018 goto err_reg;
99019
99020 @@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
99021 return 0;
99022
99023 err_reg:
99024 - if (!net_eq(net, &init_net))
99025 - kfree(table);
99026 + kfree(table);
99027 err_alloc:
99028 return -ENOMEM;
99029 }
99030 diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
99031 index 827f795..7e28e82 100644
99032 --- a/net/ipv6/output_core.c
99033 +++ b/net/ipv6/output_core.c
99034 @@ -9,8 +9,8 @@
99035
99036 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
99037 {
99038 - static atomic_t ipv6_fragmentation_id;
99039 - int old, new;
99040 + static atomic_unchecked_t ipv6_fragmentation_id;
99041 + int id;
99042
99043 #if IS_ENABLED(CONFIG_IPV6)
99044 if (rt && !(rt->dst.flags & DST_NOPEER)) {
99045 @@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
99046 }
99047 }
99048 #endif
99049 - do {
99050 - old = atomic_read(&ipv6_fragmentation_id);
99051 - new = old + 1;
99052 - if (!new)
99053 - new = 1;
99054 - } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
99055 - fhdr->identification = htonl(new);
99056 + id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
99057 + if (!id)
99058 + id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
99059 + fhdr->identification = htonl(id);
99060 }
99061 EXPORT_SYMBOL(ipv6_select_ident);
99062
99063 diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
99064 index a83243c..a1ca589 100644
99065 --- a/net/ipv6/ping.c
99066 +++ b/net/ipv6/ping.c
99067 @@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_net_ops = {
99068 };
99069 #endif
99070
99071 +static struct pingv6_ops real_pingv6_ops = {
99072 + .ipv6_recv_error = ipv6_recv_error,
99073 + .ip6_datagram_recv_ctl = ip6_datagram_recv_ctl,
99074 + .icmpv6_err_convert = icmpv6_err_convert,
99075 + .ipv6_icmp_error = ipv6_icmp_error,
99076 + .ipv6_chk_addr = ipv6_chk_addr,
99077 +};
99078 +
99079 +static struct pingv6_ops dummy_pingv6_ops = {
99080 + .ipv6_recv_error = dummy_ipv6_recv_error,
99081 + .ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl,
99082 + .icmpv6_err_convert = dummy_icmpv6_err_convert,
99083 + .ipv6_icmp_error = dummy_ipv6_icmp_error,
99084 + .ipv6_chk_addr = dummy_ipv6_chk_addr,
99085 +};
99086 +
99087 int __init pingv6_init(void)
99088 {
99089 #ifdef CONFIG_PROC_FS
99090 @@ -253,11 +269,7 @@ int __init pingv6_init(void)
99091 if (ret)
99092 return ret;
99093 #endif
99094 - pingv6_ops.ipv6_recv_error = ipv6_recv_error;
99095 - pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
99096 - pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
99097 - pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
99098 - pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
99099 + pingv6_ops = &real_pingv6_ops;
99100 return inet6_register_protosw(&pingv6_protosw);
99101 }
99102
99103 @@ -266,11 +278,7 @@ int __init pingv6_init(void)
99104 */
99105 void pingv6_exit(void)
99106 {
99107 - pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
99108 - pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
99109 - pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
99110 - pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
99111 - pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
99112 + pingv6_ops = &dummy_pingv6_ops;
99113 #ifdef CONFIG_PROC_FS
99114 unregister_pernet_subsys(&ping_v6_net_ops);
99115 #endif
99116 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
99117 index b6bb87e..06cc9ed 100644
99118 --- a/net/ipv6/raw.c
99119 +++ b/net/ipv6/raw.c
99120 @@ -384,7 +384,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
99121 {
99122 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
99123 skb_checksum_complete(skb)) {
99124 - atomic_inc(&sk->sk_drops);
99125 + atomic_inc_unchecked(&sk->sk_drops);
99126 kfree_skb(skb);
99127 return NET_RX_DROP;
99128 }
99129 @@ -412,7 +412,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
99130 struct raw6_sock *rp = raw6_sk(sk);
99131
99132 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
99133 - atomic_inc(&sk->sk_drops);
99134 + atomic_inc_unchecked(&sk->sk_drops);
99135 kfree_skb(skb);
99136 return NET_RX_DROP;
99137 }
99138 @@ -436,7 +436,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
99139
99140 if (inet->hdrincl) {
99141 if (skb_checksum_complete(skb)) {
99142 - atomic_inc(&sk->sk_drops);
99143 + atomic_inc_unchecked(&sk->sk_drops);
99144 kfree_skb(skb);
99145 return NET_RX_DROP;
99146 }
99147 @@ -465,6 +465,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
99148 if (flags & MSG_OOB)
99149 return -EOPNOTSUPP;
99150
99151 + if (addr_len)
99152 + *addr_len=sizeof(*sin6);
99153 +
99154 if (flags & MSG_ERRQUEUE)
99155 return ipv6_recv_error(sk, msg, len, addr_len);
99156
99157 @@ -503,7 +506,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
99158 sin6->sin6_flowinfo = 0;
99159 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
99160 IP6CB(skb)->iif);
99161 - *addr_len = sizeof(*sin6);
99162 }
99163
99164 sock_recv_ts_and_drops(msg, sk, skb);
99165 @@ -606,7 +608,7 @@ out:
99166 return err;
99167 }
99168
99169 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
99170 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
99171 struct flowi6 *fl6, struct dst_entry **dstp,
99172 unsigned int flags)
99173 {
99174 @@ -918,12 +920,15 @@ do_confirm:
99175 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
99176 char __user *optval, int optlen)
99177 {
99178 + struct icmp6_filter filter;
99179 +
99180 switch (optname) {
99181 case ICMPV6_FILTER:
99182 if (optlen > sizeof(struct icmp6_filter))
99183 optlen = sizeof(struct icmp6_filter);
99184 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
99185 + if (copy_from_user(&filter, optval, optlen))
99186 return -EFAULT;
99187 + raw6_sk(sk)->filter = filter;
99188 return 0;
99189 default:
99190 return -ENOPROTOOPT;
99191 @@ -936,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
99192 char __user *optval, int __user *optlen)
99193 {
99194 int len;
99195 + struct icmp6_filter filter;
99196
99197 switch (optname) {
99198 case ICMPV6_FILTER:
99199 @@ -947,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
99200 len = sizeof(struct icmp6_filter);
99201 if (put_user(len, optlen))
99202 return -EFAULT;
99203 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
99204 + filter = raw6_sk(sk)->filter;
99205 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
99206 return -EFAULT;
99207 return 0;
99208 default:
99209 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
99210 index cc85a9b..526a133 100644
99211 --- a/net/ipv6/reassembly.c
99212 +++ b/net/ipv6/reassembly.c
99213 @@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
99214
99215 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
99216 {
99217 - struct ctl_table *table;
99218 + ctl_table_no_const *table = NULL;
99219 struct ctl_table_header *hdr;
99220
99221 - table = ip6_frags_ns_ctl_table;
99222 if (!net_eq(net, &init_net)) {
99223 - table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
99224 + table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
99225 if (table == NULL)
99226 goto err_alloc;
99227
99228 @@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
99229 /* Don't export sysctls to unprivileged users */
99230 if (net->user_ns != &init_user_ns)
99231 table[0].procname = NULL;
99232 - }
99233 + hdr = register_net_sysctl(net, "net/ipv6", table);
99234 + } else
99235 + hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
99236
99237 - hdr = register_net_sysctl(net, "net/ipv6", table);
99238 if (hdr == NULL)
99239 goto err_reg;
99240
99241 @@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
99242 return 0;
99243
99244 err_reg:
99245 - if (!net_eq(net, &init_net))
99246 - kfree(table);
99247 + kfree(table);
99248 err_alloc:
99249 return -ENOMEM;
99250 }
99251 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
99252 index 4b4944c..d346b14 100644
99253 --- a/net/ipv6/route.c
99254 +++ b/net/ipv6/route.c
99255 @@ -1495,7 +1495,7 @@ int ip6_route_add(struct fib6_config *cfg)
99256 if (!table)
99257 goto out;
99258
99259 - rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
99260 + rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
99261
99262 if (!rt) {
99263 err = -ENOMEM;
99264 @@ -2954,7 +2954,7 @@ struct ctl_table ipv6_route_table_template[] = {
99265
99266 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
99267 {
99268 - struct ctl_table *table;
99269 + ctl_table_no_const *table;
99270
99271 table = kmemdup(ipv6_route_table_template,
99272 sizeof(ipv6_route_table_template),
99273 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
99274 index d3005b3..b36df4a 100644
99275 --- a/net/ipv6/sit.c
99276 +++ b/net/ipv6/sit.c
99277 @@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
99278 static void ipip6_dev_free(struct net_device *dev);
99279 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
99280 __be32 *v4dst);
99281 -static struct rtnl_link_ops sit_link_ops __read_mostly;
99282 +static struct rtnl_link_ops sit_link_ops;
99283
99284 static int sit_net_id __read_mostly;
99285 struct sit_net {
99286 @@ -1664,7 +1664,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
99287 unregister_netdevice_queue(dev, head);
99288 }
99289
99290 -static struct rtnl_link_ops sit_link_ops __read_mostly = {
99291 +static struct rtnl_link_ops sit_link_ops = {
99292 .kind = "sit",
99293 .maxtype = IFLA_IPTUN_MAX,
99294 .policy = ipip6_policy,
99295 diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
99296 index 107b2f1..72741a9 100644
99297 --- a/net/ipv6/sysctl_net_ipv6.c
99298 +++ b/net/ipv6/sysctl_net_ipv6.c
99299 @@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] = {
99300
99301 static int __net_init ipv6_sysctl_net_init(struct net *net)
99302 {
99303 - struct ctl_table *ipv6_table;
99304 + ctl_table_no_const *ipv6_table;
99305 struct ctl_table *ipv6_route_table;
99306 struct ctl_table *ipv6_icmp_table;
99307 int err;
99308 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
99309 index f67033b..6f974fc 100644
99310 --- a/net/ipv6/tcp_ipv6.c
99311 +++ b/net/ipv6/tcp_ipv6.c
99312 @@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
99313 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
99314 }
99315
99316 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99317 +extern int grsec_enable_blackhole;
99318 +#endif
99319 +
99320 static void tcp_v6_hash(struct sock *sk)
99321 {
99322 if (sk->sk_state != TCP_CLOSE) {
99323 @@ -1397,6 +1401,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
99324 return 0;
99325
99326 reset:
99327 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99328 + if (!grsec_enable_blackhole)
99329 +#endif
99330 tcp_v6_send_reset(sk, skb);
99331 discard:
99332 if (opt_skb)
99333 @@ -1479,12 +1486,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
99334 TCP_SKB_CB(skb)->sacked = 0;
99335
99336 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
99337 - if (!sk)
99338 + if (!sk) {
99339 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99340 + ret = 1;
99341 +#endif
99342 goto no_tcp_socket;
99343 + }
99344
99345 process:
99346 - if (sk->sk_state == TCP_TIME_WAIT)
99347 + if (sk->sk_state == TCP_TIME_WAIT) {
99348 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99349 + ret = 2;
99350 +#endif
99351 goto do_time_wait;
99352 + }
99353
99354 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
99355 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
99356 @@ -1536,6 +1551,10 @@ csum_error:
99357 bad_packet:
99358 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
99359 } else {
99360 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99361 + if (!grsec_enable_blackhole || (ret == 1 &&
99362 + (skb->dev->flags & IFF_LOOPBACK)))
99363 +#endif
99364 tcp_v6_send_reset(NULL, skb);
99365 }
99366
99367 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
99368 index 089c741..cfee117 100644
99369 --- a/net/ipv6/udp.c
99370 +++ b/net/ipv6/udp.c
99371 @@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
99372 udp_ipv6_hash_secret + net_hash_mix(net));
99373 }
99374
99375 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99376 +extern int grsec_enable_blackhole;
99377 +#endif
99378 +
99379 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
99380 {
99381 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
99382 @@ -392,6 +396,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
99383 int is_udp4;
99384 bool slow;
99385
99386 + if (addr_len)
99387 + *addr_len = sizeof(struct sockaddr_in6);
99388 +
99389 if (flags & MSG_ERRQUEUE)
99390 return ipv6_recv_error(sk, msg, len, addr_len);
99391
99392 @@ -435,7 +442,7 @@ try_again:
99393 if (unlikely(err)) {
99394 trace_kfree_skb(skb, udpv6_recvmsg);
99395 if (!peeked) {
99396 - atomic_inc(&sk->sk_drops);
99397 + atomic_inc_unchecked(&sk->sk_drops);
99398 if (is_udp4)
99399 UDP_INC_STATS_USER(sock_net(sk),
99400 UDP_MIB_INERRORS,
99401 @@ -477,7 +484,7 @@ try_again:
99402 ipv6_iface_scope_id(&sin6->sin6_addr,
99403 IP6CB(skb)->iif);
99404 }
99405 - *addr_len = sizeof(*sin6);
99406 +
99407 }
99408 if (is_udp4) {
99409 if (inet->cmsg_flags)
99410 @@ -685,7 +692,7 @@ csum_error:
99411 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
99412 drop:
99413 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
99414 - atomic_inc(&sk->sk_drops);
99415 + atomic_inc_unchecked(&sk->sk_drops);
99416 kfree_skb(skb);
99417 return -1;
99418 }
99419 @@ -742,7 +749,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
99420 if (likely(skb1 == NULL))
99421 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
99422 if (!skb1) {
99423 - atomic_inc(&sk->sk_drops);
99424 + atomic_inc_unchecked(&sk->sk_drops);
99425 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
99426 IS_UDPLITE(sk));
99427 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
99428 @@ -881,6 +888,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
99429 goto csum_error;
99430
99431 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
99432 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99433 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
99434 +#endif
99435 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
99436
99437 kfree_skb(skb);
99438 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
99439 index 5f8e128..865d38e 100644
99440 --- a/net/ipv6/xfrm6_policy.c
99441 +++ b/net/ipv6/xfrm6_policy.c
99442 @@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
99443 }
99444 }
99445
99446 -static inline int xfrm6_garbage_collect(struct dst_ops *ops)
99447 +static int xfrm6_garbage_collect(struct dst_ops *ops)
99448 {
99449 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
99450
99451 - xfrm6_policy_afinfo.garbage_collect(net);
99452 + xfrm_garbage_collect_deferred(net);
99453 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
99454 }
99455
99456 @@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
99457
99458 static int __net_init xfrm6_net_init(struct net *net)
99459 {
99460 - struct ctl_table *table;
99461 + ctl_table_no_const *table = NULL;
99462 struct ctl_table_header *hdr;
99463
99464 - table = xfrm6_policy_table;
99465 if (!net_eq(net, &init_net)) {
99466 - table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
99467 + table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
99468 if (!table)
99469 goto err_alloc;
99470
99471 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
99472 - }
99473 + hdr = register_net_sysctl(net, "net/ipv6", table);
99474 + } else
99475 + hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
99476
99477 - hdr = register_net_sysctl(net, "net/ipv6", table);
99478 if (!hdr)
99479 goto err_reg;
99480
99481 @@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
99482 return 0;
99483
99484 err_reg:
99485 - if (!net_eq(net, &init_net))
99486 - kfree(table);
99487 + kfree(table);
99488 err_alloc:
99489 return -ENOMEM;
99490 }
99491 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
99492 index 41ac7938..75e3bb1 100644
99493 --- a/net/irda/ircomm/ircomm_tty.c
99494 +++ b/net/irda/ircomm/ircomm_tty.c
99495 @@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
99496 add_wait_queue(&port->open_wait, &wait);
99497
99498 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
99499 - __FILE__, __LINE__, tty->driver->name, port->count);
99500 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
99501
99502 spin_lock_irqsave(&port->lock, flags);
99503 if (!tty_hung_up_p(filp))
99504 - port->count--;
99505 + atomic_dec(&port->count);
99506 port->blocked_open++;
99507 spin_unlock_irqrestore(&port->lock, flags);
99508
99509 @@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
99510 }
99511
99512 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
99513 - __FILE__, __LINE__, tty->driver->name, port->count);
99514 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
99515
99516 schedule();
99517 }
99518 @@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
99519
99520 spin_lock_irqsave(&port->lock, flags);
99521 if (!tty_hung_up_p(filp))
99522 - port->count++;
99523 + atomic_inc(&port->count);
99524 port->blocked_open--;
99525 spin_unlock_irqrestore(&port->lock, flags);
99526
99527 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
99528 - __FILE__, __LINE__, tty->driver->name, port->count);
99529 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
99530
99531 if (!retval)
99532 port->flags |= ASYNC_NORMAL_ACTIVE;
99533 @@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
99534
99535 /* ++ is not atomic, so this should be protected - Jean II */
99536 spin_lock_irqsave(&self->port.lock, flags);
99537 - self->port.count++;
99538 + atomic_inc(&self->port.count);
99539 spin_unlock_irqrestore(&self->port.lock, flags);
99540 tty_port_tty_set(&self->port, tty);
99541
99542 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
99543 - self->line, self->port.count);
99544 + self->line, atomic_read(&self->port.count));
99545
99546 /* Not really used by us, but lets do it anyway */
99547 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
99548 @@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
99549 tty_kref_put(port->tty);
99550 }
99551 port->tty = NULL;
99552 - port->count = 0;
99553 + atomic_set(&port->count, 0);
99554 spin_unlock_irqrestore(&port->lock, flags);
99555
99556 wake_up_interruptible(&port->open_wait);
99557 @@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
99558 seq_putc(m, '\n');
99559
99560 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
99561 - seq_printf(m, "Open count: %d\n", self->port.count);
99562 + seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
99563 seq_printf(m, "Max data size: %d\n", self->max_data_size);
99564 seq_printf(m, "Max header size: %d\n", self->max_header_size);
99565
99566 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
99567 index c4b7218..3e83259 100644
99568 --- a/net/iucv/af_iucv.c
99569 +++ b/net/iucv/af_iucv.c
99570 @@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
99571
99572 write_lock_bh(&iucv_sk_list.lock);
99573
99574 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
99575 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
99576 while (__iucv_get_sock_by_name(name)) {
99577 sprintf(name, "%08x",
99578 - atomic_inc_return(&iucv_sk_list.autobind_name));
99579 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
99580 }
99581
99582 write_unlock_bh(&iucv_sk_list.lock);
99583 diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
99584 index cd5b8ec..f205e6b 100644
99585 --- a/net/iucv/iucv.c
99586 +++ b/net/iucv/iucv.c
99587 @@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
99588 return NOTIFY_OK;
99589 }
99590
99591 -static struct notifier_block __refdata iucv_cpu_notifier = {
99592 +static struct notifier_block iucv_cpu_notifier = {
99593 .notifier_call = iucv_cpu_notify,
99594 };
99595
99596 diff --git a/net/key/af_key.c b/net/key/af_key.c
99597 index 545f047..9757a9d 100644
99598 --- a/net/key/af_key.c
99599 +++ b/net/key/af_key.c
99600 @@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
99601 static u32 get_acqseq(void)
99602 {
99603 u32 res;
99604 - static atomic_t acqseq;
99605 + static atomic_unchecked_t acqseq;
99606
99607 do {
99608 - res = atomic_inc_return(&acqseq);
99609 + res = atomic_inc_return_unchecked(&acqseq);
99610 } while (!res);
99611 return res;
99612 }
99613 diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
99614 index da1a1ce..571db8d 100644
99615 --- a/net/l2tp/l2tp_ip.c
99616 +++ b/net/l2tp/l2tp_ip.c
99617 @@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
99618 if (flags & MSG_OOB)
99619 goto out;
99620
99621 + if (addr_len)
99622 + *addr_len = sizeof(*sin);
99623 +
99624 skb = skb_recv_datagram(sk, flags, noblock, &err);
99625 if (!skb)
99626 goto out;
99627 @@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
99628 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
99629 sin->sin_port = 0;
99630 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
99631 - *addr_len = sizeof(*sin);
99632 }
99633 if (inet->cmsg_flags)
99634 ip_cmsg_recv(msg, skb);
99635 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
99636 index b4b61b2..ac84a257 100644
99637 --- a/net/mac80211/cfg.c
99638 +++ b/net/mac80211/cfg.c
99639 @@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
99640 ret = ieee80211_vif_use_channel(sdata, chandef,
99641 IEEE80211_CHANCTX_EXCLUSIVE);
99642 }
99643 - } else if (local->open_count == local->monitors) {
99644 + } else if (local_read(&local->open_count) == local->monitors) {
99645 local->_oper_chandef = *chandef;
99646 ieee80211_hw_config(local, 0);
99647 }
99648 @@ -3311,7 +3311,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
99649 else
99650 local->probe_req_reg--;
99651
99652 - if (!local->open_count)
99653 + if (!local_read(&local->open_count))
99654 break;
99655
99656 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
99657 @@ -3774,8 +3774,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
99658 if (chanctx_conf) {
99659 *chandef = chanctx_conf->def;
99660 ret = 0;
99661 - } else if (local->open_count > 0 &&
99662 - local->open_count == local->monitors &&
99663 + } else if (local_read(&local->open_count) > 0 &&
99664 + local_read(&local->open_count) == local->monitors &&
99665 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
99666 if (local->use_chanctx)
99667 *chandef = local->monitor_chandef;
99668 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
99669 index 4aea4e7..9e698d1 100644
99670 --- a/net/mac80211/ieee80211_i.h
99671 +++ b/net/mac80211/ieee80211_i.h
99672 @@ -28,6 +28,7 @@
99673 #include <net/ieee80211_radiotap.h>
99674 #include <net/cfg80211.h>
99675 #include <net/mac80211.h>
99676 +#include <asm/local.h>
99677 #include "key.h"
99678 #include "sta_info.h"
99679 #include "debug.h"
99680 @@ -961,7 +962,7 @@ struct ieee80211_local {
99681 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
99682 spinlock_t queue_stop_reason_lock;
99683
99684 - int open_count;
99685 + local_t open_count;
99686 int monitors, cooked_mntrs;
99687 /* number of interfaces with corresponding FIF_ flags */
99688 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
99689 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
99690 index a075791..1d0027f 100644
99691 --- a/net/mac80211/iface.c
99692 +++ b/net/mac80211/iface.c
99693 @@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99694 break;
99695 }
99696
99697 - if (local->open_count == 0) {
99698 + if (local_read(&local->open_count) == 0) {
99699 res = drv_start(local);
99700 if (res)
99701 goto err_del_bss;
99702 @@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99703 res = drv_add_interface(local, sdata);
99704 if (res)
99705 goto err_stop;
99706 - } else if (local->monitors == 0 && local->open_count == 0) {
99707 + } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
99708 res = ieee80211_add_virtual_monitor(local);
99709 if (res)
99710 goto err_stop;
99711 @@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99712 atomic_inc(&local->iff_promiscs);
99713
99714 if (coming_up)
99715 - local->open_count++;
99716 + local_inc(&local->open_count);
99717
99718 if (hw_reconf_flags)
99719 ieee80211_hw_config(local, hw_reconf_flags);
99720 @@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99721 err_del_interface:
99722 drv_remove_interface(local, sdata);
99723 err_stop:
99724 - if (!local->open_count)
99725 + if (!local_read(&local->open_count))
99726 drv_stop(local);
99727 err_del_bss:
99728 sdata->bss = NULL;
99729 @@ -856,7 +856,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99730 }
99731
99732 if (going_down)
99733 - local->open_count--;
99734 + local_dec(&local->open_count);
99735
99736 switch (sdata->vif.type) {
99737 case NL80211_IFTYPE_AP_VLAN:
99738 @@ -923,7 +923,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99739 }
99740 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
99741
99742 - if (local->open_count == 0)
99743 + if (local_read(&local->open_count) == 0)
99744 ieee80211_clear_tx_pending(local);
99745
99746 /*
99747 @@ -963,7 +963,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99748
99749 ieee80211_recalc_ps(local, -1);
99750
99751 - if (local->open_count == 0) {
99752 + if (local_read(&local->open_count) == 0) {
99753 ieee80211_stop_device(local);
99754
99755 /* no reconfiguring after stop! */
99756 @@ -974,7 +974,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99757 ieee80211_configure_filter(local);
99758 ieee80211_hw_config(local, hw_reconf_flags);
99759
99760 - if (local->monitors == local->open_count)
99761 + if (local->monitors == local_read(&local->open_count))
99762 ieee80211_add_virtual_monitor(local);
99763 }
99764
99765 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
99766 index 7d1c3ac..b62dd29 100644
99767 --- a/net/mac80211/main.c
99768 +++ b/net/mac80211/main.c
99769 @@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
99770 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
99771 IEEE80211_CONF_CHANGE_POWER);
99772
99773 - if (changed && local->open_count) {
99774 + if (changed && local_read(&local->open_count)) {
99775 ret = drv_config(local, changed);
99776 /*
99777 * Goal:
99778 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
99779 index 3401262..d5cd68d 100644
99780 --- a/net/mac80211/pm.c
99781 +++ b/net/mac80211/pm.c
99782 @@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
99783 struct ieee80211_sub_if_data *sdata;
99784 struct sta_info *sta;
99785
99786 - if (!local->open_count)
99787 + if (!local_read(&local->open_count))
99788 goto suspend;
99789
99790 ieee80211_scan_cancel(local);
99791 @@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
99792 cancel_work_sync(&local->dynamic_ps_enable_work);
99793 del_timer_sync(&local->dynamic_ps_timer);
99794
99795 - local->wowlan = wowlan && local->open_count;
99796 + local->wowlan = wowlan && local_read(&local->open_count);
99797 if (local->wowlan) {
99798 int err = drv_suspend(local, wowlan);
99799 if (err < 0) {
99800 @@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
99801 WARN_ON(!list_empty(&local->chanctx_list));
99802
99803 /* stop hardware - this must stop RX */
99804 - if (local->open_count)
99805 + if (local_read(&local->open_count))
99806 ieee80211_stop_device(local);
99807
99808 suspend:
99809 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
99810 index 22b223f..ab70070 100644
99811 --- a/net/mac80211/rate.c
99812 +++ b/net/mac80211/rate.c
99813 @@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
99814
99815 ASSERT_RTNL();
99816
99817 - if (local->open_count)
99818 + if (local_read(&local->open_count))
99819 return -EBUSY;
99820
99821 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
99822 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
99823 index 6ff1346..936ca9a 100644
99824 --- a/net/mac80211/rc80211_pid_debugfs.c
99825 +++ b/net/mac80211/rc80211_pid_debugfs.c
99826 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
99827
99828 spin_unlock_irqrestore(&events->lock, status);
99829
99830 - if (copy_to_user(buf, pb, p))
99831 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
99832 return -EFAULT;
99833
99834 return p;
99835 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
99836 index 9f9b9bd..d6fcf59 100644
99837 --- a/net/mac80211/util.c
99838 +++ b/net/mac80211/util.c
99839 @@ -1474,7 +1474,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
99840 }
99841 #endif
99842 /* everything else happens only if HW was up & running */
99843 - if (!local->open_count)
99844 + if (!local_read(&local->open_count))
99845 goto wake_up;
99846
99847 /*
99848 @@ -1699,7 +1699,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
99849 local->in_reconfig = false;
99850 barrier();
99851
99852 - if (local->monitors == local->open_count && local->monitors > 0)
99853 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
99854 ieee80211_add_virtual_monitor(local);
99855
99856 /*
99857 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
99858 index c3398cd..98ad3b4 100644
99859 --- a/net/netfilter/Kconfig
99860 +++ b/net/netfilter/Kconfig
99861 @@ -1002,6 +1002,16 @@ config NETFILTER_XT_MATCH_ESP
99862
99863 To compile it as a module, choose M here. If unsure, say N.
99864
99865 +config NETFILTER_XT_MATCH_GRADM
99866 + tristate '"gradm" match support'
99867 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
99868 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
99869 + ---help---
99870 + The gradm match allows to match on grsecurity RBAC being enabled.
99871 + It is useful when iptables rules are applied early on bootup to
99872 + prevent connections to the machine (except from a trusted host)
99873 + while the RBAC system is disabled.
99874 +
99875 config NETFILTER_XT_MATCH_HASHLIMIT
99876 tristate '"hashlimit" match support'
99877 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
99878 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
99879 index 394483b..ed51f2d 100644
99880 --- a/net/netfilter/Makefile
99881 +++ b/net/netfilter/Makefile
99882 @@ -130,6 +130,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
99883 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
99884 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
99885 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
99886 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
99887 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
99888 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
99889 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
99890 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
99891 index bac7e01..1d7a31a 100644
99892 --- a/net/netfilter/ipset/ip_set_core.c
99893 +++ b/net/netfilter/ipset/ip_set_core.c
99894 @@ -1950,7 +1950,7 @@ done:
99895 return ret;
99896 }
99897
99898 -static struct nf_sockopt_ops so_set __read_mostly = {
99899 +static struct nf_sockopt_ops so_set = {
99900 .pf = PF_INET,
99901 .get_optmin = SO_IP_SET,
99902 .get_optmax = SO_IP_SET + 1,
99903 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
99904 index 4c8e5c0..5a79b4d 100644
99905 --- a/net/netfilter/ipvs/ip_vs_conn.c
99906 +++ b/net/netfilter/ipvs/ip_vs_conn.c
99907 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
99908 /* Increase the refcnt counter of the dest */
99909 ip_vs_dest_hold(dest);
99910
99911 - conn_flags = atomic_read(&dest->conn_flags);
99912 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
99913 if (cp->protocol != IPPROTO_UDP)
99914 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
99915 flags = cp->flags;
99916 @@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
99917
99918 cp->control = NULL;
99919 atomic_set(&cp->n_control, 0);
99920 - atomic_set(&cp->in_pkts, 0);
99921 + atomic_set_unchecked(&cp->in_pkts, 0);
99922
99923 cp->packet_xmit = NULL;
99924 cp->app = NULL;
99925 @@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
99926
99927 /* Don't drop the entry if its number of incoming packets is not
99928 located in [0, 8] */
99929 - i = atomic_read(&cp->in_pkts);
99930 + i = atomic_read_unchecked(&cp->in_pkts);
99931 if (i > 8 || i < 0) return 0;
99932
99933 if (!todrop_rate[i]) return 0;
99934 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
99935 index 4f26ee4..6a9d7c3 100644
99936 --- a/net/netfilter/ipvs/ip_vs_core.c
99937 +++ b/net/netfilter/ipvs/ip_vs_core.c
99938 @@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
99939 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
99940 /* do not touch skb anymore */
99941
99942 - atomic_inc(&cp->in_pkts);
99943 + atomic_inc_unchecked(&cp->in_pkts);
99944 ip_vs_conn_put(cp);
99945 return ret;
99946 }
99947 @@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
99948 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
99949 pkts = sysctl_sync_threshold(ipvs);
99950 else
99951 - pkts = atomic_add_return(1, &cp->in_pkts);
99952 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
99953
99954 if (ipvs->sync_state & IP_VS_STATE_MASTER)
99955 ip_vs_sync_conn(net, cp, pkts);
99956 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
99957 index 35be035..50f8834 100644
99958 --- a/net/netfilter/ipvs/ip_vs_ctl.c
99959 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
99960 @@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
99961 */
99962 ip_vs_rs_hash(ipvs, dest);
99963 }
99964 - atomic_set(&dest->conn_flags, conn_flags);
99965 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
99966
99967 /* bind the service */
99968 old_svc = rcu_dereference_protected(dest->svc, 1);
99969 @@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
99970 * align with netns init in ip_vs_control_net_init()
99971 */
99972
99973 -static struct ctl_table vs_vars[] = {
99974 +static ctl_table_no_const vs_vars[] __read_only = {
99975 {
99976 .procname = "amemthresh",
99977 .maxlen = sizeof(int),
99978 @@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
99979 " %-7s %-6d %-10d %-10d\n",
99980 &dest->addr.in6,
99981 ntohs(dest->port),
99982 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
99983 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
99984 atomic_read(&dest->weight),
99985 atomic_read(&dest->activeconns),
99986 atomic_read(&dest->inactconns));
99987 @@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
99988 "%-7s %-6d %-10d %-10d\n",
99989 ntohl(dest->addr.ip),
99990 ntohs(dest->port),
99991 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
99992 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
99993 atomic_read(&dest->weight),
99994 atomic_read(&dest->activeconns),
99995 atomic_read(&dest->inactconns));
99996 @@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
99997
99998 entry.addr = dest->addr.ip;
99999 entry.port = dest->port;
100000 - entry.conn_flags = atomic_read(&dest->conn_flags);
100001 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
100002 entry.weight = atomic_read(&dest->weight);
100003 entry.u_threshold = dest->u_threshold;
100004 entry.l_threshold = dest->l_threshold;
100005 @@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
100006 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
100007 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
100008 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
100009 - (atomic_read(&dest->conn_flags) &
100010 + (atomic_read_unchecked(&dest->conn_flags) &
100011 IP_VS_CONN_F_FWD_MASK)) ||
100012 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
100013 atomic_read(&dest->weight)) ||
100014 @@ -3580,7 +3580,7 @@ out:
100015 }
100016
100017
100018 -static const struct genl_ops ip_vs_genl_ops[] __read_mostly = {
100019 +static const struct genl_ops ip_vs_genl_ops[] = {
100020 {
100021 .cmd = IPVS_CMD_NEW_SERVICE,
100022 .flags = GENL_ADMIN_PERM,
100023 @@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
100024 {
100025 int idx;
100026 struct netns_ipvs *ipvs = net_ipvs(net);
100027 - struct ctl_table *tbl;
100028 + ctl_table_no_const *tbl;
100029
100030 atomic_set(&ipvs->dropentry, 0);
100031 spin_lock_init(&ipvs->dropentry_lock);
100032 diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
100033 index ca056a3..9cf01ef 100644
100034 --- a/net/netfilter/ipvs/ip_vs_lblc.c
100035 +++ b/net/netfilter/ipvs/ip_vs_lblc.c
100036 @@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
100037 * IPVS LBLC sysctl table
100038 */
100039 #ifdef CONFIG_SYSCTL
100040 -static struct ctl_table vs_vars_table[] = {
100041 +static ctl_table_no_const vs_vars_table[] __read_only = {
100042 {
100043 .procname = "lblc_expiration",
100044 .data = NULL,
100045 diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
100046 index 3f21a2f..a112e85 100644
100047 --- a/net/netfilter/ipvs/ip_vs_lblcr.c
100048 +++ b/net/netfilter/ipvs/ip_vs_lblcr.c
100049 @@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
100050 * IPVS LBLCR sysctl table
100051 */
100052
100053 -static struct ctl_table vs_vars_table[] = {
100054 +static ctl_table_no_const vs_vars_table[] __read_only = {
100055 {
100056 .procname = "lblcr_expiration",
100057 .data = NULL,
100058 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
100059 index f63c238..1b87f8a 100644
100060 --- a/net/netfilter/ipvs/ip_vs_sync.c
100061 +++ b/net/netfilter/ipvs/ip_vs_sync.c
100062 @@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
100063 cp = cp->control;
100064 if (cp) {
100065 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
100066 - pkts = atomic_add_return(1, &cp->in_pkts);
100067 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
100068 else
100069 pkts = sysctl_sync_threshold(ipvs);
100070 ip_vs_sync_conn(net, cp->control, pkts);
100071 @@ -771,7 +771,7 @@ control:
100072 if (!cp)
100073 return;
100074 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
100075 - pkts = atomic_add_return(1, &cp->in_pkts);
100076 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
100077 else
100078 pkts = sysctl_sync_threshold(ipvs);
100079 goto sloop;
100080 @@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
100081
100082 if (opt)
100083 memcpy(&cp->in_seq, opt, sizeof(*opt));
100084 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
100085 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
100086 cp->state = state;
100087 cp->old_state = cp->state;
100088 /*
100089 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
100090 index c47444e..b0961c6 100644
100091 --- a/net/netfilter/ipvs/ip_vs_xmit.c
100092 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
100093 @@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
100094 else
100095 rc = NF_ACCEPT;
100096 /* do not touch skb anymore */
100097 - atomic_inc(&cp->in_pkts);
100098 + atomic_inc_unchecked(&cp->in_pkts);
100099 goto out;
100100 }
100101
100102 @@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
100103 else
100104 rc = NF_ACCEPT;
100105 /* do not touch skb anymore */
100106 - atomic_inc(&cp->in_pkts);
100107 + atomic_inc_unchecked(&cp->in_pkts);
100108 goto out;
100109 }
100110
100111 diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
100112 index a4b5e2a..13b1de3 100644
100113 --- a/net/netfilter/nf_conntrack_acct.c
100114 +++ b/net/netfilter/nf_conntrack_acct.c
100115 @@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
100116 #ifdef CONFIG_SYSCTL
100117 static int nf_conntrack_acct_init_sysctl(struct net *net)
100118 {
100119 - struct ctl_table *table;
100120 + ctl_table_no_const *table;
100121
100122 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
100123 GFP_KERNEL);
100124 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
100125 index 43549eb..0bbeace 100644
100126 --- a/net/netfilter/nf_conntrack_core.c
100127 +++ b/net/netfilter/nf_conntrack_core.c
100128 @@ -1605,6 +1605,10 @@ void nf_conntrack_init_end(void)
100129 #define DYING_NULLS_VAL ((1<<30)+1)
100130 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
100131
100132 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100133 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
100134 +#endif
100135 +
100136 int nf_conntrack_init_net(struct net *net)
100137 {
100138 int ret;
100139 @@ -1619,7 +1623,11 @@ int nf_conntrack_init_net(struct net *net)
100140 goto err_stat;
100141 }
100142
100143 +#ifdef CONFIG_GRKERNSEC_HIDESYM
100144 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
100145 +#else
100146 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
100147 +#endif
100148 if (!net->ct.slabname) {
100149 ret = -ENOMEM;
100150 goto err_slabname;
100151 diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
100152 index 1df1761..ce8b88a 100644
100153 --- a/net/netfilter/nf_conntrack_ecache.c
100154 +++ b/net/netfilter/nf_conntrack_ecache.c
100155 @@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
100156 #ifdef CONFIG_SYSCTL
100157 static int nf_conntrack_event_init_sysctl(struct net *net)
100158 {
100159 - struct ctl_table *table;
100160 + ctl_table_no_const *table;
100161
100162 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
100163 GFP_KERNEL);
100164 diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
100165 index 974a2a4..52cc6ff 100644
100166 --- a/net/netfilter/nf_conntrack_helper.c
100167 +++ b/net/netfilter/nf_conntrack_helper.c
100168 @@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
100169
100170 static int nf_conntrack_helper_init_sysctl(struct net *net)
100171 {
100172 - struct ctl_table *table;
100173 + ctl_table_no_const *table;
100174
100175 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
100176 GFP_KERNEL);
100177 diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
100178 index ce30041..3861b5d 100644
100179 --- a/net/netfilter/nf_conntrack_proto.c
100180 +++ b/net/netfilter/nf_conntrack_proto.c
100181 @@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
100182
100183 static void
100184 nf_ct_unregister_sysctl(struct ctl_table_header **header,
100185 - struct ctl_table **table,
100186 + ctl_table_no_const **table,
100187 unsigned int users)
100188 {
100189 if (users > 0)
100190 diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
100191 index a99b6c3..cb372f9 100644
100192 --- a/net/netfilter/nf_conntrack_proto_dccp.c
100193 +++ b/net/netfilter/nf_conntrack_proto_dccp.c
100194 @@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
100195 const char *msg;
100196 u_int8_t state;
100197
100198 - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
100199 + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
100200 BUG_ON(dh == NULL);
100201
100202 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
100203 @@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
100204 out_invalid:
100205 if (LOG_INVALID(net, IPPROTO_DCCP))
100206 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
100207 - NULL, msg);
100208 + NULL, "%s", msg);
100209 return false;
100210 }
100211
100212 @@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
100213 u_int8_t type, old_state, new_state;
100214 enum ct_dccp_roles role;
100215
100216 - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
100217 + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
100218 BUG_ON(dh == NULL);
100219 type = dh->dccph_type;
100220
100221 @@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
100222 unsigned int cscov;
100223 const char *msg;
100224
100225 - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
100226 + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
100227 if (dh == NULL) {
100228 msg = "nf_ct_dccp: short packet ";
100229 goto out_invalid;
100230 @@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
100231
100232 out_invalid:
100233 if (LOG_INVALID(net, IPPROTO_DCCP))
100234 - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
100235 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
100236 return -NF_ACCEPT;
100237 }
100238
100239 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
100240 index f641751..d3c5b51 100644
100241 --- a/net/netfilter/nf_conntrack_standalone.c
100242 +++ b/net/netfilter/nf_conntrack_standalone.c
100243 @@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
100244
100245 static int nf_conntrack_standalone_init_sysctl(struct net *net)
100246 {
100247 - struct ctl_table *table;
100248 + ctl_table_no_const *table;
100249
100250 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
100251 GFP_KERNEL);
100252 diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
100253 index 7a394df..bd91a8a 100644
100254 --- a/net/netfilter/nf_conntrack_timestamp.c
100255 +++ b/net/netfilter/nf_conntrack_timestamp.c
100256 @@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
100257 #ifdef CONFIG_SYSCTL
100258 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
100259 {
100260 - struct ctl_table *table;
100261 + ctl_table_no_const *table;
100262
100263 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
100264 GFP_KERNEL);
100265 diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
100266 index 85296d4..8becdec 100644
100267 --- a/net/netfilter/nf_log.c
100268 +++ b/net/netfilter/nf_log.c
100269 @@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
100270
100271 #ifdef CONFIG_SYSCTL
100272 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
100273 -static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
100274 +static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
100275
100276 static int nf_log_proc_dostring(struct ctl_table *table, int write,
100277 void __user *buffer, size_t *lenp, loff_t *ppos)
100278 @@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
100279 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
100280 mutex_unlock(&nf_log_mutex);
100281 } else {
100282 + ctl_table_no_const nf_log_table = *table;
100283 +
100284 mutex_lock(&nf_log_mutex);
100285 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
100286 lockdep_is_held(&nf_log_mutex));
100287 if (!logger)
100288 - table->data = "NONE";
100289 + nf_log_table.data = "NONE";
100290 else
100291 - table->data = logger->name;
100292 - r = proc_dostring(table, write, buffer, lenp, ppos);
100293 + nf_log_table.data = logger->name;
100294 + r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
100295 mutex_unlock(&nf_log_mutex);
100296 }
100297
100298 diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
100299 index f042ae5..30ea486 100644
100300 --- a/net/netfilter/nf_sockopt.c
100301 +++ b/net/netfilter/nf_sockopt.c
100302 @@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
100303 }
100304 }
100305
100306 - list_add(&reg->list, &nf_sockopts);
100307 + pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
100308 out:
100309 mutex_unlock(&nf_sockopt_mutex);
100310 return ret;
100311 @@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
100312 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
100313 {
100314 mutex_lock(&nf_sockopt_mutex);
100315 - list_del(&reg->list);
100316 + pax_list_del((struct list_head *)&reg->list);
100317 mutex_unlock(&nf_sockopt_mutex);
100318 }
100319 EXPORT_SYMBOL(nf_unregister_sockopt);
100320 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
100321 index a155d19..726b0f2 100644
100322 --- a/net/netfilter/nfnetlink_log.c
100323 +++ b/net/netfilter/nfnetlink_log.c
100324 @@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
100325 struct nfnl_log_net {
100326 spinlock_t instances_lock;
100327 struct hlist_head instance_table[INSTANCE_BUCKETS];
100328 - atomic_t global_seq;
100329 + atomic_unchecked_t global_seq;
100330 };
100331
100332 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
100333 @@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_net *log,
100334 /* global sequence number */
100335 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
100336 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
100337 - htonl(atomic_inc_return(&log->global_seq))))
100338 + htonl(atomic_inc_return_unchecked(&log->global_seq))))
100339 goto nla_put_failure;
100340
100341 if (data_len) {
100342 diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
100343 index da0c1f4..f79737a 100644
100344 --- a/net/netfilter/nft_compat.c
100345 +++ b/net/netfilter/nft_compat.c
100346 @@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
100347 /* We want to reuse existing compat_to_user */
100348 old_fs = get_fs();
100349 set_fs(KERNEL_DS);
100350 - t->compat_to_user(out, in);
100351 + t->compat_to_user((void __force_user *)out, in);
100352 set_fs(old_fs);
100353 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
100354 kfree(out);
100355 @@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
100356 /* We want to reuse existing compat_to_user */
100357 old_fs = get_fs();
100358 set_fs(KERNEL_DS);
100359 - m->compat_to_user(out, in);
100360 + m->compat_to_user((void __force_user *)out, in);
100361 set_fs(old_fs);
100362 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
100363 kfree(out);
100364 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
100365 new file mode 100644
100366 index 0000000..c566332
100367 --- /dev/null
100368 +++ b/net/netfilter/xt_gradm.c
100369 @@ -0,0 +1,51 @@
100370 +/*
100371 + * gradm match for netfilter
100372 + * Copyright © Zbigniew Krzystolik, 2010
100373 + *
100374 + * This program is free software; you can redistribute it and/or modify
100375 + * it under the terms of the GNU General Public License; either version
100376 + * 2 or 3 as published by the Free Software Foundation.
100377 + */
100378 +#include <linux/module.h>
100379 +#include <linux/moduleparam.h>
100380 +#include <linux/skbuff.h>
100381 +#include <linux/netfilter/x_tables.h>
100382 +#include <linux/grsecurity.h>
100383 +#include <linux/netfilter/xt_gradm.h>
100384 +
100385 +static bool
100386 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
100387 +{
100388 + const struct xt_gradm_mtinfo *info = par->matchinfo;
100389 + bool retval = false;
100390 + if (gr_acl_is_enabled())
100391 + retval = true;
100392 + return retval ^ info->invflags;
100393 +}
100394 +
100395 +static struct xt_match gradm_mt_reg __read_mostly = {
100396 + .name = "gradm",
100397 + .revision = 0,
100398 + .family = NFPROTO_UNSPEC,
100399 + .match = gradm_mt,
100400 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
100401 + .me = THIS_MODULE,
100402 +};
100403 +
100404 +static int __init gradm_mt_init(void)
100405 +{
100406 + return xt_register_match(&gradm_mt_reg);
100407 +}
100408 +
100409 +static void __exit gradm_mt_exit(void)
100410 +{
100411 + xt_unregister_match(&gradm_mt_reg);
100412 +}
100413 +
100414 +module_init(gradm_mt_init);
100415 +module_exit(gradm_mt_exit);
100416 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
100417 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
100418 +MODULE_LICENSE("GPL");
100419 +MODULE_ALIAS("ipt_gradm");
100420 +MODULE_ALIAS("ip6t_gradm");
100421 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
100422 index 4fe4fb4..87a89e5 100644
100423 --- a/net/netfilter/xt_statistic.c
100424 +++ b/net/netfilter/xt_statistic.c
100425 @@ -19,7 +19,7 @@
100426 #include <linux/module.h>
100427
100428 struct xt_statistic_priv {
100429 - atomic_t count;
100430 + atomic_unchecked_t count;
100431 } ____cacheline_aligned_in_smp;
100432
100433 MODULE_LICENSE("GPL");
100434 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
100435 break;
100436 case XT_STATISTIC_MODE_NTH:
100437 do {
100438 - oval = atomic_read(&info->master->count);
100439 + oval = atomic_read_unchecked(&info->master->count);
100440 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
100441 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
100442 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
100443 if (nval == 0)
100444 ret = !ret;
100445 break;
100446 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
100447 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
100448 if (info->master == NULL)
100449 return -ENOMEM;
100450 - atomic_set(&info->master->count, info->u.nth.count);
100451 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
100452
100453 return 0;
100454 }
100455 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
100456 index bca50b9..782ec12 100644
100457 --- a/net/netlink/af_netlink.c
100458 +++ b/net/netlink/af_netlink.c
100459 @@ -249,7 +249,7 @@ static void netlink_overrun(struct sock *sk)
100460 sk->sk_error_report(sk);
100461 }
100462 }
100463 - atomic_inc(&sk->sk_drops);
100464 + atomic_inc_unchecked(&sk->sk_drops);
100465 }
100466
100467 static void netlink_rcv_wake(struct sock *sk)
100468 @@ -1481,8 +1481,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
100469 if (addr->sa_family != AF_NETLINK)
100470 return -EINVAL;
100471
100472 - /* Only superuser is allowed to send multicasts */
100473 - if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
100474 + if ((nladdr->nl_groups || nladdr->nl_pid) &&
100475 + !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
100476 return -EPERM;
100477
100478 if (!nlk->portid)
100479 @@ -2940,7 +2940,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
100480 sk_wmem_alloc_get(s),
100481 nlk->cb_running,
100482 atomic_read(&s->sk_refcnt),
100483 - atomic_read(&s->sk_drops),
100484 + atomic_read_unchecked(&s->sk_drops),
100485 sock_i_ino(s)
100486 );
100487
100488 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
100489 index 53c19a3..b0ac04a 100644
100490 --- a/net/netrom/af_netrom.c
100491 +++ b/net/netrom/af_netrom.c
100492 @@ -850,7 +850,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
100493 *uaddr_len = sizeof(struct full_sockaddr_ax25);
100494 } else {
100495 sax->fsa_ax25.sax25_family = AF_NETROM;
100496 - sax->fsa_ax25.sax25_ndigis = 0;
100497 sax->fsa_ax25.sax25_call = nr->source_addr;
100498 *uaddr_len = sizeof(struct sockaddr_ax25);
100499 }
100500 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
100501 index 88cfbc1..05d73f5 100644
100502 --- a/net/packet/af_packet.c
100503 +++ b/net/packet/af_packet.c
100504 @@ -1720,7 +1720,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
100505
100506 spin_lock(&sk->sk_receive_queue.lock);
100507 po->stats.stats1.tp_packets++;
100508 - skb->dropcount = atomic_read(&sk->sk_drops);
100509 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
100510 __skb_queue_tail(&sk->sk_receive_queue, skb);
100511 spin_unlock(&sk->sk_receive_queue.lock);
100512 sk->sk_data_ready(sk, skb->len);
100513 @@ -1729,7 +1729,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
100514 drop_n_acct:
100515 spin_lock(&sk->sk_receive_queue.lock);
100516 po->stats.stats1.tp_drops++;
100517 - atomic_inc(&sk->sk_drops);
100518 + atomic_inc_unchecked(&sk->sk_drops);
100519 spin_unlock(&sk->sk_receive_queue.lock);
100520
100521 drop_n_restore:
100522 @@ -3275,7 +3275,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
100523 case PACKET_HDRLEN:
100524 if (len > sizeof(int))
100525 len = sizeof(int);
100526 - if (copy_from_user(&val, optval, len))
100527 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
100528 return -EFAULT;
100529 switch (val) {
100530 case TPACKET_V1:
100531 @@ -3318,7 +3318,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
100532 len = lv;
100533 if (put_user(len, optlen))
100534 return -EFAULT;
100535 - if (copy_to_user(optval, data, len))
100536 + if (len > sizeof(st) || copy_to_user(optval, data, len))
100537 return -EFAULT;
100538 return 0;
100539 }
100540 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
100541 index 38946b2..12c30f3 100644
100542 --- a/net/phonet/datagram.c
100543 +++ b/net/phonet/datagram.c
100544 @@ -139,6 +139,9 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
100545 MSG_CMSG_COMPAT))
100546 goto out_nofree;
100547
100548 + if (addr_len)
100549 + *addr_len = sizeof(sa);
100550 +
100551 skb = skb_recv_datagram(sk, flags, noblock, &rval);
100552 if (skb == NULL)
100553 goto out_nofree;
100554 @@ -159,10 +162,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
100555
100556 rval = (flags & MSG_TRUNC) ? skb->len : copylen;
100557
100558 - if (msg->msg_name != NULL) {
100559 - memcpy(msg->msg_name, &sa, sizeof(sa));
100560 - *addr_len = sizeof(sa);
100561 - }
100562 + if (msg->msg_name != NULL)
100563 + memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
100564
100565 out:
100566 skb_free_datagram(sk, skb);
100567 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
100568 index e774117..900b8b7 100644
100569 --- a/net/phonet/pep.c
100570 +++ b/net/phonet/pep.c
100571 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
100572
100573 case PNS_PEP_CTRL_REQ:
100574 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
100575 - atomic_inc(&sk->sk_drops);
100576 + atomic_inc_unchecked(&sk->sk_drops);
100577 break;
100578 }
100579 __skb_pull(skb, 4);
100580 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
100581 }
100582
100583 if (pn->rx_credits == 0) {
100584 - atomic_inc(&sk->sk_drops);
100585 + atomic_inc_unchecked(&sk->sk_drops);
100586 err = -ENOBUFS;
100587 break;
100588 }
100589 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
100590 }
100591
100592 if (pn->rx_credits == 0) {
100593 - atomic_inc(&sk->sk_drops);
100594 + atomic_inc_unchecked(&sk->sk_drops);
100595 err = NET_RX_DROP;
100596 break;
100597 }
100598 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
100599 index 008214a..bb68240 100644
100600 --- a/net/phonet/socket.c
100601 +++ b/net/phonet/socket.c
100602 @@ -611,7 +611,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
100603 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
100604 sock_i_ino(sk),
100605 atomic_read(&sk->sk_refcnt), sk,
100606 - atomic_read(&sk->sk_drops));
100607 + atomic_read_unchecked(&sk->sk_drops));
100608 }
100609 seq_pad(seq, '\n');
100610 return 0;
100611 diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
100612 index c02a8c4..3c5b600 100644
100613 --- a/net/phonet/sysctl.c
100614 +++ b/net/phonet/sysctl.c
100615 @@ -67,7 +67,7 @@ static int proc_local_port_range(struct ctl_table *table, int write,
100616 {
100617 int ret;
100618 int range[2] = {local_port_range[0], local_port_range[1]};
100619 - struct ctl_table tmp = {
100620 + ctl_table_no_const tmp = {
100621 .data = &range,
100622 .maxlen = sizeof(range),
100623 .mode = table->mode,
100624 diff --git a/net/rds/cong.c b/net/rds/cong.c
100625 index e5b65ac..f3b6fb7 100644
100626 --- a/net/rds/cong.c
100627 +++ b/net/rds/cong.c
100628 @@ -78,7 +78,7 @@
100629 * finds that the saved generation number is smaller than the global generation
100630 * number, it wakes up the process.
100631 */
100632 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
100633 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
100634
100635 /*
100636 * Congestion monitoring
100637 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
100638 rdsdebug("waking map %p for %pI4\n",
100639 map, &map->m_addr);
100640 rds_stats_inc(s_cong_update_received);
100641 - atomic_inc(&rds_cong_generation);
100642 + atomic_inc_unchecked(&rds_cong_generation);
100643 if (waitqueue_active(&map->m_waitq))
100644 wake_up(&map->m_waitq);
100645 if (waitqueue_active(&rds_poll_waitq))
100646 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
100647
100648 int rds_cong_updated_since(unsigned long *recent)
100649 {
100650 - unsigned long gen = atomic_read(&rds_cong_generation);
100651 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
100652
100653 if (likely(*recent == gen))
100654 return 0;
100655 diff --git a/net/rds/ib.h b/net/rds/ib.h
100656 index 7280ab8..e04f4ea 100644
100657 --- a/net/rds/ib.h
100658 +++ b/net/rds/ib.h
100659 @@ -128,7 +128,7 @@ struct rds_ib_connection {
100660 /* sending acks */
100661 unsigned long i_ack_flags;
100662 #ifdef KERNEL_HAS_ATOMIC64
100663 - atomic64_t i_ack_next; /* next ACK to send */
100664 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
100665 #else
100666 spinlock_t i_ack_lock; /* protect i_ack_next */
100667 u64 i_ack_next; /* next ACK to send */
100668 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
100669 index 31b74f5..dc1fbfa 100644
100670 --- a/net/rds/ib_cm.c
100671 +++ b/net/rds/ib_cm.c
100672 @@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
100673 /* Clear the ACK state */
100674 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
100675 #ifdef KERNEL_HAS_ATOMIC64
100676 - atomic64_set(&ic->i_ack_next, 0);
100677 + atomic64_set_unchecked(&ic->i_ack_next, 0);
100678 #else
100679 ic->i_ack_next = 0;
100680 #endif
100681 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
100682 index b7ebe23..b6352f6 100644
100683 --- a/net/rds/ib_recv.c
100684 +++ b/net/rds/ib_recv.c
100685 @@ -596,7 +596,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
100686 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
100687 int ack_required)
100688 {
100689 - atomic64_set(&ic->i_ack_next, seq);
100690 + atomic64_set_unchecked(&ic->i_ack_next, seq);
100691 if (ack_required) {
100692 smp_mb__before_clear_bit();
100693 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
100694 @@ -608,7 +608,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
100695 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
100696 smp_mb__after_clear_bit();
100697
100698 - return atomic64_read(&ic->i_ack_next);
100699 + return atomic64_read_unchecked(&ic->i_ack_next);
100700 }
100701 #endif
100702
100703 diff --git a/net/rds/iw.h b/net/rds/iw.h
100704 index 04ce3b1..48119a6 100644
100705 --- a/net/rds/iw.h
100706 +++ b/net/rds/iw.h
100707 @@ -134,7 +134,7 @@ struct rds_iw_connection {
100708 /* sending acks */
100709 unsigned long i_ack_flags;
100710 #ifdef KERNEL_HAS_ATOMIC64
100711 - atomic64_t i_ack_next; /* next ACK to send */
100712 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
100713 #else
100714 spinlock_t i_ack_lock; /* protect i_ack_next */
100715 u64 i_ack_next; /* next ACK to send */
100716 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
100717 index a91e1db..cf3053f 100644
100718 --- a/net/rds/iw_cm.c
100719 +++ b/net/rds/iw_cm.c
100720 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
100721 /* Clear the ACK state */
100722 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
100723 #ifdef KERNEL_HAS_ATOMIC64
100724 - atomic64_set(&ic->i_ack_next, 0);
100725 + atomic64_set_unchecked(&ic->i_ack_next, 0);
100726 #else
100727 ic->i_ack_next = 0;
100728 #endif
100729 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
100730 index 4503335..db566b4 100644
100731 --- a/net/rds/iw_recv.c
100732 +++ b/net/rds/iw_recv.c
100733 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
100734 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
100735 int ack_required)
100736 {
100737 - atomic64_set(&ic->i_ack_next, seq);
100738 + atomic64_set_unchecked(&ic->i_ack_next, seq);
100739 if (ack_required) {
100740 smp_mb__before_clear_bit();
100741 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
100742 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
100743 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
100744 smp_mb__after_clear_bit();
100745
100746 - return atomic64_read(&ic->i_ack_next);
100747 + return atomic64_read_unchecked(&ic->i_ack_next);
100748 }
100749 #endif
100750
100751 diff --git a/net/rds/rds.h b/net/rds/rds.h
100752 index 48f8ffc..0ef3eec 100644
100753 --- a/net/rds/rds.h
100754 +++ b/net/rds/rds.h
100755 @@ -449,7 +449,7 @@ struct rds_transport {
100756 void (*sync_mr)(void *trans_private, int direction);
100757 void (*free_mr)(void *trans_private, int invalidate);
100758 void (*flush_mrs)(void);
100759 -};
100760 +} __do_const;
100761
100762 struct rds_sock {
100763 struct sock rs_sk;
100764 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
100765 index edac9ef..16bcb98 100644
100766 --- a/net/rds/tcp.c
100767 +++ b/net/rds/tcp.c
100768 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
100769 int val = 1;
100770
100771 set_fs(KERNEL_DS);
100772 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
100773 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
100774 sizeof(val));
100775 set_fs(oldfs);
100776 }
100777 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
100778 index 81cf5a4..b5826ff 100644
100779 --- a/net/rds/tcp_send.c
100780 +++ b/net/rds/tcp_send.c
100781 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
100782
100783 oldfs = get_fs();
100784 set_fs(KERNEL_DS);
100785 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
100786 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
100787 sizeof(val));
100788 set_fs(oldfs);
100789 }
100790 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
100791 index e61aa60..f07cc89 100644
100792 --- a/net/rxrpc/af_rxrpc.c
100793 +++ b/net/rxrpc/af_rxrpc.c
100794 @@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
100795 __be32 rxrpc_epoch;
100796
100797 /* current debugging ID */
100798 -atomic_t rxrpc_debug_id;
100799 +atomic_unchecked_t rxrpc_debug_id;
100800
100801 /* count of skbs currently in use */
100802 atomic_t rxrpc_n_skbs;
100803 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
100804 index e4d9cbc..b229649 100644
100805 --- a/net/rxrpc/ar-ack.c
100806 +++ b/net/rxrpc/ar-ack.c
100807 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
100808
100809 _enter("{%d,%d,%d,%d},",
100810 call->acks_hard, call->acks_unacked,
100811 - atomic_read(&call->sequence),
100812 + atomic_read_unchecked(&call->sequence),
100813 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
100814
100815 stop = 0;
100816 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
100817
100818 /* each Tx packet has a new serial number */
100819 sp->hdr.serial =
100820 - htonl(atomic_inc_return(&call->conn->serial));
100821 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
100822
100823 hdr = (struct rxrpc_header *) txb->head;
100824 hdr->serial = sp->hdr.serial;
100825 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
100826 */
100827 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
100828 {
100829 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
100830 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
100831 }
100832
100833 /*
100834 @@ -629,7 +629,7 @@ process_further:
100835
100836 latest = ntohl(sp->hdr.serial);
100837 hard = ntohl(ack.firstPacket);
100838 - tx = atomic_read(&call->sequence);
100839 + tx = atomic_read_unchecked(&call->sequence);
100840
100841 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
100842 latest,
100843 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
100844 goto maybe_reschedule;
100845
100846 send_ACK_with_skew:
100847 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
100848 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
100849 ntohl(ack.serial));
100850 send_ACK:
100851 mtu = call->conn->trans->peer->if_mtu;
100852 @@ -1173,7 +1173,7 @@ send_ACK:
100853 ackinfo.rxMTU = htonl(5692);
100854 ackinfo.jumbo_max = htonl(4);
100855
100856 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
100857 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
100858 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
100859 ntohl(hdr.serial),
100860 ntohs(ack.maxSkew),
100861 @@ -1191,7 +1191,7 @@ send_ACK:
100862 send_message:
100863 _debug("send message");
100864
100865 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
100866 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
100867 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
100868 send_message_2:
100869
100870 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
100871 index a3bbb36..3341fb9 100644
100872 --- a/net/rxrpc/ar-call.c
100873 +++ b/net/rxrpc/ar-call.c
100874 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
100875 spin_lock_init(&call->lock);
100876 rwlock_init(&call->state_lock);
100877 atomic_set(&call->usage, 1);
100878 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
100879 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
100880 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
100881
100882 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
100883 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
100884 index 4106ca9..a338d7a 100644
100885 --- a/net/rxrpc/ar-connection.c
100886 +++ b/net/rxrpc/ar-connection.c
100887 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
100888 rwlock_init(&conn->lock);
100889 spin_lock_init(&conn->state_lock);
100890 atomic_set(&conn->usage, 1);
100891 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
100892 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
100893 conn->avail_calls = RXRPC_MAXCALLS;
100894 conn->size_align = 4;
100895 conn->header_size = sizeof(struct rxrpc_header);
100896 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
100897 index e7ed43a..6afa140 100644
100898 --- a/net/rxrpc/ar-connevent.c
100899 +++ b/net/rxrpc/ar-connevent.c
100900 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
100901
100902 len = iov[0].iov_len + iov[1].iov_len;
100903
100904 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
100905 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
100906 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
100907
100908 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
100909 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
100910 index 529572f..c758ca7 100644
100911 --- a/net/rxrpc/ar-input.c
100912 +++ b/net/rxrpc/ar-input.c
100913 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
100914 /* track the latest serial number on this connection for ACK packet
100915 * information */
100916 serial = ntohl(sp->hdr.serial);
100917 - hi_serial = atomic_read(&call->conn->hi_serial);
100918 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
100919 while (serial > hi_serial)
100920 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
100921 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
100922 serial);
100923
100924 /* request ACK generation for any ACK or DATA packet that requests
100925 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
100926 index 5f43675..ca07817 100644
100927 --- a/net/rxrpc/ar-internal.h
100928 +++ b/net/rxrpc/ar-internal.h
100929 @@ -272,8 +272,8 @@ struct rxrpc_connection {
100930 int error; /* error code for local abort */
100931 int debug_id; /* debug ID for printks */
100932 unsigned int call_counter; /* call ID counter */
100933 - atomic_t serial; /* packet serial number counter */
100934 - atomic_t hi_serial; /* highest serial number received */
100935 + atomic_unchecked_t serial; /* packet serial number counter */
100936 + atomic_unchecked_t hi_serial; /* highest serial number received */
100937 u8 avail_calls; /* number of calls available */
100938 u8 size_align; /* data size alignment (for security) */
100939 u8 header_size; /* rxrpc + security header size */
100940 @@ -346,7 +346,7 @@ struct rxrpc_call {
100941 spinlock_t lock;
100942 rwlock_t state_lock; /* lock for state transition */
100943 atomic_t usage;
100944 - atomic_t sequence; /* Tx data packet sequence counter */
100945 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
100946 u32 abort_code; /* local/remote abort code */
100947 enum { /* current state of call */
100948 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
100949 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
100950 */
100951 extern atomic_t rxrpc_n_skbs;
100952 extern __be32 rxrpc_epoch;
100953 -extern atomic_t rxrpc_debug_id;
100954 +extern atomic_unchecked_t rxrpc_debug_id;
100955 extern struct workqueue_struct *rxrpc_workqueue;
100956
100957 /*
100958 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
100959 index 87f7135..74d3703 100644
100960 --- a/net/rxrpc/ar-local.c
100961 +++ b/net/rxrpc/ar-local.c
100962 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
100963 spin_lock_init(&local->lock);
100964 rwlock_init(&local->services_lock);
100965 atomic_set(&local->usage, 1);
100966 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
100967 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
100968 memcpy(&local->srx, srx, sizeof(*srx));
100969 }
100970
100971 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
100972 index e1ac183..b43e10e 100644
100973 --- a/net/rxrpc/ar-output.c
100974 +++ b/net/rxrpc/ar-output.c
100975 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
100976 sp->hdr.cid = call->cid;
100977 sp->hdr.callNumber = call->call_id;
100978 sp->hdr.seq =
100979 - htonl(atomic_inc_return(&call->sequence));
100980 + htonl(atomic_inc_return_unchecked(&call->sequence));
100981 sp->hdr.serial =
100982 - htonl(atomic_inc_return(&conn->serial));
100983 + htonl(atomic_inc_return_unchecked(&conn->serial));
100984 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
100985 sp->hdr.userStatus = 0;
100986 sp->hdr.securityIndex = conn->security_ix;
100987 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
100988 index bebaa43..2644591 100644
100989 --- a/net/rxrpc/ar-peer.c
100990 +++ b/net/rxrpc/ar-peer.c
100991 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
100992 INIT_LIST_HEAD(&peer->error_targets);
100993 spin_lock_init(&peer->lock);
100994 atomic_set(&peer->usage, 1);
100995 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
100996 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
100997 memcpy(&peer->srx, srx, sizeof(*srx));
100998
100999 rxrpc_assess_MTU_size(peer);
101000 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
101001 index 38047f7..9f48511 100644
101002 --- a/net/rxrpc/ar-proc.c
101003 +++ b/net/rxrpc/ar-proc.c
101004 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
101005 atomic_read(&conn->usage),
101006 rxrpc_conn_states[conn->state],
101007 key_serial(conn->key),
101008 - atomic_read(&conn->serial),
101009 - atomic_read(&conn->hi_serial));
101010 + atomic_read_unchecked(&conn->serial),
101011 + atomic_read_unchecked(&conn->hi_serial));
101012
101013 return 0;
101014 }
101015 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
101016 index 92df566..87ec1bf 100644
101017 --- a/net/rxrpc/ar-transport.c
101018 +++ b/net/rxrpc/ar-transport.c
101019 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
101020 spin_lock_init(&trans->client_lock);
101021 rwlock_init(&trans->conn_lock);
101022 atomic_set(&trans->usage, 1);
101023 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
101024 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
101025
101026 if (peer->srx.transport.family == AF_INET) {
101027 switch (peer->srx.transport_type) {
101028 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
101029 index f226709..0e735a8 100644
101030 --- a/net/rxrpc/rxkad.c
101031 +++ b/net/rxrpc/rxkad.c
101032 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
101033
101034 len = iov[0].iov_len + iov[1].iov_len;
101035
101036 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
101037 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
101038 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
101039
101040 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
101041 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
101042
101043 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
101044
101045 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
101046 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
101047 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
101048
101049 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
101050 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
101051 index 7567e6f..299382b 100644
101052 --- a/net/sctp/ipv6.c
101053 +++ b/net/sctp/ipv6.c
101054 @@ -964,7 +964,7 @@ static const struct inet6_protocol sctpv6_protocol = {
101055 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
101056 };
101057
101058 -static struct sctp_af sctp_af_inet6 = {
101059 +static struct sctp_af sctp_af_inet6 __read_only = {
101060 .sa_family = AF_INET6,
101061 .sctp_xmit = sctp_v6_xmit,
101062 .setsockopt = ipv6_setsockopt,
101063 @@ -996,7 +996,7 @@ static struct sctp_af sctp_af_inet6 = {
101064 #endif
101065 };
101066
101067 -static struct sctp_pf sctp_pf_inet6 = {
101068 +static struct sctp_pf sctp_pf_inet6 __read_only = {
101069 .event_msgname = sctp_inet6_event_msgname,
101070 .skb_msgname = sctp_inet6_skb_msgname,
101071 .af_supported = sctp_inet6_af_supported,
101072 @@ -1021,7 +1021,7 @@ void sctp_v6_pf_init(void)
101073
101074 void sctp_v6_pf_exit(void)
101075 {
101076 - list_del(&sctp_af_inet6.list);
101077 + pax_list_del(&sctp_af_inet6.list);
101078 }
101079
101080 /* Initialize IPv6 support and register with socket layer. */
101081 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
101082 index 5e17092..19be1d4 100644
101083 --- a/net/sctp/protocol.c
101084 +++ b/net/sctp/protocol.c
101085 @@ -832,8 +832,10 @@ int sctp_register_af(struct sctp_af *af)
101086 return 0;
101087 }
101088
101089 + pax_open_kernel();
101090 INIT_LIST_HEAD(&af->list);
101091 - list_add_tail(&af->list, &sctp_address_families);
101092 + pax_close_kernel();
101093 + pax_list_add_tail(&af->list, &sctp_address_families);
101094 return 1;
101095 }
101096
101097 @@ -963,7 +965,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
101098
101099 static struct sctp_af sctp_af_inet;
101100
101101 -static struct sctp_pf sctp_pf_inet = {
101102 +static struct sctp_pf sctp_pf_inet __read_only = {
101103 .event_msgname = sctp_inet_event_msgname,
101104 .skb_msgname = sctp_inet_skb_msgname,
101105 .af_supported = sctp_inet_af_supported,
101106 @@ -1034,7 +1036,7 @@ static const struct net_protocol sctp_protocol = {
101107 };
101108
101109 /* IPv4 address related functions. */
101110 -static struct sctp_af sctp_af_inet = {
101111 +static struct sctp_af sctp_af_inet __read_only = {
101112 .sa_family = AF_INET,
101113 .sctp_xmit = sctp_v4_xmit,
101114 .setsockopt = ip_setsockopt,
101115 @@ -1119,7 +1121,7 @@ static void sctp_v4_pf_init(void)
101116
101117 static void sctp_v4_pf_exit(void)
101118 {
101119 - list_del(&sctp_af_inet.list);
101120 + pax_list_del(&sctp_af_inet.list);
101121 }
101122
101123 static int sctp_v4_protosw_init(void)
101124 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
101125 index 1a6eef3..17e898f 100644
101126 --- a/net/sctp/sm_sideeffect.c
101127 +++ b/net/sctp/sm_sideeffect.c
101128 @@ -440,7 +440,7 @@ static void sctp_generate_sack_event(unsigned long data)
101129 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
101130 }
101131
101132 -sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
101133 +sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
101134 NULL,
101135 sctp_generate_t1_cookie_event,
101136 sctp_generate_t1_init_event,
101137 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
101138 index 146b35d..1021a34 100644
101139 --- a/net/sctp/socket.c
101140 +++ b/net/sctp/socket.c
101141 @@ -2176,11 +2176,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
101142 {
101143 struct sctp_association *asoc;
101144 struct sctp_ulpevent *event;
101145 + struct sctp_event_subscribe subscribe;
101146
101147 if (optlen > sizeof(struct sctp_event_subscribe))
101148 return -EINVAL;
101149 - if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
101150 + if (copy_from_user(&subscribe, optval, optlen))
101151 return -EFAULT;
101152 + sctp_sk(sk)->subscribe = subscribe;
101153
101154 /*
101155 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
101156 @@ -4252,13 +4254,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
101157 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
101158 int __user *optlen)
101159 {
101160 + struct sctp_event_subscribe subscribe;
101161 +
101162 if (len <= 0)
101163 return -EINVAL;
101164 if (len > sizeof(struct sctp_event_subscribe))
101165 len = sizeof(struct sctp_event_subscribe);
101166 if (put_user(len, optlen))
101167 return -EFAULT;
101168 - if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
101169 + subscribe = sctp_sk(sk)->subscribe;
101170 + if (copy_to_user(optval, &subscribe, len))
101171 return -EFAULT;
101172 return 0;
101173 }
101174 @@ -4276,6 +4281,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
101175 */
101176 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
101177 {
101178 + __u32 autoclose;
101179 +
101180 /* Applicable to UDP-style socket only */
101181 if (sctp_style(sk, TCP))
101182 return -EOPNOTSUPP;
101183 @@ -4284,7 +4291,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
101184 len = sizeof(int);
101185 if (put_user(len, optlen))
101186 return -EFAULT;
101187 - if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
101188 + autoclose = sctp_sk(sk)->autoclose;
101189 + if (copy_to_user(optval, &autoclose, sizeof(int)))
101190 return -EFAULT;
101191 return 0;
101192 }
101193 @@ -4656,12 +4664,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
101194 */
101195 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
101196 {
101197 + struct sctp_initmsg initmsg;
101198 +
101199 if (len < sizeof(struct sctp_initmsg))
101200 return -EINVAL;
101201 len = sizeof(struct sctp_initmsg);
101202 if (put_user(len, optlen))
101203 return -EFAULT;
101204 - if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
101205 + initmsg = sctp_sk(sk)->initmsg;
101206 + if (copy_to_user(optval, &initmsg, len))
101207 return -EFAULT;
101208 return 0;
101209 }
101210 @@ -4702,6 +4713,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
101211 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
101212 if (space_left < addrlen)
101213 return -ENOMEM;
101214 + if (addrlen > sizeof(temp) || addrlen < 0)
101215 + return -EFAULT;
101216 if (copy_to_user(to, &temp, addrlen))
101217 return -EFAULT;
101218 to += addrlen;
101219 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
101220 index b0565af..d135e6e 100644
101221 --- a/net/sctp/sysctl.c
101222 +++ b/net/sctp/sysctl.c
101223 @@ -305,7 +305,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
101224 {
101225 struct net *net = current->nsproxy->net_ns;
101226 char tmp[8];
101227 - struct ctl_table tbl;
101228 + ctl_table_no_const tbl;
101229 int ret;
101230 int changed = 0;
101231 char *none = "none";
101232 @@ -352,7 +352,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
101233 {
101234 struct net *net = current->nsproxy->net_ns;
101235 int new_value;
101236 - struct ctl_table tbl;
101237 + ctl_table_no_const tbl;
101238 unsigned int min = *(unsigned int *) ctl->extra1;
101239 unsigned int max = *(unsigned int *) ctl->extra2;
101240 int ret;
101241 @@ -379,7 +379,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
101242 {
101243 struct net *net = current->nsproxy->net_ns;
101244 int new_value;
101245 - struct ctl_table tbl;
101246 + ctl_table_no_const tbl;
101247 unsigned int min = *(unsigned int *) ctl->extra1;
101248 unsigned int max = *(unsigned int *) ctl->extra2;
101249 int ret;
101250 @@ -402,7 +402,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
101251
101252 int sctp_sysctl_net_register(struct net *net)
101253 {
101254 - struct ctl_table *table;
101255 + ctl_table_no_const *table;
101256 int i;
101257
101258 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
101259 diff --git a/net/socket.c b/net/socket.c
101260 index e83c416..f87df4c 100644
101261 --- a/net/socket.c
101262 +++ b/net/socket.c
101263 @@ -88,6 +88,7 @@
101264 #include <linux/magic.h>
101265 #include <linux/slab.h>
101266 #include <linux/xattr.h>
101267 +#include <linux/in.h>
101268
101269 #include <asm/uaccess.h>
101270 #include <asm/unistd.h>
101271 @@ -111,6 +112,8 @@ unsigned int sysctl_net_busy_read __read_mostly;
101272 unsigned int sysctl_net_busy_poll __read_mostly;
101273 #endif
101274
101275 +#include <linux/grsock.h>
101276 +
101277 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
101278 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
101279 unsigned long nr_segs, loff_t pos);
101280 @@ -162,7 +165,7 @@ static const struct file_operations socket_file_ops = {
101281 */
101282
101283 static DEFINE_SPINLOCK(net_family_lock);
101284 -static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
101285 +const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
101286
101287 /*
101288 * Statistics counters of the socket lists
101289 @@ -328,7 +331,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
101290 &sockfs_dentry_operations, SOCKFS_MAGIC);
101291 }
101292
101293 -static struct vfsmount *sock_mnt __read_mostly;
101294 +struct vfsmount *sock_mnt __read_mostly;
101295
101296 static struct file_system_type sock_fs_type = {
101297 .name = "sockfs",
101298 @@ -1255,6 +1258,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
101299 return -EAFNOSUPPORT;
101300 if (type < 0 || type >= SOCK_MAX)
101301 return -EINVAL;
101302 + if (protocol < 0)
101303 + return -EINVAL;
101304
101305 /* Compatibility.
101306
101307 @@ -1275,6 +1280,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
101308 if (err)
101309 return err;
101310
101311 + if(!kern && !gr_search_socket(family, type, protocol)) {
101312 + if (rcu_access_pointer(net_families[family]) == NULL)
101313 + return -EAFNOSUPPORT;
101314 + else
101315 + return -EACCES;
101316 + }
101317 +
101318 + if (!kern && gr_handle_sock_all(family, type, protocol)) {
101319 + if (rcu_access_pointer(net_families[family]) == NULL)
101320 + return -EAFNOSUPPORT;
101321 + else
101322 + return -EACCES;
101323 + }
101324 +
101325 /*
101326 * Allocate the socket and allow the family to set things up. if
101327 * the protocol is 0, the family is instructed to select an appropriate
101328 @@ -1513,6 +1532,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
101329 if (sock) {
101330 err = move_addr_to_kernel(umyaddr, addrlen, &address);
101331 if (err >= 0) {
101332 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
101333 + err = -EACCES;
101334 + goto error;
101335 + }
101336 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
101337 + if (err)
101338 + goto error;
101339 +
101340 err = security_socket_bind(sock,
101341 (struct sockaddr *)&address,
101342 addrlen);
101343 @@ -1521,6 +1548,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
101344 (struct sockaddr *)
101345 &address, addrlen);
101346 }
101347 +error:
101348 fput_light(sock->file, fput_needed);
101349 }
101350 return err;
101351 @@ -1544,10 +1572,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
101352 if ((unsigned int)backlog > somaxconn)
101353 backlog = somaxconn;
101354
101355 + if (gr_handle_sock_server_other(sock->sk)) {
101356 + err = -EPERM;
101357 + goto error;
101358 + }
101359 +
101360 + err = gr_search_listen(sock);
101361 + if (err)
101362 + goto error;
101363 +
101364 err = security_socket_listen(sock, backlog);
101365 if (!err)
101366 err = sock->ops->listen(sock, backlog);
101367
101368 +error:
101369 fput_light(sock->file, fput_needed);
101370 }
101371 return err;
101372 @@ -1591,6 +1629,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
101373 newsock->type = sock->type;
101374 newsock->ops = sock->ops;
101375
101376 + if (gr_handle_sock_server_other(sock->sk)) {
101377 + err = -EPERM;
101378 + sock_release(newsock);
101379 + goto out_put;
101380 + }
101381 +
101382 + err = gr_search_accept(sock);
101383 + if (err) {
101384 + sock_release(newsock);
101385 + goto out_put;
101386 + }
101387 +
101388 /*
101389 * We don't need try_module_get here, as the listening socket (sock)
101390 * has the protocol module (sock->ops->owner) held.
101391 @@ -1636,6 +1686,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
101392 fd_install(newfd, newfile);
101393 err = newfd;
101394
101395 + gr_attach_curr_ip(newsock->sk);
101396 +
101397 out_put:
101398 fput_light(sock->file, fput_needed);
101399 out:
101400 @@ -1668,6 +1720,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
101401 int, addrlen)
101402 {
101403 struct socket *sock;
101404 + struct sockaddr *sck;
101405 struct sockaddr_storage address;
101406 int err, fput_needed;
101407
101408 @@ -1678,6 +1731,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
101409 if (err < 0)
101410 goto out_put;
101411
101412 + sck = (struct sockaddr *)&address;
101413 +
101414 + if (gr_handle_sock_client(sck)) {
101415 + err = -EACCES;
101416 + goto out_put;
101417 + }
101418 +
101419 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
101420 + if (err)
101421 + goto out_put;
101422 +
101423 err =
101424 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
101425 if (err)
101426 @@ -1759,6 +1823,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
101427 * the protocol.
101428 */
101429
101430 +asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int);
101431 +
101432 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
101433 unsigned int, flags, struct sockaddr __user *, addr,
101434 int, addr_len)
101435 @@ -1972,6 +2038,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
101436 {
101437 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
101438 return -EFAULT;
101439 +
101440 + if (kmsg->msg_namelen < 0)
101441 + return -EINVAL;
101442 +
101443 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
101444 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
101445 return 0;
101446 @@ -2047,7 +2117,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
101447 * checking falls down on this.
101448 */
101449 if (copy_from_user(ctl_buf,
101450 - (void __user __force *)msg_sys->msg_control,
101451 + (void __force_user *)msg_sys->msg_control,
101452 ctl_len))
101453 goto out_freectl;
101454 msg_sys->msg_control = ctl_buf;
101455 @@ -2227,7 +2297,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
101456 /* Save the user-mode address (verify_iovec will change the
101457 * kernel msghdr to use the kernel address space)
101458 */
101459 - uaddr = (__force void __user *)msg_sys->msg_name;
101460 + uaddr = (void __force_user *)msg_sys->msg_name;
101461 uaddr_len = COMPAT_NAMELEN(msg);
101462 if (MSG_CMSG_COMPAT & flags)
101463 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
101464 @@ -2871,7 +2941,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
101465 ifr = compat_alloc_user_space(buf_size);
101466 rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
101467
101468 - if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
101469 + if (copy_in_user(ifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
101470 return -EFAULT;
101471
101472 if (put_user(convert_in ? rxnfc : compat_ptr(data),
101473 @@ -2985,14 +3055,14 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
101474 old_fs = get_fs();
101475 set_fs(KERNEL_DS);
101476 err = dev_ioctl(net, cmd,
101477 - (struct ifreq __user __force *) &kifr);
101478 + (struct ifreq __force_user *) &kifr);
101479 set_fs(old_fs);
101480
101481 return err;
101482 case SIOCBONDSLAVEINFOQUERY:
101483 case SIOCBONDINFOQUERY:
101484 uifr = compat_alloc_user_space(sizeof(*uifr));
101485 - if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
101486 + if (copy_in_user(uifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
101487 return -EFAULT;
101488
101489 if (get_user(data, &ifr32->ifr_ifru.ifru_data))
101490 @@ -3094,7 +3164,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
101491
101492 old_fs = get_fs();
101493 set_fs(KERNEL_DS);
101494 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
101495 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
101496 set_fs(old_fs);
101497
101498 if (cmd == SIOCGIFMAP && !err) {
101499 @@ -3199,7 +3269,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
101500 ret |= get_user(rtdev, &(ur4->rt_dev));
101501 if (rtdev) {
101502 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
101503 - r4.rt_dev = (char __user __force *)devname;
101504 + r4.rt_dev = (char __force_user *)devname;
101505 devname[15] = 0;
101506 } else
101507 r4.rt_dev = NULL;
101508 @@ -3425,8 +3495,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
101509 int __user *uoptlen;
101510 int err;
101511
101512 - uoptval = (char __user __force *) optval;
101513 - uoptlen = (int __user __force *) optlen;
101514 + uoptval = (char __force_user *) optval;
101515 + uoptlen = (int __force_user *) optlen;
101516
101517 set_fs(KERNEL_DS);
101518 if (level == SOL_SOCKET)
101519 @@ -3446,7 +3516,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
101520 char __user *uoptval;
101521 int err;
101522
101523 - uoptval = (char __user __force *) optval;
101524 + uoptval = (char __force_user *) optval;
101525
101526 set_fs(KERNEL_DS);
101527 if (level == SOL_SOCKET)
101528 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
101529 index a642fd616..1eebf22 100644
101530 --- a/net/sunrpc/auth_gss/auth_gss.c
101531 +++ b/net/sunrpc/auth_gss/auth_gss.c
101532 @@ -500,10 +500,12 @@ gss_alloc_msg(struct gss_auth *gss_auth,
101533 default:
101534 err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
101535 if (err)
101536 - goto err_free_msg;
101537 + goto err_put_pipe_version;
101538 };
101539 kref_get(&gss_auth->kref);
101540 return gss_msg;
101541 +err_put_pipe_version:
101542 + put_pipe_version(gss_auth->net);
101543 err_free_msg:
101544 kfree(gss_msg);
101545 err:
101546 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
101547 index 1b94a9c..496f7f5 100644
101548 --- a/net/sunrpc/auth_gss/svcauth_gss.c
101549 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
101550 @@ -1140,7 +1140,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
101551 uint64_t *handle)
101552 {
101553 struct rsc rsci, *rscp = NULL;
101554 - static atomic64_t ctxhctr;
101555 + static atomic64_unchecked_t ctxhctr = ATOMIC64_INIT(0);
101556 long long ctxh;
101557 struct gss_api_mech *gm = NULL;
101558 time_t expiry;
101559 @@ -1151,7 +1151,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
101560 status = -ENOMEM;
101561 /* the handle needs to be just a unique id,
101562 * use a static counter */
101563 - ctxh = atomic64_inc_return(&ctxhctr);
101564 + ctxh = atomic64_inc_return_unchecked(&ctxhctr);
101565
101566 /* make a copy for the caller */
101567 *handle = ctxh;
101568 diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
101569 index 890a299..e860d4f 100644
101570 --- a/net/sunrpc/backchannel_rqst.c
101571 +++ b/net/sunrpc/backchannel_rqst.c
101572 @@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
101573 free_page((unsigned long)xbufp->head[0].iov_base);
101574 xbufp = &req->rq_snd_buf;
101575 free_page((unsigned long)xbufp->head[0].iov_base);
101576 - list_del(&req->rq_bc_pa_list);
101577 kfree(req);
101578 }
101579
101580 @@ -168,8 +167,10 @@ out_free:
101581 /*
101582 * Memory allocation failed, free the temporary list
101583 */
101584 - list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
101585 + list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
101586 + list_del(&req->rq_bc_pa_list);
101587 xprt_free_allocation(req);
101588 + }
101589
101590 dprintk("RPC: setup backchannel transport failed\n");
101591 return -ENOMEM;
101592 @@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
101593 xprt_dec_alloc_count(xprt, max_reqs);
101594 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
101595 dprintk("RPC: req=%p\n", req);
101596 + list_del(&req->rq_bc_pa_list);
101597 xprt_free_allocation(req);
101598 if (--max_reqs == 0)
101599 break;
101600 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
101601 index e726e16..393c39e 100644
101602 --- a/net/sunrpc/clnt.c
101603 +++ b/net/sunrpc/clnt.c
101604 @@ -1415,7 +1415,9 @@ call_start(struct rpc_task *task)
101605 (RPC_IS_ASYNC(task) ? "async" : "sync"));
101606
101607 /* Increment call count */
101608 - task->tk_msg.rpc_proc->p_count++;
101609 + pax_open_kernel();
101610 + (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
101611 + pax_close_kernel();
101612 clnt->cl_stats->rpccnt++;
101613 task->tk_action = call_reserve;
101614 }
101615 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
101616 index ff3cc4b..7612a9e 100644
101617 --- a/net/sunrpc/sched.c
101618 +++ b/net/sunrpc/sched.c
101619 @@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(void *word)
101620 #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS)
101621 static void rpc_task_set_debuginfo(struct rpc_task *task)
101622 {
101623 - static atomic_t rpc_pid;
101624 + static atomic_unchecked_t rpc_pid;
101625
101626 - task->tk_pid = atomic_inc_return(&rpc_pid);
101627 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
101628 }
101629 #else
101630 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
101631 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
101632 index e7fbe36..8052603 100644
101633 --- a/net/sunrpc/svc.c
101634 +++ b/net/sunrpc/svc.c
101635 @@ -1158,7 +1158,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
101636 svc_putnl(resv, RPC_SUCCESS);
101637
101638 /* Bump per-procedure stats counter */
101639 - procp->pc_count++;
101640 + pax_open_kernel();
101641 + (*(unsigned int *)&procp->pc_count)++;
101642 + pax_close_kernel();
101643
101644 /* Initialize storage for argp and resp */
101645 memset(rqstp->rq_argp, 0, procp->pc_argsize);
101646 diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
101647 index 621ca7b..59421dd 100644
101648 --- a/net/sunrpc/svcauth_unix.c
101649 +++ b/net/sunrpc/svcauth_unix.c
101650 @@ -414,7 +414,7 @@ struct unix_gid {
101651 struct group_info *gi;
101652 };
101653
101654 -static int unix_gid_hash(kuid_t uid)
101655 +static int __intentional_overflow(-1) unix_gid_hash(kuid_t uid)
101656 {
101657 return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
101658 }
101659 @@ -470,7 +470,7 @@ static void unix_gid_request(struct cache_detail *cd,
101660 (*bpp)[-1] = '\n';
101661 }
101662
101663 -static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
101664 +static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
101665
101666 static int unix_gid_parse(struct cache_detail *cd,
101667 char *mesg, int mlen)
101668 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
101669 index c1b6270..05089c1 100644
101670 --- a/net/sunrpc/xprtrdma/svc_rdma.c
101671 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
101672 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
101673 static unsigned int min_max_inline = 4096;
101674 static unsigned int max_max_inline = 65536;
101675
101676 -atomic_t rdma_stat_recv;
101677 -atomic_t rdma_stat_read;
101678 -atomic_t rdma_stat_write;
101679 -atomic_t rdma_stat_sq_starve;
101680 -atomic_t rdma_stat_rq_starve;
101681 -atomic_t rdma_stat_rq_poll;
101682 -atomic_t rdma_stat_rq_prod;
101683 -atomic_t rdma_stat_sq_poll;
101684 -atomic_t rdma_stat_sq_prod;
101685 +atomic_unchecked_t rdma_stat_recv;
101686 +atomic_unchecked_t rdma_stat_read;
101687 +atomic_unchecked_t rdma_stat_write;
101688 +atomic_unchecked_t rdma_stat_sq_starve;
101689 +atomic_unchecked_t rdma_stat_rq_starve;
101690 +atomic_unchecked_t rdma_stat_rq_poll;
101691 +atomic_unchecked_t rdma_stat_rq_prod;
101692 +atomic_unchecked_t rdma_stat_sq_poll;
101693 +atomic_unchecked_t rdma_stat_sq_prod;
101694
101695 /* Temporary NFS request map and context caches */
101696 struct kmem_cache *svc_rdma_map_cachep;
101697 @@ -110,7 +110,7 @@ static int read_reset_stat(struct ctl_table *table, int write,
101698 len -= *ppos;
101699 if (len > *lenp)
101700 len = *lenp;
101701 - if (len && copy_to_user(buffer, str_buf, len))
101702 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
101703 return -EFAULT;
101704 *lenp = len;
101705 *ppos += len;
101706 @@ -151,63 +151,63 @@ static struct ctl_table svcrdma_parm_table[] = {
101707 {
101708 .procname = "rdma_stat_read",
101709 .data = &rdma_stat_read,
101710 - .maxlen = sizeof(atomic_t),
101711 + .maxlen = sizeof(atomic_unchecked_t),
101712 .mode = 0644,
101713 .proc_handler = read_reset_stat,
101714 },
101715 {
101716 .procname = "rdma_stat_recv",
101717 .data = &rdma_stat_recv,
101718 - .maxlen = sizeof(atomic_t),
101719 + .maxlen = sizeof(atomic_unchecked_t),
101720 .mode = 0644,
101721 .proc_handler = read_reset_stat,
101722 },
101723 {
101724 .procname = "rdma_stat_write",
101725 .data = &rdma_stat_write,
101726 - .maxlen = sizeof(atomic_t),
101727 + .maxlen = sizeof(atomic_unchecked_t),
101728 .mode = 0644,
101729 .proc_handler = read_reset_stat,
101730 },
101731 {
101732 .procname = "rdma_stat_sq_starve",
101733 .data = &rdma_stat_sq_starve,
101734 - .maxlen = sizeof(atomic_t),
101735 + .maxlen = sizeof(atomic_unchecked_t),
101736 .mode = 0644,
101737 .proc_handler = read_reset_stat,
101738 },
101739 {
101740 .procname = "rdma_stat_rq_starve",
101741 .data = &rdma_stat_rq_starve,
101742 - .maxlen = sizeof(atomic_t),
101743 + .maxlen = sizeof(atomic_unchecked_t),
101744 .mode = 0644,
101745 .proc_handler = read_reset_stat,
101746 },
101747 {
101748 .procname = "rdma_stat_rq_poll",
101749 .data = &rdma_stat_rq_poll,
101750 - .maxlen = sizeof(atomic_t),
101751 + .maxlen = sizeof(atomic_unchecked_t),
101752 .mode = 0644,
101753 .proc_handler = read_reset_stat,
101754 },
101755 {
101756 .procname = "rdma_stat_rq_prod",
101757 .data = &rdma_stat_rq_prod,
101758 - .maxlen = sizeof(atomic_t),
101759 + .maxlen = sizeof(atomic_unchecked_t),
101760 .mode = 0644,
101761 .proc_handler = read_reset_stat,
101762 },
101763 {
101764 .procname = "rdma_stat_sq_poll",
101765 .data = &rdma_stat_sq_poll,
101766 - .maxlen = sizeof(atomic_t),
101767 + .maxlen = sizeof(atomic_unchecked_t),
101768 .mode = 0644,
101769 .proc_handler = read_reset_stat,
101770 },
101771 {
101772 .procname = "rdma_stat_sq_prod",
101773 .data = &rdma_stat_sq_prod,
101774 - .maxlen = sizeof(atomic_t),
101775 + .maxlen = sizeof(atomic_unchecked_t),
101776 .mode = 0644,
101777 .proc_handler = read_reset_stat,
101778 },
101779 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
101780 index 0ce7552..d074459 100644
101781 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
101782 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
101783 @@ -501,7 +501,7 @@ next_sge:
101784 svc_rdma_put_context(ctxt, 0);
101785 goto out;
101786 }
101787 - atomic_inc(&rdma_stat_read);
101788 + atomic_inc_unchecked(&rdma_stat_read);
101789
101790 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
101791 chl_map->ch[ch_no].count -= read_wr.num_sge;
101792 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
101793 dto_q);
101794 list_del_init(&ctxt->dto_q);
101795 } else {
101796 - atomic_inc(&rdma_stat_rq_starve);
101797 + atomic_inc_unchecked(&rdma_stat_rq_starve);
101798 clear_bit(XPT_DATA, &xprt->xpt_flags);
101799 ctxt = NULL;
101800 }
101801 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
101802 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
101803 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
101804 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
101805 - atomic_inc(&rdma_stat_recv);
101806 + atomic_inc_unchecked(&rdma_stat_recv);
101807
101808 /* Build up the XDR from the receive buffers. */
101809 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
101810 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
101811 index c1d124d..acfc59e 100644
101812 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
101813 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
101814 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
101815 write_wr.wr.rdma.remote_addr = to;
101816
101817 /* Post It */
101818 - atomic_inc(&rdma_stat_write);
101819 + atomic_inc_unchecked(&rdma_stat_write);
101820 if (svc_rdma_send(xprt, &write_wr))
101821 goto err;
101822 return 0;
101823 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
101824 index 62e4f9b..dd3f2d7 100644
101825 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
101826 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
101827 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
101828 return;
101829
101830 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
101831 - atomic_inc(&rdma_stat_rq_poll);
101832 + atomic_inc_unchecked(&rdma_stat_rq_poll);
101833
101834 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
101835 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
101836 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
101837 }
101838
101839 if (ctxt)
101840 - atomic_inc(&rdma_stat_rq_prod);
101841 + atomic_inc_unchecked(&rdma_stat_rq_prod);
101842
101843 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
101844 /*
101845 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
101846 return;
101847
101848 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
101849 - atomic_inc(&rdma_stat_sq_poll);
101850 + atomic_inc_unchecked(&rdma_stat_sq_poll);
101851 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
101852 if (wc.status != IB_WC_SUCCESS)
101853 /* Close the transport */
101854 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
101855 }
101856
101857 if (ctxt)
101858 - atomic_inc(&rdma_stat_sq_prod);
101859 + atomic_inc_unchecked(&rdma_stat_sq_prod);
101860 }
101861
101862 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
101863 @@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
101864 spin_lock_bh(&xprt->sc_lock);
101865 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
101866 spin_unlock_bh(&xprt->sc_lock);
101867 - atomic_inc(&rdma_stat_sq_starve);
101868 + atomic_inc_unchecked(&rdma_stat_sq_starve);
101869
101870 /* See if we can opportunistically reap SQ WR to make room */
101871 sq_cq_reap(xprt);
101872 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
101873 index e7000be..e3b0ba7 100644
101874 --- a/net/sysctl_net.c
101875 +++ b/net/sysctl_net.c
101876 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
101877 kgid_t root_gid = make_kgid(net->user_ns, 0);
101878
101879 /* Allow network administrator to have same access as root. */
101880 - if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
101881 + if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
101882 uid_eq(root_uid, current_euid())) {
101883 int mode = (table->mode >> 6) & 7;
101884 return (mode << 6) | (mode << 3) | mode;
101885 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
101886 index d38bb45..38d5df5 100644
101887 --- a/net/tipc/subscr.c
101888 +++ b/net/tipc/subscr.c
101889 @@ -98,7 +98,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
101890 struct kvec msg_sect;
101891 int ret;
101892
101893 - msg_sect.iov_base = (void *)&sub->evt;
101894 + msg_sect.iov_base = &sub->evt;
101895 msg_sect.iov_len = sizeof(struct tipc_event);
101896
101897 sub->evt.event = htohl(event, sub->swap);
101898 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
101899 index d7c1ac6..b0fc322 100644
101900 --- a/net/unix/af_unix.c
101901 +++ b/net/unix/af_unix.c
101902 @@ -789,6 +789,12 @@ static struct sock *unix_find_other(struct net *net,
101903 err = -ECONNREFUSED;
101904 if (!S_ISSOCK(inode->i_mode))
101905 goto put_fail;
101906 +
101907 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
101908 + err = -EACCES;
101909 + goto put_fail;
101910 + }
101911 +
101912 u = unix_find_socket_byinode(inode);
101913 if (!u)
101914 goto put_fail;
101915 @@ -809,6 +815,13 @@ static struct sock *unix_find_other(struct net *net,
101916 if (u) {
101917 struct dentry *dentry;
101918 dentry = unix_sk(u)->path.dentry;
101919 +
101920 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
101921 + err = -EPERM;
101922 + sock_put(u);
101923 + goto fail;
101924 + }
101925 +
101926 if (dentry)
101927 touch_atime(&unix_sk(u)->path);
101928 } else
101929 @@ -842,12 +855,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
101930 */
101931 err = security_path_mknod(&path, dentry, mode, 0);
101932 if (!err) {
101933 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
101934 + err = -EACCES;
101935 + goto out;
101936 + }
101937 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
101938 if (!err) {
101939 res->mnt = mntget(path.mnt);
101940 res->dentry = dget(dentry);
101941 + gr_handle_create(dentry, path.mnt);
101942 }
101943 }
101944 +out:
101945 done_path_create(&path, dentry);
101946 return err;
101947 }
101948 @@ -1785,8 +1804,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
101949 goto out;
101950
101951 err = mutex_lock_interruptible(&u->readlock);
101952 - if (err) {
101953 - err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
101954 + if (unlikely(err)) {
101955 + /* recvmsg() in non blocking mode is supposed to return -EAGAIN
101956 + * sk_rcvtimeo is not honored by mutex_lock_interruptible()
101957 + */
101958 + err = noblock ? -EAGAIN : -ERESTARTSYS;
101959 goto out;
101960 }
101961
101962 @@ -1911,6 +1933,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
101963 struct unix_sock *u = unix_sk(sk);
101964 struct sockaddr_un *sunaddr = msg->msg_name;
101965 int copied = 0;
101966 + int noblock = flags & MSG_DONTWAIT;
101967 int check_creds = 0;
101968 int target;
101969 int err = 0;
101970 @@ -1926,7 +1949,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
101971 goto out;
101972
101973 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
101974 - timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
101975 + timeo = sock_rcvtimeo(sk, noblock);
101976
101977 /* Lock the socket to prevent queue disordering
101978 * while sleeps in memcpy_tomsg
101979 @@ -1938,8 +1961,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
101980 }
101981
101982 err = mutex_lock_interruptible(&u->readlock);
101983 - if (err) {
101984 - err = sock_intr_errno(timeo);
101985 + if (unlikely(err)) {
101986 + /* recvmsg() in non blocking mode is supposed to return -EAGAIN
101987 + * sk_rcvtimeo is not honored by mutex_lock_interruptible()
101988 + */
101989 + err = noblock ? -EAGAIN : -ERESTARTSYS;
101990 goto out;
101991 }
101992
101993 @@ -2335,9 +2361,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
101994 seq_puts(seq, "Num RefCount Protocol Flags Type St "
101995 "Inode Path\n");
101996 else {
101997 - struct sock *s = v;
101998 + struct sock *s = v, *peer;
101999 struct unix_sock *u = unix_sk(s);
102000 unix_state_lock(s);
102001 + peer = unix_peer(s);
102002 + unix_state_unlock(s);
102003 +
102004 + unix_state_double_lock(s, peer);
102005
102006 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
102007 s,
102008 @@ -2364,8 +2394,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
102009 }
102010 for ( ; i < len; i++)
102011 seq_putc(seq, u->addr->name->sun_path[i]);
102012 - }
102013 - unix_state_unlock(s);
102014 + } else if (peer)
102015 + seq_printf(seq, " P%lu", sock_i_ino(peer));
102016 +
102017 + unix_state_double_unlock(s, peer);
102018 seq_putc(seq, '\n');
102019 }
102020
102021 diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
102022 index b3d5150..ff3a837 100644
102023 --- a/net/unix/sysctl_net_unix.c
102024 +++ b/net/unix/sysctl_net_unix.c
102025 @@ -28,7 +28,7 @@ static struct ctl_table unix_table[] = {
102026
102027 int __net_init unix_sysctl_register(struct net *net)
102028 {
102029 - struct ctl_table *table;
102030 + ctl_table_no_const *table;
102031
102032 table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
102033 if (table == NULL)
102034 diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
102035 index 9a73074..aecba9a 100644
102036 --- a/net/vmw_vsock/vmci_transport_notify.c
102037 +++ b/net/vmw_vsock/vmci_transport_notify.c
102038 @@ -662,19 +662,19 @@ static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
102039
102040 /* Socket control packet based operations. */
102041 struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
102042 - vmci_transport_notify_pkt_socket_init,
102043 - vmci_transport_notify_pkt_socket_destruct,
102044 - vmci_transport_notify_pkt_poll_in,
102045 - vmci_transport_notify_pkt_poll_out,
102046 - vmci_transport_notify_pkt_handle_pkt,
102047 - vmci_transport_notify_pkt_recv_init,
102048 - vmci_transport_notify_pkt_recv_pre_block,
102049 - vmci_transport_notify_pkt_recv_pre_dequeue,
102050 - vmci_transport_notify_pkt_recv_post_dequeue,
102051 - vmci_transport_notify_pkt_send_init,
102052 - vmci_transport_notify_pkt_send_pre_block,
102053 - vmci_transport_notify_pkt_send_pre_enqueue,
102054 - vmci_transport_notify_pkt_send_post_enqueue,
102055 - vmci_transport_notify_pkt_process_request,
102056 - vmci_transport_notify_pkt_process_negotiate,
102057 + .socket_init = vmci_transport_notify_pkt_socket_init,
102058 + .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
102059 + .poll_in = vmci_transport_notify_pkt_poll_in,
102060 + .poll_out = vmci_transport_notify_pkt_poll_out,
102061 + .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
102062 + .recv_init = vmci_transport_notify_pkt_recv_init,
102063 + .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
102064 + .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
102065 + .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
102066 + .send_init = vmci_transport_notify_pkt_send_init,
102067 + .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
102068 + .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
102069 + .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
102070 + .process_request = vmci_transport_notify_pkt_process_request,
102071 + .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
102072 };
102073 diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
102074 index 622bd7a..b92086c 100644
102075 --- a/net/vmw_vsock/vmci_transport_notify_qstate.c
102076 +++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
102077 @@ -420,19 +420,19 @@ vmci_transport_notify_pkt_send_pre_enqueue(
102078
102079 /* Socket always on control packet based operations. */
102080 struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
102081 - vmci_transport_notify_pkt_socket_init,
102082 - vmci_transport_notify_pkt_socket_destruct,
102083 - vmci_transport_notify_pkt_poll_in,
102084 - vmci_transport_notify_pkt_poll_out,
102085 - vmci_transport_notify_pkt_handle_pkt,
102086 - vmci_transport_notify_pkt_recv_init,
102087 - vmci_transport_notify_pkt_recv_pre_block,
102088 - vmci_transport_notify_pkt_recv_pre_dequeue,
102089 - vmci_transport_notify_pkt_recv_post_dequeue,
102090 - vmci_transport_notify_pkt_send_init,
102091 - vmci_transport_notify_pkt_send_pre_block,
102092 - vmci_transport_notify_pkt_send_pre_enqueue,
102093 - vmci_transport_notify_pkt_send_post_enqueue,
102094 - vmci_transport_notify_pkt_process_request,
102095 - vmci_transport_notify_pkt_process_negotiate,
102096 + .socket_init = vmci_transport_notify_pkt_socket_init,
102097 + .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
102098 + .poll_in = vmci_transport_notify_pkt_poll_in,
102099 + .poll_out = vmci_transport_notify_pkt_poll_out,
102100 + .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
102101 + .recv_init = vmci_transport_notify_pkt_recv_init,
102102 + .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
102103 + .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
102104 + .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
102105 + .send_init = vmci_transport_notify_pkt_send_init,
102106 + .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
102107 + .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
102108 + .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
102109 + .process_request = vmci_transport_notify_pkt_process_request,
102110 + .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
102111 };
102112 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
102113 index c8717c1..08539f5 100644
102114 --- a/net/wireless/wext-core.c
102115 +++ b/net/wireless/wext-core.c
102116 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
102117 */
102118
102119 /* Support for very large requests */
102120 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
102121 - (user_length > descr->max_tokens)) {
102122 + if (user_length > descr->max_tokens) {
102123 /* Allow userspace to GET more than max so
102124 * we can support any size GET requests.
102125 * There is still a limit : -ENOMEM.
102126 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
102127 }
102128 }
102129
102130 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
102131 - /*
102132 - * If this is a GET, but not NOMAX, it means that the extra
102133 - * data is not bounded by userspace, but by max_tokens. Thus
102134 - * set the length to max_tokens. This matches the extra data
102135 - * allocation.
102136 - * The driver should fill it with the number of tokens it
102137 - * provided, and it may check iwp->length rather than having
102138 - * knowledge of max_tokens. If the driver doesn't change the
102139 - * iwp->length, this ioctl just copies back max_token tokens
102140 - * filled with zeroes. Hopefully the driver isn't claiming
102141 - * them to be valid data.
102142 - */
102143 - iwp->length = descr->max_tokens;
102144 - }
102145 -
102146 err = handler(dev, info, (union iwreq_data *) iwp, extra);
102147
102148 iwp->length += essid_compat;
102149 diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
102150 index 4323952..a06dfe1 100644
102151 --- a/net/x25/sysctl_net_x25.c
102152 +++ b/net/x25/sysctl_net_x25.c
102153 @@ -70,7 +70,7 @@ static struct ctl_table x25_table[] = {
102154 .mode = 0644,
102155 .proc_handler = proc_dointvec,
102156 },
102157 - { 0, },
102158 + { },
102159 };
102160
102161 void __init x25_register_sysctl(void)
102162 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
102163 index 9a91f74..502f6605 100644
102164 --- a/net/xfrm/xfrm_policy.c
102165 +++ b/net/xfrm/xfrm_policy.c
102166 @@ -332,7 +332,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
102167 {
102168 policy->walk.dead = 1;
102169
102170 - atomic_inc(&policy->genid);
102171 + atomic_inc_unchecked(&policy->genid);
102172
102173 if (del_timer(&policy->polq.hold_timer))
102174 xfrm_pol_put(policy);
102175 @@ -660,7 +660,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
102176 hlist_add_head(&policy->bydst, chain);
102177 xfrm_pol_hold(policy);
102178 net->xfrm.policy_count[dir]++;
102179 - atomic_inc(&flow_cache_genid);
102180 + atomic_inc_unchecked(&flow_cache_genid);
102181
102182 /* After previous checking, family can either be AF_INET or AF_INET6 */
102183 if (policy->family == AF_INET)
102184 @@ -1636,7 +1636,7 @@ free_dst:
102185 goto out;
102186 }
102187
102188 -static int inline
102189 +static inline int
102190 xfrm_dst_alloc_copy(void **target, const void *src, int size)
102191 {
102192 if (!*target) {
102193 @@ -1648,7 +1648,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
102194 return 0;
102195 }
102196
102197 -static int inline
102198 +static inline int
102199 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
102200 {
102201 #ifdef CONFIG_XFRM_SUB_POLICY
102202 @@ -1660,7 +1660,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
102203 #endif
102204 }
102205
102206 -static int inline
102207 +static inline int
102208 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
102209 {
102210 #ifdef CONFIG_XFRM_SUB_POLICY
102211 @@ -1754,7 +1754,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
102212
102213 xdst->num_pols = num_pols;
102214 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
102215 - xdst->policy_genid = atomic_read(&pols[0]->genid);
102216 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
102217
102218 return xdst;
102219 }
102220 @@ -2582,11 +2582,12 @@ void xfrm_garbage_collect(struct net *net)
102221 }
102222 EXPORT_SYMBOL(xfrm_garbage_collect);
102223
102224 -static void xfrm_garbage_collect_deferred(struct net *net)
102225 +void xfrm_garbage_collect_deferred(struct net *net)
102226 {
102227 flow_cache_flush_deferred();
102228 __xfrm_garbage_collect(net);
102229 }
102230 +EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
102231
102232 static void xfrm_init_pmtu(struct dst_entry *dst)
102233 {
102234 @@ -2636,7 +2637,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
102235 if (xdst->xfrm_genid != dst->xfrm->genid)
102236 return 0;
102237 if (xdst->num_pols > 0 &&
102238 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
102239 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
102240 return 0;
102241
102242 mtu = dst_mtu(dst->child);
102243 @@ -2724,8 +2725,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
102244 dst_ops->link_failure = xfrm_link_failure;
102245 if (likely(dst_ops->neigh_lookup == NULL))
102246 dst_ops->neigh_lookup = xfrm_neigh_lookup;
102247 - if (likely(afinfo->garbage_collect == NULL))
102248 - afinfo->garbage_collect = xfrm_garbage_collect_deferred;
102249 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
102250 }
102251 spin_unlock(&xfrm_policy_afinfo_lock);
102252 @@ -2779,7 +2778,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
102253 dst_ops->check = NULL;
102254 dst_ops->negative_advice = NULL;
102255 dst_ops->link_failure = NULL;
102256 - afinfo->garbage_collect = NULL;
102257 }
102258 return err;
102259 }
102260 @@ -3162,7 +3160,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
102261 sizeof(pol->xfrm_vec[i].saddr));
102262 pol->xfrm_vec[i].encap_family = mp->new_family;
102263 /* flush bundles */
102264 - atomic_inc(&pol->genid);
102265 + atomic_inc_unchecked(&pol->genid);
102266 }
102267 }
102268
102269 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
102270 index 68c2f35..9eb05ca 100644
102271 --- a/net/xfrm/xfrm_state.c
102272 +++ b/net/xfrm/xfrm_state.c
102273 @@ -174,12 +174,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
102274
102275 if (unlikely(afinfo == NULL))
102276 return -EAFNOSUPPORT;
102277 - typemap = afinfo->type_map;
102278 + typemap = (const struct xfrm_type **)afinfo->type_map;
102279 spin_lock_bh(&xfrm_type_lock);
102280
102281 - if (likely(typemap[type->proto] == NULL))
102282 + if (likely(typemap[type->proto] == NULL)) {
102283 + pax_open_kernel();
102284 typemap[type->proto] = type;
102285 - else
102286 + pax_close_kernel();
102287 + } else
102288 err = -EEXIST;
102289 spin_unlock_bh(&xfrm_type_lock);
102290 xfrm_state_put_afinfo(afinfo);
102291 @@ -195,13 +197,16 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
102292
102293 if (unlikely(afinfo == NULL))
102294 return -EAFNOSUPPORT;
102295 - typemap = afinfo->type_map;
102296 + typemap = (const struct xfrm_type **)afinfo->type_map;
102297 spin_lock_bh(&xfrm_type_lock);
102298
102299 if (unlikely(typemap[type->proto] != type))
102300 err = -ENOENT;
102301 - else
102302 + else {
102303 + pax_open_kernel();
102304 typemap[type->proto] = NULL;
102305 + pax_close_kernel();
102306 + }
102307 spin_unlock_bh(&xfrm_type_lock);
102308 xfrm_state_put_afinfo(afinfo);
102309 return err;
102310 @@ -211,7 +216,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
102311 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
102312 {
102313 struct xfrm_state_afinfo *afinfo;
102314 - const struct xfrm_type **typemap;
102315 const struct xfrm_type *type;
102316 int modload_attempted = 0;
102317
102318 @@ -219,9 +223,8 @@ retry:
102319 afinfo = xfrm_state_get_afinfo(family);
102320 if (unlikely(afinfo == NULL))
102321 return NULL;
102322 - typemap = afinfo->type_map;
102323
102324 - type = typemap[proto];
102325 + type = afinfo->type_map[proto];
102326 if (unlikely(type && !try_module_get(type->owner)))
102327 type = NULL;
102328 if (!type && !modload_attempted) {
102329 @@ -255,7 +258,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
102330 return -EAFNOSUPPORT;
102331
102332 err = -EEXIST;
102333 - modemap = afinfo->mode_map;
102334 + modemap = (struct xfrm_mode **)afinfo->mode_map;
102335 spin_lock_bh(&xfrm_mode_lock);
102336 if (modemap[mode->encap])
102337 goto out;
102338 @@ -264,8 +267,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
102339 if (!try_module_get(afinfo->owner))
102340 goto out;
102341
102342 - mode->afinfo = afinfo;
102343 + pax_open_kernel();
102344 + *(const void **)&mode->afinfo = afinfo;
102345 modemap[mode->encap] = mode;
102346 + pax_close_kernel();
102347 err = 0;
102348
102349 out:
102350 @@ -289,10 +294,12 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
102351 return -EAFNOSUPPORT;
102352
102353 err = -ENOENT;
102354 - modemap = afinfo->mode_map;
102355 + modemap = (struct xfrm_mode **)afinfo->mode_map;
102356 spin_lock_bh(&xfrm_mode_lock);
102357 if (likely(modemap[mode->encap] == mode)) {
102358 + pax_open_kernel();
102359 modemap[mode->encap] = NULL;
102360 + pax_close_kernel();
102361 module_put(mode->afinfo->owner);
102362 err = 0;
102363 }
102364 @@ -1486,10 +1493,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
102365 u32 xfrm_get_acqseq(void)
102366 {
102367 u32 res;
102368 - static atomic_t acqseq;
102369 + static atomic_unchecked_t acqseq;
102370
102371 do {
102372 - res = atomic_inc_return(&acqseq);
102373 + res = atomic_inc_return_unchecked(&acqseq);
102374 } while (!res);
102375
102376 return res;
102377 diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
102378 index 05a6e3d..6716ec9 100644
102379 --- a/net/xfrm/xfrm_sysctl.c
102380 +++ b/net/xfrm/xfrm_sysctl.c
102381 @@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
102382
102383 int __net_init xfrm_sysctl_init(struct net *net)
102384 {
102385 - struct ctl_table *table;
102386 + ctl_table_no_const *table;
102387
102388 __xfrm_sysctl_init(net);
102389
102390 diff --git a/scripts/Makefile b/scripts/Makefile
102391 index 01e7adb..6176d5d 100644
102392 --- a/scripts/Makefile
102393 +++ b/scripts/Makefile
102394 @@ -40,3 +40,5 @@ subdir-$(CONFIG_DTC) += dtc
102395
102396 # Let clean descend into subdirs
102397 subdir- += basic kconfig package selinux
102398 +
102399 +clean-files := randstruct.seed
102400 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
102401 index d5d859c..781cbcb 100644
102402 --- a/scripts/Makefile.build
102403 +++ b/scripts/Makefile.build
102404 @@ -111,7 +111,7 @@ endif
102405 endif
102406
102407 # Do not include host rules unless needed
102408 -ifneq ($(hostprogs-y)$(hostprogs-m),)
102409 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
102410 include scripts/Makefile.host
102411 endif
102412
102413 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
102414 index 686cb0d..9d653bf 100644
102415 --- a/scripts/Makefile.clean
102416 +++ b/scripts/Makefile.clean
102417 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
102418 __clean-files := $(extra-y) $(always) \
102419 $(targets) $(clean-files) \
102420 $(host-progs) \
102421 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
102422 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
102423 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
102424
102425 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
102426
102427 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
102428 index 1ac414f..38575f7 100644
102429 --- a/scripts/Makefile.host
102430 +++ b/scripts/Makefile.host
102431 @@ -31,6 +31,8 @@
102432 # Note: Shared libraries consisting of C++ files are not supported
102433
102434 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
102435 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
102436 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
102437
102438 # C code
102439 # Executables compiled from a single .c file
102440 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
102441 # Shared libaries (only .c supported)
102442 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
102443 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
102444 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
102445 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
102446 # Remove .so files from "xxx-objs"
102447 host-cobjs := $(filter-out %.so,$(host-cobjs))
102448 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
102449
102450 -#Object (.o) files used by the shared libaries
102451 +# Object (.o) files used by the shared libaries
102452 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
102453 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
102454
102455 # output directory for programs/.o files
102456 # hostprogs-y := tools/build may have been specified. Retrieve directory
102457 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
102458 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
102459 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
102460 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
102461 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
102462 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
102463 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
102464 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
102465
102466 obj-dirs += $(host-objdirs)
102467 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
102468 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
102469 $(call if_changed_dep,host-cshobjs)
102470
102471 +# Compile .c file, create position independent .o file
102472 +# host-cxxshobjs -> .o
102473 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
102474 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
102475 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
102476 + $(call if_changed_dep,host-cxxshobjs)
102477 +
102478 # Link a shared library, based on position independent .o files
102479 # *.o -> .so shared library (host-cshlib)
102480 quiet_cmd_host-cshlib = HOSTLLD -shared $@
102481 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
102482 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
102483 $(call if_changed,host-cshlib)
102484
102485 +# Link a shared library, based on position independent .o files
102486 +# *.o -> .so shared library (host-cxxshlib)
102487 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
102488 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
102489 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
102490 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
102491 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
102492 + $(call if_changed,host-cxxshlib)
102493 +
102494 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
102495 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
102496 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
102497
102498 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
102499 index 078fe1d..fbdb363 100644
102500 --- a/scripts/basic/fixdep.c
102501 +++ b/scripts/basic/fixdep.c
102502 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
102503 /*
102504 * Lookup a value in the configuration string.
102505 */
102506 -static int is_defined_config(const char *name, int len, unsigned int hash)
102507 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
102508 {
102509 struct item *aux;
102510
102511 @@ -211,10 +211,10 @@ static void clear_config(void)
102512 /*
102513 * Record the use of a CONFIG_* word.
102514 */
102515 -static void use_config(const char *m, int slen)
102516 +static void use_config(const char *m, unsigned int slen)
102517 {
102518 unsigned int hash = strhash(m, slen);
102519 - int c, i;
102520 + unsigned int c, i;
102521
102522 if (is_defined_config(m, slen, hash))
102523 return;
102524 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
102525
102526 static void parse_config_file(const char *map, size_t len)
102527 {
102528 - const int *end = (const int *) (map + len);
102529 + const unsigned int *end = (const unsigned int *) (map + len);
102530 /* start at +1, so that p can never be < map */
102531 - const int *m = (const int *) map + 1;
102532 + const unsigned int *m = (const unsigned int *) map + 1;
102533 const char *p, *q;
102534
102535 for (; m < end; m++) {
102536 @@ -435,7 +435,7 @@ static void print_deps(void)
102537 static void traps(void)
102538 {
102539 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
102540 - int *p = (int *)test;
102541 + unsigned int *p = (unsigned int *)test;
102542
102543 if (*p != INT_CONF) {
102544 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
102545 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
102546 new file mode 100644
102547 index 0000000..ed4c19a
102548 --- /dev/null
102549 +++ b/scripts/gcc-plugin.sh
102550 @@ -0,0 +1,16 @@
102551 +#!/bin/bash
102552 +srctree=$(dirname "$0")
102553 +gccplugins_dir=$("$1" -print-file-name=plugin)
102554 +plugincc=$("$1" -E -shared - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
102555 +#include "gcc-common.h"
102556 +#if __GNUC__ > 4 || __GNUC_MINOR__ >= 8 || defined(ENABLE_BUILD_WITH_CXX)
102557 +#warning $2
102558 +#else
102559 +#warning $1
102560 +#endif
102561 +EOF
102562 +)
102563 +if [ $? -eq 0 ]
102564 +then
102565 + ( [[ "$plugincc" =~ "$1" ]] && echo "$1" ) || ( [[ "$plugincc" =~ "$2" ]] && echo "$2" )
102566 +fi
102567 diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
102568 index 5de5660..d3deb89 100644
102569 --- a/scripts/headers_install.sh
102570 +++ b/scripts/headers_install.sh
102571 @@ -32,6 +32,7 @@ do
102572 FILE="$(basename "$i")"
102573 sed -r \
102574 -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
102575 + -e 's/__intentional_overflow\([- \t,0-9]*\)//g' \
102576 -e 's/__attribute_const__([ \t]|$)/\1/g' \
102577 -e 's@^#include <linux/compiler.h>@@' \
102578 -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
102579 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
102580 index 2dcb377..a82c500 100644
102581 --- a/scripts/link-vmlinux.sh
102582 +++ b/scripts/link-vmlinux.sh
102583 @@ -162,7 +162,7 @@ else
102584 fi;
102585
102586 # final build of init/
102587 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
102588 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
102589
102590 kallsymso=""
102591 kallsyms_vmlinux=""
102592 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
102593 index 25e5cb0..6e85821 100644
102594 --- a/scripts/mod/file2alias.c
102595 +++ b/scripts/mod/file2alias.c
102596 @@ -142,7 +142,7 @@ static void device_id_check(const char *modname, const char *device_id,
102597 unsigned long size, unsigned long id_size,
102598 void *symval)
102599 {
102600 - int i;
102601 + unsigned int i;
102602
102603 if (size % id_size || size < id_size) {
102604 fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
102605 @@ -170,7 +170,7 @@ static void device_id_check(const char *modname, const char *device_id,
102606 /* USB is special because the bcdDevice can be matched against a numeric range */
102607 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
102608 static void do_usb_entry(void *symval,
102609 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
102610 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
102611 unsigned char range_lo, unsigned char range_hi,
102612 unsigned char max, struct module *mod)
102613 {
102614 @@ -280,7 +280,7 @@ static void do_usb_entry_multi(void *symval, struct module *mod)
102615 {
102616 unsigned int devlo, devhi;
102617 unsigned char chi, clo, max;
102618 - int ndigits;
102619 + unsigned int ndigits;
102620
102621 DEF_FIELD(symval, usb_device_id, match_flags);
102622 DEF_FIELD(symval, usb_device_id, idVendor);
102623 @@ -533,7 +533,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
102624 for (i = 0; i < count; i++) {
102625 DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
102626 char acpi_id[sizeof(*id)];
102627 - int j;
102628 + unsigned int j;
102629
102630 buf_printf(&mod->dev_table_buf,
102631 "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
102632 @@ -562,7 +562,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
102633
102634 for (j = 0; j < PNP_MAX_DEVICES; j++) {
102635 const char *id = (char *)(*devs)[j].id;
102636 - int i2, j2;
102637 + unsigned int i2, j2;
102638 int dup = 0;
102639
102640 if (!id[0])
102641 @@ -588,7 +588,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
102642 /* add an individual alias for every device entry */
102643 if (!dup) {
102644 char acpi_id[PNP_ID_LEN];
102645 - int k;
102646 + unsigned int k;
102647
102648 buf_printf(&mod->dev_table_buf,
102649 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
102650 @@ -940,7 +940,7 @@ static void dmi_ascii_filter(char *d, const char *s)
102651 static int do_dmi_entry(const char *filename, void *symval,
102652 char *alias)
102653 {
102654 - int i, j;
102655 + unsigned int i, j;
102656 DEF_FIELD_ADDR(symval, dmi_system_id, matches);
102657 sprintf(alias, "dmi*");
102658
102659 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
102660 index 1785576..5e8e06a 100644
102661 --- a/scripts/mod/modpost.c
102662 +++ b/scripts/mod/modpost.c
102663 @@ -941,6 +941,7 @@ enum mismatch {
102664 ANY_INIT_TO_ANY_EXIT,
102665 ANY_EXIT_TO_ANY_INIT,
102666 EXPORT_TO_INIT_EXIT,
102667 + DATA_TO_TEXT
102668 };
102669
102670 struct sectioncheck {
102671 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
102672 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
102673 .mismatch = EXPORT_TO_INIT_EXIT,
102674 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
102675 +},
102676 +/* Do not reference code from writable data */
102677 +{
102678 + .fromsec = { DATA_SECTIONS, NULL },
102679 + .tosec = { TEXT_SECTIONS, NULL },
102680 + .mismatch = DATA_TO_TEXT
102681 }
102682 };
102683
102684 @@ -1147,10 +1154,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
102685 continue;
102686 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
102687 continue;
102688 - if (sym->st_value == addr)
102689 - return sym;
102690 /* Find a symbol nearby - addr are maybe negative */
102691 d = sym->st_value - addr;
102692 + if (d == 0)
102693 + return sym;
102694 if (d < 0)
102695 d = addr - sym->st_value;
102696 if (d < distance) {
102697 @@ -1428,6 +1435,14 @@ static void report_sec_mismatch(const char *modname,
102698 tosym, prl_to, prl_to, tosym);
102699 free(prl_to);
102700 break;
102701 + case DATA_TO_TEXT:
102702 +#if 0
102703 + fprintf(stderr,
102704 + "The %s %s:%s references\n"
102705 + "the %s %s:%s%s\n",
102706 + from, fromsec, fromsym, to, tosec, tosym, to_p);
102707 +#endif
102708 + break;
102709 }
102710 fprintf(stderr, "\n");
102711 }
102712 @@ -1662,7 +1677,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
102713 static void check_sec_ref(struct module *mod, const char *modname,
102714 struct elf_info *elf)
102715 {
102716 - int i;
102717 + unsigned int i;
102718 Elf_Shdr *sechdrs = elf->sechdrs;
102719
102720 /* Walk through all sections */
102721 @@ -1781,7 +1796,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
102722 va_end(ap);
102723 }
102724
102725 -void buf_write(struct buffer *buf, const char *s, int len)
102726 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
102727 {
102728 if (buf->size - buf->pos < len) {
102729 buf->size += len + SZ;
102730 @@ -2000,7 +2015,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
102731 if (fstat(fileno(file), &st) < 0)
102732 goto close_write;
102733
102734 - if (st.st_size != b->pos)
102735 + if (st.st_size != (off_t)b->pos)
102736 goto close_write;
102737
102738 tmp = NOFAIL(malloc(b->pos));
102739 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
102740 index 51207e4..f7d603d 100644
102741 --- a/scripts/mod/modpost.h
102742 +++ b/scripts/mod/modpost.h
102743 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
102744
102745 struct buffer {
102746 char *p;
102747 - int pos;
102748 - int size;
102749 + unsigned int pos;
102750 + unsigned int size;
102751 };
102752
102753 void __attribute__((format(printf, 2, 3)))
102754 buf_printf(struct buffer *buf, const char *fmt, ...);
102755
102756 void
102757 -buf_write(struct buffer *buf, const char *s, int len);
102758 +buf_write(struct buffer *buf, const char *s, unsigned int len);
102759
102760 struct module {
102761 struct module *next;
102762 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
102763 index deb2994..af4f63e 100644
102764 --- a/scripts/mod/sumversion.c
102765 +++ b/scripts/mod/sumversion.c
102766 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
102767 goto out;
102768 }
102769
102770 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
102771 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
102772 warn("writing sum in %s failed: %s\n",
102773 filename, strerror(errno));
102774 goto out;
102775 diff --git a/scripts/module-common.lds b/scripts/module-common.lds
102776 index 0865b3e..7235dd4 100644
102777 --- a/scripts/module-common.lds
102778 +++ b/scripts/module-common.lds
102779 @@ -6,6 +6,10 @@
102780 SECTIONS {
102781 /DISCARD/ : { *(.discard) }
102782
102783 + .rodata : {
102784 + *(.rodata) *(.rodata.*)
102785 + *(.data..read_only)
102786 + }
102787 __ksymtab : { *(SORT(___ksymtab+*)) }
102788 __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
102789 __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
102790 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
102791 index 90e521f..e9eaf8f 100644
102792 --- a/scripts/package/builddeb
102793 +++ b/scripts/package/builddeb
102794 @@ -281,6 +281,7 @@ fi
102795 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
102796 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
102797 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
102798 +(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
102799 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
102800 mkdir -p "$destdir"
102801 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
102802 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
102803 index 68bb4ef..2f419e1 100644
102804 --- a/scripts/pnmtologo.c
102805 +++ b/scripts/pnmtologo.c
102806 @@ -244,14 +244,14 @@ static void write_header(void)
102807 fprintf(out, " * Linux logo %s\n", logoname);
102808 fputs(" */\n\n", out);
102809 fputs("#include <linux/linux_logo.h>\n\n", out);
102810 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
102811 + fprintf(out, "static unsigned char %s_data[] = {\n",
102812 logoname);
102813 }
102814
102815 static void write_footer(void)
102816 {
102817 fputs("\n};\n\n", out);
102818 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
102819 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
102820 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
102821 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
102822 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
102823 @@ -381,7 +381,7 @@ static void write_logo_clut224(void)
102824 fputs("\n};\n\n", out);
102825
102826 /* write logo clut */
102827 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
102828 + fprintf(out, "static unsigned char %s_clut[] = {\n",
102829 logoname);
102830 write_hex_cnt = 0;
102831 for (i = 0; i < logo_clutsize; i++) {
102832 diff --git a/scripts/sortextable.h b/scripts/sortextable.h
102833 index 8fac3fd..32ff38d 100644
102834 --- a/scripts/sortextable.h
102835 +++ b/scripts/sortextable.h
102836 @@ -108,9 +108,9 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
102837 const char *secstrtab;
102838 const char *strtab;
102839 char *extab_image;
102840 - int extab_index = 0;
102841 - int i;
102842 - int idx;
102843 + unsigned int extab_index = 0;
102844 + unsigned int i;
102845 + unsigned int idx;
102846 unsigned int num_sections;
102847 unsigned int secindex_strings;
102848
102849 diff --git a/security/Kconfig b/security/Kconfig
102850 index e9c6ac7..75578c4 100644
102851 --- a/security/Kconfig
102852 +++ b/security/Kconfig
102853 @@ -4,6 +4,960 @@
102854
102855 menu "Security options"
102856
102857 +menu "Grsecurity"
102858 +
102859 + config ARCH_TRACK_EXEC_LIMIT
102860 + bool
102861 +
102862 + config PAX_KERNEXEC_PLUGIN
102863 + bool
102864 +
102865 + config PAX_PER_CPU_PGD
102866 + bool
102867 +
102868 + config TASK_SIZE_MAX_SHIFT
102869 + int
102870 + depends on X86_64
102871 + default 47 if !PAX_PER_CPU_PGD
102872 + default 42 if PAX_PER_CPU_PGD
102873 +
102874 + config PAX_ENABLE_PAE
102875 + bool
102876 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
102877 +
102878 + config PAX_USERCOPY_SLABS
102879 + bool
102880 +
102881 +config GRKERNSEC
102882 + bool "Grsecurity"
102883 + select CRYPTO
102884 + select CRYPTO_SHA256
102885 + select PROC_FS
102886 + select STOP_MACHINE
102887 + select TTY
102888 + select DEBUG_KERNEL
102889 + select DEBUG_LIST
102890 + help
102891 + If you say Y here, you will be able to configure many features
102892 + that will enhance the security of your system. It is highly
102893 + recommended that you say Y here and read through the help
102894 + for each option so that you fully understand the features and
102895 + can evaluate their usefulness for your machine.
102896 +
102897 +choice
102898 + prompt "Configuration Method"
102899 + depends on GRKERNSEC
102900 + default GRKERNSEC_CONFIG_CUSTOM
102901 + help
102902 +
102903 +config GRKERNSEC_CONFIG_AUTO
102904 + bool "Automatic"
102905 + help
102906 + If you choose this configuration method, you'll be able to answer a small
102907 + number of simple questions about how you plan to use this kernel.
102908 + The settings of grsecurity and PaX will be automatically configured for
102909 + the highest commonly-used settings within the provided constraints.
102910 +
102911 + If you require additional configuration, custom changes can still be made
102912 + from the "custom configuration" menu.
102913 +
102914 +config GRKERNSEC_CONFIG_CUSTOM
102915 + bool "Custom"
102916 + help
102917 + If you choose this configuration method, you'll be able to configure all
102918 + grsecurity and PaX settings manually. Via this method, no options are
102919 + automatically enabled.
102920 +
102921 +endchoice
102922 +
102923 +choice
102924 + prompt "Usage Type"
102925 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
102926 + default GRKERNSEC_CONFIG_SERVER
102927 + help
102928 +
102929 +config GRKERNSEC_CONFIG_SERVER
102930 + bool "Server"
102931 + help
102932 + Choose this option if you plan to use this kernel on a server.
102933 +
102934 +config GRKERNSEC_CONFIG_DESKTOP
102935 + bool "Desktop"
102936 + help
102937 + Choose this option if you plan to use this kernel on a desktop.
102938 +
102939 +endchoice
102940 +
102941 +choice
102942 + prompt "Virtualization Type"
102943 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
102944 + default GRKERNSEC_CONFIG_VIRT_NONE
102945 + help
102946 +
102947 +config GRKERNSEC_CONFIG_VIRT_NONE
102948 + bool "None"
102949 + help
102950 + Choose this option if this kernel will be run on bare metal.
102951 +
102952 +config GRKERNSEC_CONFIG_VIRT_GUEST
102953 + bool "Guest"
102954 + help
102955 + Choose this option if this kernel will be run as a VM guest.
102956 +
102957 +config GRKERNSEC_CONFIG_VIRT_HOST
102958 + bool "Host"
102959 + help
102960 + Choose this option if this kernel will be run as a VM host.
102961 +
102962 +endchoice
102963 +
102964 +choice
102965 + prompt "Virtualization Hardware"
102966 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
102967 + help
102968 +
102969 +config GRKERNSEC_CONFIG_VIRT_EPT
102970 + bool "EPT/RVI Processor Support"
102971 + depends on X86
102972 + help
102973 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
102974 + hardware virtualization. This allows for additional kernel hardening protections
102975 + to operate without additional performance impact.
102976 +
102977 + To see if your Intel processor supports EPT, see:
102978 + http://ark.intel.com/Products/VirtualizationTechnology
102979 + (Most Core i3/5/7 support EPT)
102980 +
102981 + To see if your AMD processor supports RVI, see:
102982 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
102983 +
102984 +config GRKERNSEC_CONFIG_VIRT_SOFT
102985 + bool "First-gen/No Hardware Virtualization"
102986 + help
102987 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
102988 + support hardware virtualization or doesn't support the EPT/RVI extensions.
102989 +
102990 +endchoice
102991 +
102992 +choice
102993 + prompt "Virtualization Software"
102994 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
102995 + help
102996 +
102997 +config GRKERNSEC_CONFIG_VIRT_XEN
102998 + bool "Xen"
102999 + help
103000 + Choose this option if this kernel is running as a Xen guest or host.
103001 +
103002 +config GRKERNSEC_CONFIG_VIRT_VMWARE
103003 + bool "VMWare"
103004 + help
103005 + Choose this option if this kernel is running as a VMWare guest or host.
103006 +
103007 +config GRKERNSEC_CONFIG_VIRT_KVM
103008 + bool "KVM"
103009 + help
103010 + Choose this option if this kernel is running as a KVM guest or host.
103011 +
103012 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
103013 + bool "VirtualBox"
103014 + help
103015 + Choose this option if this kernel is running as a VirtualBox guest or host.
103016 +
103017 +endchoice
103018 +
103019 +choice
103020 + prompt "Required Priorities"
103021 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
103022 + default GRKERNSEC_CONFIG_PRIORITY_PERF
103023 + help
103024 +
103025 +config GRKERNSEC_CONFIG_PRIORITY_PERF
103026 + bool "Performance"
103027 + help
103028 + Choose this option if performance is of highest priority for this deployment
103029 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
103030 + clearing of structures intended for userland, and freed memory sanitizing will
103031 + be disabled.
103032 +
103033 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
103034 + bool "Security"
103035 + help
103036 + Choose this option if security is of highest priority for this deployment of
103037 + grsecurity. UDEREF, kernel stack clearing, clearing of structures intended
103038 + for userland, and freed memory sanitizing will be enabled for this kernel.
103039 + In a worst-case scenario, these features can introduce a 20% performance hit
103040 + (UDEREF on x64 contributing half of this hit).
103041 +
103042 +endchoice
103043 +
103044 +menu "Default Special Groups"
103045 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
103046 +
103047 +config GRKERNSEC_PROC_GID
103048 + int "GID exempted from /proc restrictions"
103049 + default 1001
103050 + help
103051 + Setting this GID determines which group will be exempted from
103052 + grsecurity's /proc restrictions, allowing users of the specified
103053 + group to view network statistics and the existence of other users'
103054 + processes on the system. This GID may also be chosen at boot time
103055 + via "grsec_proc_gid=" on the kernel commandline.
103056 +
103057 +config GRKERNSEC_TPE_UNTRUSTED_GID
103058 + int "GID for TPE-untrusted users"
103059 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
103060 + default 1005
103061 + help
103062 + Setting this GID determines which group untrusted users should
103063 + be added to. These users will be placed under grsecurity's Trusted Path
103064 + Execution mechanism, preventing them from executing their own binaries.
103065 + The users will only be able to execute binaries in directories owned and
103066 + writable only by the root user. If the sysctl option is enabled, a sysctl
103067 + option with name "tpe_gid" is created.
103068 +
103069 +config GRKERNSEC_TPE_TRUSTED_GID
103070 + int "GID for TPE-trusted users"
103071 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
103072 + default 1005
103073 + help
103074 + Setting this GID determines what group TPE restrictions will be
103075 + *disabled* for. If the sysctl option is enabled, a sysctl option
103076 + with name "tpe_gid" is created.
103077 +
103078 +config GRKERNSEC_SYMLINKOWN_GID
103079 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
103080 + depends on GRKERNSEC_CONFIG_SERVER
103081 + default 1006
103082 + help
103083 + Setting this GID determines what group kernel-enforced
103084 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
103085 + is enabled, a sysctl option with name "symlinkown_gid" is created.
103086 +
103087 +
103088 +endmenu
103089 +
103090 +menu "Customize Configuration"
103091 +depends on GRKERNSEC
103092 +
103093 +menu "PaX"
103094 +
103095 +config PAX
103096 + bool "Enable various PaX features"
103097 + default y if GRKERNSEC_CONFIG_AUTO
103098 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
103099 + help
103100 + This allows you to enable various PaX features. PaX adds
103101 + intrusion prevention mechanisms to the kernel that reduce
103102 + the risks posed by exploitable memory corruption bugs.
103103 +
103104 +menu "PaX Control"
103105 + depends on PAX
103106 +
103107 +config PAX_SOFTMODE
103108 + bool 'Support soft mode'
103109 + help
103110 + Enabling this option will allow you to run PaX in soft mode, that
103111 + is, PaX features will not be enforced by default, only on executables
103112 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
103113 + support as they are the only way to mark executables for soft mode use.
103114 +
103115 + Soft mode can be activated by using the "pax_softmode=1" kernel command
103116 + line option on boot. Furthermore you can control various PaX features
103117 + at runtime via the entries in /proc/sys/kernel/pax.
103118 +
103119 +config PAX_EI_PAX
103120 + bool 'Use legacy ELF header marking'
103121 + default y if GRKERNSEC_CONFIG_AUTO
103122 + help
103123 + Enabling this option will allow you to control PaX features on
103124 + a per executable basis via the 'chpax' utility available at
103125 + http://pax.grsecurity.net/. The control flags will be read from
103126 + an otherwise reserved part of the ELF header. This marking has
103127 + numerous drawbacks (no support for soft-mode, toolchain does not
103128 + know about the non-standard use of the ELF header) therefore it
103129 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
103130 + support.
103131 +
103132 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
103133 + support as well, they will override the legacy EI_PAX marks.
103134 +
103135 + If you enable none of the marking options then all applications
103136 + will run with PaX enabled on them by default.
103137 +
103138 +config PAX_PT_PAX_FLAGS
103139 + bool 'Use ELF program header marking'
103140 + default y if GRKERNSEC_CONFIG_AUTO
103141 + help
103142 + Enabling this option will allow you to control PaX features on
103143 + a per executable basis via the 'paxctl' utility available at
103144 + http://pax.grsecurity.net/. The control flags will be read from
103145 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
103146 + has the benefits of supporting both soft mode and being fully
103147 + integrated into the toolchain (the binutils patch is available
103148 + from http://pax.grsecurity.net).
103149 +
103150 + Note that if you enable the legacy EI_PAX marking support as well,
103151 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
103152 +
103153 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
103154 + must make sure that the marks are the same if a binary has both marks.
103155 +
103156 + If you enable none of the marking options then all applications
103157 + will run with PaX enabled on them by default.
103158 +
103159 +config PAX_XATTR_PAX_FLAGS
103160 + bool 'Use filesystem extended attributes marking'
103161 + default y if GRKERNSEC_CONFIG_AUTO
103162 + select CIFS_XATTR if CIFS
103163 + select EXT2_FS_XATTR if EXT2_FS
103164 + select EXT3_FS_XATTR if EXT3_FS
103165 + select JFFS2_FS_XATTR if JFFS2_FS
103166 + select REISERFS_FS_XATTR if REISERFS_FS
103167 + select SQUASHFS_XATTR if SQUASHFS
103168 + select TMPFS_XATTR if TMPFS
103169 + select UBIFS_FS_XATTR if UBIFS_FS
103170 + help
103171 + Enabling this option will allow you to control PaX features on
103172 + a per executable basis via the 'setfattr' utility. The control
103173 + flags will be read from the user.pax.flags extended attribute of
103174 + the file. This marking has the benefit of supporting binary-only
103175 + applications that self-check themselves (e.g., skype) and would
103176 + not tolerate chpax/paxctl changes. The main drawback is that
103177 + extended attributes are not supported by some filesystems (e.g.,
103178 + isofs, udf, vfat) so copying files through such filesystems will
103179 + lose the extended attributes and these PaX markings.
103180 +
103181 + Note that if you enable the legacy EI_PAX marking support as well,
103182 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
103183 +
103184 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
103185 + must make sure that the marks are the same if a binary has both marks.
103186 +
103187 + If you enable none of the marking options then all applications
103188 + will run with PaX enabled on them by default.
103189 +
103190 +choice
103191 + prompt 'MAC system integration'
103192 + default PAX_HAVE_ACL_FLAGS
103193 + help
103194 + Mandatory Access Control systems have the option of controlling
103195 + PaX flags on a per executable basis, choose the method supported
103196 + by your particular system.
103197 +
103198 + - "none": if your MAC system does not interact with PaX,
103199 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
103200 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
103201 +
103202 + NOTE: this option is for developers/integrators only.
103203 +
103204 + config PAX_NO_ACL_FLAGS
103205 + bool 'none'
103206 +
103207 + config PAX_HAVE_ACL_FLAGS
103208 + bool 'direct'
103209 +
103210 + config PAX_HOOK_ACL_FLAGS
103211 + bool 'hook'
103212 +endchoice
103213 +
103214 +endmenu
103215 +
103216 +menu "Non-executable pages"
103217 + depends on PAX
103218 +
103219 +config PAX_NOEXEC
103220 + bool "Enforce non-executable pages"
103221 + default y if GRKERNSEC_CONFIG_AUTO
103222 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
103223 + help
103224 + By design some architectures do not allow for protecting memory
103225 + pages against execution or even if they do, Linux does not make
103226 + use of this feature. In practice this means that if a page is
103227 + readable (such as the stack or heap) it is also executable.
103228 +
103229 + There is a well known exploit technique that makes use of this
103230 + fact and a common programming mistake where an attacker can
103231 + introduce code of his choice somewhere in the attacked program's
103232 + memory (typically the stack or the heap) and then execute it.
103233 +
103234 + If the attacked program was running with different (typically
103235 + higher) privileges than that of the attacker, then he can elevate
103236 + his own privilege level (e.g. get a root shell, write to files to
103237 + which he does not have write access, etc).
103238 +
103239 + Enabling this option will let you choose from various features
103240 + that prevent the injection and execution of 'foreign' code in
103241 + a program.
103242 +
103243 + This will also break programs that rely on the old behaviour and
103244 + expect that dynamically allocated memory via the malloc() family
103245 + of functions is executable (which it is not). Notable examples
103246 + are the XFree86 4.x server, the java runtime and wine.
103247 +
103248 +config PAX_PAGEEXEC
103249 + bool "Paging based non-executable pages"
103250 + default y if GRKERNSEC_CONFIG_AUTO
103251 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
103252 + select ARCH_TRACK_EXEC_LIMIT if X86_32
103253 + help
103254 + This implementation is based on the paging feature of the CPU.
103255 + On i386 without hardware non-executable bit support there is a
103256 + variable but usually low performance impact, however on Intel's
103257 + P4 core based CPUs it is very high so you should not enable this
103258 + for kernels meant to be used on such CPUs.
103259 +
103260 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
103261 + with hardware non-executable bit support there is no performance
103262 + impact, on ppc the impact is negligible.
103263 +
103264 + Note that several architectures require various emulations due to
103265 + badly designed userland ABIs, this will cause a performance impact
103266 + but will disappear as soon as userland is fixed. For example, ppc
103267 + userland MUST have been built with secure-plt by a recent toolchain.
103268 +
103269 +config PAX_SEGMEXEC
103270 + bool "Segmentation based non-executable pages"
103271 + default y if GRKERNSEC_CONFIG_AUTO
103272 + depends on PAX_NOEXEC && X86_32
103273 + help
103274 + This implementation is based on the segmentation feature of the
103275 + CPU and has a very small performance impact, however applications
103276 + will be limited to a 1.5 GB address space instead of the normal
103277 + 3 GB.
103278 +
103279 +config PAX_EMUTRAMP
103280 + bool "Emulate trampolines"
103281 + default y if PARISC || GRKERNSEC_CONFIG_AUTO
103282 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
103283 + help
103284 + There are some programs and libraries that for one reason or
103285 + another attempt to execute special small code snippets from
103286 + non-executable memory pages. Most notable examples are the
103287 + signal handler return code generated by the kernel itself and
103288 + the GCC trampolines.
103289 +
103290 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
103291 + such programs will no longer work under your kernel.
103292 +
103293 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
103294 + utilities to enable trampoline emulation for the affected programs
103295 + yet still have the protection provided by the non-executable pages.
103296 +
103297 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
103298 + your system will not even boot.
103299 +
103300 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
103301 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
103302 + for the affected files.
103303 +
103304 + NOTE: enabling this feature *may* open up a loophole in the
103305 + protection provided by non-executable pages that an attacker
103306 + could abuse. Therefore the best solution is to not have any
103307 + files on your system that would require this option. This can
103308 + be achieved by not using libc5 (which relies on the kernel
103309 + signal handler return code) and not using or rewriting programs
103310 + that make use of the nested function implementation of GCC.
103311 + Skilled users can just fix GCC itself so that it implements
103312 + nested function calls in a way that does not interfere with PaX.
103313 +
103314 +config PAX_EMUSIGRT
103315 + bool "Automatically emulate sigreturn trampolines"
103316 + depends on PAX_EMUTRAMP && PARISC
103317 + default y
103318 + help
103319 + Enabling this option will have the kernel automatically detect
103320 + and emulate signal return trampolines executing on the stack
103321 + that would otherwise lead to task termination.
103322 +
103323 + This solution is intended as a temporary one for users with
103324 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
103325 + Modula-3 runtime, etc) or executables linked to such, basically
103326 + everything that does not specify its own SA_RESTORER function in
103327 + normal executable memory like glibc 2.1+ does.
103328 +
103329 + On parisc you MUST enable this option, otherwise your system will
103330 + not even boot.
103331 +
103332 + NOTE: this feature cannot be disabled on a per executable basis
103333 + and since it *does* open up a loophole in the protection provided
103334 + by non-executable pages, the best solution is to not have any
103335 + files on your system that would require this option.
103336 +
103337 +config PAX_MPROTECT
103338 + bool "Restrict mprotect()"
103339 + default y if GRKERNSEC_CONFIG_AUTO
103340 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
103341 + help
103342 + Enabling this option will prevent programs from
103343 + - changing the executable status of memory pages that were
103344 + not originally created as executable,
103345 + - making read-only executable pages writable again,
103346 + - creating executable pages from anonymous memory,
103347 + - making read-only-after-relocations (RELRO) data pages writable again.
103348 +
103349 + You should say Y here to complete the protection provided by
103350 + the enforcement of non-executable pages.
103351 +
103352 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
103353 + this feature on a per file basis.
103354 +
103355 +config PAX_MPROTECT_COMPAT
103356 + bool "Use legacy/compat protection demoting (read help)"
103357 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
103358 + depends on PAX_MPROTECT
103359 + help
103360 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
103361 + by sending the proper error code to the application. For some broken
103362 + userland, this can cause problems with Python or other applications. The
103363 + current implementation however allows for applications like clamav to
103364 + detect if JIT compilation/execution is allowed and to fall back gracefully
103365 + to an interpreter-based mode if it does not. While we encourage everyone
103366 + to use the current implementation as-is and push upstream to fix broken
103367 + userland (note that the RWX logging option can assist with this), in some
103368 + environments this may not be possible. Having to disable MPROTECT
103369 + completely on certain binaries reduces the security benefit of PaX,
103370 + so this option is provided for those environments to revert to the old
103371 + behavior.
103372 +
103373 +config PAX_ELFRELOCS
103374 + bool "Allow ELF text relocations (read help)"
103375 + depends on PAX_MPROTECT
103376 + default n
103377 + help
103378 + Non-executable pages and mprotect() restrictions are effective
103379 + in preventing the introduction of new executable code into an
103380 + attacked task's address space. There remain only two venues
103381 + for this kind of attack: if the attacker can execute already
103382 + existing code in the attacked task then he can either have it
103383 + create and mmap() a file containing his code or have it mmap()
103384 + an already existing ELF library that does not have position
103385 + independent code in it and use mprotect() on it to make it
103386 + writable and copy his code there. While protecting against
103387 + the former approach is beyond PaX, the latter can be prevented
103388 + by having only PIC ELF libraries on one's system (which do not
103389 + need to relocate their code). If you are sure this is your case,
103390 + as is the case with all modern Linux distributions, then leave
103391 + this option disabled. You should say 'n' here.
103392 +
103393 +config PAX_ETEXECRELOCS
103394 + bool "Allow ELF ET_EXEC text relocations"
103395 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
103396 + select PAX_ELFRELOCS
103397 + default y
103398 + help
103399 + On some architectures there are incorrectly created applications
103400 + that require text relocations and would not work without enabling
103401 + this option. If you are an alpha, ia64 or parisc user, you should
103402 + enable this option and disable it once you have made sure that
103403 + none of your applications need it.
103404 +
103405 +config PAX_EMUPLT
103406 + bool "Automatically emulate ELF PLT"
103407 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
103408 + default y
103409 + help
103410 + Enabling this option will have the kernel automatically detect
103411 + and emulate the Procedure Linkage Table entries in ELF files.
103412 + On some architectures such entries are in writable memory, and
103413 + become non-executable leading to task termination. Therefore
103414 + it is mandatory that you enable this option on alpha, parisc,
103415 + sparc and sparc64, otherwise your system would not even boot.
103416 +
103417 + NOTE: this feature *does* open up a loophole in the protection
103418 + provided by the non-executable pages, therefore the proper
103419 + solution is to modify the toolchain to produce a PLT that does
103420 + not need to be writable.
103421 +
103422 +config PAX_DLRESOLVE
103423 + bool 'Emulate old glibc resolver stub'
103424 + depends on PAX_EMUPLT && SPARC
103425 + default n
103426 + help
103427 + This option is needed if userland has an old glibc (before 2.4)
103428 + that puts a 'save' instruction into the runtime generated resolver
103429 + stub that needs special emulation.
103430 +
103431 +config PAX_KERNEXEC
103432 + bool "Enforce non-executable kernel pages"
103433 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
103434 + depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
103435 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
103436 + select PAX_KERNEXEC_PLUGIN if X86_64
103437 + help
103438 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
103439 + that is, enabling this option will make it harder to inject
103440 + and execute 'foreign' code in kernel memory itself.
103441 +
103442 +choice
103443 + prompt "Return Address Instrumentation Method"
103444 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
103445 + depends on PAX_KERNEXEC_PLUGIN
103446 + help
103447 + Select the method used to instrument function pointer dereferences.
103448 + Note that binary modules cannot be instrumented by this approach.
103449 +
103450 + Note that the implementation requires a gcc with plugin support,
103451 + i.e., gcc 4.5 or newer. You may need to install the supporting
103452 + headers explicitly in addition to the normal gcc package.
103453 +
103454 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
103455 + bool "bts"
103456 + help
103457 + This method is compatible with binary only modules but has
103458 + a higher runtime overhead.
103459 +
103460 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
103461 + bool "or"
103462 + depends on !PARAVIRT
103463 + help
103464 + This method is incompatible with binary only modules but has
103465 + a lower runtime overhead.
103466 +endchoice
103467 +
103468 +config PAX_KERNEXEC_PLUGIN_METHOD
103469 + string
103470 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
103471 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
103472 + default ""
103473 +
103474 +config PAX_KERNEXEC_MODULE_TEXT
103475 + int "Minimum amount of memory reserved for module code"
103476 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
103477 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
103478 + depends on PAX_KERNEXEC && X86_32
103479 + help
103480 + Due to implementation details the kernel must reserve a fixed
103481 + amount of memory for runtime allocated code (such as modules)
103482 + at compile time that cannot be changed at runtime. Here you
103483 + can specify the minimum amount in MB that will be reserved.
103484 + Due to the same implementation details this size will always
103485 + be rounded up to the next 2/4 MB boundary (depends on PAE) so
103486 + the actually available memory for runtime allocated code will
103487 + usually be more than this minimum.
103488 +
103489 + The default 4 MB should be enough for most users but if you have
103490 + an excessive number of modules (e.g., most distribution configs
103491 + compile many drivers as modules) or use huge modules such as
103492 + nvidia's kernel driver, you will need to adjust this amount.
103493 + A good rule of thumb is to look at your currently loaded kernel
103494 + modules and add up their sizes.
103495 +
103496 +endmenu
103497 +
103498 +menu "Address Space Layout Randomization"
103499 + depends on PAX
103500 +
103501 +config PAX_ASLR
103502 + bool "Address Space Layout Randomization"
103503 + default y if GRKERNSEC_CONFIG_AUTO
103504 + help
103505 + Many if not most exploit techniques rely on the knowledge of
103506 + certain addresses in the attacked program. The following options
103507 + will allow the kernel to apply a certain amount of randomization
103508 + to specific parts of the program thereby forcing an attacker to
103509 + guess them in most cases. Any failed guess will most likely crash
103510 + the attacked program which allows the kernel to detect such attempts
103511 + and react on them. PaX itself provides no reaction mechanisms,
103512 + instead it is strongly encouraged that you make use of Nergal's
103513 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
103514 + (http://www.grsecurity.net/) built-in crash detection features or
103515 + develop one yourself.
103516 +
103517 + By saying Y here you can choose to randomize the following areas:
103518 + - top of the task's kernel stack
103519 + - top of the task's userland stack
103520 + - base address for mmap() requests that do not specify one
103521 + (this includes all libraries)
103522 + - base address of the main executable
103523 +
103524 + It is strongly recommended to say Y here as address space layout
103525 + randomization has negligible impact on performance yet it provides
103526 + a very effective protection.
103527 +
103528 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
103529 + this feature on a per file basis.
103530 +
103531 +config PAX_RANDKSTACK
103532 + bool "Randomize kernel stack base"
103533 + default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
103534 + depends on X86_TSC && X86
103535 + help
103536 + By saying Y here the kernel will randomize every task's kernel
103537 + stack on every system call. This will not only force an attacker
103538 + to guess it but also prevent him from making use of possible
103539 + leaked information about it.
103540 +
103541 + Since the kernel stack is a rather scarce resource, randomization
103542 + may cause unexpected stack overflows, therefore you should very
103543 + carefully test your system. Note that once enabled in the kernel
103544 + configuration, this feature cannot be disabled on a per file basis.
103545 +
103546 +config PAX_RANDUSTACK
103547 + bool "Randomize user stack base"
103548 + default y if GRKERNSEC_CONFIG_AUTO
103549 + depends on PAX_ASLR
103550 + help
103551 + By saying Y here the kernel will randomize every task's userland
103552 + stack. The randomization is done in two steps where the second
103553 + one may apply a big amount of shift to the top of the stack and
103554 + cause problems for programs that want to use lots of memory (more
103555 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
103556 + For this reason the second step can be controlled by 'chpax' or
103557 + 'paxctl' on a per file basis.
103558 +
103559 +config PAX_RANDMMAP
103560 + bool "Randomize mmap() base"
103561 + default y if GRKERNSEC_CONFIG_AUTO
103562 + depends on PAX_ASLR
103563 + help
103564 + By saying Y here the kernel will use a randomized base address for
103565 + mmap() requests that do not specify one themselves. As a result
103566 + all dynamically loaded libraries will appear at random addresses
103567 + and therefore be harder to exploit by a technique where an attacker
103568 + attempts to execute library code for his purposes (e.g. spawn a
103569 + shell from an exploited program that is running at an elevated
103570 + privilege level).
103571 +
103572 + Furthermore, if a program is relinked as a dynamic ELF file, its
103573 + base address will be randomized as well, completing the full
103574 + randomization of the address space layout. Attacking such programs
103575 + becomes a guess game. You can find an example of doing this at
103576 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
103577 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
103578 +
103579 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
103580 + feature on a per file basis.
103581 +
103582 +endmenu
103583 +
103584 +menu "Miscellaneous hardening features"
103585 +
103586 +config PAX_MEMORY_SANITIZE
103587 + bool "Sanitize all freed memory"
103588 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
103589 + help
103590 + By saying Y here the kernel will erase memory pages and slab objects
103591 + as soon as they are freed. This in turn reduces the lifetime of data
103592 + stored in them, making it less likely that sensitive information such
103593 + as passwords, cryptographic secrets, etc stay in memory for too long.
103594 +
103595 + This is especially useful for programs whose runtime is short; long
103596 + lived processes and the kernel itself benefit from this as long as
103597 + they ensure timely freeing of memory that may hold sensitive
103598 + information.
103599 +
103600 + A nice side effect of the sanitization of slab objects is the
103601 + reduction of possible info leaks caused by padding bytes within the
103602 + leaky structures. Use-after-free bugs for structures containing
103603 + pointers can also be detected as dereferencing the sanitized pointer
103604 + will generate an access violation.
103605 +
103606 + The tradeoff is performance impact, on a single CPU system kernel
103607 + compilation sees a 3% slowdown, other systems and workloads may vary
103608 + and you are advised to test this feature on your expected workload
103609 + before deploying it.
103610 +
103611 + To reduce the performance penalty by sanitizing pages only, albeit
103612 + limiting the effectiveness of this feature at the same time, slab
103613 + sanitization can be disabled with the kernel commandline parameter
103614 + "pax_sanitize_slab=0".
103615 +
103616 + Note that this feature does not protect data stored in live pages,
103617 + e.g., process memory swapped to disk may stay there for a long time.
103618 +
103619 +config PAX_MEMORY_STACKLEAK
103620 + bool "Sanitize kernel stack"
103621 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
103622 + depends on X86
103623 + help
103624 + By saying Y here the kernel will erase the kernel stack before it
103625 + returns from a system call. This in turn reduces the information
103626 + that a kernel stack leak bug can reveal.
103627 +
103628 + Note that such a bug can still leak information that was put on
103629 + the stack by the current system call (the one eventually triggering
103630 + the bug) but traces of earlier system calls on the kernel stack
103631 + cannot leak anymore.
103632 +
103633 + The tradeoff is performance impact: on a single CPU system kernel
103634 + compilation sees a 1% slowdown, other systems and workloads may vary
103635 + and you are advised to test this feature on your expected workload
103636 + before deploying it.
103637 +
103638 + Note that the full feature requires a gcc with plugin support,
103639 + i.e., gcc 4.5 or newer. You may need to install the supporting
103640 + headers explicitly in addition to the normal gcc package. Using
103641 + older gcc versions means that functions with large enough stack
103642 + frames may leave uninitialized memory behind that may be exposed
103643 + to a later syscall leaking the stack.
103644 +
103645 +config PAX_MEMORY_STRUCTLEAK
103646 + bool "Forcibly initialize local variables copied to userland"
103647 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
103648 + help
103649 + By saying Y here the kernel will zero initialize some local
103650 + variables that are going to be copied to userland. This in
103651 + turn prevents unintended information leakage from the kernel
103652 + stack should later code forget to explicitly set all parts of
103653 + the copied variable.
103654 +
103655 + The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
103656 + at a much smaller coverage.
103657 +
103658 + Note that the implementation requires a gcc with plugin support,
103659 + i.e., gcc 4.5 or newer. You may need to install the supporting
103660 + headers explicitly in addition to the normal gcc package.
103661 +
103662 +config PAX_MEMORY_UDEREF
103663 + bool "Prevent invalid userland pointer dereference"
103664 + default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
103665 + depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
103666 + select PAX_PER_CPU_PGD if X86_64
103667 + help
103668 + By saying Y here the kernel will be prevented from dereferencing
103669 + userland pointers in contexts where the kernel expects only kernel
103670 + pointers. This is both a useful runtime debugging feature and a
103671 + security measure that prevents exploiting a class of kernel bugs.
103672 +
103673 + The tradeoff is that some virtualization solutions may experience
103674 + a huge slowdown and therefore you should not enable this feature
103675 + for kernels meant to run in such environments. Whether a given VM
103676 + solution is affected or not is best determined by simply trying it
103677 + out, the performance impact will be obvious right on boot as this
103678 + mechanism engages from very early on. A good rule of thumb is that
103679 + VMs running on CPUs without hardware virtualization support (i.e.,
103680 + the majority of IA-32 CPUs) will likely experience the slowdown.
103681 +
103682 + On X86_64 the kernel will make use of PCID support when available
103683 + (Intel's Westmere, Sandy Bridge, etc) for better security (default)
103684 + or performance impact. Pass pax_weakuderef on the kernel command
103685 + line to choose the latter.
103686 +
103687 +config PAX_REFCOUNT
103688 + bool "Prevent various kernel object reference counter overflows"
103689 + default y if GRKERNSEC_CONFIG_AUTO
103690 + depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || SPARC64 || X86)
103691 + help
103692 + By saying Y here the kernel will detect and prevent overflowing
103693 + various (but not all) kinds of object reference counters. Such
103694 + overflows can normally occur due to bugs only and are often, if
103695 + not always, exploitable.
103696 +
103697 + The tradeoff is that data structures protected by an overflowed
103698 + refcount will never be freed and therefore will leak memory. Note
103699 + that this leak also happens even without this protection but in
103700 + that case the overflow can eventually trigger the freeing of the
103701 + data structure while it is still being used elsewhere, resulting
103702 + in the exploitable situation that this feature prevents.
103703 +
103704 + Since this has a negligible performance impact, you should enable
103705 + this feature.
103706 +
103707 +config PAX_CONSTIFY_PLUGIN
103708 + bool "Automatically constify eligible structures"
103709 + default y
103710 + depends on !UML && PAX_KERNEXEC
103711 + help
103712 + By saying Y here the compiler will automatically constify a class
103713 + of types that contain only function pointers. This reduces the
103714 + kernel's attack surface and also produces a better memory layout.
103715 +
103716 + Note that the implementation requires a gcc with plugin support,
103717 + i.e., gcc 4.5 or newer. You may need to install the supporting
103718 + headers explicitly in addition to the normal gcc package.
103719 +
103720 + Note that if some code really has to modify constified variables
103721 + then the source code will have to be patched to allow it. Examples
103722 + can be found in PaX itself (the no_const attribute) and for some
103723 + out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
103724 +
103725 +config PAX_USERCOPY
103726 + bool "Harden heap object copies between kernel and userland"
103727 + default y if GRKERNSEC_CONFIG_AUTO
103728 + depends on ARM || IA64 || PPC || SPARC || X86
103729 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
103730 + select PAX_USERCOPY_SLABS
103731 + help
103732 + By saying Y here the kernel will enforce the size of heap objects
103733 + when they are copied in either direction between the kernel and
103734 + userland, even if only a part of the heap object is copied.
103735 +
103736 + Specifically, this checking prevents information leaking from the
103737 + kernel heap during kernel to userland copies (if the kernel heap
103738 + object is otherwise fully initialized) and prevents kernel heap
103739 + overflows during userland to kernel copies.
103740 +
103741 + Note that the current implementation provides the strictest bounds
103742 + checks for the SLUB allocator.
103743 +
103744 + Enabling this option also enables per-slab cache protection against
103745 + data in a given cache being copied into/out of via userland
103746 + accessors. Though the whitelist of regions will be reduced over
103747 + time, it notably protects important data structures like task structs.
103748 +
103749 + If frame pointers are enabled on x86, this option will also restrict
103750 + copies into and out of the kernel stack to local variables within a
103751 + single frame.
103752 +
103753 + Since this has a negligible performance impact, you should enable
103754 + this feature.
103755 +
103756 +config PAX_USERCOPY_DEBUG
103757 + bool
103758 + depends on X86 && PAX_USERCOPY
103759 + default n
103760 +
103761 +config PAX_SIZE_OVERFLOW
103762 + bool "Prevent various integer overflows in function size parameters"
103763 + default y if GRKERNSEC_CONFIG_AUTO
103764 + depends on X86
103765 + help
103766 + By saying Y here the kernel recomputes expressions of function
103767 + arguments marked by a size_overflow attribute with double integer
103768 + precision (DImode/TImode for 32/64 bit integer types).
103769 +
103770 + The recomputed argument is checked against TYPE_MAX and an event
103771 + is logged on overflow and the triggering process is killed.
103772 +
103773 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
103774 +
103775 + Note that the implementation requires a gcc with plugin support,
103776 + i.e., gcc 4.5 or newer. You may need to install the supporting
103777 + headers explicitly in addition to the normal gcc package.
103778 +
103779 +config PAX_LATENT_ENTROPY
103780 + bool "Generate some entropy during boot and runtime"
103781 + default y if GRKERNSEC_CONFIG_AUTO
103782 + help
103783 + By saying Y here the kernel will instrument some kernel code to
103784 + extract some entropy from both original and artificially created
103785 + program state. This will help especially embedded systems where
103786 + there is little 'natural' source of entropy normally. The cost
103787 + is some slowdown of the boot process and fork and irq processing.
103788 +
103789 + When pax_extra_latent_entropy is passed on the kernel command line,
103790 + entropy will be extracted from up to the first 4GB of RAM while the
103791 + runtime memory allocator is being initialized. This costs even more
103792 + slowdown of the boot process.
103793 +
103794 + Note that the implementation requires a gcc with plugin support,
103795 + i.e., gcc 4.5 or newer. You may need to install the supporting
103796 + headers explicitly in addition to the normal gcc package.
103797 +
103798 + Note that entropy extracted this way is not cryptographically
103799 + secure!
103800 +
103801 +endmenu
103802 +
103803 +endmenu
103804 +
103805 +source grsecurity/Kconfig
103806 +
103807 +endmenu
103808 +
103809 +endmenu
103810 +
103811 source security/keys/Kconfig
103812
103813 config SECURITY_DMESG_RESTRICT
103814 @@ -103,7 +1057,7 @@ config INTEL_TXT
103815 config LSM_MMAP_MIN_ADDR
103816 int "Low address space for LSM to protect from user allocation"
103817 depends on SECURITY && SECURITY_SELINUX
103818 - default 32768 if ARM
103819 + default 32768 if ALPHA || ARM || PARISC || SPARC32
103820 default 65536
103821 help
103822 This is the portion of low virtual memory which should be protected
103823 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
103824 index 4257b7e..f0c9438 100644
103825 --- a/security/apparmor/lsm.c
103826 +++ b/security/apparmor/lsm.c
103827 @@ -615,7 +615,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
103828 return error;
103829 }
103830
103831 -static struct security_operations apparmor_ops = {
103832 +static struct security_operations apparmor_ops __read_only = {
103833 .name = "apparmor",
103834
103835 .ptrace_access_check = apparmor_ptrace_access_check,
103836 diff --git a/security/commoncap.c b/security/commoncap.c
103837 index b9d613e..f68305c 100644
103838 --- a/security/commoncap.c
103839 +++ b/security/commoncap.c
103840 @@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
103841 return 0;
103842 }
103843
103844 +/* returns:
103845 + 1 for suid privilege
103846 + 2 for sgid privilege
103847 + 3 for fscap privilege
103848 +*/
103849 +int is_privileged_binary(const struct dentry *dentry)
103850 +{
103851 + struct cpu_vfs_cap_data capdata;
103852 + struct inode *inode = dentry->d_inode;
103853 +
103854 + if (!inode || S_ISDIR(inode->i_mode))
103855 + return 0;
103856 +
103857 + if (inode->i_mode & S_ISUID)
103858 + return 1;
103859 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
103860 + return 2;
103861 +
103862 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
103863 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
103864 + return 3;
103865 + }
103866 +
103867 + return 0;
103868 +}
103869 +
103870 /*
103871 * Attempt to get the on-exec apply capability sets for an executable file from
103872 * its xattrs and, if present, apply them to the proposed credentials being
103873 @@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
103874 const struct cred *cred = current_cred();
103875 kuid_t root_uid = make_kuid(cred->user_ns, 0);
103876
103877 + if (gr_acl_enable_at_secure())
103878 + return 1;
103879 +
103880 if (!uid_eq(cred->uid, root_uid)) {
103881 if (bprm->cap_effective)
103882 return 1;
103883 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
103884 index 0356e1d..72332ab 100644
103885 --- a/security/integrity/ima/ima.h
103886 +++ b/security/integrity/ima/ima.h
103887 @@ -118,8 +118,8 @@ int ima_init_template(void);
103888 extern spinlock_t ima_queue_lock;
103889
103890 struct ima_h_table {
103891 - atomic_long_t len; /* number of stored measurements in the list */
103892 - atomic_long_t violations;
103893 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
103894 + atomic_long_unchecked_t violations;
103895 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
103896 };
103897 extern struct ima_h_table ima_htable;
103898 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
103899 index c38bbce..f45133d 100644
103900 --- a/security/integrity/ima/ima_api.c
103901 +++ b/security/integrity/ima/ima_api.c
103902 @@ -137,7 +137,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
103903 int result;
103904
103905 /* can overflow, only indicator */
103906 - atomic_long_inc(&ima_htable.violations);
103907 + atomic_long_inc_unchecked(&ima_htable.violations);
103908
103909 result = ima_alloc_init_template(NULL, file, filename,
103910 NULL, 0, &entry);
103911 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
103912 index db01125..61f6597 100644
103913 --- a/security/integrity/ima/ima_fs.c
103914 +++ b/security/integrity/ima/ima_fs.c
103915 @@ -28,12 +28,12 @@
103916 static int valid_policy = 1;
103917 #define TMPBUFLEN 12
103918 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
103919 - loff_t *ppos, atomic_long_t *val)
103920 + loff_t *ppos, atomic_long_unchecked_t *val)
103921 {
103922 char tmpbuf[TMPBUFLEN];
103923 ssize_t len;
103924
103925 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
103926 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
103927 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
103928 }
103929
103930 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
103931 index d85e997..6992813 100644
103932 --- a/security/integrity/ima/ima_queue.c
103933 +++ b/security/integrity/ima/ima_queue.c
103934 @@ -80,7 +80,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
103935 INIT_LIST_HEAD(&qe->later);
103936 list_add_tail_rcu(&qe->later, &ima_measurements);
103937
103938 - atomic_long_inc(&ima_htable.len);
103939 + atomic_long_inc_unchecked(&ima_htable.len);
103940 key = ima_hash_key(entry->digest);
103941 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
103942 return 0;
103943 diff --git a/security/keys/compat.c b/security/keys/compat.c
103944 index bbd32c7..c60c927 100644
103945 --- a/security/keys/compat.c
103946 +++ b/security/keys/compat.c
103947 @@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
103948 if (ret == 0)
103949 goto no_payload_free;
103950
103951 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
103952 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
103953 err:
103954 if (iov != iovstack)
103955 kfree(iov);
103956 diff --git a/security/keys/internal.h b/security/keys/internal.h
103957 index 80b2aac..bb7ee65 100644
103958 --- a/security/keys/internal.h
103959 +++ b/security/keys/internal.h
103960 @@ -253,7 +253,7 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
103961 extern long keyctl_invalidate_key(key_serial_t);
103962
103963 extern long keyctl_instantiate_key_common(key_serial_t,
103964 - const struct iovec *,
103965 + const struct iovec __user *,
103966 unsigned, size_t, key_serial_t);
103967 #ifdef CONFIG_PERSISTENT_KEYRINGS
103968 extern long keyctl_get_persistent(uid_t, key_serial_t);
103969 diff --git a/security/keys/key.c b/security/keys/key.c
103970 index 6e21c11..9ed67ca 100644
103971 --- a/security/keys/key.c
103972 +++ b/security/keys/key.c
103973 @@ -285,7 +285,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
103974
103975 atomic_set(&key->usage, 1);
103976 init_rwsem(&key->sem);
103977 - lockdep_set_class(&key->sem, &type->lock_class);
103978 + lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class);
103979 key->index_key.type = type;
103980 key->user = user;
103981 key->quotalen = quotalen;
103982 @@ -1036,7 +1036,9 @@ int register_key_type(struct key_type *ktype)
103983 struct key_type *p;
103984 int ret;
103985
103986 - memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
103987 + pax_open_kernel();
103988 + memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class));
103989 + pax_close_kernel();
103990
103991 ret = -EEXIST;
103992 down_write(&key_types_sem);
103993 @@ -1048,7 +1050,7 @@ int register_key_type(struct key_type *ktype)
103994 }
103995
103996 /* store the type */
103997 - list_add(&ktype->link, &key_types_list);
103998 + pax_list_add((struct list_head *)&ktype->link, &key_types_list);
103999
104000 pr_notice("Key type %s registered\n", ktype->name);
104001 ret = 0;
104002 @@ -1070,7 +1072,7 @@ EXPORT_SYMBOL(register_key_type);
104003 void unregister_key_type(struct key_type *ktype)
104004 {
104005 down_write(&key_types_sem);
104006 - list_del_init(&ktype->link);
104007 + pax_list_del_init((struct list_head *)&ktype->link);
104008 downgrade_write(&key_types_sem);
104009 key_gc_keytype(ktype);
104010 pr_notice("Key type %s unregistered\n", ktype->name);
104011 @@ -1088,10 +1090,10 @@ void __init key_init(void)
104012 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
104013
104014 /* add the special key types */
104015 - list_add_tail(&key_type_keyring.link, &key_types_list);
104016 - list_add_tail(&key_type_dead.link, &key_types_list);
104017 - list_add_tail(&key_type_user.link, &key_types_list);
104018 - list_add_tail(&key_type_logon.link, &key_types_list);
104019 + pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
104020 + pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
104021 + pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
104022 + pax_list_add_tail((struct list_head *)&key_type_logon.link, &key_types_list);
104023
104024 /* record the root user tracking */
104025 rb_link_node(&root_key_user.node,
104026 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
104027 index cee72ce..e46074a 100644
104028 --- a/security/keys/keyctl.c
104029 +++ b/security/keys/keyctl.c
104030 @@ -987,7 +987,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
104031 /*
104032 * Copy the iovec data from userspace
104033 */
104034 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
104035 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
104036 unsigned ioc)
104037 {
104038 for (; ioc > 0; ioc--) {
104039 @@ -1009,7 +1009,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
104040 * If successful, 0 will be returned.
104041 */
104042 long keyctl_instantiate_key_common(key_serial_t id,
104043 - const struct iovec *payload_iov,
104044 + const struct iovec __user *payload_iov,
104045 unsigned ioc,
104046 size_t plen,
104047 key_serial_t ringid)
104048 @@ -1104,7 +1104,7 @@ long keyctl_instantiate_key(key_serial_t id,
104049 [0].iov_len = plen
104050 };
104051
104052 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
104053 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
104054 }
104055
104056 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
104057 @@ -1137,7 +1137,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
104058 if (ret == 0)
104059 goto no_payload_free;
104060
104061 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
104062 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
104063 err:
104064 if (iov != iovstack)
104065 kfree(iov);
104066 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
104067 index d46cbc5..2fb2576 100644
104068 --- a/security/keys/keyring.c
104069 +++ b/security/keys/keyring.c
104070 @@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator(const void *object,
104071
104072 kenter("{%d}", key->serial);
104073
104074 - BUG_ON(key != ctx->match_data);
104075 + /* We might get a keyring with matching index-key that is nonetheless a
104076 + * different keyring. */
104077 + if (key != ctx->match_data)
104078 + return 0;
104079 +
104080 ctx->result = ERR_PTR(-EDEADLK);
104081 return 1;
104082 }
104083 diff --git a/security/min_addr.c b/security/min_addr.c
104084 index f728728..6457a0c 100644
104085 --- a/security/min_addr.c
104086 +++ b/security/min_addr.c
104087 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
104088 */
104089 static void update_mmap_min_addr(void)
104090 {
104091 +#ifndef SPARC
104092 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
104093 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
104094 mmap_min_addr = dac_mmap_min_addr;
104095 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
104096 #else
104097 mmap_min_addr = dac_mmap_min_addr;
104098 #endif
104099 +#endif
104100 }
104101
104102 /*
104103 diff --git a/security/security.c b/security/security.c
104104 index 15b6928..60d03ec 100644
104105 --- a/security/security.c
104106 +++ b/security/security.c
104107 @@ -33,8 +33,8 @@
104108 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
104109 CONFIG_DEFAULT_SECURITY;
104110
104111 -static struct security_operations *security_ops;
104112 -static struct security_operations default_security_ops = {
104113 +struct security_operations *security_ops __read_only;
104114 +struct security_operations default_security_ops __read_only = {
104115 .name = "default",
104116 };
104117
104118 @@ -73,11 +73,6 @@ int __init security_init(void)
104119 return 0;
104120 }
104121
104122 -void reset_security_ops(void)
104123 -{
104124 - security_ops = &default_security_ops;
104125 -}
104126 -
104127 /* Save user chosen LSM */
104128 static int __init choose_lsm(char *str)
104129 {
104130 diff --git a/security/selinux/avc.c b/security/selinux/avc.c
104131 index fc3e662..7844c60 100644
104132 --- a/security/selinux/avc.c
104133 +++ b/security/selinux/avc.c
104134 @@ -59,7 +59,7 @@ struct avc_node {
104135 struct avc_cache {
104136 struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
104137 spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
104138 - atomic_t lru_hint; /* LRU hint for reclaim scan */
104139 + atomic_unchecked_t lru_hint; /* LRU hint for reclaim scan */
104140 atomic_t active_nodes;
104141 u32 latest_notif; /* latest revocation notification */
104142 };
104143 @@ -167,7 +167,7 @@ void __init avc_init(void)
104144 spin_lock_init(&avc_cache.slots_lock[i]);
104145 }
104146 atomic_set(&avc_cache.active_nodes, 0);
104147 - atomic_set(&avc_cache.lru_hint, 0);
104148 + atomic_set_unchecked(&avc_cache.lru_hint, 0);
104149
104150 avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
104151 0, SLAB_PANIC, NULL);
104152 @@ -242,7 +242,7 @@ static inline int avc_reclaim_node(void)
104153 spinlock_t *lock;
104154
104155 for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
104156 - hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
104157 + hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
104158 head = &avc_cache.slots[hvalue];
104159 lock = &avc_cache.slots_lock[hvalue];
104160
104161 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
104162 index 57b0b49..402063e 100644
104163 --- a/security/selinux/hooks.c
104164 +++ b/security/selinux/hooks.c
104165 @@ -96,8 +96,6 @@
104166 #include "audit.h"
104167 #include "avc_ss.h"
104168
104169 -extern struct security_operations *security_ops;
104170 -
104171 /* SECMARK reference count */
104172 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
104173
104174 @@ -5745,7 +5743,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
104175
104176 #endif
104177
104178 -static struct security_operations selinux_ops = {
104179 +static struct security_operations selinux_ops __read_only = {
104180 .name = "selinux",
104181
104182 .ptrace_access_check = selinux_ptrace_access_check,
104183 @@ -6098,6 +6096,9 @@ static void selinux_nf_ip_exit(void)
104184 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
104185 static int selinux_disabled;
104186
104187 +extern struct security_operations *security_ops;
104188 +extern struct security_operations default_security_ops;
104189 +
104190 int selinux_disable(void)
104191 {
104192 if (ss_initialized) {
104193 @@ -6115,7 +6116,9 @@ int selinux_disable(void)
104194 selinux_disabled = 1;
104195 selinux_enabled = 0;
104196
104197 - reset_security_ops();
104198 + pax_open_kernel();
104199 + security_ops = &default_security_ops;
104200 + pax_close_kernel();
104201
104202 /* Try to destroy the avc node cache */
104203 avc_disable();
104204 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
104205 index 48c3cc9..8022cf7 100644
104206 --- a/security/selinux/include/xfrm.h
104207 +++ b/security/selinux/include/xfrm.h
104208 @@ -45,7 +45,7 @@ static inline void selinux_xfrm_notify_policyload(void)
104209 {
104210 struct net *net;
104211
104212 - atomic_inc(&flow_cache_genid);
104213 + atomic_inc_unchecked(&flow_cache_genid);
104214 rtnl_lock();
104215 for_each_net(net)
104216 rt_genid_bump_all(net);
104217 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
104218 index b0be893..646bd94 100644
104219 --- a/security/smack/smack_lsm.c
104220 +++ b/security/smack/smack_lsm.c
104221 @@ -3731,7 +3731,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
104222 return 0;
104223 }
104224
104225 -struct security_operations smack_ops = {
104226 +struct security_operations smack_ops __read_only = {
104227 .name = "smack",
104228
104229 .ptrace_access_check = smack_ptrace_access_check,
104230 diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
104231 index 390c646..f2f8db3 100644
104232 --- a/security/tomoyo/mount.c
104233 +++ b/security/tomoyo/mount.c
104234 @@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
104235 type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
104236 need_dev = -1; /* dev_name is a directory */
104237 } else {
104238 + if (!capable(CAP_SYS_ADMIN)) {
104239 + error = -EPERM;
104240 + goto out;
104241 + }
104242 fstype = get_fs_type(type);
104243 if (!fstype) {
104244 error = -ENODEV;
104245 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
104246 index f0b756e..b129202 100644
104247 --- a/security/tomoyo/tomoyo.c
104248 +++ b/security/tomoyo/tomoyo.c
104249 @@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
104250 * tomoyo_security_ops is a "struct security_operations" which is used for
104251 * registering TOMOYO.
104252 */
104253 -static struct security_operations tomoyo_security_ops = {
104254 +static struct security_operations tomoyo_security_ops __read_only = {
104255 .name = "tomoyo",
104256 .cred_alloc_blank = tomoyo_cred_alloc_blank,
104257 .cred_prepare = tomoyo_cred_prepare,
104258 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
104259 index 20ef514..4182bed 100644
104260 --- a/security/yama/Kconfig
104261 +++ b/security/yama/Kconfig
104262 @@ -1,6 +1,6 @@
104263 config SECURITY_YAMA
104264 bool "Yama support"
104265 - depends on SECURITY
104266 + depends on SECURITY && !GRKERNSEC
104267 select SECURITYFS
104268 select SECURITY_PATH
104269 default n
104270 diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
104271 index 13c88fbc..f8c115e 100644
104272 --- a/security/yama/yama_lsm.c
104273 +++ b/security/yama/yama_lsm.c
104274 @@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_struct *parent)
104275 }
104276
104277 #ifndef CONFIG_SECURITY_YAMA_STACKED
104278 -static struct security_operations yama_ops = {
104279 +static struct security_operations yama_ops __read_only = {
104280 .name = "yama",
104281
104282 .ptrace_access_check = yama_ptrace_access_check,
104283 @@ -376,28 +376,24 @@ static struct security_operations yama_ops = {
104284 #endif
104285
104286 #ifdef CONFIG_SYSCTL
104287 +static int zero __read_only;
104288 +static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH;
104289 +
104290 static int yama_dointvec_minmax(struct ctl_table *table, int write,
104291 void __user *buffer, size_t *lenp, loff_t *ppos)
104292 {
104293 - int rc;
104294 + ctl_table_no_const yama_table;
104295
104296 if (write && !capable(CAP_SYS_PTRACE))
104297 return -EPERM;
104298
104299 - rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
104300 - if (rc)
104301 - return rc;
104302 -
104303 + yama_table = *table;
104304 /* Lock the max value if it ever gets set. */
104305 - if (write && *(int *)table->data == *(int *)table->extra2)
104306 - table->extra1 = table->extra2;
104307 -
104308 - return rc;
104309 + if (ptrace_scope == max_scope)
104310 + yama_table.extra1 = &max_scope;
104311 + return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos);
104312 }
104313
104314 -static int zero;
104315 -static int max_scope = YAMA_SCOPE_NO_ATTACH;
104316 -
104317 struct ctl_path yama_sysctl_path[] = {
104318 { .procname = "kernel", },
104319 { .procname = "yama", },
104320 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
104321 index 4cedc69..e59d8a3 100644
104322 --- a/sound/aoa/codecs/onyx.c
104323 +++ b/sound/aoa/codecs/onyx.c
104324 @@ -54,7 +54,7 @@ struct onyx {
104325 spdif_locked:1,
104326 analog_locked:1,
104327 original_mute:2;
104328 - int open_count;
104329 + local_t open_count;
104330 struct codec_info *codec_info;
104331
104332 /* mutex serializes concurrent access to the device
104333 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
104334 struct onyx *onyx = cii->codec_data;
104335
104336 mutex_lock(&onyx->mutex);
104337 - onyx->open_count++;
104338 + local_inc(&onyx->open_count);
104339 mutex_unlock(&onyx->mutex);
104340
104341 return 0;
104342 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
104343 struct onyx *onyx = cii->codec_data;
104344
104345 mutex_lock(&onyx->mutex);
104346 - onyx->open_count--;
104347 - if (!onyx->open_count)
104348 + if (local_dec_and_test(&onyx->open_count))
104349 onyx->spdif_locked = onyx->analog_locked = 0;
104350 mutex_unlock(&onyx->mutex);
104351
104352 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
104353 index ffd2025..df062c9 100644
104354 --- a/sound/aoa/codecs/onyx.h
104355 +++ b/sound/aoa/codecs/onyx.h
104356 @@ -11,6 +11,7 @@
104357 #include <linux/i2c.h>
104358 #include <asm/pmac_low_i2c.h>
104359 #include <asm/prom.h>
104360 +#include <asm/local.h>
104361
104362 /* PCM3052 register definitions */
104363
104364 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
104365 index 4c1cc51..16040040 100644
104366 --- a/sound/core/oss/pcm_oss.c
104367 +++ b/sound/core/oss/pcm_oss.c
104368 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
104369 if (in_kernel) {
104370 mm_segment_t fs;
104371 fs = snd_enter_user();
104372 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
104373 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
104374 snd_leave_user(fs);
104375 } else {
104376 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
104377 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
104378 }
104379 if (ret != -EPIPE && ret != -ESTRPIPE)
104380 break;
104381 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
104382 if (in_kernel) {
104383 mm_segment_t fs;
104384 fs = snd_enter_user();
104385 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
104386 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
104387 snd_leave_user(fs);
104388 } else {
104389 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
104390 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
104391 }
104392 if (ret == -EPIPE) {
104393 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
104394 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
104395 struct snd_pcm_plugin_channel *channels;
104396 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
104397 if (!in_kernel) {
104398 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
104399 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
104400 return -EFAULT;
104401 buf = runtime->oss.buffer;
104402 }
104403 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
104404 }
104405 } else {
104406 tmp = snd_pcm_oss_write2(substream,
104407 - (const char __force *)buf,
104408 + (const char __force_kernel *)buf,
104409 runtime->oss.period_bytes, 0);
104410 if (tmp <= 0)
104411 goto err;
104412 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
104413 struct snd_pcm_runtime *runtime = substream->runtime;
104414 snd_pcm_sframes_t frames, frames1;
104415 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
104416 - char __user *final_dst = (char __force __user *)buf;
104417 + char __user *final_dst = (char __force_user *)buf;
104418 if (runtime->oss.plugin_first) {
104419 struct snd_pcm_plugin_channel *channels;
104420 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
104421 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
104422 xfer += tmp;
104423 runtime->oss.buffer_used -= tmp;
104424 } else {
104425 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
104426 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
104427 runtime->oss.period_bytes, 0);
104428 if (tmp <= 0)
104429 goto err;
104430 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
104431 size1);
104432 size1 /= runtime->channels; /* frames */
104433 fs = snd_enter_user();
104434 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
104435 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
104436 snd_leave_user(fs);
104437 }
104438 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
104439 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
104440 index af49721..e85058e 100644
104441 --- a/sound/core/pcm_compat.c
104442 +++ b/sound/core/pcm_compat.c
104443 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
104444 int err;
104445
104446 fs = snd_enter_user();
104447 - err = snd_pcm_delay(substream, &delay);
104448 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
104449 snd_leave_user(fs);
104450 if (err < 0)
104451 return err;
104452 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
104453 index 01a5e05..c6bb425 100644
104454 --- a/sound/core/pcm_native.c
104455 +++ b/sound/core/pcm_native.c
104456 @@ -2811,11 +2811,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
104457 switch (substream->stream) {
104458 case SNDRV_PCM_STREAM_PLAYBACK:
104459 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
104460 - (void __user *)arg);
104461 + (void __force_user *)arg);
104462 break;
104463 case SNDRV_PCM_STREAM_CAPTURE:
104464 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
104465 - (void __user *)arg);
104466 + (void __force_user *)arg);
104467 break;
104468 default:
104469 result = -EINVAL;
104470 diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
104471 index 8d4d5e8..fdd0826 100644
104472 --- a/sound/core/seq/oss/seq_oss.c
104473 +++ b/sound/core/seq/oss/seq_oss.c
104474 @@ -75,8 +75,8 @@ static int __init alsa_seq_oss_init(void)
104475 {
104476 int rc;
104477 static struct snd_seq_dev_ops ops = {
104478 - snd_seq_oss_synth_register,
104479 - snd_seq_oss_synth_unregister,
104480 + .init_device = snd_seq_oss_synth_register,
104481 + .free_device = snd_seq_oss_synth_unregister,
104482 };
104483
104484 snd_seq_autoload_lock();
104485 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
104486 index 040c60e..989a19a 100644
104487 --- a/sound/core/seq/seq_device.c
104488 +++ b/sound/core/seq/seq_device.c
104489 @@ -64,7 +64,7 @@ struct ops_list {
104490 int argsize; /* argument size */
104491
104492 /* operators */
104493 - struct snd_seq_dev_ops ops;
104494 + struct snd_seq_dev_ops *ops;
104495
104496 /* registered devices */
104497 struct list_head dev_list; /* list of devices */
104498 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
104499
104500 mutex_lock(&ops->reg_mutex);
104501 /* copy driver operators */
104502 - ops->ops = *entry;
104503 + ops->ops = entry;
104504 ops->driver |= DRIVER_LOADED;
104505 ops->argsize = argsize;
104506
104507 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
104508 dev->name, ops->id, ops->argsize, dev->argsize);
104509 return -EINVAL;
104510 }
104511 - if (ops->ops.init_device(dev) >= 0) {
104512 + if (ops->ops->init_device(dev) >= 0) {
104513 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
104514 ops->num_init_devices++;
104515 } else {
104516 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
104517 dev->name, ops->id, ops->argsize, dev->argsize);
104518 return -EINVAL;
104519 }
104520 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
104521 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
104522 dev->status = SNDRV_SEQ_DEVICE_FREE;
104523 dev->driver_data = NULL;
104524 ops->num_init_devices--;
104525 diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
104526 index 64069db..3c6d392 100644
104527 --- a/sound/core/seq/seq_midi.c
104528 +++ b/sound/core/seq/seq_midi.c
104529 @@ -462,8 +462,8 @@ snd_seq_midisynth_unregister_port(struct snd_seq_device *dev)
104530 static int __init alsa_seq_midi_init(void)
104531 {
104532 static struct snd_seq_dev_ops ops = {
104533 - snd_seq_midisynth_register_port,
104534 - snd_seq_midisynth_unregister_port,
104535 + .init_device = snd_seq_midisynth_register_port,
104536 + .free_device = snd_seq_midisynth_unregister_port,
104537 };
104538 memset(&synths, 0, sizeof(synths));
104539 snd_seq_autoload_lock();
104540 diff --git a/sound/core/sound.c b/sound/core/sound.c
104541 index 437c25e..cd040ab 100644
104542 --- a/sound/core/sound.c
104543 +++ b/sound/core/sound.c
104544 @@ -86,7 +86,7 @@ static void snd_request_other(int minor)
104545 case SNDRV_MINOR_TIMER: str = "snd-timer"; break;
104546 default: return;
104547 }
104548 - request_module(str);
104549 + request_module("%s", str);
104550 }
104551
104552 #endif /* modular kernel */
104553 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
104554 index 4e0dd22..7a1f32c 100644
104555 --- a/sound/drivers/mts64.c
104556 +++ b/sound/drivers/mts64.c
104557 @@ -29,6 +29,7 @@
104558 #include <sound/initval.h>
104559 #include <sound/rawmidi.h>
104560 #include <sound/control.h>
104561 +#include <asm/local.h>
104562
104563 #define CARD_NAME "Miditerminal 4140"
104564 #define DRIVER_NAME "MTS64"
104565 @@ -67,7 +68,7 @@ struct mts64 {
104566 struct pardevice *pardev;
104567 int pardev_claimed;
104568
104569 - int open_count;
104570 + local_t open_count;
104571 int current_midi_output_port;
104572 int current_midi_input_port;
104573 u8 mode[MTS64_NUM_INPUT_PORTS];
104574 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
104575 {
104576 struct mts64 *mts = substream->rmidi->private_data;
104577
104578 - if (mts->open_count == 0) {
104579 + if (local_read(&mts->open_count) == 0) {
104580 /* We don't need a spinlock here, because this is just called
104581 if the device has not been opened before.
104582 So there aren't any IRQs from the device */
104583 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
104584
104585 msleep(50);
104586 }
104587 - ++(mts->open_count);
104588 + local_inc(&mts->open_count);
104589
104590 return 0;
104591 }
104592 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
104593 struct mts64 *mts = substream->rmidi->private_data;
104594 unsigned long flags;
104595
104596 - --(mts->open_count);
104597 - if (mts->open_count == 0) {
104598 + if (local_dec_return(&mts->open_count) == 0) {
104599 /* We need the spinlock_irqsave here because we can still
104600 have IRQs at this point */
104601 spin_lock_irqsave(&mts->lock, flags);
104602 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
104603
104604 msleep(500);
104605
104606 - } else if (mts->open_count < 0)
104607 - mts->open_count = 0;
104608 + } else if (local_read(&mts->open_count) < 0)
104609 + local_set(&mts->open_count, 0);
104610
104611 return 0;
104612 }
104613 diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
104614 index 6839953..7a0f4b9 100644
104615 --- a/sound/drivers/opl3/opl3_seq.c
104616 +++ b/sound/drivers/opl3/opl3_seq.c
104617 @@ -281,8 +281,8 @@ static int __init alsa_opl3_seq_init(void)
104618 {
104619 static struct snd_seq_dev_ops ops =
104620 {
104621 - snd_opl3_seq_new_device,
104622 - snd_opl3_seq_delete_device
104623 + .init_device = snd_opl3_seq_new_device,
104624 + .free_device = snd_opl3_seq_delete_device
104625 };
104626
104627 return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
104628 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
104629 index b953fb4..1999c01 100644
104630 --- a/sound/drivers/opl4/opl4_lib.c
104631 +++ b/sound/drivers/opl4/opl4_lib.c
104632 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
104633 MODULE_DESCRIPTION("OPL4 driver");
104634 MODULE_LICENSE("GPL");
104635
104636 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
104637 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
104638 {
104639 int timeout = 10;
104640 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
104641 diff --git a/sound/drivers/opl4/opl4_seq.c b/sound/drivers/opl4/opl4_seq.c
104642 index 9919769..d7de36c 100644
104643 --- a/sound/drivers/opl4/opl4_seq.c
104644 +++ b/sound/drivers/opl4/opl4_seq.c
104645 @@ -198,8 +198,8 @@ static int snd_opl4_seq_delete_device(struct snd_seq_device *dev)
104646 static int __init alsa_opl4_synth_init(void)
104647 {
104648 static struct snd_seq_dev_ops ops = {
104649 - snd_opl4_seq_new_device,
104650 - snd_opl4_seq_delete_device
104651 + .init_device = snd_opl4_seq_new_device,
104652 + .free_device = snd_opl4_seq_delete_device
104653 };
104654
104655 return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL4, &ops,
104656 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
104657 index 991018d..8984740 100644
104658 --- a/sound/drivers/portman2x4.c
104659 +++ b/sound/drivers/portman2x4.c
104660 @@ -48,6 +48,7 @@
104661 #include <sound/initval.h>
104662 #include <sound/rawmidi.h>
104663 #include <sound/control.h>
104664 +#include <asm/local.h>
104665
104666 #define CARD_NAME "Portman 2x4"
104667 #define DRIVER_NAME "portman"
104668 @@ -85,7 +86,7 @@ struct portman {
104669 struct pardevice *pardev;
104670 int pardev_claimed;
104671
104672 - int open_count;
104673 + local_t open_count;
104674 int mode[PORTMAN_NUM_INPUT_PORTS];
104675 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
104676 };
104677 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
104678 index 9048777..2d8b1fc 100644
104679 --- a/sound/firewire/amdtp.c
104680 +++ b/sound/firewire/amdtp.c
104681 @@ -488,7 +488,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
104682 ptr = s->pcm_buffer_pointer + data_blocks;
104683 if (ptr >= pcm->runtime->buffer_size)
104684 ptr -= pcm->runtime->buffer_size;
104685 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
104686 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
104687
104688 s->pcm_period_pointer += data_blocks;
104689 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
104690 @@ -655,7 +655,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
104691 */
104692 void amdtp_out_stream_update(struct amdtp_out_stream *s)
104693 {
104694 - ACCESS_ONCE(s->source_node_id_field) =
104695 + ACCESS_ONCE_RW(s->source_node_id_field) =
104696 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
104697 }
104698 EXPORT_SYMBOL(amdtp_out_stream_update);
104699 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
104700 index 2746ecd..c35dedd 100644
104701 --- a/sound/firewire/amdtp.h
104702 +++ b/sound/firewire/amdtp.h
104703 @@ -135,7 +135,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
104704 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
104705 struct snd_pcm_substream *pcm)
104706 {
104707 - ACCESS_ONCE(s->pcm) = pcm;
104708 + ACCESS_ONCE_RW(s->pcm) = pcm;
104709 }
104710
104711 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
104712 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
104713 index fd42e6b..c041971 100644
104714 --- a/sound/firewire/isight.c
104715 +++ b/sound/firewire/isight.c
104716 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
104717 ptr += count;
104718 if (ptr >= runtime->buffer_size)
104719 ptr -= runtime->buffer_size;
104720 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
104721 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
104722
104723 isight->period_counter += count;
104724 if (isight->period_counter >= runtime->period_size) {
104725 @@ -299,7 +299,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
104726 if (err < 0)
104727 return err;
104728
104729 - ACCESS_ONCE(isight->pcm_active) = true;
104730 + ACCESS_ONCE_RW(isight->pcm_active) = true;
104731
104732 return 0;
104733 }
104734 @@ -337,7 +337,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
104735 {
104736 struct isight *isight = substream->private_data;
104737
104738 - ACCESS_ONCE(isight->pcm_active) = false;
104739 + ACCESS_ONCE_RW(isight->pcm_active) = false;
104740
104741 mutex_lock(&isight->mutex);
104742 isight_stop_streaming(isight);
104743 @@ -430,10 +430,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
104744
104745 switch (cmd) {
104746 case SNDRV_PCM_TRIGGER_START:
104747 - ACCESS_ONCE(isight->pcm_running) = true;
104748 + ACCESS_ONCE_RW(isight->pcm_running) = true;
104749 break;
104750 case SNDRV_PCM_TRIGGER_STOP:
104751 - ACCESS_ONCE(isight->pcm_running) = false;
104752 + ACCESS_ONCE_RW(isight->pcm_running) = false;
104753 break;
104754 default:
104755 return -EINVAL;
104756 diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
104757 index 858023c..83b3d3c 100644
104758 --- a/sound/firewire/scs1x.c
104759 +++ b/sound/firewire/scs1x.c
104760 @@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
104761 {
104762 struct scs *scs = stream->rmidi->private_data;
104763
104764 - ACCESS_ONCE(scs->output) = up ? stream : NULL;
104765 + ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
104766 if (up) {
104767 scs->output_idle = false;
104768 tasklet_schedule(&scs->tasklet);
104769 @@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
104770 {
104771 struct scs *scs = stream->rmidi->private_data;
104772
104773 - ACCESS_ONCE(scs->input) = up ? stream : NULL;
104774 + ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
104775 }
104776
104777 static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
104778 @@ -473,8 +473,8 @@ static void scs_remove(struct fw_unit *unit)
104779
104780 snd_card_disconnect(scs->card);
104781
104782 - ACCESS_ONCE(scs->output) = NULL;
104783 - ACCESS_ONCE(scs->input) = NULL;
104784 + ACCESS_ONCE_RW(scs->output) = NULL;
104785 + ACCESS_ONCE_RW(scs->input) = NULL;
104786
104787 wait_event(scs->idle_wait, scs->output_idle);
104788
104789 diff --git a/sound/isa/sb/emu8000_synth.c b/sound/isa/sb/emu8000_synth.c
104790 index 4e3fcfb..ab45a9d 100644
104791 --- a/sound/isa/sb/emu8000_synth.c
104792 +++ b/sound/isa/sb/emu8000_synth.c
104793 @@ -120,8 +120,8 @@ static int __init alsa_emu8000_init(void)
104794 {
104795
104796 static struct snd_seq_dev_ops ops = {
104797 - snd_emu8000_new_device,
104798 - snd_emu8000_delete_device,
104799 + .init_device = snd_emu8000_new_device,
104800 + .free_device = snd_emu8000_delete_device,
104801 };
104802 return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU8000, &ops,
104803 sizeof(struct snd_emu8000*));
104804 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
104805 index 048439a..3be9f6f 100644
104806 --- a/sound/oss/sb_audio.c
104807 +++ b/sound/oss/sb_audio.c
104808 @@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
104809 buf16 = (signed short *)(localbuf + localoffs);
104810 while (c)
104811 {
104812 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
104813 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
104814 if (copy_from_user(lbuf8,
104815 userbuf+useroffs + p,
104816 locallen))
104817 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
104818 index 7d8803a..559f8d0 100644
104819 --- a/sound/oss/swarm_cs4297a.c
104820 +++ b/sound/oss/swarm_cs4297a.c
104821 @@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
104822 {
104823 struct cs4297a_state *s;
104824 u32 pwr, id;
104825 - mm_segment_t fs;
104826 int rval;
104827 #ifndef CONFIG_BCM_CS4297A_CSWARM
104828 u64 cfg;
104829 @@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
104830 if (!rval) {
104831 char *sb1250_duart_present;
104832
104833 +#if 0
104834 + mm_segment_t fs;
104835 fs = get_fs();
104836 set_fs(KERNEL_DS);
104837 -#if 0
104838 val = SOUND_MASK_LINE;
104839 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
104840 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
104841 val = initvol[i].vol;
104842 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
104843 }
104844 + set_fs(fs);
104845 // cs4297a_write_ac97(s, 0x18, 0x0808);
104846 #else
104847 // cs4297a_write_ac97(s, 0x5e, 0x180);
104848 cs4297a_write_ac97(s, 0x02, 0x0808);
104849 cs4297a_write_ac97(s, 0x18, 0x0808);
104850 #endif
104851 - set_fs(fs);
104852
104853 list_add(&s->list, &cs4297a_devs);
104854
104855 diff --git a/sound/pci/emu10k1/emu10k1_synth.c b/sound/pci/emu10k1/emu10k1_synth.c
104856 index 4c41c90..37f3631 100644
104857 --- a/sound/pci/emu10k1/emu10k1_synth.c
104858 +++ b/sound/pci/emu10k1/emu10k1_synth.c
104859 @@ -108,8 +108,8 @@ static int __init alsa_emu10k1_synth_init(void)
104860 {
104861
104862 static struct snd_seq_dev_ops ops = {
104863 - snd_emu10k1_synth_new_device,
104864 - snd_emu10k1_synth_delete_device,
104865 + .init_device = snd_emu10k1_synth_new_device,
104866 + .free_device = snd_emu10k1_synth_delete_device,
104867 };
104868 return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH, &ops,
104869 sizeof(struct snd_emu10k1_synth_arg));
104870 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
104871 index 69178c4..4493d52 100644
104872 --- a/sound/pci/hda/hda_codec.c
104873 +++ b/sound/pci/hda/hda_codec.c
104874 @@ -976,14 +976,10 @@ find_codec_preset(struct hda_codec *codec)
104875 mutex_unlock(&preset_mutex);
104876
104877 if (mod_requested < HDA_MODREQ_MAX_COUNT) {
104878 - char name[32];
104879 if (!mod_requested)
104880 - snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
104881 - codec->vendor_id);
104882 + request_module("snd-hda-codec-id:%08x", codec->vendor_id);
104883 else
104884 - snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
104885 - (codec->vendor_id >> 16) & 0xffff);
104886 - request_module(name);
104887 + request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff);
104888 mod_requested++;
104889 goto again;
104890 }
104891 @@ -2668,7 +2664,7 @@ static int get_kctl_0dB_offset(struct snd_kcontrol *kctl, int *step_to_check)
104892 /* FIXME: set_fs() hack for obtaining user-space TLV data */
104893 mm_segment_t fs = get_fs();
104894 set_fs(get_ds());
104895 - if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), _tlv))
104896 + if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), (unsigned int __force_user *)_tlv))
104897 tlv = _tlv;
104898 set_fs(fs);
104899 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
104900 diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
104901 index 4631a23..001ae57 100644
104902 --- a/sound/pci/ymfpci/ymfpci.h
104903 +++ b/sound/pci/ymfpci/ymfpci.h
104904 @@ -358,7 +358,7 @@ struct snd_ymfpci {
104905 spinlock_t reg_lock;
104906 spinlock_t voice_lock;
104907 wait_queue_head_t interrupt_sleep;
104908 - atomic_t interrupt_sleep_count;
104909 + atomic_unchecked_t interrupt_sleep_count;
104910 struct snd_info_entry *proc_entry;
104911 const struct firmware *dsp_microcode;
104912 const struct firmware *controller_microcode;
104913 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
104914 index d591c15..8cb8f94 100644
104915 --- a/sound/pci/ymfpci/ymfpci_main.c
104916 +++ b/sound/pci/ymfpci/ymfpci_main.c
104917 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
104918 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
104919 break;
104920 }
104921 - if (atomic_read(&chip->interrupt_sleep_count)) {
104922 - atomic_set(&chip->interrupt_sleep_count, 0);
104923 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
104924 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
104925 wake_up(&chip->interrupt_sleep);
104926 }
104927 __end:
104928 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
104929 continue;
104930 init_waitqueue_entry(&wait, current);
104931 add_wait_queue(&chip->interrupt_sleep, &wait);
104932 - atomic_inc(&chip->interrupt_sleep_count);
104933 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
104934 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
104935 remove_wait_queue(&chip->interrupt_sleep, &wait);
104936 }
104937 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
104938 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
104939 spin_unlock(&chip->reg_lock);
104940
104941 - if (atomic_read(&chip->interrupt_sleep_count)) {
104942 - atomic_set(&chip->interrupt_sleep_count, 0);
104943 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
104944 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
104945 wake_up(&chip->interrupt_sleep);
104946 }
104947 }
104948 @@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card,
104949 spin_lock_init(&chip->reg_lock);
104950 spin_lock_init(&chip->voice_lock);
104951 init_waitqueue_head(&chip->interrupt_sleep);
104952 - atomic_set(&chip->interrupt_sleep_count, 0);
104953 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
104954 chip->card = card;
104955 chip->pci = pci;
104956 chip->irq = -1;
104957 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
104958 index 35e2773..6d6ceee 100644
104959 --- a/sound/soc/fsl/fsl_ssi.c
104960 +++ b/sound/soc/fsl/fsl_ssi.c
104961 @@ -857,7 +857,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
104962 {
104963 struct fsl_ssi_private *ssi_private;
104964 int ret = 0;
104965 - struct device_attribute *dev_attr = NULL;
104966 + device_attribute_no_const *dev_attr = NULL;
104967 struct device_node *np = pdev->dev.of_node;
104968 const char *p, *sprop;
104969 const uint32_t *iprop;
104970 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
104971 index a66783e..ed1d791 100644
104972 --- a/sound/soc/soc-core.c
104973 +++ b/sound/soc/soc-core.c
104974 @@ -2253,8 +2253,10 @@ int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
104975 if (ret)
104976 return ret;
104977
104978 - ops->warm_reset = snd_soc_ac97_warm_reset;
104979 - ops->reset = snd_soc_ac97_reset;
104980 + pax_open_kernel();
104981 + *(void **)&ops->warm_reset = snd_soc_ac97_warm_reset;
104982 + *(void **)&ops->reset = snd_soc_ac97_reset;
104983 + pax_close_kernel();
104984
104985 snd_ac97_rst_cfg = cfg;
104986 return 0;
104987 diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
104988 index 7778b8e..3d619fc 100644
104989 --- a/sound/synth/emux/emux_seq.c
104990 +++ b/sound/synth/emux/emux_seq.c
104991 @@ -33,13 +33,13 @@ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *inf
104992 * MIDI emulation operators
104993 */
104994 static struct snd_midi_op emux_ops = {
104995 - snd_emux_note_on,
104996 - snd_emux_note_off,
104997 - snd_emux_key_press,
104998 - snd_emux_terminate_note,
104999 - snd_emux_control,
105000 - snd_emux_nrpn,
105001 - snd_emux_sysex,
105002 + .note_on = snd_emux_note_on,
105003 + .note_off = snd_emux_note_off,
105004 + .key_press = snd_emux_key_press,
105005 + .note_terminate = snd_emux_terminate_note,
105006 + .control = snd_emux_control,
105007 + .nrpn = snd_emux_nrpn,
105008 + .sysex = snd_emux_sysex,
105009 };
105010
105011
105012 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
105013 new file mode 100644
105014 index 0000000..4c2c45c
105015 --- /dev/null
105016 +++ b/tools/gcc/.gitignore
105017 @@ -0,0 +1,2 @@
105018 +randomize_layout_seed.h
105019 +size_overflow_hash.h
105020 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
105021 new file mode 100644
105022 index 0000000..b198b6d
105023 --- /dev/null
105024 +++ b/tools/gcc/Makefile
105025 @@ -0,0 +1,54 @@
105026 +#CC := gcc
105027 +#PLUGIN_SOURCE_FILES := pax_plugin.c
105028 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
105029 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
105030 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
105031 +
105032 +ifeq ($(PLUGINCC),$(HOSTCC))
105033 +HOSTLIBS := hostlibs
105034 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
105035 +else
105036 +HOSTLIBS := hostcxxlibs
105037 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing
105038 +endif
105039 +
105040 +$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
105041 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
105042 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
105043 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
105044 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
105045 +$(HOSTLIBS)-y += colorize_plugin.so
105046 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
105047 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
105048 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
105049 +$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so
105050 +
105051 +always := $($(HOSTLIBS)-y)
105052 +
105053 +constify_plugin-objs := constify_plugin.o
105054 +stackleak_plugin-objs := stackleak_plugin.o
105055 +kallocstat_plugin-objs := kallocstat_plugin.o
105056 +kernexec_plugin-objs := kernexec_plugin.o
105057 +checker_plugin-objs := checker_plugin.o
105058 +colorize_plugin-objs := colorize_plugin.o
105059 +size_overflow_plugin-objs := size_overflow_plugin.o
105060 +latent_entropy_plugin-objs := latent_entropy_plugin.o
105061 +structleak_plugin-objs := structleak_plugin.o
105062 +randomize_layout_plugin-objs := randomize_layout_plugin.o
105063 +
105064 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
105065 +$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
105066 +
105067 +quiet_cmd_build_size_overflow_hash = GENHASH $@
105068 + cmd_build_size_overflow_hash = \
105069 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
105070 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
105071 + $(call if_changed,build_size_overflow_hash)
105072 +
105073 +quiet_cmd_create_randomize_layout_seed = GENSEED $@
105074 + cmd_create_randomize_layout_seed = \
105075 + $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
105076 +$(objtree)/$(obj)/randomize_layout_seed.h: FORCE
105077 + $(call if_changed,create_randomize_layout_seed)
105078 +
105079 +targets += size_overflow_hash.h randomize_layout_seed.h randomize_layout_hash.h
105080 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
105081 new file mode 100644
105082 index 0000000..5452feea
105083 --- /dev/null
105084 +++ b/tools/gcc/checker_plugin.c
105085 @@ -0,0 +1,150 @@
105086 +/*
105087 + * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
105088 + * Licensed under the GPL v2
105089 + *
105090 + * Note: the choice of the license means that the compilation process is
105091 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
105092 + * but for the kernel it doesn't matter since it doesn't link against
105093 + * any of the gcc libraries
105094 + *
105095 + * gcc plugin to implement various sparse (source code checker) features
105096 + *
105097 + * TODO:
105098 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
105099 + *
105100 + * BUGS:
105101 + * - none known
105102 + */
105103 +
105104 +#include "gcc-common.h"
105105 +
105106 +extern void c_register_addr_space (const char *str, addr_space_t as);
105107 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
105108 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
105109 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
105110 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
105111 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
105112 +
105113 +int plugin_is_GPL_compatible;
105114 +
105115 +static struct plugin_info checker_plugin_info = {
105116 + .version = "201304082245",
105117 + .help = NULL,
105118 +};
105119 +
105120 +#define ADDR_SPACE_KERNEL 0
105121 +#define ADDR_SPACE_FORCE_KERNEL 1
105122 +#define ADDR_SPACE_USER 2
105123 +#define ADDR_SPACE_FORCE_USER 3
105124 +#define ADDR_SPACE_IOMEM 0
105125 +#define ADDR_SPACE_FORCE_IOMEM 0
105126 +#define ADDR_SPACE_PERCPU 0
105127 +#define ADDR_SPACE_FORCE_PERCPU 0
105128 +#define ADDR_SPACE_RCU 0
105129 +#define ADDR_SPACE_FORCE_RCU 0
105130 +
105131 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
105132 +{
105133 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
105134 +}
105135 +
105136 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
105137 +{
105138 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
105139 +}
105140 +
105141 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
105142 +{
105143 + return default_addr_space_valid_pointer_mode(mode, as);
105144 +}
105145 +
105146 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
105147 +{
105148 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
105149 +}
105150 +
105151 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
105152 +{
105153 + return default_addr_space_legitimize_address(x, oldx, mode, as);
105154 +}
105155 +
105156 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
105157 +{
105158 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
105159 + return true;
105160 +
105161 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
105162 + return true;
105163 +
105164 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
105165 + return true;
105166 +
105167 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
105168 + return true;
105169 +
105170 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
105171 + return true;
105172 +
105173 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
105174 + return true;
105175 +
105176 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
105177 + return true;
105178 +
105179 + return subset == superset;
105180 +}
105181 +
105182 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
105183 +{
105184 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
105185 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
105186 +
105187 + return op;
105188 +}
105189 +
105190 +static void register_checker_address_spaces(void *event_data, void *data)
105191 +{
105192 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
105193 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
105194 + c_register_addr_space("__user", ADDR_SPACE_USER);
105195 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
105196 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
105197 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
105198 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
105199 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
105200 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
105201 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
105202 +
105203 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
105204 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
105205 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
105206 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
105207 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
105208 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
105209 + targetm.addr_space.convert = checker_addr_space_convert;
105210 +}
105211 +
105212 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
105213 +{
105214 + const char * const plugin_name = plugin_info->base_name;
105215 + const int argc = plugin_info->argc;
105216 + const struct plugin_argument * const argv = plugin_info->argv;
105217 + int i;
105218 +
105219 + if (!plugin_default_version_check(version, &gcc_version)) {
105220 + error(G_("incompatible gcc/plugin versions"));
105221 + return 1;
105222 + }
105223 +
105224 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
105225 +
105226 + for (i = 0; i < argc; ++i)
105227 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
105228 +
105229 + if (TARGET_64BIT == 0)
105230 + return 0;
105231 +
105232 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
105233 +
105234 + return 0;
105235 +}
105236 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
105237 new file mode 100644
105238 index 0000000..06dcfda
105239 --- /dev/null
105240 +++ b/tools/gcc/colorize_plugin.c
105241 @@ -0,0 +1,169 @@
105242 +/*
105243 + * Copyright 2012-2014 by PaX Team <pageexec@freemail.hu>
105244 + * Licensed under the GPL v2
105245 + *
105246 + * Note: the choice of the license means that the compilation process is
105247 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
105248 + * but for the kernel it doesn't matter since it doesn't link against
105249 + * any of the gcc libraries
105250 + *
105251 + * gcc plugin to colorize diagnostic output
105252 + *
105253 + */
105254 +
105255 +#include "gcc-common.h"
105256 +
105257 +int plugin_is_GPL_compatible;
105258 +
105259 +static struct plugin_info colorize_plugin_info = {
105260 + .version = "201401260140",
105261 + .help = NULL,
105262 +};
105263 +
105264 +#define GREEN "\033[32m\033[2m"
105265 +#define LIGHTGREEN "\033[32m\033[1m"
105266 +#define YELLOW "\033[33m\033[2m"
105267 +#define LIGHTYELLOW "\033[33m\033[1m"
105268 +#define RED "\033[31m\033[2m"
105269 +#define LIGHTRED "\033[31m\033[1m"
105270 +#define BLUE "\033[34m\033[2m"
105271 +#define LIGHTBLUE "\033[34m\033[1m"
105272 +#define BRIGHT "\033[m\033[1m"
105273 +#define NORMAL "\033[m"
105274 +
105275 +static diagnostic_starter_fn old_starter;
105276 +static diagnostic_finalizer_fn old_finalizer;
105277 +
105278 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
105279 +{
105280 + const char *color;
105281 + char *newprefix;
105282 +
105283 + switch (diagnostic->kind) {
105284 + case DK_NOTE:
105285 + color = LIGHTBLUE;
105286 + break;
105287 +
105288 + case DK_PEDWARN:
105289 + case DK_WARNING:
105290 + color = LIGHTYELLOW;
105291 + break;
105292 +
105293 + case DK_ERROR:
105294 + case DK_FATAL:
105295 + case DK_ICE:
105296 + case DK_PERMERROR:
105297 + case DK_SORRY:
105298 + color = LIGHTRED;
105299 + break;
105300 +
105301 + default:
105302 + color = NORMAL;
105303 + }
105304 +
105305 + old_starter(context, diagnostic);
105306 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
105307 + return;
105308 + pp_destroy_prefix(context->printer);
105309 + pp_set_prefix(context->printer, newprefix);
105310 +}
105311 +
105312 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
105313 +{
105314 + old_finalizer(context, diagnostic);
105315 +}
105316 +
105317 +static void colorize_arm(void)
105318 +{
105319 + old_starter = diagnostic_starter(global_dc);
105320 + old_finalizer = diagnostic_finalizer(global_dc);
105321 +
105322 + diagnostic_starter(global_dc) = start_colorize;
105323 + diagnostic_finalizer(global_dc) = finalize_colorize;
105324 +}
105325 +
105326 +static unsigned int execute_colorize_rearm(void)
105327 +{
105328 + if (diagnostic_starter(global_dc) == start_colorize)
105329 + return 0;
105330 +
105331 + colorize_arm();
105332 + return 0;
105333 +}
105334 +
105335 +#if BUILDING_GCC_VERSION >= 4009
105336 +static const struct pass_data colorize_rearm_pass_data = {
105337 +#else
105338 +struct simple_ipa_opt_pass colorize_rearm_pass = {
105339 + .pass = {
105340 +#endif
105341 + .type = SIMPLE_IPA_PASS,
105342 + .name = "colorize_rearm",
105343 +#if BUILDING_GCC_VERSION >= 4008
105344 + .optinfo_flags = OPTGROUP_NONE,
105345 +#endif
105346 +#if BUILDING_GCC_VERSION >= 4009
105347 + .has_gate = false,
105348 + .has_execute = true,
105349 +#else
105350 + .gate = NULL,
105351 + .execute = execute_colorize_rearm,
105352 + .sub = NULL,
105353 + .next = NULL,
105354 + .static_pass_number = 0,
105355 +#endif
105356 + .tv_id = TV_NONE,
105357 + .properties_required = 0,
105358 + .properties_provided = 0,
105359 + .properties_destroyed = 0,
105360 + .todo_flags_start = 0,
105361 + .todo_flags_finish = 0
105362 +#if BUILDING_GCC_VERSION < 4009
105363 + }
105364 +#endif
105365 +};
105366 +
105367 +#if BUILDING_GCC_VERSION >= 4009
105368 +namespace {
105369 +class colorize_rearm_pass : public simple_ipa_opt_pass {
105370 +public:
105371 + colorize_rearm_pass() : simple_ipa_opt_pass(colorize_rearm_pass_data, g) {}
105372 + unsigned int execute() { return execute_colorize_rearm(); }
105373 +};
105374 +}
105375 +#endif
105376 +
105377 +static struct opt_pass *make_colorize_rearm_pass(void)
105378 +{
105379 +#if BUILDING_GCC_VERSION >= 4009
105380 + return new colorize_rearm_pass();
105381 +#else
105382 + return &colorize_rearm_pass.pass;
105383 +#endif
105384 +}
105385 +
105386 +static void colorize_start_unit(void *gcc_data, void *user_data)
105387 +{
105388 + colorize_arm();
105389 +}
105390 +
105391 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
105392 +{
105393 + const char * const plugin_name = plugin_info->base_name;
105394 + struct register_pass_info colorize_rearm_pass_info;
105395 +
105396 + colorize_rearm_pass_info.pass = make_colorize_rearm_pass();
105397 + colorize_rearm_pass_info.reference_pass_name = "*free_lang_data";
105398 + colorize_rearm_pass_info.ref_pass_instance_number = 1;
105399 + colorize_rearm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
105400 +
105401 + if (!plugin_default_version_check(version, &gcc_version)) {
105402 + error(G_("incompatible gcc/plugin versions"));
105403 + return 1;
105404 + }
105405 +
105406 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
105407 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
105408 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
105409 + return 0;
105410 +}
105411 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
105412 new file mode 100644
105413 index 0000000..4f67ac1
105414 --- /dev/null
105415 +++ b/tools/gcc/constify_plugin.c
105416 @@ -0,0 +1,552 @@
105417 +/*
105418 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
105419 + * Copyright 2011-2014 by PaX Team <pageexec@freemail.hu>
105420 + * Licensed under the GPL v2, or (at your option) v3
105421 + *
105422 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
105423 + *
105424 + * Homepage:
105425 + * http://www.grsecurity.net/~ephox/const_plugin/
105426 + *
105427 + * Usage:
105428 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
105429 + * $ gcc -fplugin=constify_plugin.so test.c -O2
105430 + */
105431 +
105432 +#include "gcc-common.h"
105433 +
105434 +// unused C type flag in all versions 4.5-4.9
105435 +#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
105436 +
105437 +int plugin_is_GPL_compatible;
105438 +
105439 +static struct plugin_info const_plugin_info = {
105440 + .version = "201401270210",
105441 + .help = "no-constify\tturn off constification\n",
105442 +};
105443 +
105444 +typedef struct {
105445 + bool has_fptr_field;
105446 + bool has_writable_field;
105447 + bool has_do_const_field;
105448 + bool has_no_const_field;
105449 +} constify_info;
105450 +
105451 +static const_tree get_field_type(const_tree field)
105452 +{
105453 + return strip_array_types(TREE_TYPE(field));
105454 +}
105455 +
105456 +static bool is_fptr(const_tree field)
105457 +{
105458 + const_tree ptr = get_field_type(field);
105459 +
105460 + if (TREE_CODE(ptr) != POINTER_TYPE)
105461 + return false;
105462 +
105463 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
105464 +}
105465 +
105466 +/*
105467 + * determine whether the given structure type meets the requirements for automatic constification,
105468 + * including the constification attributes on nested structure types
105469 + */
105470 +static void constifiable(const_tree node, constify_info *cinfo)
105471 +{
105472 + const_tree field;
105473 +
105474 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
105475 +
105476 + // e.g., pointer to structure fields while still constructing the structure type
105477 + if (TYPE_FIELDS(node) == NULL_TREE)
105478 + return;
105479 +
105480 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
105481 + const_tree type = get_field_type(field);
105482 + enum tree_code code = TREE_CODE(type);
105483 +
105484 + if (node == type)
105485 + continue;
105486 +
105487 + if (is_fptr(field))
105488 + cinfo->has_fptr_field = true;
105489 + else if (!TREE_READONLY(field))
105490 + cinfo->has_writable_field = true;
105491 +
105492 + if (code == RECORD_TYPE || code == UNION_TYPE) {
105493 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
105494 + cinfo->has_do_const_field = true;
105495 + else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
105496 + cinfo->has_no_const_field = true;
105497 + else
105498 + constifiable(type, cinfo);
105499 + }
105500 + }
105501 +}
105502 +
105503 +static bool constified(const_tree node)
105504 +{
105505 + constify_info cinfo = {
105506 + .has_fptr_field = false,
105507 + .has_writable_field = false,
105508 + .has_do_const_field = false,
105509 + .has_no_const_field = false
105510 + };
105511 +
105512 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
105513 +
105514 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
105515 +// gcc_assert(!TYPE_READONLY(node));
105516 + return false;
105517 + }
105518 +
105519 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
105520 + gcc_assert(TYPE_READONLY(node));
105521 + return true;
105522 + }
105523 +
105524 + constifiable(node, &cinfo);
105525 + if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
105526 + return false;
105527 +
105528 + return TYPE_READONLY(node);
105529 +}
105530 +
105531 +static void deconstify_tree(tree node);
105532 +
105533 +static void deconstify_type(tree type)
105534 +{
105535 + tree field;
105536 +
105537 + gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
105538 +
105539 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
105540 + const_tree fieldtype = get_field_type(field);
105541 +
105542 + // special case handling of simple ptr-to-same-array-type members
105543 + if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
105544 + tree ptrtype = TREE_TYPE(TREE_TYPE(field));
105545 +
105546 + if (TREE_TYPE(TREE_TYPE(field)) == type)
105547 + continue;
105548 + if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
105549 + continue;
105550 + if (!constified(ptrtype))
105551 + continue;
105552 + if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
105553 + TREE_TYPE(field) = copy_node(TREE_TYPE(field));
105554 + TREE_TYPE(TREE_TYPE(field)) = build_qualified_type(type, TYPE_QUALS(ptrtype) & ~TYPE_QUAL_CONST);
105555 + }
105556 + continue;
105557 + }
105558 + if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
105559 + continue;
105560 + if (!constified(fieldtype))
105561 + continue;
105562 +
105563 + deconstify_tree(field);
105564 + TREE_READONLY(field) = 0;
105565 + }
105566 + TYPE_READONLY(type) = 0;
105567 + C_TYPE_FIELDS_READONLY(type) = 0;
105568 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
105569 + TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
105570 + TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
105571 + }
105572 +}
105573 +
105574 +static void deconstify_tree(tree node)
105575 +{
105576 + tree old_type, new_type, field;
105577 +
105578 + old_type = TREE_TYPE(node);
105579 + while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
105580 + node = TREE_TYPE(node) = copy_node(old_type);
105581 + old_type = TREE_TYPE(old_type);
105582 + }
105583 +
105584 + gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
105585 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
105586 +
105587 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
105588 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
105589 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
105590 + DECL_FIELD_CONTEXT(field) = new_type;
105591 +
105592 + deconstify_type(new_type);
105593 +
105594 + TREE_TYPE(node) = new_type;
105595 +}
105596 +
105597 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
105598 +{
105599 + tree type;
105600 + constify_info cinfo = {
105601 + .has_fptr_field = false,
105602 + .has_writable_field = false,
105603 + .has_do_const_field = false,
105604 + .has_no_const_field = false
105605 + };
105606 +
105607 + *no_add_attrs = true;
105608 + if (TREE_CODE(*node) == FUNCTION_DECL) {
105609 + error("%qE attribute does not apply to functions (%qF)", name, *node);
105610 + return NULL_TREE;
105611 + }
105612 +
105613 + if (TREE_CODE(*node) == PARM_DECL) {
105614 + error("%qE attribute does not apply to function parameters (%qD)", name, *node);
105615 + return NULL_TREE;
105616 + }
105617 +
105618 + if (TREE_CODE(*node) == VAR_DECL) {
105619 + error("%qE attribute does not apply to variables (%qD)", name, *node);
105620 + return NULL_TREE;
105621 + }
105622 +
105623 + if (TYPE_P(*node)) {
105624 + type = *node;
105625 + } else {
105626 + gcc_assert(TREE_CODE(*node) == TYPE_DECL);
105627 + type = TREE_TYPE(*node);
105628 + }
105629 +
105630 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
105631 + error("%qE attribute used on %qT applies to struct and union types only", name, type);
105632 + return NULL_TREE;
105633 + }
105634 +
105635 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
105636 + error("%qE attribute is already applied to the type %qT", name, type);
105637 + return NULL_TREE;
105638 + }
105639 +
105640 + if (TYPE_P(*node)) {
105641 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
105642 + error("%qE attribute used on type %qT is incompatible with 'do_const'", name, type);
105643 + else
105644 + *no_add_attrs = false;
105645 + return NULL_TREE;
105646 + }
105647 +
105648 + constifiable(type, &cinfo);
105649 + if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
105650 + deconstify_tree(*node);
105651 + TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
105652 + return NULL_TREE;
105653 + }
105654 +
105655 + error("%qE attribute used on type %qT that is not constified", name, type);
105656 + return NULL_TREE;
105657 +}
105658 +
105659 +static void constify_type(tree type)
105660 +{
105661 + TYPE_READONLY(type) = 1;
105662 + C_TYPE_FIELDS_READONLY(type) = 1;
105663 + TYPE_CONSTIFY_VISITED(type) = 1;
105664 +// TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
105665 +// TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
105666 +}
105667 +
105668 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
105669 +{
105670 + *no_add_attrs = true;
105671 + if (!TYPE_P(*node)) {
105672 + error("%qE attribute applies to types only (%qD)", name, *node);
105673 + return NULL_TREE;
105674 + }
105675 +
105676 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
105677 + error("%qE attribute used on %qT applies to struct and union types only", name, *node);
105678 + return NULL_TREE;
105679 + }
105680 +
105681 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
105682 + error("%qE attribute used on %qT is already applied to the type", name, *node);
105683 + return NULL_TREE;
105684 + }
105685 +
105686 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
105687 + error("%qE attribute used on %qT is incompatible with 'no_const'", name, *node);
105688 + return NULL_TREE;
105689 + }
105690 +
105691 + *no_add_attrs = false;
105692 + return NULL_TREE;
105693 +}
105694 +
105695 +static struct attribute_spec no_const_attr = {
105696 + .name = "no_const",
105697 + .min_length = 0,
105698 + .max_length = 0,
105699 + .decl_required = false,
105700 + .type_required = false,
105701 + .function_type_required = false,
105702 + .handler = handle_no_const_attribute,
105703 +#if BUILDING_GCC_VERSION >= 4007
105704 + .affects_type_identity = true
105705 +#endif
105706 +};
105707 +
105708 +static struct attribute_spec do_const_attr = {
105709 + .name = "do_const",
105710 + .min_length = 0,
105711 + .max_length = 0,
105712 + .decl_required = false,
105713 + .type_required = false,
105714 + .function_type_required = false,
105715 + .handler = handle_do_const_attribute,
105716 +#if BUILDING_GCC_VERSION >= 4007
105717 + .affects_type_identity = true
105718 +#endif
105719 +};
105720 +
105721 +static void register_attributes(void *event_data, void *data)
105722 +{
105723 + register_attribute(&no_const_attr);
105724 + register_attribute(&do_const_attr);
105725 +}
105726 +
105727 +static void finish_type(void *event_data, void *data)
105728 +{
105729 + tree type = (tree)event_data;
105730 + constify_info cinfo = {
105731 + .has_fptr_field = false,
105732 + .has_writable_field = false,
105733 + .has_do_const_field = false,
105734 + .has_no_const_field = false
105735 + };
105736 +
105737 + if (type == NULL_TREE || type == error_mark_node)
105738 + return;
105739 +
105740 + if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
105741 + return;
105742 +
105743 + constifiable(type, &cinfo);
105744 +
105745 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
105746 + if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
105747 + deconstify_type(type);
105748 + TYPE_CONSTIFY_VISITED(type) = 1;
105749 + } else
105750 + error("'no_const' attribute used on type %qT that is not constified", type);
105751 + return;
105752 + }
105753 +
105754 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
105755 + if (!cinfo.has_writable_field) {
105756 + error("'do_const' attribute used on type %qT that is%sconstified", type, cinfo.has_fptr_field ? " " : " not ");
105757 + return;
105758 + }
105759 + constify_type(type);
105760 + return;
105761 + }
105762 +
105763 + if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
105764 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
105765 + error("'do_const' attribute used on type %qT that is constified", type);
105766 + return;
105767 + }
105768 + constify_type(type);
105769 + return;
105770 + }
105771 +
105772 + deconstify_type(type);
105773 + TYPE_CONSTIFY_VISITED(type) = 1;
105774 +}
105775 +
105776 +static void check_global_variables(void *event_data, void *data)
105777 +{
105778 + struct varpool_node *node;
105779 +
105780 + FOR_EACH_VARIABLE(node) {
105781 + tree var = NODE_DECL(node);
105782 + tree type = TREE_TYPE(var);
105783 +
105784 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
105785 + continue;
105786 +
105787 + if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
105788 + continue;
105789 +
105790 + if (!TYPE_CONSTIFY_VISITED(type))
105791 + continue;
105792 +
105793 + if (DECL_EXTERNAL(var))
105794 + continue;
105795 +
105796 + if (DECL_INITIAL(var))
105797 + continue;
105798 +
105799 + // this works around a gcc bug/feature where uninitialized globals
105800 + // are moved into the .bss section regardless of any constification
105801 + DECL_INITIAL(var) = build_constructor(type, NULL);
105802 +// inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var);
105803 + }
105804 +}
105805 +
105806 +static unsigned int check_local_variables(void)
105807 +{
105808 + unsigned int ret = 0;
105809 + tree var;
105810 +
105811 + unsigned int i;
105812 +
105813 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
105814 + tree type = TREE_TYPE(var);
105815 +
105816 + gcc_assert(DECL_P(var));
105817 + if (is_global_var(var))
105818 + continue;
105819 +
105820 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
105821 + continue;
105822 +
105823 + if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
105824 + continue;
105825 +
105826 + if (!TYPE_CONSTIFY_VISITED(type))
105827 + continue;
105828 +
105829 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
105830 + ret = 1;
105831 + }
105832 + return ret;
105833 +}
105834 +
105835 +#if BUILDING_GCC_VERSION >= 4009
105836 +static const struct pass_data check_local_variables_pass_data = {
105837 +#else
105838 +static struct gimple_opt_pass check_local_variables_pass = {
105839 + .pass = {
105840 +#endif
105841 + .type = GIMPLE_PASS,
105842 + .name = "check_local_variables",
105843 +#if BUILDING_GCC_VERSION >= 4008
105844 + .optinfo_flags = OPTGROUP_NONE,
105845 +#endif
105846 +#if BUILDING_GCC_VERSION >= 4009
105847 + .has_gate = false,
105848 + .has_execute = true,
105849 +#else
105850 + .gate = NULL,
105851 + .execute = check_local_variables,
105852 + .sub = NULL,
105853 + .next = NULL,
105854 + .static_pass_number = 0,
105855 +#endif
105856 + .tv_id = TV_NONE,
105857 + .properties_required = 0,
105858 + .properties_provided = 0,
105859 + .properties_destroyed = 0,
105860 + .todo_flags_start = 0,
105861 + .todo_flags_finish = 0
105862 +#if BUILDING_GCC_VERSION < 4009
105863 + }
105864 +#endif
105865 +};
105866 +
105867 +#if BUILDING_GCC_VERSION >= 4009
105868 +namespace {
105869 +class check_local_variables_pass : public gimple_opt_pass {
105870 +public:
105871 + check_local_variables_pass() : gimple_opt_pass(check_local_variables_pass_data, g) {}
105872 + unsigned int execute() { return check_local_variables(); }
105873 +};
105874 +}
105875 +#endif
105876 +
105877 +static struct opt_pass *make_check_local_variables_pass(void)
105878 +{
105879 +#if BUILDING_GCC_VERSION >= 4009
105880 + return new check_local_variables_pass();
105881 +#else
105882 + return &check_local_variables_pass.pass;
105883 +#endif
105884 +}
105885 +
105886 +static struct {
105887 + const char *name;
105888 + const char *asm_op;
105889 +} sections[] = {
105890 + {".init.rodata", "\t.section\t.init.rodata,\"a\""},
105891 + {".ref.rodata", "\t.section\t.ref.rodata,\"a\""},
105892 + {".devinit.rodata", "\t.section\t.devinit.rodata,\"a\""},
105893 + {".devexit.rodata", "\t.section\t.devexit.rodata,\"a\""},
105894 + {".cpuinit.rodata", "\t.section\t.cpuinit.rodata,\"a\""},
105895 + {".cpuexit.rodata", "\t.section\t.cpuexit.rodata,\"a\""},
105896 + {".meminit.rodata", "\t.section\t.meminit.rodata,\"a\""},
105897 + {".memexit.rodata", "\t.section\t.memexit.rodata,\"a\""},
105898 + {".data..read_only", "\t.section\t.data..read_only,\"a\""},
105899 +};
105900 +
105901 +static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
105902 +
105903 +static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
105904 +{
105905 + size_t i;
105906 +
105907 + for (i = 0; i < ARRAY_SIZE(sections); i++)
105908 + if (!strcmp(sections[i].name, name))
105909 + return 0;
105910 + return old_section_type_flags(decl, name, reloc);
105911 +}
105912 +
105913 +static void constify_start_unit(void *gcc_data, void *user_data)
105914 +{
105915 +// size_t i;
105916 +
105917 +// for (i = 0; i < ARRAY_SIZE(sections); i++)
105918 +// sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
105919 +// sections[i].section = get_section(sections[i].name, 0, NULL);
105920 +
105921 + old_section_type_flags = targetm.section_type_flags;
105922 + targetm.section_type_flags = constify_section_type_flags;
105923 +}
105924 +
105925 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
105926 +{
105927 + const char * const plugin_name = plugin_info->base_name;
105928 + const int argc = plugin_info->argc;
105929 + const struct plugin_argument * const argv = plugin_info->argv;
105930 + int i;
105931 + bool constify = true;
105932 +
105933 + struct register_pass_info check_local_variables_pass_info;
105934 +
105935 + check_local_variables_pass_info.pass = make_check_local_variables_pass();
105936 + check_local_variables_pass_info.reference_pass_name = "ssa";
105937 + check_local_variables_pass_info.ref_pass_instance_number = 1;
105938 + check_local_variables_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
105939 +
105940 + if (!plugin_default_version_check(version, &gcc_version)) {
105941 + error(G_("incompatible gcc/plugin versions"));
105942 + return 1;
105943 + }
105944 +
105945 + for (i = 0; i < argc; ++i) {
105946 + if (!(strcmp(argv[i].key, "no-constify"))) {
105947 + constify = false;
105948 + continue;
105949 + }
105950 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
105951 + }
105952 +
105953 + if (strcmp(lang_hooks.name, "GNU C")) {
105954 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
105955 + constify = false;
105956 + }
105957 +
105958 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
105959 + if (constify) {
105960 + register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
105961 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
105962 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &check_local_variables_pass_info);
105963 + register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
105964 + }
105965 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
105966 +
105967 + return 0;
105968 +}
105969 diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
105970 new file mode 100644
105971 index 0000000..6dbb502
105972 --- /dev/null
105973 +++ b/tools/gcc/gcc-common.h
105974 @@ -0,0 +1,286 @@
105975 +#ifndef GCC_COMMON_H_INCLUDED
105976 +#define GCC_COMMON_H_INCLUDED
105977 +
105978 +#include "plugin.h"
105979 +#include "bversion.h"
105980 +#include "plugin-version.h"
105981 +#include "config.h"
105982 +#include "system.h"
105983 +#include "coretypes.h"
105984 +#include "tm.h"
105985 +#include "line-map.h"
105986 +#include "input.h"
105987 +#include "tree.h"
105988 +
105989 +#include "tree-inline.h"
105990 +#include "version.h"
105991 +#include "rtl.h"
105992 +#include "tm_p.h"
105993 +#include "flags.h"
105994 +//#include "insn-attr.h"
105995 +//#include "insn-config.h"
105996 +//#include "insn-flags.h"
105997 +#include "hard-reg-set.h"
105998 +//#include "recog.h"
105999 +#include "output.h"
106000 +#include "except.h"
106001 +#include "function.h"
106002 +#include "toplev.h"
106003 +//#include "expr.h"
106004 +#include "basic-block.h"
106005 +#include "intl.h"
106006 +#include "ggc.h"
106007 +//#include "regs.h"
106008 +#include "timevar.h"
106009 +
106010 +#include "params.h"
106011 +#include "pointer-set.h"
106012 +#include "emit-rtl.h"
106013 +//#include "reload.h"
106014 +//#include "ira.h"
106015 +//#include "dwarf2asm.h"
106016 +#include "debug.h"
106017 +#include "target.h"
106018 +#include "langhooks.h"
106019 +#include "cfgloop.h"
106020 +//#include "hosthooks.h"
106021 +#include "cgraph.h"
106022 +#include "opts.h"
106023 +//#include "coverage.h"
106024 +//#include "value-prof.h"
106025 +
106026 +#if BUILDING_GCC_VERSION >= 4007
106027 +#include "tree-pretty-print.h"
106028 +#include "gimple-pretty-print.h"
106029 +#include "c-tree.h"
106030 +//#include "alloc-pool.h"
106031 +#endif
106032 +
106033 +#if BUILDING_GCC_VERSION <= 4008
106034 +#include "tree-flow.h"
106035 +#else
106036 +#include "tree-cfgcleanup.h"
106037 +#endif
106038 +
106039 +#include "diagnostic.h"
106040 +//#include "tree-diagnostic.h"
106041 +#include "tree-dump.h"
106042 +#include "tree-pass.h"
106043 +//#include "df.h"
106044 +#include "predict.h"
106045 +//#include "lto-streamer.h"
106046 +#include "ipa-utils.h"
106047 +
106048 +#if BUILDING_GCC_VERSION >= 4009
106049 +#include "varasm.h"
106050 +#include "stor-layout.h"
106051 +#include "internal-fn.h"
106052 +#include "gimple-expr.h"
106053 +//#include "diagnostic-color.h"
106054 +#include "context.h"
106055 +#include "tree-ssa-alias.h"
106056 +#include "stringpool.h"
106057 +#include "tree-ssanames.h"
106058 +#include "print-tree.h"
106059 +#include "tree-eh.h"
106060 +#endif
106061 +
106062 +#include "gimple.h"
106063 +
106064 +#if BUILDING_GCC_VERSION >= 4009
106065 +#include "tree-ssa-operands.h"
106066 +#include "tree-phinodes.h"
106067 +#include "tree-cfg.h"
106068 +#include "gimple-iterator.h"
106069 +#include "gimple-ssa.h"
106070 +#include "ssa-iterators.h"
106071 +#endif
106072 +
106073 +//#include "expr.h" where are you...
106074 +extern rtx emit_move_insn(rtx x, rtx y);
106075 +
106076 +// missing from basic_block.h...
106077 +extern void debug_dominance_info(enum cdi_direction dir);
106078 +extern void debug_dominance_tree(enum cdi_direction dir, basic_block root);
106079 +
106080 +#define __unused __attribute__((__unused__))
106081 +
106082 +#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
106083 +#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
106084 +
106085 +#if BUILDING_GCC_VERSION == 4005
106086 +#define FOR_EACH_LOCAL_DECL(FUN, I, D) for (tree vars = (FUN)->local_decls; vars && (D = TREE_VALUE(vars)); vars = TREE_CHAIN(vars), I)
106087 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
106088 +#define FOR_EACH_VEC_ELT(T, V, I, P) for (I = 0; VEC_iterate(T, (V), (I), (P)); ++(I))
106089 +
106090 +static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
106091 +{
106092 + tree fndecl;
106093 +
106094 + if (!is_gimple_call(stmt))
106095 + return false;
106096 + fndecl = gimple_call_fndecl(stmt);
106097 + if (!fndecl || DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
106098 + return false;
106099 +// print_node(stderr, "pax", fndecl, 4);
106100 + return DECL_FUNCTION_CODE(fndecl) == code;
106101 +}
106102 +
106103 +static inline bool is_simple_builtin(tree decl)
106104 +{
106105 + if (decl && DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
106106 + return false;
106107 +
106108 + switch (DECL_FUNCTION_CODE(decl)) {
106109 + /* Builtins that expand to constants. */
106110 + case BUILT_IN_CONSTANT_P:
106111 + case BUILT_IN_EXPECT:
106112 + case BUILT_IN_OBJECT_SIZE:
106113 + case BUILT_IN_UNREACHABLE:
106114 + /* Simple register moves or loads from stack. */
106115 + case BUILT_IN_RETURN_ADDRESS:
106116 + case BUILT_IN_EXTRACT_RETURN_ADDR:
106117 + case BUILT_IN_FROB_RETURN_ADDR:
106118 + case BUILT_IN_RETURN:
106119 + case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
106120 + case BUILT_IN_FRAME_ADDRESS:
106121 + case BUILT_IN_VA_END:
106122 + case BUILT_IN_STACK_SAVE:
106123 + case BUILT_IN_STACK_RESTORE:
106124 + /* Exception state returns or moves registers around. */
106125 + case BUILT_IN_EH_FILTER:
106126 + case BUILT_IN_EH_POINTER:
106127 + case BUILT_IN_EH_COPY_VALUES:
106128 + return true;
106129 +
106130 + default:
106131 + return false;
106132 + }
106133 +}
106134 +#endif
106135 +
106136 +#if BUILDING_GCC_VERSION <= 4006
106137 +#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
106138 +#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP)
106139 +
106140 +// should come from c-tree.h if only it were installed for gcc 4.5...
106141 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
106142 +
106143 +#define get_random_seed(noinit) ({ \
106144 + unsigned HOST_WIDE_INT seed; \
106145 + sscanf(get_random_seed(noinit), "%" HOST_WIDE_INT_PRINT "x", &seed); \
106146 + seed * seed; })
106147 +
106148 +static inline bool gimple_clobber_p(gimple s)
106149 +{
106150 + return false;
106151 +}
106152 +
106153 +static inline tree builtin_decl_implicit(enum built_in_function fncode)
106154 +{
106155 + return implicit_built_in_decls[fncode];
106156 +}
106157 +
106158 +static inline struct cgraph_node *cgraph_get_create_node(tree decl)
106159 +{
106160 + struct cgraph_node *node = cgraph_get_node(decl);
106161 +
106162 + return node ? node : cgraph_node(decl);
106163 +}
106164 +
106165 +static inline bool cgraph_function_with_gimple_body_p(struct cgraph_node *node)
106166 +{
106167 + return node->analyzed && !node->thunk.thunk_p && !node->alias;
106168 +}
106169 +
106170 +static inline struct cgraph_node *cgraph_first_function_with_gimple_body(void)
106171 +{
106172 + struct cgraph_node *node;
106173 +
106174 + for (node = cgraph_nodes; node; node = node->next)
106175 + if (cgraph_function_with_gimple_body_p(node))
106176 + return node;
106177 + return NULL;
106178 +}
106179 +
106180 +static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node)
106181 +{
106182 + for (node = node->next; node; node = node->next)
106183 + if (cgraph_function_with_gimple_body_p(node))
106184 + return node;
106185 + return NULL;
106186 +}
106187 +
106188 +#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
106189 + for ((node) = cgraph_first_function_with_gimple_body(); (node); \
106190 + (node) = cgraph_next_function_with_gimple_body(node))
106191 +#endif
106192 +
106193 +#if BUILDING_GCC_VERSION == 4006
106194 +extern void debug_gimple_stmt(gimple);
106195 +extern void debug_gimple_seq(gimple_seq);
106196 +extern void print_gimple_seq(FILE *, gimple_seq, int, int);
106197 +extern void print_gimple_stmt(FILE *, gimple, int, int);
106198 +extern void print_gimple_expr(FILE *, gimple, int, int);
106199 +extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
106200 +#endif
106201 +
106202 +#if BUILDING_GCC_VERSION <= 4007
106203 +#define FOR_EACH_VARIABLE(node) for (node = varpool_nodes; node; node = node->next)
106204 +#define PROP_loops 0
106205 +
106206 +static inline int bb_loop_depth(const_basic_block bb)
106207 +{
106208 + return bb->loop_father ? loop_depth(bb->loop_father) : 0;
106209 +}
106210 +
106211 +static inline bool gimple_store_p(gimple gs)
106212 +{
106213 + tree lhs = gimple_get_lhs(gs);
106214 + return lhs && !is_gimple_reg(lhs);
106215 +}
106216 +#endif
106217 +
106218 +#if BUILDING_GCC_VERSION >= 4007
106219 +#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
106220 + cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
106221 +#endif
106222 +
106223 +#if BUILDING_GCC_VERSION <= 4008
106224 +#define ENTRY_BLOCK_PTR_FOR_FN(FN) ENTRY_BLOCK_PTR_FOR_FUNCTION(FN)
106225 +#define EXIT_BLOCK_PTR_FOR_FN(FN) EXIT_BLOCK_PTR_FOR_FUNCTION(FN)
106226 +#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info)
106227 +#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
106228 +#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
106229 +#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block)
106230 +#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map)
106231 +#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status)
106232 +
106233 +static inline const char *get_tree_code_name(enum tree_code code)
106234 +{
106235 + gcc_assert(code < MAX_TREE_CODES);
106236 + return tree_code_name[code];
106237 +}
106238 +
106239 +#define ipa_remove_stmt_references(cnode, stmt)
106240 +#endif
106241 +
106242 +#if BUILDING_GCC_VERSION == 4008
106243 +#define NODE_DECL(node) node->symbol.decl
106244 +#else
106245 +#define NODE_DECL(node) node->decl
106246 +#endif
106247 +
106248 +#if BUILDING_GCC_VERSION >= 4008
106249 +#define add_referenced_var(var)
106250 +#define mark_sym_for_renaming(var)
106251 +#define varpool_mark_needed_node(node)
106252 +#define TODO_dump_func 0
106253 +#define TODO_dump_cgraph 0
106254 +#endif
106255 +
106256 +#if BUILDING_GCC_VERSION >= 4009
106257 +#define TODO_ggc_collect 0
106258 +#endif
106259 +
106260 +#endif
106261 diff --git a/tools/gcc/gen-random-seed.sh b/tools/gcc/gen-random-seed.sh
106262 new file mode 100644
106263 index 0000000..7514850
106264 --- /dev/null
106265 +++ b/tools/gcc/gen-random-seed.sh
106266 @@ -0,0 +1,8 @@
106267 +#!/bin/sh
106268 +
106269 +if [ ! -f "$1" ]; then
106270 + SEED=`od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n'`
106271 + echo "const char *randstruct_seed = \"$SEED\";" > "$1"
106272 + HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'`
106273 + echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
106274 +fi
106275 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
106276 new file mode 100644
106277 index 0000000..e518932
106278 --- /dev/null
106279 +++ b/tools/gcc/generate_size_overflow_hash.sh
106280 @@ -0,0 +1,94 @@
106281 +#!/bin/bash
106282 +
106283 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
106284 +
106285 +header1="size_overflow_hash.h"
106286 +database="size_overflow_hash.data"
106287 +n=65536
106288 +
106289 +usage() {
106290 +cat <<EOF
106291 +usage: $0 options
106292 +OPTIONS:
106293 + -h|--help help
106294 + -o header file
106295 + -d database file
106296 + -n hash array size
106297 +EOF
106298 + return 0
106299 +}
106300 +
106301 +while true
106302 +do
106303 + case "$1" in
106304 + -h|--help) usage && exit 0;;
106305 + -n) n=$2; shift 2;;
106306 + -o) header1="$2"; shift 2;;
106307 + -d) database="$2"; shift 2;;
106308 + --) shift 1; break ;;
106309 + *) break ;;
106310 + esac
106311 +done
106312 +
106313 +create_defines() {
106314 + for i in `seq 0 31`
106315 + do
106316 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
106317 + done
106318 + echo >> "$header1"
106319 +}
106320 +
106321 +create_structs() {
106322 + rm -f "$header1"
106323 +
106324 + create_defines
106325 +
106326 + cat "$database" | while read data
106327 + do
106328 + data_array=($data)
106329 + struct_hash_name="${data_array[0]}"
106330 + funcn="${data_array[1]}"
106331 + params="${data_array[2]}"
106332 + next="${data_array[4]}"
106333 +
106334 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
106335 +
106336 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
106337 + echo -en "\t.param\t= " >> "$header1"
106338 + line=
106339 + for param_num in ${params//-/ };
106340 + do
106341 + line="${line}PARAM"$param_num"|"
106342 + done
106343 +
106344 + echo -e "${line%?},\n};\n" >> "$header1"
106345 + done
106346 +}
106347 +
106348 +create_headers() {
106349 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
106350 +}
106351 +
106352 +create_array_elements() {
106353 + index=0
106354 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
106355 + do
106356 + data_array=($data)
106357 + i="${data_array[3]}"
106358 + hash="${data_array[0]}"
106359 + while [[ $index -lt $i ]]
106360 + do
106361 + echo -e "\t["$index"]\t= NULL," >> "$header1"
106362 + index=$(($index + 1))
106363 + done
106364 + index=$(($index + 1))
106365 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
106366 + done
106367 + echo '};' >> $header1
106368 +}
106369 +
106370 +create_structs
106371 +create_headers
106372 +create_array_elements
106373 +
106374 +exit 0
106375 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
106376 new file mode 100644
106377 index 0000000..b559327
106378 --- /dev/null
106379 +++ b/tools/gcc/kallocstat_plugin.c
106380 @@ -0,0 +1,182 @@
106381 +/*
106382 + * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
106383 + * Licensed under the GPL v2
106384 + *
106385 + * Note: the choice of the license means that the compilation process is
106386 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
106387 + * but for the kernel it doesn't matter since it doesn't link against
106388 + * any of the gcc libraries
106389 + *
106390 + * gcc plugin to find the distribution of k*alloc sizes
106391 + *
106392 + * TODO:
106393 + *
106394 + * BUGS:
106395 + * - none known
106396 + */
106397 +
106398 +#include "gcc-common.h"
106399 +
106400 +int plugin_is_GPL_compatible;
106401 +
106402 +static struct plugin_info kallocstat_plugin_info = {
106403 + .version = "201401260140",
106404 + .help = NULL
106405 +};
106406 +
106407 +static const char * const kalloc_functions[] = {
106408 + "__kmalloc",
106409 + "kmalloc",
106410 + "kmalloc_large",
106411 + "kmalloc_node",
106412 + "kmalloc_order",
106413 + "kmalloc_order_trace",
106414 + "kmalloc_slab",
106415 + "kzalloc",
106416 + "kzalloc_node",
106417 +};
106418 +
106419 +static bool is_kalloc(const char *fnname)
106420 +{
106421 + size_t i;
106422 +
106423 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
106424 + if (!strcmp(fnname, kalloc_functions[i]))
106425 + return true;
106426 + return false;
106427 +}
106428 +
106429 +static unsigned int execute_kallocstat(void)
106430 +{
106431 + basic_block bb;
106432 +
106433 + // 1. loop through BBs and GIMPLE statements
106434 + FOR_EACH_BB_FN(bb, cfun) {
106435 + gimple_stmt_iterator gsi;
106436 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
106437 + // gimple match:
106438 + tree fndecl, size;
106439 + gimple stmt;
106440 + const char *fnname;
106441 +
106442 + // is it a call
106443 + stmt = gsi_stmt(gsi);
106444 + if (!is_gimple_call(stmt))
106445 + continue;
106446 + fndecl = gimple_call_fndecl(stmt);
106447 + if (fndecl == NULL_TREE)
106448 + continue;
106449 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
106450 + continue;
106451 +
106452 + // is it a call to k*alloc
106453 + fnname = DECL_NAME_POINTER(fndecl);
106454 + if (!is_kalloc(fnname))
106455 + continue;
106456 +
106457 + // is the size arg const or the result of a simple const assignment
106458 + size = gimple_call_arg(stmt, 0);
106459 + while (true) {
106460 + expanded_location xloc;
106461 + size_t size_val;
106462 +
106463 + if (TREE_CONSTANT(size)) {
106464 + xloc = expand_location(gimple_location(stmt));
106465 + if (!xloc.file)
106466 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
106467 + size_val = TREE_INT_CST_LOW(size);
106468 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
106469 + break;
106470 + }
106471 +
106472 + if (TREE_CODE(size) != SSA_NAME)
106473 + break;
106474 + stmt = SSA_NAME_DEF_STMT(size);
106475 +//debug_gimple_stmt(stmt);
106476 +//debug_tree(size);
106477 + if (!stmt || !is_gimple_assign(stmt))
106478 + break;
106479 + if (gimple_num_ops(stmt) != 2)
106480 + break;
106481 + size = gimple_assign_rhs1(stmt);
106482 + }
106483 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
106484 +//debug_tree(gimple_call_fn(call_stmt));
106485 +//print_node(stderr, "pax", fndecl, 4);
106486 + }
106487 + }
106488 +
106489 + return 0;
106490 +}
106491 +
106492 +#if BUILDING_GCC_VERSION >= 4009
106493 +static const struct pass_data kallocstat_pass_data = {
106494 +#else
106495 +static struct gimple_opt_pass kallocstat_pass = {
106496 + .pass = {
106497 +#endif
106498 + .type = GIMPLE_PASS,
106499 + .name = "kallocstat",
106500 +#if BUILDING_GCC_VERSION >= 4008
106501 + .optinfo_flags = OPTGROUP_NONE,
106502 +#endif
106503 +#if BUILDING_GCC_VERSION >= 4009
106504 + .has_gate = false,
106505 + .has_execute = true,
106506 +#else
106507 + .gate = NULL,
106508 + .execute = execute_kallocstat,
106509 + .sub = NULL,
106510 + .next = NULL,
106511 + .static_pass_number = 0,
106512 +#endif
106513 + .tv_id = TV_NONE,
106514 + .properties_required = 0,
106515 + .properties_provided = 0,
106516 + .properties_destroyed = 0,
106517 + .todo_flags_start = 0,
106518 + .todo_flags_finish = 0
106519 +#if BUILDING_GCC_VERSION < 4009
106520 + }
106521 +#endif
106522 +};
106523 +
106524 +#if BUILDING_GCC_VERSION >= 4009
106525 +namespace {
106526 +class kallocstat_pass : public gimple_opt_pass {
106527 +public:
106528 + kallocstat_pass() : gimple_opt_pass(kallocstat_pass_data, g) {}
106529 + unsigned int execute() { return execute_kallocstat(); }
106530 +};
106531 +}
106532 +#endif
106533 +
106534 +static struct opt_pass *make_kallocstat_pass(void)
106535 +{
106536 +#if BUILDING_GCC_VERSION >= 4009
106537 + return new kallocstat_pass();
106538 +#else
106539 + return &kallocstat_pass.pass;
106540 +#endif
106541 +}
106542 +
106543 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
106544 +{
106545 + const char * const plugin_name = plugin_info->base_name;
106546 + struct register_pass_info kallocstat_pass_info;
106547 +
106548 + kallocstat_pass_info.pass = make_kallocstat_pass();
106549 + kallocstat_pass_info.reference_pass_name = "ssa";
106550 + kallocstat_pass_info.ref_pass_instance_number = 1;
106551 + kallocstat_pass_info.pos_op = PASS_POS_INSERT_AFTER;
106552 +
106553 + if (!plugin_default_version_check(version, &gcc_version)) {
106554 + error(G_("incompatible gcc/plugin versions"));
106555 + return 1;
106556 + }
106557 +
106558 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
106559 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
106560 +
106561 + return 0;
106562 +}
106563 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
106564 new file mode 100644
106565 index 0000000..dd73713
106566 --- /dev/null
106567 +++ b/tools/gcc/kernexec_plugin.c
106568 @@ -0,0 +1,519 @@
106569 +/*
106570 + * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
106571 + * Licensed under the GPL v2
106572 + *
106573 + * Note: the choice of the license means that the compilation process is
106574 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
106575 + * but for the kernel it doesn't matter since it doesn't link against
106576 + * any of the gcc libraries
106577 + *
106578 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
106579 + *
106580 + * TODO:
106581 + *
106582 + * BUGS:
106583 + * - none known
106584 + */
106585 +
106586 +#include "gcc-common.h"
106587 +
106588 +int plugin_is_GPL_compatible;
106589 +
106590 +static struct plugin_info kernexec_plugin_info = {
106591 + .version = "201401260140",
106592 + .help = "method=[bts|or]\tinstrumentation method\n"
106593 +};
106594 +
106595 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
106596 +static void (*kernexec_instrument_retaddr)(rtx);
106597 +
106598 +/*
106599 + * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
106600 + */
106601 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
106602 +{
106603 + gimple asm_movabs_stmt;
106604 +
106605 + // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
106606 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
106607 + gimple_asm_set_volatile(asm_movabs_stmt, true);
106608 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
106609 + update_stmt(asm_movabs_stmt);
106610 +}
106611 +
106612 +/*
106613 + * find all asm() stmts that clobber r12 and add a reload of r12
106614 + */
106615 +static unsigned int execute_kernexec_reload(void)
106616 +{
106617 + basic_block bb;
106618 +
106619 + // 1. loop through BBs and GIMPLE statements
106620 + FOR_EACH_BB_FN(bb, cfun) {
106621 + gimple_stmt_iterator gsi;
106622 +
106623 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
106624 + // gimple match: __asm__ ("" : : : "r12");
106625 + gimple asm_stmt;
106626 + size_t nclobbers;
106627 +
106628 + // is it an asm ...
106629 + asm_stmt = gsi_stmt(gsi);
106630 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
106631 + continue;
106632 +
106633 + // ... clobbering r12
106634 + nclobbers = gimple_asm_nclobbers(asm_stmt);
106635 + while (nclobbers--) {
106636 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
106637 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
106638 + continue;
106639 + kernexec_reload_fptr_mask(&gsi);
106640 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
106641 + break;
106642 + }
106643 + }
106644 + }
106645 +
106646 + return 0;
106647 +}
106648 +
106649 +/*
106650 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
106651 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
106652 + */
106653 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
106654 +{
106655 + gimple assign_intptr, assign_new_fptr, call_stmt;
106656 + tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
106657 +
106658 + call_stmt = gsi_stmt(*gsi);
106659 + old_fptr = gimple_call_fn(call_stmt);
106660 +
106661 + // create temporary unsigned long variable used for bitops and cast fptr to it
106662 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
106663 + add_referenced_var(intptr);
106664 + intptr = make_ssa_name(intptr, NULL);
106665 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
106666 + SSA_NAME_DEF_STMT(intptr) = assign_intptr;
106667 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
106668 + update_stmt(assign_intptr);
106669 +
106670 + // apply logical or to temporary unsigned long and bitmask
106671 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
106672 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
106673 + orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
106674 + intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
106675 + assign_intptr = gimple_build_assign(intptr, orptr);
106676 + SSA_NAME_DEF_STMT(intptr) = assign_intptr;
106677 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
106678 + update_stmt(assign_intptr);
106679 +
106680 + // cast temporary unsigned long back to a temporary fptr variable
106681 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
106682 + add_referenced_var(new_fptr);
106683 + new_fptr = make_ssa_name(new_fptr, NULL);
106684 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
106685 + SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
106686 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
106687 + update_stmt(assign_new_fptr);
106688 +
106689 + // replace call stmt fn with the new fptr
106690 + gimple_call_set_fn(call_stmt, new_fptr);
106691 + update_stmt(call_stmt);
106692 +}
106693 +
106694 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
106695 +{
106696 + gimple asm_or_stmt, call_stmt;
106697 + tree old_fptr, new_fptr, input, output;
106698 +#if BUILDING_GCC_VERSION <= 4007
106699 + VEC(tree, gc) *inputs = NULL;
106700 + VEC(tree, gc) *outputs = NULL;
106701 +#else
106702 + vec<tree, va_gc> *inputs = NULL;
106703 + vec<tree, va_gc> *outputs = NULL;
106704 +#endif
106705 +
106706 + call_stmt = gsi_stmt(*gsi);
106707 + old_fptr = gimple_call_fn(call_stmt);
106708 +
106709 + // create temporary fptr variable
106710 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
106711 + add_referenced_var(new_fptr);
106712 + new_fptr = make_ssa_name(new_fptr, NULL);
106713 +
106714 + // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
106715 + input = build_tree_list(NULL_TREE, build_string(1, "0"));
106716 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
106717 + output = build_tree_list(NULL_TREE, build_string(2, "=r"));
106718 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
106719 +#if BUILDING_GCC_VERSION <= 4007
106720 + VEC_safe_push(tree, gc, inputs, input);
106721 + VEC_safe_push(tree, gc, outputs, output);
106722 +#else
106723 + vec_safe_push(inputs, input);
106724 + vec_safe_push(outputs, output);
106725 +#endif
106726 + asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
106727 + SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
106728 + gimple_asm_set_volatile(asm_or_stmt, true);
106729 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
106730 + update_stmt(asm_or_stmt);
106731 +
106732 + // replace call stmt fn with the new fptr
106733 + gimple_call_set_fn(call_stmt, new_fptr);
106734 + update_stmt(call_stmt);
106735 +}
106736 +
106737 +/*
106738 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
106739 + */
106740 +static unsigned int execute_kernexec_fptr(void)
106741 +{
106742 + basic_block bb;
106743 +
106744 + // 1. loop through BBs and GIMPLE statements
106745 + FOR_EACH_BB_FN(bb, cfun) {
106746 + gimple_stmt_iterator gsi;
106747 +
106748 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
106749 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
106750 + tree fn;
106751 + gimple call_stmt;
106752 +
106753 + // is it a call ...
106754 + call_stmt = gsi_stmt(gsi);
106755 + if (!is_gimple_call(call_stmt))
106756 + continue;
106757 + fn = gimple_call_fn(call_stmt);
106758 + if (TREE_CODE(fn) == ADDR_EXPR)
106759 + continue;
106760 + if (TREE_CODE(fn) != SSA_NAME)
106761 + gcc_unreachable();
106762 +
106763 + // ... through a function pointer
106764 + if (SSA_NAME_VAR(fn) != NULL_TREE) {
106765 + fn = SSA_NAME_VAR(fn);
106766 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
106767 + debug_tree(fn);
106768 + gcc_unreachable();
106769 + }
106770 + }
106771 + fn = TREE_TYPE(fn);
106772 + if (TREE_CODE(fn) != POINTER_TYPE)
106773 + continue;
106774 + fn = TREE_TYPE(fn);
106775 + if (TREE_CODE(fn) != FUNCTION_TYPE)
106776 + continue;
106777 +
106778 + kernexec_instrument_fptr(&gsi);
106779 +
106780 +//debug_tree(gimple_call_fn(call_stmt));
106781 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
106782 + }
106783 + }
106784 +
106785 + return 0;
106786 +}
106787 +
106788 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
106789 +static void kernexec_instrument_retaddr_bts(rtx insn)
106790 +{
106791 + rtx btsq;
106792 + rtvec argvec, constraintvec, labelvec;
106793 + int line;
106794 +
106795 + // create asm volatile("btsq $63,(%%rsp)":::)
106796 + argvec = rtvec_alloc(0);
106797 + constraintvec = rtvec_alloc(0);
106798 + labelvec = rtvec_alloc(0);
106799 + line = expand_location(RTL_LOCATION(insn)).line;
106800 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
106801 + MEM_VOLATILE_P(btsq) = 1;
106802 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
106803 + emit_insn_before(btsq, insn);
106804 +}
106805 +
106806 +// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
106807 +static void kernexec_instrument_retaddr_or(rtx insn)
106808 +{
106809 + rtx orq;
106810 + rtvec argvec, constraintvec, labelvec;
106811 + int line;
106812 +
106813 + // create asm volatile("orq %%r12,(%%rsp)":::)
106814 + argvec = rtvec_alloc(0);
106815 + constraintvec = rtvec_alloc(0);
106816 + labelvec = rtvec_alloc(0);
106817 + line = expand_location(RTL_LOCATION(insn)).line;
106818 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
106819 + MEM_VOLATILE_P(orq) = 1;
106820 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
106821 + emit_insn_before(orq, insn);
106822 +}
106823 +
106824 +/*
106825 + * find all asm level function returns and forcibly set the highest bit of the return address
106826 + */
106827 +static unsigned int execute_kernexec_retaddr(void)
106828 +{
106829 + rtx insn;
106830 +
106831 +// if (stack_realign_drap)
106832 +// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
106833 +
106834 + // 1. find function returns
106835 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
106836 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
106837 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
106838 + // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
106839 + rtx body;
106840 +
106841 + // is it a retn
106842 + if (!JUMP_P(insn))
106843 + continue;
106844 + body = PATTERN(insn);
106845 + if (GET_CODE(body) == PARALLEL)
106846 + body = XVECEXP(body, 0, 0);
106847 + if (!ANY_RETURN_P(body))
106848 + continue;
106849 + kernexec_instrument_retaddr(insn);
106850 + }
106851 +
106852 +// print_simple_rtl(stderr, get_insns());
106853 +// print_rtl(stderr, get_insns());
106854 +
106855 + return 0;
106856 +}
106857 +
106858 +static bool kernexec_cmodel_check(void)
106859 +{
106860 + tree section;
106861 +
106862 + if (ix86_cmodel != CM_KERNEL)
106863 + return false;
106864 +
106865 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
106866 + if (!section || !TREE_VALUE(section))
106867 + return true;
106868 +
106869 + section = TREE_VALUE(TREE_VALUE(section));
106870 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
106871 + return true;
106872 +
106873 + return false;
106874 +}
106875 +
106876 +#if BUILDING_GCC_VERSION >= 4009
106877 +static const struct pass_data kernexec_reload_pass_data = {
106878 +#else
106879 +static struct gimple_opt_pass kernexec_reload_pass = {
106880 + .pass = {
106881 +#endif
106882 + .type = GIMPLE_PASS,
106883 + .name = "kernexec_reload",
106884 +#if BUILDING_GCC_VERSION >= 4008
106885 + .optinfo_flags = OPTGROUP_NONE,
106886 +#endif
106887 +#if BUILDING_GCC_VERSION >= 4009
106888 + .has_gate = true,
106889 + .has_execute = true,
106890 +#else
106891 + .gate = kernexec_cmodel_check,
106892 + .execute = execute_kernexec_reload,
106893 + .sub = NULL,
106894 + .next = NULL,
106895 + .static_pass_number = 0,
106896 +#endif
106897 + .tv_id = TV_NONE,
106898 + .properties_required = 0,
106899 + .properties_provided = 0,
106900 + .properties_destroyed = 0,
106901 + .todo_flags_start = 0,
106902 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
106903 +#if BUILDING_GCC_VERSION < 4009
106904 + }
106905 +#endif
106906 +};
106907 +
106908 +#if BUILDING_GCC_VERSION >= 4009
106909 +static const struct pass_data kernexec_fptr_pass_data = {
106910 +#else
106911 +static struct gimple_opt_pass kernexec_fptr_pass = {
106912 + .pass = {
106913 +#endif
106914 + .type = GIMPLE_PASS,
106915 + .name = "kernexec_fptr",
106916 +#if BUILDING_GCC_VERSION >= 4008
106917 + .optinfo_flags = OPTGROUP_NONE,
106918 +#endif
106919 +#if BUILDING_GCC_VERSION >= 4009
106920 + .has_gate = true,
106921 + .has_execute = true,
106922 +#else
106923 + .gate = kernexec_cmodel_check,
106924 + .execute = execute_kernexec_fptr,
106925 + .sub = NULL,
106926 + .next = NULL,
106927 + .static_pass_number = 0,
106928 +#endif
106929 + .tv_id = TV_NONE,
106930 + .properties_required = 0,
106931 + .properties_provided = 0,
106932 + .properties_destroyed = 0,
106933 + .todo_flags_start = 0,
106934 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
106935 +#if BUILDING_GCC_VERSION < 4009
106936 + }
106937 +#endif
106938 +};
106939 +
106940 +#if BUILDING_GCC_VERSION >= 4009
106941 +static const struct pass_data kernexec_retaddr_pass_data = {
106942 +#else
106943 +static struct rtl_opt_pass kernexec_retaddr_pass = {
106944 + .pass = {
106945 +#endif
106946 + .type = RTL_PASS,
106947 + .name = "kernexec_retaddr",
106948 +#if BUILDING_GCC_VERSION >= 4008
106949 + .optinfo_flags = OPTGROUP_NONE,
106950 +#endif
106951 +#if BUILDING_GCC_VERSION >= 4009
106952 + .has_gate = true,
106953 + .has_execute = true,
106954 +#else
106955 + .gate = kernexec_cmodel_check,
106956 + .execute = execute_kernexec_retaddr,
106957 + .sub = NULL,
106958 + .next = NULL,
106959 + .static_pass_number = 0,
106960 +#endif
106961 + .tv_id = TV_NONE,
106962 + .properties_required = 0,
106963 + .properties_provided = 0,
106964 + .properties_destroyed = 0,
106965 + .todo_flags_start = 0,
106966 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
106967 +#if BUILDING_GCC_VERSION < 4009
106968 + }
106969 +#endif
106970 +};
106971 +
106972 +#if BUILDING_GCC_VERSION >= 4009
106973 +namespace {
106974 +class kernexec_reload_pass : public gimple_opt_pass {
106975 +public:
106976 + kernexec_reload_pass() : gimple_opt_pass(kernexec_reload_pass_data, g) {}
106977 + bool gate() { return kernexec_cmodel_check(); }
106978 + unsigned int execute() { return execute_kernexec_reload(); }
106979 +};
106980 +
106981 +class kernexec_fptr_pass : public gimple_opt_pass {
106982 +public:
106983 + kernexec_fptr_pass() : gimple_opt_pass(kernexec_fptr_pass_data, g) {}
106984 + bool gate() { return kernexec_cmodel_check(); }
106985 + unsigned int execute() { return execute_kernexec_fptr(); }
106986 +};
106987 +
106988 +class kernexec_retaddr_pass : public rtl_opt_pass {
106989 +public:
106990 + kernexec_retaddr_pass() : rtl_opt_pass(kernexec_retaddr_pass_data, g) {}
106991 + bool gate() { return kernexec_cmodel_check(); }
106992 + unsigned int execute() { return execute_kernexec_retaddr(); }
106993 +};
106994 +}
106995 +#endif
106996 +
106997 +static struct opt_pass *make_kernexec_reload_pass(void)
106998 +{
106999 +#if BUILDING_GCC_VERSION >= 4009
107000 + return new kernexec_reload_pass();
107001 +#else
107002 + return &kernexec_reload_pass.pass;
107003 +#endif
107004 +}
107005 +
107006 +static struct opt_pass *make_kernexec_fptr_pass(void)
107007 +{
107008 +#if BUILDING_GCC_VERSION >= 4009
107009 + return new kernexec_fptr_pass();
107010 +#else
107011 + return &kernexec_fptr_pass.pass;
107012 +#endif
107013 +}
107014 +
107015 +static struct opt_pass *make_kernexec_retaddr_pass(void)
107016 +{
107017 +#if BUILDING_GCC_VERSION >= 4009
107018 + return new kernexec_retaddr_pass();
107019 +#else
107020 + return &kernexec_retaddr_pass.pass;
107021 +#endif
107022 +}
107023 +
107024 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
107025 +{
107026 + const char * const plugin_name = plugin_info->base_name;
107027 + const int argc = plugin_info->argc;
107028 + const struct plugin_argument * const argv = plugin_info->argv;
107029 + int i;
107030 + struct register_pass_info kernexec_reload_pass_info;
107031 + struct register_pass_info kernexec_fptr_pass_info;
107032 + struct register_pass_info kernexec_retaddr_pass_info;
107033 +
107034 + kernexec_reload_pass_info.pass = make_kernexec_reload_pass();
107035 + kernexec_reload_pass_info.reference_pass_name = "ssa";
107036 + kernexec_reload_pass_info.ref_pass_instance_number = 1;
107037 + kernexec_reload_pass_info.pos_op = PASS_POS_INSERT_AFTER;
107038 +
107039 + kernexec_fptr_pass_info.pass = make_kernexec_fptr_pass();
107040 + kernexec_fptr_pass_info.reference_pass_name = "ssa";
107041 + kernexec_fptr_pass_info.ref_pass_instance_number = 1;
107042 + kernexec_fptr_pass_info.pos_op = PASS_POS_INSERT_AFTER;
107043 +
107044 + kernexec_retaddr_pass_info.pass = make_kernexec_retaddr_pass();
107045 + kernexec_retaddr_pass_info.reference_pass_name = "pro_and_epilogue";
107046 + kernexec_retaddr_pass_info.ref_pass_instance_number = 1;
107047 + kernexec_retaddr_pass_info.pos_op = PASS_POS_INSERT_AFTER;
107048 +
107049 + if (!plugin_default_version_check(version, &gcc_version)) {
107050 + error(G_("incompatible gcc/plugin versions"));
107051 + return 1;
107052 + }
107053 +
107054 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
107055 +
107056 + if (TARGET_64BIT == 0)
107057 + return 0;
107058 +
107059 + for (i = 0; i < argc; ++i) {
107060 + if (!strcmp(argv[i].key, "method")) {
107061 + if (!argv[i].value) {
107062 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
107063 + continue;
107064 + }
107065 + if (!strcmp(argv[i].value, "bts")) {
107066 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
107067 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
107068 + } else if (!strcmp(argv[i].value, "or")) {
107069 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
107070 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
107071 + fix_register("r12", 1, 1);
107072 + } else
107073 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
107074 + continue;
107075 + }
107076 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
107077 + }
107078 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
107079 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
107080 +
107081 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
107082 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
107083 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
107084 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
107085 +
107086 + return 0;
107087 +}
107088 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
107089 new file mode 100644
107090 index 0000000..c96f80f
107091 --- /dev/null
107092 +++ b/tools/gcc/latent_entropy_plugin.c
107093 @@ -0,0 +1,457 @@
107094 +/*
107095 + * Copyright 2012-2014 by the PaX Team <pageexec@freemail.hu>
107096 + * Licensed under the GPL v2
107097 + *
107098 + * Note: the choice of the license means that the compilation process is
107099 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
107100 + * but for the kernel it doesn't matter since it doesn't link against
107101 + * any of the gcc libraries
107102 + *
107103 + * gcc plugin to help generate a little bit of entropy from program state,
107104 + * used throughout the uptime of the kernel
107105 + *
107106 + * TODO:
107107 + * - add ipa pass to identify not explicitly marked candidate functions
107108 + * - mix in more program state (function arguments/return values, loop variables, etc)
107109 + * - more instrumentation control via attribute parameters
107110 + *
107111 + * BUGS:
107112 + * - LTO needs -flto-partition=none for now
107113 + */
107114 +
107115 +#include "gcc-common.h"
107116 +
107117 +int plugin_is_GPL_compatible;
107118 +
107119 +static tree latent_entropy_decl;
107120 +
107121 +static struct plugin_info latent_entropy_plugin_info = {
107122 + .version = "201403042150",
107123 + .help = NULL
107124 +};
107125 +
107126 +static unsigned HOST_WIDE_INT seed;
107127 +static unsigned HOST_WIDE_INT get_random_const(void)
107128 +{
107129 + unsigned int i;
107130 + unsigned HOST_WIDE_INT ret = 0;
107131 +
107132 + for (i = 0; i < 8 * sizeof ret; i++) {
107133 + ret = (ret << 1) | (seed & 1);
107134 + seed >>= 1;
107135 + if (ret & 1)
107136 + seed ^= 0xD800000000000000ULL;
107137 + }
107138 +
107139 + return ret;
107140 +}
107141 +
107142 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
107143 +{
107144 + tree type;
107145 + unsigned long long mask;
107146 +#if BUILDING_GCC_VERSION <= 4007
107147 + VEC(constructor_elt, gc) *vals;
107148 +#else
107149 + vec<constructor_elt, va_gc> *vals;
107150 +#endif
107151 +
107152 + switch (TREE_CODE(*node)) {
107153 + default:
107154 + *no_add_attrs = true;
107155 + error("%qE attribute only applies to functions and variables", name);
107156 + break;
107157 +
107158 + case VAR_DECL:
107159 + if (DECL_INITIAL(*node)) {
107160 + *no_add_attrs = true;
107161 + error("variable %qD with %qE attribute must not be initialized", *node, name);
107162 + break;
107163 + }
107164 +
107165 + if (!TREE_STATIC(*node)) {
107166 + *no_add_attrs = true;
107167 + error("variable %qD with %qE attribute must not be local", *node, name);
107168 + break;
107169 + }
107170 +
107171 + type = TREE_TYPE(*node);
107172 + switch (TREE_CODE(type)) {
107173 + default:
107174 + *no_add_attrs = true;
107175 + error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields", *node, name);
107176 + break;
107177 +
107178 + case RECORD_TYPE: {
107179 + tree field;
107180 + unsigned int nelt = 0;
107181 +
107182 + for (field = TYPE_FIELDS(type); field; nelt++, field = TREE_CHAIN(field)) {
107183 + tree fieldtype;
107184 +
107185 + fieldtype = TREE_TYPE(field);
107186 + if (TREE_CODE(fieldtype) != INTEGER_TYPE) {
107187 + *no_add_attrs = true;
107188 + error("structure variable %qD with %qE attribute has a non-integer field %qE", *node, name, field);
107189 + break;
107190 + }
107191 + }
107192 +
107193 + if (field)
107194 + break;
107195 +
107196 +#if BUILDING_GCC_VERSION <= 4007
107197 + vals = VEC_alloc(constructor_elt, gc, nelt);
107198 +#else
107199 + vec_alloc(vals, nelt);
107200 +#endif
107201 +
107202 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
107203 + tree fieldtype;
107204 +
107205 + fieldtype = TREE_TYPE(field);
107206 + mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(fieldtype)) - 1);
107207 + mask = 2 * (mask - 1) + 1;
107208 +
107209 + if (TYPE_UNSIGNED(fieldtype))
107210 + CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cstu(fieldtype, mask & get_random_const()));
107211 + else
107212 + CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cst(fieldtype, mask & get_random_const()));
107213 + }
107214 +
107215 + DECL_INITIAL(*node) = build_constructor(type, vals);
107216 +//debug_tree(DECL_INITIAL(*node));
107217 + break;
107218 + }
107219 +
107220 + case INTEGER_TYPE:
107221 + mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
107222 + mask = 2 * (mask - 1) + 1;
107223 +
107224 + if (TYPE_UNSIGNED(type))
107225 + DECL_INITIAL(*node) = build_int_cstu(type, mask & get_random_const());
107226 + else
107227 + DECL_INITIAL(*node) = build_int_cst(type, mask & get_random_const());
107228 + break;
107229 +
107230 + case ARRAY_TYPE: {
107231 + tree elt_type, array_size, elt_size;
107232 + unsigned int i, nelt;
107233 +
107234 + elt_type = TREE_TYPE(type);
107235 + elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type));
107236 + array_size = TYPE_SIZE_UNIT(type);
107237 +
107238 + if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size || TREE_CODE(array_size) != INTEGER_CST) {
107239 + *no_add_attrs = true;
107240 + error("array variable %qD with %qE attribute must be a fixed length integer array type", *node, name);
107241 + break;
107242 + }
107243 +
107244 + nelt = TREE_INT_CST_LOW(array_size) / TREE_INT_CST_LOW(elt_size);
107245 +#if BUILDING_GCC_VERSION <= 4007
107246 + vals = VEC_alloc(constructor_elt, gc, nelt);
107247 +#else
107248 + vec_alloc(vals, nelt);
107249 +#endif
107250 +
107251 + mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(elt_type)) - 1);
107252 + mask = 2 * (mask - 1) + 1;
107253 +
107254 + for (i = 0; i < nelt; i++)
107255 + if (TYPE_UNSIGNED(elt_type))
107256 + CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cstu(elt_type, mask & get_random_const()));
107257 + else
107258 + CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cst(elt_type, mask & get_random_const()));
107259 +
107260 + DECL_INITIAL(*node) = build_constructor(type, vals);
107261 +//debug_tree(DECL_INITIAL(*node));
107262 + break;
107263 + }
107264 + }
107265 + break;
107266 +
107267 + case FUNCTION_DECL:
107268 + break;
107269 + }
107270 +
107271 + return NULL_TREE;
107272 +}
107273 +
107274 +static struct attribute_spec latent_entropy_attr = {
107275 + .name = "latent_entropy",
107276 + .min_length = 0,
107277 + .max_length = 0,
107278 + .decl_required = true,
107279 + .type_required = false,
107280 + .function_type_required = false,
107281 + .handler = handle_latent_entropy_attribute,
107282 +#if BUILDING_GCC_VERSION >= 4007
107283 + .affects_type_identity = false
107284 +#endif
107285 +};
107286 +
107287 +static void register_attributes(void *event_data, void *data)
107288 +{
107289 + register_attribute(&latent_entropy_attr);
107290 +}
107291 +
107292 +static bool gate_latent_entropy(void)
107293 +{
107294 + // don't bother with noreturn functions for now
107295 + if (TREE_THIS_VOLATILE(current_function_decl))
107296 + return false;
107297 +
107298 + return lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)) != NULL_TREE;
107299 +}
107300 +
107301 +static enum tree_code get_op(tree *rhs)
107302 +{
107303 + static enum tree_code op;
107304 + unsigned HOST_WIDE_INT random_const;
107305 +
107306 + random_const = get_random_const();
107307 +
107308 + switch (op) {
107309 + case BIT_XOR_EXPR:
107310 + op = PLUS_EXPR;
107311 + break;
107312 +
107313 + case PLUS_EXPR:
107314 + if (rhs) {
107315 + op = LROTATE_EXPR;
107316 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
107317 + break;
107318 + }
107319 +
107320 + case LROTATE_EXPR:
107321 + default:
107322 + op = BIT_XOR_EXPR;
107323 + break;
107324 + }
107325 + if (rhs)
107326 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
107327 + return op;
107328 +}
107329 +
107330 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
107331 +{
107332 + gimple_stmt_iterator gsi;
107333 + gimple assign;
107334 + tree addxorrol, rhs;
107335 + enum tree_code op;
107336 +
107337 + op = get_op(&rhs);
107338 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
107339 + assign = gimple_build_assign(local_entropy, addxorrol);
107340 + gsi = gsi_after_labels(bb);
107341 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
107342 + update_stmt(assign);
107343 +//debug_bb(bb);
107344 +}
107345 +
107346 +static void perturb_latent_entropy(basic_block bb, tree rhs)
107347 +{
107348 + gimple_stmt_iterator gsi;
107349 + gimple assign;
107350 + tree addxorrol, temp;
107351 +
107352 + // 1. create temporary copy of latent_entropy
107353 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
107354 + add_referenced_var(temp);
107355 +
107356 + // 2. read...
107357 + temp = make_ssa_name(temp, NULL);
107358 + assign = gimple_build_assign(temp, latent_entropy_decl);
107359 + SSA_NAME_DEF_STMT(temp) = assign;
107360 + add_referenced_var(latent_entropy_decl);
107361 + gsi = gsi_after_labels(bb);
107362 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
107363 + update_stmt(assign);
107364 +
107365 + // 3. ...modify...
107366 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
107367 + temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
107368 + assign = gimple_build_assign(temp, addxorrol);
107369 + SSA_NAME_DEF_STMT(temp) = assign;
107370 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
107371 + update_stmt(assign);
107372 +
107373 + // 4. ...write latent_entropy
107374 + assign = gimple_build_assign(latent_entropy_decl, temp);
107375 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
107376 + update_stmt(assign);
107377 +}
107378 +
107379 +static unsigned int execute_latent_entropy(void)
107380 +{
107381 + basic_block bb;
107382 + gimple assign;
107383 + gimple_stmt_iterator gsi;
107384 + tree local_entropy;
107385 +
107386 + if (!latent_entropy_decl) {
107387 + struct varpool_node *node;
107388 +
107389 + FOR_EACH_VARIABLE(node) {
107390 + tree var = NODE_DECL(node);
107391 +
107392 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
107393 + continue;
107394 + latent_entropy_decl = var;
107395 +// debug_tree(var);
107396 + break;
107397 + }
107398 + if (!latent_entropy_decl) {
107399 +// debug_tree(current_function_decl);
107400 + return 0;
107401 + }
107402 + }
107403 +
107404 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
107405 +
107406 + // 1. create local entropy variable
107407 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
107408 + add_referenced_var(local_entropy);
107409 + mark_sym_for_renaming(local_entropy);
107410 +
107411 + // 2. initialize local entropy variable
107412 + bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
107413 + if (dom_info_available_p(CDI_DOMINATORS))
107414 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
107415 + gsi = gsi_start_bb(bb);
107416 +
107417 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
107418 +// gimple_set_location(assign, loc);
107419 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
107420 + update_stmt(assign);
107421 +//debug_bb(bb);
107422 + gcc_assert(single_succ_p(bb));
107423 + bb = single_succ(bb);
107424 +
107425 + // 3. instrument each BB with an operation on the local entropy variable
107426 + while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) {
107427 + perturb_local_entropy(bb, local_entropy);
107428 +//debug_bb(bb);
107429 + bb = bb->next_bb;
107430 + };
107431 +
107432 + // 4. mix local entropy into the global entropy variable
107433 + gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
107434 + perturb_latent_entropy(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)), local_entropy);
107435 +//debug_bb(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)));
107436 + return 0;
107437 +}
107438 +
107439 +static void latent_entropy_start_unit(void *gcc_data, void *user_data)
107440 +{
107441 + tree latent_entropy_type;
107442 +
107443 + seed = get_random_seed(false);
107444 +
107445 + if (in_lto_p)
107446 + return;
107447 +
107448 + // extern volatile u64 latent_entropy
107449 + gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
107450 + latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
107451 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
107452 +
107453 + TREE_STATIC(latent_entropy_decl) = 1;
107454 + TREE_PUBLIC(latent_entropy_decl) = 1;
107455 + TREE_USED(latent_entropy_decl) = 1;
107456 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
107457 + DECL_EXTERNAL(latent_entropy_decl) = 1;
107458 + DECL_ARTIFICIAL(latent_entropy_decl) = 1;
107459 + lang_hooks.decls.pushdecl(latent_entropy_decl);
107460 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
107461 +// varpool_finalize_decl(latent_entropy_decl);
107462 +// varpool_mark_needed_node(latent_entropy_decl);
107463 +}
107464 +
107465 +#if BUILDING_GCC_VERSION >= 4009
107466 +static const struct pass_data latent_entropy_pass_data = {
107467 +#else
107468 +static struct gimple_opt_pass latent_entropy_pass = {
107469 + .pass = {
107470 +#endif
107471 + .type = GIMPLE_PASS,
107472 + .name = "latent_entropy",
107473 +#if BUILDING_GCC_VERSION >= 4008
107474 + .optinfo_flags = OPTGROUP_NONE,
107475 +#endif
107476 +#if BUILDING_GCC_VERSION >= 4009
107477 + .has_gate = true,
107478 + .has_execute = true,
107479 +#else
107480 + .gate = gate_latent_entropy,
107481 + .execute = execute_latent_entropy,
107482 + .sub = NULL,
107483 + .next = NULL,
107484 + .static_pass_number = 0,
107485 +#endif
107486 + .tv_id = TV_NONE,
107487 + .properties_required = PROP_gimple_leh | PROP_cfg,
107488 + .properties_provided = 0,
107489 + .properties_destroyed = 0,
107490 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
107491 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
107492 +#if BUILDING_GCC_VERSION < 4009
107493 + }
107494 +#endif
107495 +};
107496 +
107497 +#if BUILDING_GCC_VERSION >= 4009
107498 +namespace {
107499 +class latent_entropy_pass : public gimple_opt_pass {
107500 +public:
107501 + latent_entropy_pass() : gimple_opt_pass(latent_entropy_pass_data, g) {}
107502 + bool gate() { return gate_latent_entropy(); }
107503 + unsigned int execute() { return execute_latent_entropy(); }
107504 +};
107505 +}
107506 +#endif
107507 +
107508 +static struct opt_pass *make_latent_entropy_pass(void)
107509 +{
107510 +#if BUILDING_GCC_VERSION >= 4009
107511 + return new latent_entropy_pass();
107512 +#else
107513 + return &latent_entropy_pass.pass;
107514 +#endif
107515 +}
107516 +
107517 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
107518 +{
107519 + const char * const plugin_name = plugin_info->base_name;
107520 + struct register_pass_info latent_entropy_pass_info;
107521 +
107522 + latent_entropy_pass_info.pass = make_latent_entropy_pass();
107523 + latent_entropy_pass_info.reference_pass_name = "optimized";
107524 + latent_entropy_pass_info.ref_pass_instance_number = 1;
107525 + latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
107526 + static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
107527 + {
107528 + .base = &latent_entropy_decl,
107529 + .nelt = 1,
107530 + .stride = sizeof(latent_entropy_decl),
107531 + .cb = &gt_ggc_mx_tree_node,
107532 + .pchw = &gt_pch_nx_tree_node
107533 + },
107534 + LAST_GGC_ROOT_TAB
107535 + };
107536 +
107537 + if (!plugin_default_version_check(version, &gcc_version)) {
107538 + error(G_("incompatible gcc/plugin versions"));
107539 + return 1;
107540 + }
107541 +
107542 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
107543 + register_callback(plugin_name, PLUGIN_START_UNIT, &latent_entropy_start_unit, NULL);
107544 + if (!in_lto_p)
107545 + register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_latent_entropy);
107546 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
107547 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
107548 +
107549 + return 0;
107550 +}
107551 diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
107552 new file mode 100644
107553 index 0000000..8dafb22
107554 --- /dev/null
107555 +++ b/tools/gcc/randomize_layout_plugin.c
107556 @@ -0,0 +1,910 @@
107557 +/*
107558 + * Copyright 2014 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
107559 + * and PaX Team <pageexec@freemail.hu>
107560 + * Licensed under the GPL v2
107561 + *
107562 + * Usage:
107563 + * $ # for 4.5/4.6/C based 4.7
107564 + * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c
107565 + * $ # for C++ based 4.7/4.8+
107566 + * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c
107567 + * $ gcc -fplugin=./randomize_layout_plugin.so test.c -O2
107568 + */
107569 +
107570 +#include "gcc-common.h"
107571 +#include "randomize_layout_seed.h"
107572 +
107573 +#if BUILDING_GCC_MAJOR < 4 || BUILDING_GCC_MINOR < 6 || (BUILDING_GCC_MINOR == 6 && BUILDING_GCC_PATCHLEVEL < 4)
107574 +#error "The RANDSTRUCT plugin requires GCC 4.6.4 or newer."
107575 +#endif
107576 +
107577 +#define ORIG_TYPE_NAME(node) \
107578 + (TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
107579 +
107580 +int plugin_is_GPL_compatible;
107581 +
107582 +static int performance_mode;
107583 +
107584 +static struct plugin_info randomize_layout_plugin_info = {
107585 + .version = "201402201816",
107586 + .help = "disable\t\t\tdo not activate plugin\n"
107587 + "performance-mode\tenable cacheline-aware layout randomization\n"
107588 +};
107589 +
107590 +/* from old Linux dcache.h */
107591 +static inline unsigned long
107592 +partial_name_hash(unsigned long c, unsigned long prevhash)
107593 +{
107594 + return (prevhash + (c << 4) + (c >> 4)) * 11;
107595 +}
107596 +static inline unsigned int
107597 +name_hash(const unsigned char *name)
107598 +{
107599 + unsigned long hash = 0;
107600 + unsigned int len = strlen((const char *)name);
107601 + while (len--)
107602 + hash = partial_name_hash(*name++, hash);
107603 + return (unsigned int)hash;
107604 +}
107605 +
107606 +static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
107607 +{
107608 + tree type;
107609 +
107610 + *no_add_attrs = true;
107611 + if (TREE_CODE(*node) == FUNCTION_DECL) {
107612 + error("%qE attribute does not apply to functions (%qF)", name, *node);
107613 + return NULL_TREE;
107614 + }
107615 +
107616 + if (TREE_CODE(*node) == PARM_DECL) {
107617 + error("%qE attribute does not apply to function parameters (%qD)", name, *node);
107618 + return NULL_TREE;
107619 + }
107620 +
107621 + if (TREE_CODE(*node) == VAR_DECL) {
107622 + error("%qE attribute does not apply to variables (%qD)", name, *node);
107623 + return NULL_TREE;
107624 + }
107625 +
107626 + if (TYPE_P(*node)) {
107627 + type = *node;
107628 + } else {
107629 + gcc_assert(TREE_CODE(*node) == TYPE_DECL);
107630 + type = TREE_TYPE(*node);
107631 + }
107632 +
107633 + if (TREE_CODE(type) != RECORD_TYPE) {
107634 + error("%qE attribute used on %qT applies to struct types only", name, type);
107635 + return NULL_TREE;
107636 + }
107637 +
107638 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
107639 + error("%qE attribute is already applied to the type %qT", name, type);
107640 + return NULL_TREE;
107641 + }
107642 +
107643 + *no_add_attrs = false;
107644 +
107645 + return NULL_TREE;
107646 +}
107647 +
107648 +/* set on complete types that we don't need to inspect further at all */
107649 +static tree handle_randomize_considered_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
107650 +{
107651 + *no_add_attrs = false;
107652 + return NULL_TREE;
107653 +}
107654 +
107655 +/*
107656 + * set on types that we've performed a shuffle on, to prevent re-shuffling
107657 + * this does not preclude us from inspecting its fields for potential shuffles
107658 + */
107659 +static tree handle_randomize_performed_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
107660 +{
107661 + *no_add_attrs = false;
107662 + return NULL_TREE;
107663 +}
107664 +
107665 +/*
107666 + * 64bit variant of Bob Jenkins' public domain PRNG
107667 + * 256 bits of internal state
107668 + */
107669 +
107670 +typedef unsigned long long u64;
107671 +
107672 +typedef struct ranctx { u64 a; u64 b; u64 c; u64 d; } ranctx;
107673 +
107674 +#define rot(x,k) (((x)<<(k))|((x)>>(64-(k))))
107675 +static u64 ranval(ranctx *x) {
107676 + u64 e = x->a - rot(x->b, 7);
107677 + x->a = x->b ^ rot(x->c, 13);
107678 + x->b = x->c + rot(x->d, 37);
107679 + x->c = x->d + e;
107680 + x->d = e + x->a;
107681 + return x->d;
107682 +}
107683 +
107684 +static void raninit(ranctx *x, u64 *seed) {
107685 + int i;
107686 +
107687 + x->a = seed[0];
107688 + x->b = seed[1];
107689 + x->c = seed[2];
107690 + x->d = seed[3];
107691 +
107692 + for (i=0; i < 30; ++i)
107693 + (void)ranval(x);
107694 +}
107695 +
107696 +static u64 shuffle_seed[4];
107697 +
107698 +struct partition_group {
107699 + tree tree_start;
107700 + unsigned long start;
107701 + unsigned long length;
107702 +};
107703 +
107704 +static void partition_struct(tree *fields, unsigned long length, struct partition_group *size_groups, unsigned long *num_groups)
107705 +{
107706 + unsigned long i;
107707 + unsigned long accum_size = 0;
107708 + unsigned long accum_length = 0;
107709 + unsigned long group_idx = 0;
107710 +
107711 + gcc_assert(length < INT_MAX);
107712 +
107713 + memset(size_groups, 0, sizeof(struct partition_group) * length);
107714 +
107715 + for (i = 0; i < length; i++) {
107716 + if (size_groups[group_idx].tree_start == NULL_TREE) {
107717 + size_groups[group_idx].tree_start = fields[i];
107718 + size_groups[group_idx].start = i;
107719 + accum_length = 0;
107720 + accum_size = 0;
107721 + }
107722 + accum_size += (unsigned long)int_size_in_bytes(TREE_TYPE(fields[i]));
107723 + accum_length++;
107724 + if (accum_size >= 64) {
107725 + size_groups[group_idx].length = accum_length;
107726 + accum_length = 0;
107727 + group_idx++;
107728 + }
107729 + }
107730 +
107731 + if (size_groups[group_idx].tree_start != NULL_TREE &&
107732 + !size_groups[group_idx].length) {
107733 + size_groups[group_idx].length = accum_length;
107734 + group_idx++;
107735 + }
107736 +
107737 + *num_groups = group_idx;
107738 +}
107739 +
107740 +static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
107741 +{
107742 + unsigned long i, x;
107743 + struct partition_group size_group[length];
107744 + unsigned long num_groups = 0;
107745 + unsigned long randnum;
107746 +
107747 + partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
107748 + for (i = num_groups - 1; i > 0; i--) {
107749 + struct partition_group tmp;
107750 + randnum = ranval(prng_state) % (i + 1);
107751 + tmp = size_group[i];
107752 + size_group[i] = size_group[randnum];
107753 + size_group[randnum] = tmp;
107754 + }
107755 +
107756 + for (x = 0; x < num_groups; x++) {
107757 + for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
107758 + tree tmp;
107759 + if (DECL_BIT_FIELD_TYPE(newtree[i]))
107760 + continue;
107761 + randnum = ranval(prng_state) % (i + 1);
107762 + // we could handle this case differently if desired
107763 + if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
107764 + continue;
107765 + tmp = newtree[i];
107766 + newtree[i] = newtree[randnum];
107767 + newtree[randnum] = tmp;
107768 + }
107769 + }
107770 +}
107771 +
107772 +static void full_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
107773 +{
107774 + unsigned long i, randnum;
107775 +
107776 + for (i = length - 1; i > 0; i--) {
107777 + tree tmp;
107778 + randnum = ranval(prng_state) % (i + 1);
107779 + tmp = newtree[i];
107780 + newtree[i] = newtree[randnum];
107781 + newtree[randnum] = tmp;
107782 + }
107783 +}
107784 +
107785 +/* modern in-place Fisher-Yates shuffle */
107786 +static void shuffle(const_tree type, tree *newtree, unsigned long length)
107787 +{
107788 + unsigned long i;
107789 + u64 seed[4];
107790 + ranctx prng_state;
107791 + const unsigned char *structname;
107792 +
107793 + if (length == 0)
107794 + return;
107795 +
107796 + gcc_assert(TREE_CODE(type) == RECORD_TYPE);
107797 +
107798 + structname = ORIG_TYPE_NAME(type);
107799 +
107800 +#ifdef __DEBUG_PLUGIN
107801 + fprintf(stderr, "Shuffling struct %s %p\n", (const char *)structname, type);
107802 +#ifdef __DEBUG_VERBOSE
107803 + debug_tree((tree)type);
107804 +#endif
107805 +#endif
107806 +
107807 + for (i = 0; i < 4; i++) {
107808 + seed[i] = shuffle_seed[i];
107809 + seed[i] ^= name_hash(structname);
107810 + }
107811 +
107812 + raninit(&prng_state, (u64 *)&seed);
107813 +
107814 + if (performance_mode)
107815 + performance_shuffle(newtree, length, &prng_state);
107816 + else
107817 + full_shuffle(newtree, length, &prng_state);
107818 +}
107819 +
107820 +static bool is_flexible_array(const_tree field)
107821 +{
107822 + const_tree fieldtype;
107823 + const_tree typesize;
107824 + const_tree elemtype;
107825 + const_tree elemsize;
107826 +
107827 + fieldtype = TREE_TYPE(field);
107828 + typesize = TYPE_SIZE(fieldtype);
107829 +
107830 + if (TREE_CODE(fieldtype) != ARRAY_TYPE)
107831 + return false;
107832 +
107833 + elemtype = TREE_TYPE(fieldtype);
107834 + elemsize = TYPE_SIZE(elemtype);
107835 +
107836 + /* size of type is represented in bits */
107837 +
107838 + if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
107839 + TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
107840 + return true;
107841 +
107842 + if (typesize != NULL_TREE &&
107843 + (TREE_CONSTANT(typesize) && (!TREE_INT_CST_LOW(typesize) ||
107844 + TREE_INT_CST_LOW(typesize) == TREE_INT_CST_LOW(elemsize))))
107845 + return true;
107846 +
107847 + return false;
107848 +}
107849 +
107850 +static int relayout_struct(tree type)
107851 +{
107852 + unsigned long num_fields = (unsigned long)list_length(TYPE_FIELDS(type));
107853 + unsigned long shuffle_length = num_fields;
107854 + tree field;
107855 + tree newtree[num_fields];
107856 + unsigned long i;
107857 + tree list;
107858 + tree variant;
107859 + expanded_location xloc;
107860 +
107861 + if (TYPE_FIELDS(type) == NULL_TREE)
107862 + return 0;
107863 +
107864 + if (num_fields < 2)
107865 + return 0;
107866 +
107867 + gcc_assert(TREE_CODE(type) == RECORD_TYPE);
107868 +
107869 + gcc_assert(num_fields < INT_MAX);
107870 +
107871 + if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)) ||
107872 + lookup_attribute("no_randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))))
107873 + return 0;
107874 +
107875 + /* throw out any structs in uapi */
107876 + xloc = expand_location(DECL_SOURCE_LOCATION(TYPE_FIELDS(type)));
107877 +
107878 + if (strstr(xloc.file, "/uapi/"))
107879 + error(G_("attempted to randomize userland API struct %s"), ORIG_TYPE_NAME(type));
107880 +
107881 + for (field = TYPE_FIELDS(type), i = 0; field; field = TREE_CHAIN(field), i++) {
107882 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
107883 + newtree[i] = field;
107884 + }
107885 +
107886 + /*
107887 + * enforce that we don't randomize the layout of the last
107888 + * element of a struct if it's a 0 or 1-length array
107889 + * or a proper flexible array
107890 + */
107891 + if (is_flexible_array(newtree[num_fields - 1]))
107892 + shuffle_length--;
107893 +
107894 + shuffle(type, (tree *)newtree, shuffle_length);
107895 +
107896 + /*
107897 + * set up a bogus anonymous struct field designed to error out on unnamed struct initializers
107898 + * as gcc provides no other way to detect such code
107899 + */
107900 + list = make_node(FIELD_DECL);
107901 + TREE_CHAIN(list) = newtree[0];
107902 + TREE_TYPE(list) = void_type_node;
107903 + DECL_SIZE(list) = bitsize_zero_node;
107904 + DECL_NONADDRESSABLE_P(list) = 1;
107905 + DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node;
107906 + DECL_SIZE_UNIT(list) = size_zero_node;
107907 + DECL_FIELD_OFFSET(list) = size_zero_node;
107908 + // to satisfy the constify plugin
107909 + TREE_READONLY(list) = 1;
107910 +
107911 + for (i = 0; i < num_fields - 1; i++)
107912 + TREE_CHAIN(newtree[i]) = newtree[i+1];
107913 + TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
107914 +
107915 + for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
107916 + TYPE_FIELDS(variant) = list;
107917 + TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
107918 + TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
107919 + // force a re-layout
107920 + TYPE_SIZE(variant) = NULL_TREE;
107921 + layout_type(variant);
107922 + }
107923 +
107924 + return 1;
107925 +}
107926 +
107927 +/* from constify plugin */
107928 +static const_tree get_field_type(const_tree field)
107929 +{
107930 + return strip_array_types(TREE_TYPE(field));
107931 +}
107932 +
107933 +/* from constify plugin */
107934 +static bool is_fptr(const_tree fieldtype)
107935 +{
107936 + if (TREE_CODE(fieldtype) != POINTER_TYPE)
107937 + return false;
107938 +
107939 + return TREE_CODE(TREE_TYPE(fieldtype)) == FUNCTION_TYPE;
107940 +}
107941 +
107942 +/* derived from constify plugin */
107943 +static int is_pure_ops_struct(const_tree node)
107944 +{
107945 + const_tree field;
107946 +
107947 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
107948 +
107949 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
107950 + const_tree fieldtype = get_field_type(field);
107951 + enum tree_code code = TREE_CODE(fieldtype);
107952 +
107953 + if (node == fieldtype)
107954 + continue;
107955 +
107956 + if (!is_fptr(fieldtype))
107957 + return 0;
107958 +
107959 + if (code != RECORD_TYPE && code != UNION_TYPE)
107960 + continue;
107961 +
107962 + if (!is_pure_ops_struct(fieldtype))
107963 + return 0;
107964 + }
107965 +
107966 + return 1;
107967 +}
107968 +
107969 +static void randomize_type(tree type)
107970 +{
107971 + tree variant;
107972 +
107973 + gcc_assert(TREE_CODE(type) == RECORD_TYPE);
107974 +
107975 + if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type)))
107976 + return;
107977 +
107978 + if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type))
107979 + relayout_struct(type);
107980 +
107981 + for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
107982 + TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
107983 + TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type));
107984 + }
107985 +#ifdef __DEBUG_PLUGIN
107986 + fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type));
107987 +#ifdef __DEBUG_VERBOSE
107988 + debug_tree(type);
107989 +#endif
107990 +#endif
107991 +}
107992 +
107993 +static void randomize_layout_finish_decl(void *event_data, void *data)
107994 +{
107995 + tree decl = (tree)event_data;
107996 + tree type;
107997 +
107998 + if (decl == NULL_TREE || decl == error_mark_node)
107999 + return;
108000 +
108001 + type = TREE_TYPE(decl);
108002 +
108003 + if (TREE_CODE(decl) != VAR_DECL)
108004 + return;
108005 +
108006 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
108007 + return;
108008 +
108009 + if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)))
108010 + return;
108011 +
108012 + relayout_decl(decl);
108013 +}
108014 +
108015 +static void finish_type(void *event_data, void *data)
108016 +{
108017 + tree type = (tree)event_data;
108018 +
108019 + if (type == NULL_TREE || type == error_mark_node)
108020 + return;
108021 +
108022 + if (TREE_CODE(type) != RECORD_TYPE)
108023 + return;
108024 +
108025 + if (TYPE_FIELDS(type) == NULL_TREE)
108026 + return;
108027 +
108028 + if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type)))
108029 + return;
108030 +
108031 +#ifdef __DEBUG_PLUGIN
108032 + fprintf(stderr, "Calling randomize_type on %s\n", ORIG_TYPE_NAME(type));
108033 +#endif
108034 +#ifdef __DEBUG_VERBOSE
108035 + debug_tree(type);
108036 +#endif
108037 + randomize_type(type);
108038 +
108039 + return;
108040 +}
108041 +
108042 +static struct attribute_spec randomize_layout_attr = {
108043 + .name = "randomize_layout",
108044 + // related to args
108045 + .min_length = 0,
108046 + .max_length = 0,
108047 + .decl_required = false,
108048 + // need type declaration
108049 + .type_required = true,
108050 + .function_type_required = false,
108051 + .handler = handle_randomize_layout_attr,
108052 +#if BUILDING_GCC_VERSION >= 4007
108053 + .affects_type_identity = true
108054 +#endif
108055 +};
108056 +
108057 +static struct attribute_spec no_randomize_layout_attr = {
108058 + .name = "no_randomize_layout",
108059 + // related to args
108060 + .min_length = 0,
108061 + .max_length = 0,
108062 + .decl_required = false,
108063 + // need type declaration
108064 + .type_required = true,
108065 + .function_type_required = false,
108066 + .handler = handle_randomize_layout_attr,
108067 +#if BUILDING_GCC_VERSION >= 4007
108068 + .affects_type_identity = true
108069 +#endif
108070 +};
108071 +
108072 +static struct attribute_spec randomize_considered_attr = {
108073 + .name = "randomize_considered",
108074 + // related to args
108075 + .min_length = 0,
108076 + .max_length = 0,
108077 + .decl_required = false,
108078 + // need type declaration
108079 + .type_required = true,
108080 + .function_type_required = false,
108081 + .handler = handle_randomize_considered_attr,
108082 +#if BUILDING_GCC_VERSION >= 4007
108083 + .affects_type_identity = false
108084 +#endif
108085 +};
108086 +
108087 +static struct attribute_spec randomize_performed_attr = {
108088 + .name = "randomize_performed",
108089 + // related to args
108090 + .min_length = 0,
108091 + .max_length = 0,
108092 + .decl_required = false,
108093 + // need type declaration
108094 + .type_required = true,
108095 + .function_type_required = false,
108096 + .handler = handle_randomize_performed_attr,
108097 +#if BUILDING_GCC_VERSION >= 4007
108098 + .affects_type_identity = false
108099 +#endif
108100 +};
108101 +
108102 +static void register_attributes(void *event_data, void *data)
108103 +{
108104 + register_attribute(&randomize_layout_attr);
108105 + register_attribute(&no_randomize_layout_attr);
108106 + register_attribute(&randomize_considered_attr);
108107 + register_attribute(&randomize_performed_attr);
108108 +}
108109 +
108110 +static void check_bad_casts_in_constructor(tree var, tree init)
108111 +{
108112 + unsigned HOST_WIDE_INT idx;
108113 + tree field, val;
108114 + tree field_type, val_type;
108115 +
108116 + FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(init), idx, field, val) {
108117 + if (TREE_CODE(val) == CONSTRUCTOR) {
108118 + check_bad_casts_in_constructor(var, val);
108119 + continue;
108120 + }
108121 +
108122 + /* pipacs' plugin creates franken-arrays that differ from those produced by
108123 + normal code which all have valid 'field' trees. work around this */
108124 + if (field == NULL_TREE)
108125 + continue;
108126 + field_type = TREE_TYPE(field);
108127 + val_type = TREE_TYPE(val);
108128 +
108129 + if (TREE_CODE(field_type) != POINTER_TYPE || TREE_CODE(val_type) != POINTER_TYPE)
108130 + continue;
108131 +
108132 + if (field_type == val_type)
108133 + continue;
108134 +
108135 + field_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(field_type))));
108136 + val_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(val_type))));
108137 +
108138 + if (field_type == void_type_node)
108139 + continue;
108140 + if (field_type == val_type)
108141 + continue;
108142 + if (TREE_CODE(val_type) != RECORD_TYPE)
108143 + continue;
108144 +
108145 + if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(val_type)))
108146 + continue;
108147 + inform(DECL_SOURCE_LOCATION(var), "found mismatched struct pointer types: %qT and %qT\n", TYPE_MAIN_VARIANT(field_type), TYPE_MAIN_VARIANT(val_type));
108148 + }
108149 +}
108150 +
108151 +/* derived from the constify plugin */
108152 +static void check_global_variables(void *event_data, void *data)
108153 +{
108154 + struct varpool_node *node;
108155 + tree init;
108156 +
108157 + FOR_EACH_VARIABLE(node) {
108158 + tree var = NODE_DECL(node);
108159 + init = DECL_INITIAL(var);
108160 + if (init == NULL_TREE)
108161 + continue;
108162 +
108163 + if (TREE_CODE(init) != CONSTRUCTOR)
108164 + continue;
108165 +
108166 + check_bad_casts_in_constructor(var, init);
108167 + }
108168 +}
108169 +
108170 +static bool dominated_by_is_err(const_tree rhs, basic_block bb)
108171 +{
108172 + basic_block dom;
108173 + gimple dom_stmt;
108174 + gimple call_stmt;
108175 + const_tree dom_lhs;
108176 + const_tree poss_is_err_cond;
108177 + const_tree poss_is_err_func;
108178 + const_tree is_err_arg;
108179 +
108180 + dom = get_immediate_dominator(CDI_DOMINATORS, bb);
108181 + if (!dom)
108182 + return false;
108183 +
108184 + dom_stmt = last_stmt(dom);
108185 + if (!dom_stmt)
108186 + return false;
108187 +
108188 + if (gimple_code(dom_stmt) != GIMPLE_COND)
108189 + return false;
108190 +
108191 + if (gimple_cond_code(dom_stmt) != NE_EXPR)
108192 + return false;
108193 +
108194 + if (!integer_zerop(gimple_cond_rhs(dom_stmt)))
108195 + return false;
108196 +
108197 + poss_is_err_cond = gimple_cond_lhs(dom_stmt);
108198 +
108199 + if (TREE_CODE(poss_is_err_cond) != SSA_NAME)
108200 + return false;
108201 +
108202 + call_stmt = SSA_NAME_DEF_STMT(poss_is_err_cond);
108203 +
108204 + if (gimple_code(call_stmt) != GIMPLE_CALL)
108205 + return false;
108206 +
108207 + dom_lhs = gimple_get_lhs(call_stmt);
108208 + poss_is_err_func = gimple_call_fndecl(call_stmt);
108209 + if (!poss_is_err_func)
108210 + return false;
108211 + if (dom_lhs != poss_is_err_cond)
108212 + return false;
108213 + if (strcmp(DECL_NAME_POINTER(poss_is_err_func), "IS_ERR"))
108214 + return false;
108215 +
108216 + is_err_arg = gimple_call_arg(call_stmt, 0);
108217 + if (!is_err_arg)
108218 + return false;
108219 +
108220 + if (is_err_arg != rhs)
108221 + return false;
108222 +
108223 + return true;
108224 +}
108225 +
108226 +static void handle_local_var_initializers(void)
108227 +{
108228 + tree var;
108229 + unsigned int i;
108230 +
108231 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
108232 + tree init = DECL_INITIAL(var);
108233 + if (!init)
108234 + continue;
108235 + if (TREE_CODE(init) != CONSTRUCTOR)
108236 + continue;
108237 + check_bad_casts_in_constructor(var, init);
108238 + }
108239 +}
108240 +
108241 +/*
108242 + * iterate over all statements to find "bad" casts:
108243 + * those where the address of the start of a structure is cast
108244 + * to a pointer of a structure of a different type, or a
108245 + * structure pointer type is cast to a different structure pointer type
108246 + */
108247 +static unsigned int find_bad_casts(void)
108248 +{
108249 + basic_block bb;
108250 +
108251 + handle_local_var_initializers();
108252 +
108253 + FOR_ALL_BB_FN(bb, cfun) {
108254 + gimple_stmt_iterator gsi;
108255 +
108256 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
108257 + gimple stmt;
108258 + const_tree lhs;
108259 + const_tree lhs_type;
108260 + const_tree rhs1;
108261 + const_tree rhs_type;
108262 + const_tree ptr_lhs_type;
108263 + const_tree ptr_rhs_type;
108264 + const_tree op0;
108265 + const_tree op0_type;
108266 + enum tree_code rhs_code;
108267 +
108268 + stmt = gsi_stmt(gsi);
108269 +
108270 +#ifdef __DEBUG_PLUGIN
108271 +#ifdef __DEBUG_VERBOSE
108272 + debug_gimple_stmt(stmt);
108273 + debug_tree(gimple_get_lhs(stmt));
108274 +#endif
108275 +#endif
108276 +
108277 + if (gimple_code(stmt) != GIMPLE_ASSIGN)
108278 + continue;
108279 +
108280 +#ifdef __DEBUG_PLUGIN
108281 +#ifdef __DEBUG_VERBOSE
108282 + debug_tree(gimple_assign_rhs1(stmt));
108283 +#endif
108284 +#endif
108285 +
108286 + rhs_code = gimple_assign_rhs_code(stmt);
108287 +
108288 + if (rhs_code != ADDR_EXPR && rhs_code != SSA_NAME)
108289 + continue;
108290 +
108291 + lhs = gimple_get_lhs(stmt);
108292 + lhs_type = TREE_TYPE(lhs);
108293 + rhs1 = gimple_assign_rhs1(stmt);
108294 + rhs_type = TREE_TYPE(rhs1);
108295 +
108296 + if (TREE_CODE(rhs_type) != POINTER_TYPE ||
108297 + TREE_CODE(lhs_type) != POINTER_TYPE)
108298 + continue;
108299 +
108300 + ptr_lhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(lhs_type))));
108301 + ptr_rhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(rhs_type))));
108302 +
108303 + if (ptr_rhs_type == void_type_node)
108304 + continue;
108305 +
108306 + if (ptr_lhs_type == void_type_node)
108307 + continue;
108308 +
108309 + if (dominated_by_is_err(rhs1, bb))
108310 + continue;
108311 +
108312 + if (TREE_CODE(ptr_rhs_type) != RECORD_TYPE) {
108313 +#ifndef __DEBUG_PLUGIN
108314 + if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_lhs_type)))
108315 +#endif
108316 + inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type);
108317 + continue;
108318 + }
108319 +
108320 + if (rhs_code == SSA_NAME && ptr_lhs_type == ptr_rhs_type)
108321 + continue;
108322 +
108323 + if (rhs_code == ADDR_EXPR) {
108324 + op0 = TREE_OPERAND(rhs1, 0);
108325 +
108326 + if (op0 == NULL_TREE)
108327 + continue;
108328 +
108329 + if (TREE_CODE(op0) != VAR_DECL)
108330 + continue;
108331 +
108332 + op0_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(op0))));
108333 + if (op0_type == ptr_lhs_type)
108334 + continue;
108335 +
108336 +#ifndef __DEBUG_PLUGIN
108337 + if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(op0_type)))
108338 +#endif
108339 + inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, op0_type);
108340 + } else {
108341 + const_tree ssa_name_var = SSA_NAME_VAR(rhs1);
108342 + /* skip bogus type casts introduced by container_of */
108343 + if (ssa_name_var != NULL_TREE && DECL_NAME(ssa_name_var) &&
108344 + !strcmp((const char *)DECL_NAME_POINTER(ssa_name_var), "__mptr"))
108345 + continue;
108346 +#ifndef __DEBUG_PLUGIN
108347 + if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_rhs_type)))
108348 +#endif
108349 + inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type);
108350 + }
108351 +
108352 + }
108353 + }
108354 + return 0;
108355 +}
108356 +
108357 +#if BUILDING_GCC_VERSION >= 4009
108358 +static const struct pass_data randomize_layout_bad_cast_data = {
108359 +#else
108360 +static struct gimple_opt_pass randomize_layout_bad_cast = {
108361 + .pass = {
108362 +#endif
108363 + .type = GIMPLE_PASS,
108364 + .name = "randomize_layout_bad_cast",
108365 +#if BUILDING_GCC_VERSION >= 4008
108366 + .optinfo_flags = OPTGROUP_NONE,
108367 +#endif
108368 +#if BUILDING_GCC_VERSION >= 4009
108369 + .has_gate = false,
108370 + .has_execute = true,
108371 +#else
108372 + .gate = NULL,
108373 + .execute = find_bad_casts,
108374 + .sub = NULL,
108375 + .next = NULL,
108376 + .static_pass_number = 0,
108377 +#endif
108378 + .tv_id = TV_NONE,
108379 + .properties_required = PROP_cfg,
108380 + .properties_provided = 0,
108381 + .properties_destroyed = 0,
108382 + .todo_flags_start = 0,
108383 + .todo_flags_finish = TODO_dump_func
108384 +#if BUILDING_GCC_VERSION < 4009
108385 + }
108386 +#endif
108387 +};
108388 +
108389 +#if BUILDING_GCC_VERSION >= 4009
108390 +namespace {
108391 +class randomize_layout_bad_cast : public gimple_opt_pass {
108392 +public:
108393 + randomize_layout_bad_cast() : gimple_opt_pass(randomize_layout_bad_cast_data, g) {}
108394 + unsigned int execute() { return find_bad_casts(); }
108395 +};
108396 +}
108397 +#endif
108398 +
108399 +static struct opt_pass *make_randomize_layout_bad_cast(void)
108400 +{
108401 +#if BUILDING_GCC_VERSION >= 4009
108402 + return new randomize_layout_bad_cast();
108403 +#else
108404 + return &randomize_layout_bad_cast.pass;
108405 +#endif
108406 +}
108407 +
108408 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
108409 +{
108410 + int i;
108411 + const char * const plugin_name = plugin_info->base_name;
108412 + const int argc = plugin_info->argc;
108413 + const struct plugin_argument * const argv = plugin_info->argv;
108414 + bool enable = true;
108415 + int obtained_seed = 0;
108416 + struct register_pass_info randomize_layout_bad_cast_info;
108417 +
108418 + randomize_layout_bad_cast_info.pass = make_randomize_layout_bad_cast();
108419 + randomize_layout_bad_cast_info.reference_pass_name = "ssa";
108420 + randomize_layout_bad_cast_info.ref_pass_instance_number = 1;
108421 + randomize_layout_bad_cast_info.pos_op = PASS_POS_INSERT_AFTER;
108422 +
108423 + if (!plugin_default_version_check(version, &gcc_version)) {
108424 + error(G_("incompatible gcc/plugin versions"));
108425 + return 1;
108426 + }
108427 +
108428 + if (strcmp(lang_hooks.name, "GNU C")) {
108429 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
108430 + enable = false;
108431 + }
108432 +
108433 + for (i = 0; i < argc; ++i) {
108434 + if (!strcmp(argv[i].key, "disable")) {
108435 + enable = false;
108436 + continue;
108437 + }
108438 + if (!strcmp(argv[i].key, "performance-mode")) {
108439 + performance_mode = 1;
108440 + continue;
108441 + }
108442 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
108443 + }
108444 +
108445 + if (strlen(randstruct_seed) != 64) {
108446 + error(G_("invalid seed value supplied for %s plugin"), plugin_name);
108447 + return 1;
108448 + }
108449 + obtained_seed = sscanf(randstruct_seed, "%016llx%016llx%016llx%016llx",
108450 + &shuffle_seed[0], &shuffle_seed[1], &shuffle_seed[2], &shuffle_seed[3]);
108451 + if (obtained_seed != 4) {
108452 + error(G_("Invalid seed supplied for %s plugin"), plugin_name);
108453 + return 1;
108454 + }
108455 +
108456 + register_callback(plugin_name, PLUGIN_INFO, NULL, &randomize_layout_plugin_info);
108457 + if (enable) {
108458 + register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
108459 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &randomize_layout_bad_cast_info);
108460 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
108461 + register_callback(plugin_name, PLUGIN_FINISH_DECL, randomize_layout_finish_decl, NULL);
108462 + }
108463 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
108464 +
108465 + return 0;
108466 +}
108467 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
108468 new file mode 100644
108469 index 0000000..9529806
108470 --- /dev/null
108471 +++ b/tools/gcc/size_overflow_hash.data
108472 @@ -0,0 +1,5709 @@
108473 +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
108474 +ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
108475 +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
108476 +compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
108477 +carl9170_alloc_27 carl9170_alloc 1 27 NULL
108478 +sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
108479 +padzero_55 padzero 1 55 &sel_read_policyvers_55
108480 +cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
108481 +__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
108482 +snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
108483 +load_msg_95 load_msg 2 95 NULL
108484 +ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL
108485 +init_q_132 init_q 4 132 NULL
108486 +memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
108487 +hva_to_gfn_memslot_149 hva_to_gfn_memslot 0-1 149 NULL
108488 +ping_v6_sendmsg_152 ping_v6_sendmsg 4 152 NULL
108489 +ext4_ext_get_actual_len_153 ext4_ext_get_actual_len 0 153 NULL nohasharray
108490 +tracing_trace_options_write_153 tracing_trace_options_write 3 153 &ext4_ext_get_actual_len_153
108491 +pci_request_selected_regions_169 pci_request_selected_regions 0 169 NULL
108492 +xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
108493 +iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
108494 +br_port_info_size_268 br_port_info_size 0 268 NULL
108495 +generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
108496 +read_file_war_stats_292 read_file_war_stats 3 292 NULL
108497 +SYSC_connect_304 SYSC_connect 3 304 NULL
108498 +syslog_print_307 syslog_print 2 307 NULL
108499 +dn_setsockopt_314 dn_setsockopt 5 314 NULL
108500 +mlx5_core_access_reg_361 mlx5_core_access_reg 3-5 361 NULL
108501 +hw_device_state_409 hw_device_state 0 409 NULL
108502 +aio_read_events_ring_410 aio_read_events_ring 3-0 410 NULL
108503 +lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
108504 +snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
108505 +cfs_trace_set_debug_mb_usrstr_486 cfs_trace_set_debug_mb_usrstr 2 486 NULL
108506 +nvme_trans_modesel_data_488 nvme_trans_modesel_data 4 488 NULL
108507 +iwl_dbgfs_protection_mode_write_502 iwl_dbgfs_protection_mode_write 3 502 NULL
108508 +rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
108509 +ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
108510 +zlib_deflate_workspacesize_537 zlib_deflate_workspacesize 0-1-2 537 NULL
108511 +iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
108512 +sco_sock_setsockopt_552 sco_sock_setsockopt 5 552 NULL
108513 +lpfc_nlp_state_name_556 lpfc_nlp_state_name 2 556 NULL
108514 +snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
108515 +start_isoc_chain_565 start_isoc_chain 2 565 NULL nohasharray
108516 +dev_hard_header_565 dev_hard_header 0 565 &start_isoc_chain_565
108517 +ocfs2_refcounted_xattr_delete_need_584 ocfs2_refcounted_xattr_delete_need 0 584 NULL
108518 +osl_pktget_590 osl_pktget 2 590 NULL
108519 +smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
108520 +btrfs_stack_file_extent_offset_607 btrfs_stack_file_extent_offset 0 607 NULL
108521 +ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
108522 +fuse_request_alloc_nofs_617 fuse_request_alloc_nofs 1 617 NULL
108523 +ptlrpc_lprocfs_nrs_seq_write_621 ptlrpc_lprocfs_nrs_seq_write 3 621 NULL
108524 +viafb_dfpl_proc_write_627 viafb_dfpl_proc_write 3 627 NULL
108525 +clone_split_bio_633 clone_split_bio 6 633 NULL
108526 +ceph_osdc_new_request_635 ceph_osdc_new_request 6 635 NULL
108527 +cfs_hash_bkt_size_643 cfs_hash_bkt_size 0 643 NULL
108528 +unlink_queued_645 unlink_queued 4 645 NULL
108529 +dtim_interval_read_654 dtim_interval_read 3 654 NULL
108530 +mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
108531 +persistent_ram_vmap_709 persistent_ram_vmap 1-2 709 NULL
108532 +sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
108533 +dvb_video_write_754 dvb_video_write 3 754 NULL
108534 +cfs_trace_allocate_string_buffer_781 cfs_trace_allocate_string_buffer 2 781 NULL
108535 +ath6kl_disconnect_timeout_write_794 ath6kl_disconnect_timeout_write 3 794 NULL
108536 +if_writecmd_815 if_writecmd 2 815 NULL
108537 +aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
108538 +error_state_read_859 error_state_read 6 859 NULL
108539 +o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
108540 +iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
108541 +snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
108542 +carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
108543 +__nodes_weight_956 __nodes_weight 2-0 956 NULL
108544 +bnx2x_fill_fw_str_968 bnx2x_fill_fw_str 3 968 NULL
108545 +memcmp_990 memcmp 0 990 NULL
108546 +readreg_1017 readreg 0-1 1017 NULL
108547 +smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
108548 +gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
108549 +mce_request_packet_1073 mce_request_packet 3 1073 NULL
108550 +agp_create_memory_1075 agp_create_memory 1 1075 NULL
108551 +_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
108552 +llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
108553 +llc_mac_hdr_init_1094 llc_mac_hdr_init 0 1094 NULL
108554 +nfs4_init_nonuniform_client_string_1097 nfs4_init_nonuniform_client_string 3 1097 NULL
108555 +utf8s_to_utf16s_1115 utf8s_to_utf16s 0 1115 NULL
108556 +cfg80211_report_obss_beacon_1133 cfg80211_report_obss_beacon 3 1133 NULL
108557 +i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
108558 +ipc_alloc_1192 ipc_alloc 1 1192 NULL
108559 +ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
108560 +pstore_ftrace_knob_write_1198 pstore_ftrace_knob_write 3 1198 NULL
108561 +i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
108562 +dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
108563 +i2cdev_read_1206 i2cdev_read 3 1206 NULL
108564 +lov_ost_pool_init_1215 lov_ost_pool_init 2 1215 NULL
108565 +ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
108566 +qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
108567 +ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
108568 +SyS_flistxattr_1287 SyS_flistxattr 3 1287 NULL
108569 +tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
108570 +ffs_1322 ffs 0 1322 NULL
108571 +qlcnic_pci_sriov_configure_1327 qlcnic_pci_sriov_configure 2 1327 NULL
108572 +btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
108573 +snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
108574 +ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
108575 +fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
108576 +ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
108577 +sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
108578 +do_msgsnd_1387 do_msgsnd 4 1387 NULL
108579 +SYSC_io_getevents_1392 SYSC_io_getevents 3 1392 NULL
108580 +file_read_actor_1401 file_read_actor 4-0 1401 NULL
108581 +cfs_trace_copyout_string_1416 cfs_trace_copyout_string 2 1416 NULL
108582 +init_rs_internal_1436 init_rs_internal 1 1436 NULL
108583 +stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
108584 +tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
108585 +xprt_alloc_1475 xprt_alloc 2 1475 NULL
108586 +SYSC_syslog_1477 SYSC_syslog 3 1477 NULL
108587 +sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
108588 +fpregs_set_1497 fpregs_set 4 1497 NULL
108589 +tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
108590 +alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
108591 +ath6kl_init_get_fwcaps_1557 ath6kl_init_get_fwcaps 3 1557 NULL
108592 +ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
108593 +ipath_ht_handle_hwerrors_1592 ipath_ht_handle_hwerrors 3 1592 NULL
108594 +packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
108595 +btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
108596 +v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
108597 +ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
108598 +mei_cl_recv_1665 mei_cl_recv 3 1665 NULL
108599 +rmap_add_1677 rmap_add 3 1677 NULL
108600 +configfs_read_file_1683 configfs_read_file 3 1683 NULL
108601 +pdu_write_u_1710 pdu_write_u 3 1710 NULL
108602 +coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
108603 +internal_create_group_1733 internal_create_group 0 1733 NULL
108604 +dev_irnet_read_1741 dev_irnet_read 3 1741 NULL
108605 +tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
108606 +cosa_write_1774 cosa_write 3 1774 NULL
108607 +fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
108608 +__nodelist_scnprintf_1815 __nodelist_scnprintf 2-0 1815 NULL
108609 +sb_issue_zeroout_1884 sb_issue_zeroout 3 1884 NULL
108610 +rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
108611 +nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
108612 +SyS_add_key_1900 SyS_add_key 4 1900 NULL
108613 +uhid_char_read_1920 uhid_char_read 3 1920 NULL
108614 +tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
108615 +bdev_erase_1933 bdev_erase 3 1933 NULL
108616 +ext3_fiemap_1936 ext3_fiemap 4 1936 NULL
108617 +cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
108618 +ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
108619 +ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
108620 +sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
108621 +gpio_power_write_1991 gpio_power_write 3 1991 NULL
108622 +__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
108623 +rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
108624 +ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
108625 +write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
108626 +BcmCopySection_2035 BcmCopySection 5 2035 NULL
108627 +ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
108628 +ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
108629 +__generic_copy_from_user_intel_2073 __generic_copy_from_user_intel 0-3 2073 NULL
108630 +diva_set_driver_dbg_mask_2077 diva_set_driver_dbg_mask 0 2077 NULL
108631 +iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
108632 +idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
108633 +audit_expand_2098 audit_expand 0 2098 NULL
108634 +iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
108635 +ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
108636 +enable_read_2117 enable_read 3 2117 NULL
108637 +pcf50633_write_block_2124 pcf50633_write_block 2-3 2124 NULL
108638 +check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
108639 +iov_iter_count_2152 iov_iter_count 0 2152 NULL
108640 +__copy_to_user_ll_2157 __copy_to_user_ll 0-3 2157 NULL
108641 +_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
108642 +bio_integrity_alloc_2194 bio_integrity_alloc 3 2194 NULL
108643 +picolcd_debug_reset_write_2195 picolcd_debug_reset_write 3 2195 NULL
108644 +u32_array_read_2219 u32_array_read 3 2219 NULL nohasharray
108645 +mei_dbgfs_read_meclients_2219 mei_dbgfs_read_meclients 3 2219 &u32_array_read_2219
108646 +vhci_write_2224 vhci_write 3 2224 NULL
108647 +__ocfs2_journal_access_2241 __ocfs2_journal_access 0 2241 NULL
108648 +ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
108649 +netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
108650 +sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
108651 +do_update_counters_2259 do_update_counters 4 2259 NULL
108652 +ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
108653 +debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
108654 +kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
108655 +intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
108656 +hfsplus_find_init_2318 hfsplus_find_init 0 2318 NULL nohasharray
108657 +picolcd_fb_write_2318 picolcd_fb_write 3 2318 &hfsplus_find_init_2318
108658 +dice_hwdep_read_2326 dice_hwdep_read 3 2326 NULL
108659 +__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
108660 +zr364xx_read_2354 zr364xx_read 3 2354 NULL
108661 +sysfs_add_file_mode_ns_2362 sysfs_add_file_mode_ns 0 2362 NULL
108662 +viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
108663 +xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL nohasharray
108664 +rose_recvmsg_2368 rose_recvmsg 4 2368 &xfs_buf_map_from_irec_2368
108665 +il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
108666 +rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
108667 +isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
108668 +raid1_size_2419 raid1_size 0-2 2419 NULL
108669 +b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
108670 +wiphy_new_2482 wiphy_new 2 2482 NULL
108671 +bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
108672 +hfsplus_user_setxattr_2485 hfsplus_user_setxattr 4 2485 NULL
108673 +squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
108674 +v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
108675 +ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
108676 +batadv_tvlv_container_list_size_2524 batadv_tvlv_container_list_size 0 2524 NULL
108677 +gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
108678 +pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
108679 +mdc_max_rpcs_in_flight_seq_write_2594 mdc_max_rpcs_in_flight_seq_write 3 2594 NULL
108680 +slot_bytes_2609 slot_bytes 0 2609 NULL
108681 +smk_write_logging_2618 smk_write_logging 3 2618 NULL
108682 +switch_status_2629 switch_status 5 2629 NULL
108683 +tcp_xmit_size_goal_2661 tcp_xmit_size_goal 2 2661 NULL
108684 +osc_build_ppga_2670 osc_build_ppga 2 2670 NULL
108685 +ffs_ep0_read_2672 ffs_ep0_read 3 2672 NULL
108686 +oti6858_write_2692 oti6858_write 4 2692 NULL
108687 +nfc_llcp_send_ui_frame_2702 nfc_llcp_send_ui_frame 5 2702 NULL
108688 +memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
108689 +lprocfs_stats_counter_size_2708 lprocfs_stats_counter_size 0 2708 NULL
108690 +xfs_readdir_2767 xfs_readdir 3 2767 NULL
108691 +mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL
108692 +device_add_attrs_2789 device_add_attrs 0 2789 NULL
108693 +iwl_dbgfs_clear_ucode_statistics_write_2804 iwl_dbgfs_clear_ucode_statistics_write 3 2804 NULL
108694 +sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
108695 +vb2_dc_get_userptr_2829 vb2_dc_get_userptr 2-3 2829 NULL
108696 +wait_for_avail_2847 wait_for_avail 0 2847 NULL
108697 +sfq_alloc_2861 sfq_alloc 1 2861 NULL
108698 +irnet_ctrl_read_2863 irnet_ctrl_read 4 2863 NULL
108699 +move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
108700 +nla_padlen_2883 nla_padlen 1 2883 NULL
108701 +cmm_write_2896 cmm_write 3 2896 NULL
108702 +osc_import_seq_write_2923 osc_import_seq_write 3 2923 NULL
108703 +xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
108704 +nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
108705 +tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
108706 +i40e_dbg_prep_dump_buf_2951 i40e_dbg_prep_dump_buf 2 2951 NULL
108707 +set_fast_connectable_2952 set_fast_connectable 4 2952 NULL
108708 +free_area_init_core_2962 free_area_init_core 2-3 2962 NULL
108709 +do_strnlen_user_2976 do_strnlen_user 0-2 2976 NULL
108710 +p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
108711 +lov_stripetype_seq_write_3013 lov_stripetype_seq_write 3 3013 NULL
108712 +do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
108713 +depth_write_3021 depth_write 3 3021 NULL
108714 +snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
108715 +kvm_unmap_hva_3028 kvm_unmap_hva 2 3028 NULL
108716 +xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
108717 +lpfc_idiag_mbxacc_write_3038 lpfc_idiag_mbxacc_write 3 3038 NULL nohasharray
108718 +iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 &lpfc_idiag_mbxacc_write_3038
108719 +nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
108720 +il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
108721 +qp_alloc_ppn_set_3068 qp_alloc_ppn_set 2-4 3068 NULL
108722 +__blk_end_bidi_request_3070 __blk_end_bidi_request 3-4 3070 NULL
108723 +dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
108724 +read_file_antenna_diversity_3077 read_file_antenna_diversity 3 3077 NULL
108725 +clone_bio_3100 clone_bio 6 3100 NULL nohasharray
108726 +ttusb2_msg_3100 ttusb2_msg 4 3100 &clone_bio_3100
108727 +rb_alloc_3102 rb_alloc 1 3102 NULL
108728 +simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
108729 +print_time_3132 print_time 0 3132 NULL
108730 +fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
108731 +CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
108732 +compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
108733 +uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
108734 +uvc_video_stats_dump_3181 uvc_video_stats_dump 3 3181 NULL
108735 +compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
108736 +mempool_create_node_3191 mempool_create_node 1 3191 NULL
108737 +alloc_context_3194 alloc_context 1 3194 NULL
108738 +shmem_pread_slow_3198 shmem_pread_slow 3-2 3198 NULL
108739 +codec_reg_write_file_3204 codec_reg_write_file 3 3204 NULL
108740 +SyS_sendto_3219 SyS_sendto 6 3219 NULL
108741 +btrfs_prealloc_file_range_3227 btrfs_prealloc_file_range 3 3227 NULL
108742 +kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
108743 +write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
108744 +do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
108745 +ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
108746 +key_key_read_3241 key_key_read 3 3241 NULL
108747 +__ilog2_u64_3284 __ilog2_u64 0 3284 NULL
108748 +__iovec_copy_from_user_inatomic_3314 __iovec_copy_from_user_inatomic 0-4-3 3314 NULL
108749 +dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
108750 +compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
108751 +ocfs2_extend_xattr_bucket_3328 ocfs2_extend_xattr_bucket 4 3328 NULL
108752 +read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
108753 +sysfs_create_group_3339 sysfs_create_group 0 3339 NULL
108754 +tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
108755 +il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
108756 +gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
108757 +scnprintf_3360 scnprintf 0-2 3360 NULL
108758 +ReadByteAmd7930_3365 ReadByteAmd7930 0 3365 NULL
108759 +sr_read_3366 sr_read 3 3366 NULL
108760 +mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
108761 +send_stream_3397 send_stream 4 3397 NULL
108762 +isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
108763 +mei_io_cb_alloc_resp_buf_3414 mei_io_cb_alloc_resp_buf 2 3414 NULL
108764 +pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
108765 +crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
108766 +SyS_msgsnd_3436 SyS_msgsnd 3 3436 NULL
108767 +pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
108768 +s3fb_ddc_read_3451 s3fb_ddc_read 0 3451 NULL
108769 +softsynth_write_3455 softsynth_write 3 3455 NULL
108770 +snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 5-4-2 3464 NULL nohasharray
108771 +jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 &snd_pcm_lib_readv_transfer_3464
108772 +security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
108773 +xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1 3496 NULL
108774 +mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL
108775 +SyS_semtimedop_3532 SyS_semtimedop 3 3532 NULL
108776 +SyS_readv_3539 SyS_readv 3 3539 NULL
108777 +btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
108778 +alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
108779 +evtchn_read_3569 evtchn_read 3 3569 NULL
108780 +ll_track_ppid_seq_write_3582 ll_track_ppid_seq_write 3 3582 NULL
108781 +vc_resize_3585 vc_resize 3-2 3585 NULL
108782 +kvm_mmu_notifier_change_pte_3596 kvm_mmu_notifier_change_pte 3 3596 NULL
108783 +sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
108784 +edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
108785 +tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
108786 +aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
108787 +ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
108788 +i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
108789 +snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 NULL
108790 +ci_ll_write_3740 ci_ll_write 4 3740 NULL nohasharray
108791 +ath6kl_mgmt_tx_3740 ath6kl_mgmt_tx 7 3740 &ci_ll_write_3740
108792 +sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
108793 +ncp_file_write_3813 ncp_file_write 3 3813 NULL
108794 +llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
108795 +hfsplus_direct_IO_3835 hfsplus_direct_IO 4 3835 NULL
108796 +create_one_cdev_3852 create_one_cdev 2 3852 NULL
108797 +smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
108798 +get_fd_set_3866 get_fd_set 1 3866 NULL
108799 +apei_res_sub_3873 apei_res_sub 0 3873 NULL
108800 +garp_attr_create_3883 garp_attr_create 3 3883 NULL
108801 +efivarfs_file_read_3893 efivarfs_file_read 3 3893 NULL
108802 +nvram_write_3894 nvram_write 3 3894 NULL
108803 +pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
108804 +comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
108805 +vcs_write_3910 vcs_write 3 3910 NULL
108806 +SyS_move_pages_3920 SyS_move_pages 2 3920 NULL
108807 +hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL
108808 +brcmf_debugfs_fws_stats_read_3947 brcmf_debugfs_fws_stats_read 3 3947 NULL
108809 +ll_get_max_mdsize_3962 ll_get_max_mdsize 0 3962 NULL
108810 +mite_bytes_written_to_memory_lb_3987 mite_bytes_written_to_memory_lb 0 3987 NULL
108811 +do_add_counters_3992 do_add_counters 3 3992 NULL
108812 +obd_alloc_memmd_4002 obd_alloc_memmd 0 4002 NULL
108813 +userspace_status_4004 userspace_status 4 4004 NULL
108814 +xfs_check_block_4005 xfs_check_block 4 4005 NULL nohasharray
108815 +mei_write_4005 mei_write 3 4005 &xfs_check_block_4005
108816 +snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
108817 +blk_end_request_4024 blk_end_request 3 4024 NULL
108818 +ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
108819 +mtip_hw_read_registers_4037 mtip_hw_read_registers 3 4037 NULL
108820 +read_file_queues_4078 read_file_queues 3 4078 NULL
108821 +fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
108822 +C_SYSC_rt_sigpending_4114 C_SYSC_rt_sigpending 2 4114 NULL
108823 +tm6000_read_4151 tm6000_read 3 4151 NULL
108824 +mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
108825 +msg_bits_4158 msg_bits 0-3-4 4158 NULL
108826 +get_alua_req_4166 get_alua_req 3 4166 NULL
108827 +blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
108828 +read_file_bool_4180 read_file_bool 3 4180 NULL
108829 +ocfs2_find_cpos_for_right_leaf_4194 ocfs2_find_cpos_for_right_leaf 0 4194 NULL
108830 +vring_new_virtqueue_4199 vring_new_virtqueue 2 4199 NULL
108831 +f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
108832 +_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
108833 +__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
108834 +ath6kl_force_roam_write_4282 ath6kl_force_roam_write 3 4282 NULL
108835 +goldfish_audio_write_4284 goldfish_audio_write 3 4284 NULL
108836 +__usbnet_read_cmd_4299 __usbnet_read_cmd 7 4299 NULL
108837 +dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 3-2-5 4303 NULL
108838 +count_strings_4315 count_strings 0 4315 NULL
108839 +__sysfs_add_one_4326 __sysfs_add_one 0 4326 NULL
108840 +nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
108841 +snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
108842 +__copy_from_user_inatomic_4365 __copy_from_user_inatomic 0-3 4365 NULL nohasharray
108843 +lookup_string_4365 lookup_string 0 4365 &__copy_from_user_inatomic_4365
108844 +irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
108845 +access_process_vm_4412 access_process_vm 0 4412 NULL nohasharray
108846 +cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412
108847 +libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
108848 +rtw_android_get_rssi_4421 rtw_android_get_rssi 0 4421 NULL
108849 +do_pages_stat_4437 do_pages_stat 2 4437 NULL
108850 +at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
108851 +snd_seq_expand_var_event_4481 snd_seq_expand_var_event 5-0 4481 NULL
108852 +vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
108853 +set_link_security_4502 set_link_security 4 4502 NULL
108854 +ll_max_readahead_per_file_mb_seq_write_4531 ll_max_readahead_per_file_mb_seq_write 3 4531 NULL
108855 +tty_register_device_4544 tty_register_device 2 4544 NULL
108856 +btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
108857 +xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
108858 +bch_alloc_4593 bch_alloc 1 4593 NULL
108859 +ocfs2_refcount_lock_4595 ocfs2_refcount_lock 0 4595 NULL
108860 +ll_rw_extents_stats_seq_write_4633 ll_rw_extents_stats_seq_write 3 4633 NULL
108861 +iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
108862 +skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
108863 +cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
108864 +short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
108865 +kone_receive_4690 kone_receive 4 4690 NULL
108866 +hash_netportnet6_expire_4702 hash_netportnet6_expire 4 4702 NULL
108867 +cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
108868 +ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
108869 +show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
108870 +bitmap_startwrite_4736 bitmap_startwrite 2 4736 NULL nohasharray
108871 +ll_rw_offset_stats_seq_write_4736 ll_rw_offset_stats_seq_write 3 4736 &bitmap_startwrite_4736
108872 +lu_buf_alloc_4753 lu_buf_alloc 2 4753 NULL
108873 +pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL
108874 +create_subvol_4791 create_subvol 4 4791 NULL
108875 +ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
108876 +repair_io_failure_4815 repair_io_failure 4-3 4815 NULL
108877 +comedi_buf_write_free_4847 comedi_buf_write_free 2 4847 NULL
108878 +gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
108879 +key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
108880 +ocfs2_defrag_extent_4873 ocfs2_defrag_extent 2 4873 NULL
108881 +hid_register_field_4874 hid_register_field 2-3 4874 NULL
108882 +vga_arb_read_4886 vga_arb_read 3 4886 NULL
108883 +ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
108884 +ocfs2_should_refresh_lock_res_4958 ocfs2_should_refresh_lock_res 0 4958 NULL
108885 +compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
108886 +ath10k_read_chip_id_4969 ath10k_read_chip_id 3 4969 NULL
108887 +skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
108888 +ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval_4976 ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval 3 4976 NULL
108889 +compat_SyS_ipc_5000 compat_SyS_ipc 3 5000 NULL
108890 +do_mincore_5018 do_mincore 0-2-1 5018 NULL
108891 +btrfs_punch_hole_5041 btrfs_punch_hole 2 5041 NULL
108892 +cfg80211_rx_mgmt_5056 cfg80211_rx_mgmt 5 5056 NULL
108893 +ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 3-2 5066 NULL
108894 +snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
108895 +snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
108896 +kfifo_copy_from_user_5091 kfifo_copy_from_user 3-4-0 5091 NULL nohasharray
108897 +get_random_bytes_5091 get_random_bytes 2 5091 &kfifo_copy_from_user_5091 nohasharray
108898 +blk_rq_sectors_5091 blk_rq_sectors 0 5091 &get_random_bytes_5091
108899 +sound_write_5102 sound_write 3 5102 NULL
108900 +i40e_dbg_netdev_ops_write_5117 i40e_dbg_netdev_ops_write 3 5117 NULL
108901 +qib_7220_handle_hwerrors_5142 qib_7220_handle_hwerrors 3 5142 NULL
108902 +__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
108903 +iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
108904 +acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
108905 +ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
108906 +dwc2_hcd_urb_alloc_5217 dwc2_hcd_urb_alloc 2 5217 NULL
108907 +ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
108908 +usb_descriptor_fillbuf_5302 usb_descriptor_fillbuf 0 5302 NULL
108909 +r592_write_fifo_pio_5315 r592_write_fifo_pio 3 5315 NULL
108910 +sbc_get_write_same_sectors_5317 sbc_get_write_same_sectors 0 5317 NULL
108911 +pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
108912 +cq_free_res_5355 cq_free_res 5 5355 NULL
108913 +ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
108914 +cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
108915 +__split_bvec_across_targets_5454 __split_bvec_across_targets 3 5454 NULL
108916 +xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
108917 +xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
108918 +cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
108919 +tty_write_5494 tty_write 3 5494 NULL
108920 +tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
108921 +ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
108922 +__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
108923 +ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
108924 +get_entry_msg_len_5552 get_entry_msg_len 0 5552 NULL
108925 +le_readq_5557 le_readq 0 5557 NULL
108926 +inw_5558 inw 0 5558 NULL
108927 +bioset_create_5580 bioset_create 1 5580 NULL
108928 +oz_ep_alloc_5587 oz_ep_alloc 1 5587 NULL
108929 +SYSC_fsetxattr_5639 SYSC_fsetxattr 4 5639 NULL
108930 +ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
108931 +posix_clock_register_5662 posix_clock_register 2 5662 NULL
108932 +get_arg_5694 get_arg 3 5694 NULL
108933 +subbuf_read_actor_5708 subbuf_read_actor 3 5708 NULL
108934 +vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
108935 +rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
108936 +sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
108937 +__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
108938 +nvme_trans_bdev_char_page_5797 nvme_trans_bdev_char_page 3 5797 NULL
108939 +skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
108940 +nv50_disp_pioc_create__5812 nv50_disp_pioc_create_ 5 5812 NULL
108941 +ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
108942 +ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
108943 +setup_req_5848 setup_req 3-0 5848 NULL
108944 +ria_page_count_5849 ria_page_count 0 5849 NULL
108945 +rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
108946 +config_buf_5862 config_buf 0 5862 NULL
108947 +iwl_dbgfs_scan_ant_rxchain_write_5877 iwl_dbgfs_scan_ant_rxchain_write 3 5877 NULL
108948 +lprocfs_fid_width_seq_write_5889 lprocfs_fid_width_seq_write 3 5889 NULL
108949 +port_show_regs_5904 port_show_regs 3 5904 NULL
108950 +rbd_segment_length_5907 rbd_segment_length 0-3-2 5907 NULL
108951 +uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
108952 +lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
108953 +ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
108954 +edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
108955 +ll_statahead_one_5962 ll_statahead_one 3 5962 NULL
108956 +__apu_get_register_5967 __apu_get_register 0 5967 NULL
108957 +ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
108958 +SyS_semop_5980 SyS_semop 3 5980 NULL
108959 +alloc_msg_6072 alloc_msg 1 6072 NULL
108960 +sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
108961 +rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL
108962 +ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
108963 +dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
108964 +nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
108965 +ieee80211_if_fmt_beacon_timeout_6153 ieee80211_if_fmt_beacon_timeout 3 6153 NULL
108966 +ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
108967 +wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
108968 +SyS_setgroups_6182 SyS_setgroups 1 6182 NULL
108969 +mxt_show_instance_6207 mxt_show_instance 2-0 6207 NULL
108970 +v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
108971 +mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
108972 +f_hidg_read_6238 f_hidg_read 3 6238 NULL
108973 +fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
108974 +tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
108975 +snd_hda_override_conn_list_6282 snd_hda_override_conn_list 3-0 6282 NULL nohasharray
108976 +xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
108977 +posix_acl_fix_xattr_to_user_6283 posix_acl_fix_xattr_to_user 2 6283 NULL
108978 +serial_port_in_6291 serial_port_in 0 6291 NULL
108979 +qlcnic_sriov_alloc_bc_msg_6309 qlcnic_sriov_alloc_bc_msg 2 6309 NULL
108980 +hfa384x_inw_6329 hfa384x_inw 0 6329 NULL nohasharray
108981 +SyS_mincore_6329 SyS_mincore 2-1 6329 &hfa384x_inw_6329
108982 +fuse_get_req_for_background_6337 fuse_get_req_for_background 2 6337 NULL
108983 +ucs2_strnlen_6342 ucs2_strnlen 0 6342 NULL
108984 +regcache_sync_block_raw_6350 regcache_sync_block_raw 5-4 6350 NULL
108985 +mei_dbgfs_read_devstate_6352 mei_dbgfs_read_devstate 3 6352 NULL
108986 +_proc_do_string_6376 _proc_do_string 2 6376 NULL
108987 +osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
108988 +posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL
108989 +add_transaction_credits_6422 add_transaction_credits 2-3 6422 NULL
108990 +ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
108991 +__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
108992 +ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
108993 +qp_memcpy_from_queue_6479 qp_memcpy_from_queue 5-4 6479 NULL
108994 +cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
108995 +dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
108996 +mei_read_6507 mei_read 3 6507 NULL
108997 +rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
108998 +wdm_read_6549 wdm_read 3 6549 NULL
108999 +dm_stats_create_6551 dm_stats_create 4-2-3 6551 NULL
109000 +fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
109001 +SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL
109002 +ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
109003 +xfs_do_div_6649 xfs_do_div 0-2 6649 NULL
109004 +process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
109005 +btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL
109006 +ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
109007 +bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
109008 +mpeg_read_6708 mpeg_read 3 6708 NULL
109009 +ibmpex_query_sensor_count_6709 ibmpex_query_sensor_count 0 6709 NULL
109010 +video_proc_write_6724 video_proc_write 3 6724 NULL
109011 +posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
109012 +kobject_add_varg_6781 kobject_add_varg 0 6781 NULL
109013 +iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
109014 +ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
109015 +zone_spanned_pages_in_node_6787 zone_spanned_pages_in_node 0-3-4 6787 NULL
109016 +hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
109017 +tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
109018 +lbs_rdrf_write_6826 lbs_rdrf_write 3 6826 NULL
109019 +calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
109020 +mon_bin_read_6841 mon_bin_read 3 6841 NULL
109021 +snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
109022 +perf_output_sample_ustack_6868 perf_output_sample_ustack 2 6868 NULL
109023 +dio_complete_6879 dio_complete 0-2-3 6879 NULL
109024 +raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 NULL nohasharray
109025 +ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 &raw_seticmpfilter_6888
109026 +dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
109027 +proc_sessionid_read_6911 proc_sessionid_read 3 6911 NULL nohasharray
109028 +spi_show_regs_6911 spi_show_regs 3 6911 &proc_sessionid_read_6911 nohasharray
109029 +acm_alloc_minor_6911 acm_alloc_minor 0 6911 &spi_show_regs_6911
109030 +__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
109031 +do_msgrcv_6921 do_msgrcv 3 6921 NULL
109032 +cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
109033 +ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
109034 +qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
109035 +tg3_nvram_write_block_unbuffered_6955 tg3_nvram_write_block_unbuffered 3 6955 NULL
109036 +pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
109037 +rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL nohasharray
109038 +i40e_dbg_dump_write_6973 i40e_dbg_dump_write 3 6973 &rsa_extract_mpi_6973
109039 +request_key_async_6990 request_key_async 4 6990 NULL
109040 +tpl_write_6998 tpl_write 3 6998 NULL
109041 +r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
109042 +cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
109043 +tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
109044 +ld_usb_write_7022 ld_usb_write 3 7022 NULL
109045 +wimax_msg_7030 wimax_msg 4 7030 NULL
109046 +ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
109047 +snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
109048 +hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
109049 +event_enable_read_7074 event_enable_read 3 7074 NULL
109050 +beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
109051 +pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
109052 +osc_resend_count_seq_write_7120 osc_resend_count_seq_write 3 7120 NULL
109053 +qib_format_hwerrors_7133 qib_format_hwerrors 5 7133 NULL
109054 +kvm_mmu_notifier_test_young_7139 kvm_mmu_notifier_test_young 3 7139 NULL
109055 +__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
109056 +hdlc_loop_7255 hdlc_loop 0 7255 NULL
109057 +f_midi_start_ep_7270 f_midi_start_ep 0 7270 NULL
109058 +rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
109059 +get_string_7302 get_string 0 7302 NULL
109060 +mgmt_control_7349 mgmt_control 3 7349 NULL
109061 +at_est2timeout_7365 at_est2timeout 0-1 7365 NULL
109062 +ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
109063 +ath10k_read_fw_stats_7387 ath10k_read_fw_stats 3 7387 NULL
109064 +hweight_long_7388 hweight_long 1-0 7388 NULL
109065 +sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
109066 +_ore_add_stripe_unit_7399 _ore_add_stripe_unit 6-3 7399 NULL
109067 +readb_7401 readb 0 7401 NULL
109068 +drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
109069 +__copy_to_user_nocheck_7443 __copy_to_user_nocheck 0-3 7443 NULL
109070 +ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
109071 +SYSC_setgroups_7454 SYSC_setgroups 1 7454 NULL
109072 +rt2x00debug_read_queue_stats_7455 rt2x00debug_read_queue_stats 3 7455 NULL
109073 +l2tp_ip6_sendmsg_7461 l2tp_ip6_sendmsg 4 7461 NULL
109074 +garp_request_join_7471 garp_request_join 4 7471 NULL nohasharray
109075 +ReadHSCX_7471 ReadHSCX 0 7471 &garp_request_join_7471
109076 +snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
109077 +iwl_mvm_power_dbgfs_read_7502 iwl_mvm_power_dbgfs_read 0 7502 NULL
109078 +ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 NULL nohasharray
109079 +sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 &ahash_instance_headroom_7509
109080 +array_zalloc_7519 array_zalloc 1-2 7519 NULL
109081 +ath10k_read_htt_stats_mask_7557 ath10k_read_htt_stats_mask 3 7557 NULL
109082 +smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
109083 +cfs_cpt_num_estimate_7571 cfs_cpt_num_estimate 0 7571 NULL
109084 +ocfs2_lock_create_7612 ocfs2_lock_create 0 7612 NULL
109085 +groups_alloc_7614 groups_alloc 1 7614 NULL nohasharray
109086 +create_dir_7614 create_dir 0 7614 &groups_alloc_7614
109087 +_rtw_zmalloc_7636 _rtw_zmalloc 1 7636 NULL
109088 +fault_inject_write_7662 fault_inject_write 3 7662 NULL
109089 +acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 NULL
109090 +acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
109091 +dev_write_7708 dev_write 3 7708 NULL
109092 +pci_raw_set_power_state_7729 pci_raw_set_power_state 0 7729 NULL
109093 +vxge_device_register_7752 vxge_device_register 4 7752 NULL
109094 +iwl_dbgfs_bt_cmd_read_7770 iwl_dbgfs_bt_cmd_read 3 7770 NULL
109095 +alloc_candev_7776 alloc_candev 1-2 7776 NULL
109096 +dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
109097 +bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
109098 +diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL nohasharray
109099 +lustre_packed_msg_size_7792 lustre_packed_msg_size 0 7792 &diva_os_copy_from_user_7792
109100 +cfs_trace_dump_debug_buffer_usrstr_7861 cfs_trace_dump_debug_buffer_usrstr 2 7861 NULL
109101 +tipc_alloc_entry_7875 tipc_alloc_entry 2 7875 NULL
109102 +config_desc_7878 config_desc 0 7878 NULL
109103 +dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
109104 +xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
109105 +libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
109106 +f_hidg_write_7932 f_hidg_write 3 7932 NULL
109107 +integrity_digsig_verify_7956 integrity_digsig_verify 3-0 7956 NULL
109108 +smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
109109 +tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
109110 +vcs_read_8017 vcs_read 3 8017 NULL
109111 +vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
109112 +ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
109113 +dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
109114 +spi_write_then_read_8073 spi_write_then_read 5-3 8073 NULL
109115 +qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 NULL
109116 +venus_lookup_8121 venus_lookup 4 8121 NULL
109117 +ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
109118 +xfs_file_fallocate_8150 xfs_file_fallocate 3-4 8150 NULL
109119 +__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
109120 +ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
109121 +recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
109122 +__ocfs2_lock_refcount_tree_8207 __ocfs2_lock_refcount_tree 0 8207 NULL
109123 +rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
109124 +ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
109125 +play_iframe_8219 play_iframe 3 8219 NULL
109126 +kvm_mmu_page_set_gfn_8225 kvm_mmu_page_set_gfn 2 8225 NULL
109127 +sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
109128 +ceph_sync_write_8233 ceph_sync_write 4 8233 NULL
109129 +check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
109130 +t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
109131 +init_cdev_8274 init_cdev 1 8274 NULL
109132 +rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
109133 +qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
109134 +ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
109135 +tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
109136 +ieee80211_if_fmt_ht_opmode_8347 ieee80211_if_fmt_ht_opmode 3 8347 NULL
109137 +generic_write_sync_8358 generic_write_sync 0 8358 NULL
109138 +ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
109139 +ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3 8362 NULL
109140 +xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
109141 +zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
109142 +smk_write_change_rule_8411 smk_write_change_rule 3 8411 NULL nohasharray
109143 +uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 &smk_write_change_rule_8411
109144 +roccat_common2_sysfs_read_8431 roccat_common2_sysfs_read 6 8431 NULL
109145 +afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
109146 +fore200e_chunk_alloc_8501 fore200e_chunk_alloc 4-3 8501 NULL
109147 +batadv_tt_len_8502 batadv_tt_len 0-1 8502 NULL
109148 +dev_config_8506 dev_config 3 8506 NULL
109149 +ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
109150 +opticon_process_data_packet_8524 opticon_process_data_packet 3 8524 NULL
109151 +user_on_off_8552 user_on_off 2 8552 NULL
109152 +profile_remove_8556 profile_remove 3 8556 NULL
109153 +cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
109154 +isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
109155 +tower_write_8580 tower_write 3 8580 NULL
109156 +cfs_cpt_number_8618 cfs_cpt_number 0 8618 NULL
109157 +shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
109158 +it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
109159 +scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
109160 +fuse_send_write_pages_8636 fuse_send_write_pages 0-5 8636 NULL
109161 +generic_acl_set_8658 generic_acl_set 4 8658 NULL
109162 +mlx5_vzalloc_8663 mlx5_vzalloc 1 8663 NULL
109163 +dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
109164 +lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
109165 +rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
109166 +skb_frag_size_8695 skb_frag_size 0 8695 NULL
109167 +arcfb_write_8702 arcfb_write 3 8702 NULL
109168 +i_size_read_8703 i_size_read 0 8703 NULL nohasharray
109169 +init_header_8703 init_header 0 8703 &i_size_read_8703
109170 +HDLC_irq_8709 HDLC_irq 2 8709 NULL
109171 +ctrl_out_8712 ctrl_out 3-5 8712 NULL
109172 +tracing_max_lat_write_8728 tracing_max_lat_write 3 8728 NULL
109173 +jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL
109174 +__create_irqs_8733 __create_irqs 2 8733 NULL
109175 +tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
109176 +compound_order_8750 compound_order 0 8750 NULL
109177 +ocfs2_find_path_8754 ocfs2_find_path 0 8754 NULL
109178 +yurex_write_8761 yurex_write 3 8761 NULL
109179 +joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
109180 +kstrtoint_from_user_8778 kstrtoint_from_user 2 8778 NULL
109181 +paging32_prefetch_gpte_8783 paging32_prefetch_gpte 4 8783 NULL
109182 +ext4_try_to_write_inline_data_8785 ext4_try_to_write_inline_data 3-4 8785 NULL
109183 +__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL
109184 +cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
109185 +metronomefb_write_8823 metronomefb_write 3 8823 NULL
109186 +SyS_llistxattr_8824 SyS_llistxattr 3 8824 NULL
109187 +get_queue_depth_8833 get_queue_depth 0 8833 NULL
109188 +dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
109189 +usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
109190 +debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
109191 +wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
109192 +radeon_drm_ioctl_8875 radeon_drm_ioctl 2 8875 NULL
109193 +compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
109194 +ab3100_get_set_reg_8890 ab3100_get_set_reg 3 8890 NULL nohasharray
109195 +tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 &ab3100_get_set_reg_8890
109196 +sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
109197 +sysfs_merge_group_8917 sysfs_merge_group 0 8917 NULL
109198 +write_file_ani_8918 write_file_ani 3 8918 NULL
109199 +layout_commit_8926 layout_commit 3 8926 NULL
109200 +adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
109201 +driver_stats_read_8944 driver_stats_read 3 8944 NULL
109202 +read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
109203 +usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
109204 +qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
109205 +venus_mkdir_8967 venus_mkdir 4 8967 NULL
109206 +seq_open_net_8968 seq_open_net 4 8968 NULL nohasharray
109207 +vol_cdev_read_8968 vol_cdev_read 3 8968 &seq_open_net_8968
109208 +bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
109209 +jbd2_journal_blocks_per_page_9004 jbd2_journal_blocks_per_page 0 9004 NULL
109210 +il_dbgfs_clear_ucode_stats_write_9016 il_dbgfs_clear_ucode_stats_write 3 9016 NULL
109211 +snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
109212 +fd_ioctl_9028 fd_ioctl 3 9028 NULL
109213 +nla_put_9042 nla_put 3 9042 NULL
109214 +snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
109215 +snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
109216 +fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
109217 +create_queues_9088 create_queues 2-3 9088 NULL
109218 +ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
109219 +adxl34x_spi_read_block_9108 adxl34x_spi_read_block 3 9108 NULL
109220 +caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL nohasharray
109221 +gfn_to_rmap_9110 gfn_to_rmap 3-2 9110 &caif_stream_sendmsg_9110
109222 +udf_direct_IO_9111 udf_direct_IO 4 9111 NULL
109223 +pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
109224 +apei_resources_merge_9149 apei_resources_merge 0 9149 NULL
109225 +vb2_dma_sg_alloc_9157 vb2_dma_sg_alloc 2 9157 NULL
109226 +dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
109227 +isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
109228 +count_leading_zeros_9183 count_leading_zeros 0 9183 NULL
109229 +altera_swap_ir_9194 altera_swap_ir 2 9194 NULL
109230 +snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
109231 +virtqueue_add_9217 virtqueue_add 4-5 9217 NULL
109232 +tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
109233 +sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
109234 +hfsplus_bnode_read_u16_9262 hfsplus_bnode_read_u16 0 9262 NULL
109235 +hdpvr_read_9273 hdpvr_read 3 9273 NULL
109236 +flakey_status_9274 flakey_status 5 9274 NULL
109237 +iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
109238 +ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
109239 +ieee80211_if_fmt_txpower_9334 ieee80211_if_fmt_txpower 3 9334 NULL
109240 +nvme_trans_fmt_get_parm_header_9340 nvme_trans_fmt_get_parm_header 2 9340 NULL
109241 +ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
109242 +ll_direct_rw_pages_9361 ll_direct_rw_pages 0 9361 NULL
109243 +sta_beacon_loss_count_read_9370 sta_beacon_loss_count_read 3 9370 NULL
109244 +get_request_type_9393 get_request_type 0 9393 NULL nohasharray
109245 +mlx4_bitmap_init_9393 mlx4_bitmap_init 5-2 9393 &get_request_type_9393
109246 +virtqueue_add_outbuf_9395 virtqueue_add_outbuf 3 9395 NULL
109247 +read_9397 read 3 9397 NULL
109248 +hash_ipportip4_expire_9415 hash_ipportip4_expire 4 9415 NULL
109249 +btrfs_drop_extents_9423 btrfs_drop_extents 4 9423 NULL
109250 +bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
109251 +ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
109252 +ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
109253 +ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
109254 +agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL nohasharray
109255 +get_registers_9470 get_registers 4 9470 &agp_generic_alloc_user_9470
109256 +crypt_status_9492 crypt_status 5 9492 NULL
109257 +lbs_threshold_write_9502 lbs_threshold_write 5 9502 NULL
109258 +lp_write_9511 lp_write 3 9511 NULL
109259 +mext_calc_swap_extents_9517 mext_calc_swap_extents 4 9517 NULL
109260 +scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
109261 +ll_max_read_ahead_whole_mb_seq_write_9528 ll_max_read_ahead_whole_mb_seq_write 3 9528 NULL
109262 +read_file_dma_9530 read_file_dma 3 9530 NULL
109263 +iwl_dbgfs_bf_params_read_9542 iwl_dbgfs_bf_params_read 3 9542 NULL
109264 +il_dbgfs_missed_beacon_write_9546 il_dbgfs_missed_beacon_write 3 9546 NULL
109265 +compat_SyS_pwritev64_9548 compat_SyS_pwritev64 3 9548 NULL
109266 +fw_node_create_9559 fw_node_create 2 9559 NULL
109267 +kobj_map_9566 kobj_map 2-3 9566 NULL
109268 +f2fs_read_data_pages_9574 f2fs_read_data_pages 4 9574 NULL
109269 +snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
109270 +lov_ost_pool_add_9626 lov_ost_pool_add 3 9626 NULL
109271 +saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
109272 +ceph_copy_user_to_page_vector_9635 ceph_copy_user_to_page_vector 4-3 9635 NULL
109273 +acpi_ex_insert_into_field_9638 acpi_ex_insert_into_field 3 9638 NULL
109274 +compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
109275 +ll_checksum_seq_write_9648 ll_checksum_seq_write 3 9648 NULL
109276 +ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
109277 +queue_received_packet_9657 queue_received_packet 5 9657 NULL
109278 +snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
109279 +dns_query_9676 dns_query 3 9676 NULL
109280 +qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
109281 +__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
109282 +vx_transfer_end_9701 vx_transfer_end 0 9701 NULL
109283 +fuse_iter_npages_9705 fuse_iter_npages 0 9705 NULL nohasharray
109284 +ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 &fuse_iter_npages_9705
109285 +cfg80211_tx_mlme_mgmt_9715 cfg80211_tx_mlme_mgmt 3 9715 NULL
109286 +btrfs_stack_file_extent_num_bytes_9720 btrfs_stack_file_extent_num_bytes 0 9720 NULL
109287 +SYSC_ppoll_9721 SYSC_ppoll 2 9721 NULL
109288 +nla_get_u8_9736 nla_get_u8 0 9736 NULL
109289 +ieee80211_if_fmt_num_mcast_sta_9738 ieee80211_if_fmt_num_mcast_sta 3 9738 NULL
109290 +ddb_input_read_9743 ddb_input_read 3-0 9743 NULL
109291 +sta_last_ack_signal_read_9751 sta_last_ack_signal_read 3 9751 NULL
109292 +btrfs_super_root_9763 btrfs_super_root 0 9763 NULL
109293 +__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
109294 +snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
109295 +kvm_age_hva_9795 kvm_age_hva 2 9795 NULL
109296 +parse_uac2_sample_rate_range_9801 parse_uac2_sample_rate_range 0 9801 NULL
109297 +tpm_data_in_9802 tpm_data_in 0 9802 NULL
109298 +udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 NULL nohasharray
109299 +ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 &udpv6_recvmsg_9813
109300 +pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
109301 +btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
109302 +f1x_translate_sysaddr_to_cs_9868 f1x_translate_sysaddr_to_cs 2 9868 NULL
109303 +wil_read_file_ioblob_9878 wil_read_file_ioblob 3 9878 NULL
109304 +snd_midi_event_new_9893 snd_midi_event_new 1 9893 NULL nohasharray
109305 +bm_register_write_9893 bm_register_write 3 9893 &snd_midi_event_new_9893
109306 +snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 5-3 9895 NULL
109307 +nonpaging_page_fault_9908 nonpaging_page_fault 2 9908 NULL
109308 +root_nfs_parse_options_9937 root_nfs_parse_options 3 9937 NULL
109309 +pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
109310 +read_file_misc_9948 read_file_misc 3 9948 NULL
109311 +csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
109312 +SyS_gethostname_9964 SyS_gethostname 2 9964 NULL
109313 +get_free_serial_index_9969 get_free_serial_index 0 9969 NULL
109314 +btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
109315 +gameport_read_9983 gameport_read 0 9983 NULL
109316 +SYSC_move_pages_9986 SYSC_move_pages 2 9986 NULL
109317 +aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
109318 +ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
109319 +get_elem_size_10110 get_elem_size 0-2 10110 NULL nohasharray
109320 +dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 &get_elem_size_10110
109321 +gfs2_meta_read_10112 gfs2_meta_read 0 10112 NULL
109322 +SyS_migrate_pages_10134 SyS_migrate_pages 2 10134 NULL
109323 +aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
109324 +rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
109325 +hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL
109326 +asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
109327 +kstrtol_from_user_10168 kstrtol_from_user 2 10168 NULL
109328 +proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
109329 +jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
109330 +hdlc_rpr_irq_10240 hdlc_rpr_irq 2 10240 NULL
109331 +cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
109332 +__qlcnic_pci_sriov_enable_10281 __qlcnic_pci_sriov_enable 2 10281 NULL
109333 +snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
109334 +read_emulate_10310 read_emulate 2-4 10310 NULL
109335 +read_file_spectral_count_10320 read_file_spectral_count 3 10320 NULL
109336 +compat_SyS_writev_10327 compat_SyS_writev 3 10327 NULL
109337 +tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
109338 +ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
109339 +whci_add_cap_10350 whci_add_cap 0 10350 NULL
109340 +dbAllocAny_10354 dbAllocAny 0 10354 NULL
109341 +ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
109342 +ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
109343 +sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
109344 +ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
109345 +do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
109346 +fwtty_rx_10434 fwtty_rx 3 10434 NULL
109347 +event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
109348 +hash_ipportip6_expire_10478 hash_ipportip6_expire 4 10478 NULL
109349 +nouveau_pwr_create__10483 nouveau_pwr_create_ 4 10483 NULL
109350 +ext4_itable_unused_count_10501 ext4_itable_unused_count 0 10501 NULL
109351 +qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
109352 +sel_write_disable_10511 sel_write_disable 3 10511 NULL
109353 +osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
109354 +rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
109355 +qlcnic_pci_sriov_enable_10519 qlcnic_pci_sriov_enable 2 10519 NULL
109356 +kstrtouint_from_user_10536 kstrtouint_from_user 2 10536 NULL nohasharray
109357 +snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 &kstrtouint_from_user_10536
109358 +ext4_write_begin_10576 ext4_write_begin 3-4 10576 NULL
109359 +scrub_remap_extent_10588 scrub_remap_extent 2 10588 NULL
109360 +otp_read_10594 otp_read 2-4-5 10594 NULL
109361 +supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
109362 +ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
109363 +nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL
109364 +efx_max_tx_len_10662 efx_max_tx_len 0-2 10662 NULL
109365 +parport_write_10669 parport_write 0 10669 NULL
109366 +edge_write_10692 edge_write 4 10692 NULL
109367 +selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 NULL nohasharray
109368 +inl_10708 inl 0 10708 &selinux_inode_setxattr_10708
109369 +shash_async_setkey_10720 shash_async_setkey 3 10720 NULL nohasharray
109370 +pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 &shash_async_setkey_10720
109371 +spi_sync_10731 spi_sync 0 10731 NULL
109372 +apu_get_register_10737 apu_get_register 0 10737 NULL nohasharray
109373 +sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 &apu_get_register_10737
109374 +SyS_io_getevents_10756 SyS_io_getevents 3 10756 NULL
109375 +vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
109376 +kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
109377 +__qp_memcpy_to_queue_10779 __qp_memcpy_to_queue 2-4 10779 NULL
109378 +diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL
109379 +lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
109380 +ida_get_new_above_10853 ida_get_new_above 0 10853 NULL
109381 +fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
109382 +snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
109383 +wiidebug_drm_write_10879 wiidebug_drm_write 3 10879 NULL
109384 +get_scq_10897 get_scq 2 10897 NULL
109385 +cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
109386 +tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
109387 +lprocfs_wr_atomic_10912 lprocfs_wr_atomic 3 10912 NULL
109388 +__copy_from_user_10918 __copy_from_user 0-3 10918 NULL
109389 +kobject_add_10919 kobject_add 0 10919 NULL
109390 +ar9003_dump_modal_eeprom_10959 ar9003_dump_modal_eeprom 3-2-0 10959 NULL
109391 +ci_port_test_write_10962 ci_port_test_write 3 10962 NULL
109392 +bm_entry_read_10976 bm_entry_read 3 10976 NULL
109393 +sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
109394 +xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
109395 +rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
109396 +SetLineNumber_11023 SetLineNumber 0 11023 NULL
109397 +tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
109398 +insert_inline_extent_backref_11063 insert_inline_extent_backref 8 11063 NULL
109399 +tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
109400 +count_argc_11083 count_argc 0 11083 NULL
109401 +ocfs2_blocks_per_xattr_bucket_11099 ocfs2_blocks_per_xattr_bucket 0 11099 NULL
109402 +kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
109403 +tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
109404 +page_offset_11120 page_offset 0 11120 NULL
109405 +cea_db_payload_len_11124 cea_db_payload_len 0 11124 NULL nohasharray
109406 +tracing_buffers_read_11124 tracing_buffers_read 3 11124 &cea_db_payload_len_11124
109407 +alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
109408 +snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 NULL
109409 +il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
109410 +comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL
109411 +hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
109412 +ath6kl_power_params_write_11274 ath6kl_power_params_write 3 11274 NULL
109413 +__proc_daemon_file_11305 __proc_daemon_file 5 11305 NULL
109414 +ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
109415 +bcache_dev_sectors_dirty_add_11315 bcache_dev_sectors_dirty_add 3-4 11315 NULL
109416 +sk_filter_size_11316 sk_filter_size 0 11316 NULL nohasharray
109417 +tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 &sk_filter_size_11316
109418 +construct_key_11329 construct_key 3 11329 NULL nohasharray
109419 +__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
109420 +next_segment_11330 next_segment 0-2-1 11330 NULL
109421 +persistent_ram_buffer_map_11332 persistent_ram_buffer_map 2-1 11332 NULL
109422 +ext4_get_inline_size_11349 ext4_get_inline_size 0 11349 NULL
109423 +sel_write_create_11353 sel_write_create 3 11353 NULL nohasharray
109424 +nl80211_send_mgmt_11353 nl80211_send_mgmt 7 11353 &sel_write_create_11353
109425 +qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
109426 +nft_value_dump_11381 nft_value_dump 3 11381 NULL
109427 +isku_sysfs_read_keys_capslock_11392 isku_sysfs_read_keys_capslock 6 11392 NULL
109428 +dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
109429 +lprocfs_wr_evict_client_11402 lprocfs_wr_evict_client 3 11402 NULL
109430 +___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
109431 +str_to_user_11411 str_to_user 2 11411 NULL
109432 +mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
109433 +ath6kl_wmi_test_rx_11414 ath6kl_wmi_test_rx 3 11414 NULL
109434 +adis16480_show_firmware_revision_11417 adis16480_show_firmware_revision 3 11417 NULL nohasharray
109435 +import_sec_validate_get_11417 import_sec_validate_get 0 11417 &adis16480_show_firmware_revision_11417
109436 +trace_options_read_11419 trace_options_read 3 11419 NULL
109437 +i40e_dbg_command_write_11421 i40e_dbg_command_write 3 11421 NULL
109438 +xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
109439 +bttv_read_11432 bttv_read 3 11432 NULL
109440 +create_zero_mask_11453 create_zero_mask 0-1 11453 NULL
109441 +do_blockdev_direct_IO_11455 do_blockdev_direct_IO 0-6 11455 NULL
109442 +pci_set_power_state_11479 pci_set_power_state 0 11479 NULL nohasharray
109443 +sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 &pci_set_power_state_11479
109444 +xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
109445 +sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
109446 +kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
109447 +ll_direct_IO_26_seg_11518 ll_direct_IO_26_seg 0 11518 NULL
109448 +twl_direction_in_11527 twl_direction_in 2 11527 NULL
109449 +skb_cow_data_11565 skb_cow_data 0 11565 NULL
109450 +lpfc_idiag_ctlacc_write_11576 lpfc_idiag_ctlacc_write 3 11576 NULL
109451 +oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
109452 +batadv_iv_ogm_orig_add_if_11586 batadv_iv_ogm_orig_add_if 2 11586 NULL
109453 +snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
109454 +fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
109455 +batadv_iv_ogm_orig_del_if_11604 batadv_iv_ogm_orig_del_if 2 11604 NULL
109456 +SYSC_mq_timedsend_11607 SYSC_mq_timedsend 3 11607 NULL
109457 +sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
109458 +nla_total_size_11658 nla_total_size 1-0 11658 NULL
109459 +slab_ksize_11664 slab_ksize 0 11664 NULL
109460 +ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
109461 +compat_SyS_msgsnd_11675 compat_SyS_msgsnd 3 11675 NULL
109462 +btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
109463 +sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
109464 +split_11691 split 2 11691 NULL
109465 +snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
109466 +blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
109467 +dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
109468 +iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
109469 +ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
109470 +btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
109471 +pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
109472 +umc_device_register_11824 umc_device_register 0 11824 NULL
109473 +zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
109474 +sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
109475 +rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
109476 +unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
109477 +ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
109478 +ieee80211_rx_bss_info_11887 ieee80211_rx_bss_info 3 11887 NULL
109479 +mdc_rename_11899 mdc_rename 4-6 11899 NULL
109480 +xstateregs_get_11906 xstateregs_get 4 11906 NULL
109481 +ti_write_11916 ti_write 4 11916 NULL
109482 +fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
109483 +bitmap_remap_11929 bitmap_remap 5 11929 NULL
109484 +atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
109485 +r1_sync_page_io_11963 r1_sync_page_io 3 11963 NULL
109486 +f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
109487 +read_and_add_raw_conns_11987 read_and_add_raw_conns 0 11987 NULL
109488 +i40e_pci_sriov_configure_12011 i40e_pci_sriov_configure 2 12011 NULL
109489 +ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
109490 +ieee80211_if_read_user_power_level_12050 ieee80211_if_read_user_power_level 3 12050 NULL
109491 +il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
109492 +ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
109493 +batadv_tt_global_size_mod_12085 batadv_tt_global_size_mod 3 12085 NULL
109494 +rtw_malloc2d_12102 rtw_malloc2d 1-2-3 12102 NULL
109495 +alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
109496 +set_powered_12129 set_powered 4 12129 NULL
109497 +ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
109498 +xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
109499 +rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
109500 +rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
109501 +btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
109502 +vmbus_open_12154 vmbus_open 2-3 12154 NULL
109503 +fnic_reset_stats_write_12177 fnic_reset_stats_write 3 12177 NULL
109504 +LNetEQAlloc_12178 LNetEQAlloc 1 12178 NULL
109505 +ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
109506 +compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
109507 +ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
109508 +snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
109509 +fuse_get_req_12221 fuse_get_req 2 12221 NULL nohasharray
109510 +aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 &fuse_get_req_12221
109511 +__alloc_bootmem_low_nopanic_12235 __alloc_bootmem_low_nopanic 1 12235 NULL
109512 +ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
109513 +shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
109514 +add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
109515 +note_last_dentry_12285 note_last_dentry 3 12285 NULL
109516 +roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL nohasharray
109517 +il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 &roundup_to_multiple_of_64_12288
109518 +bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
109519 +pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
109520 +mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
109521 +__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
109522 +xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
109523 +btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL nohasharray
109524 +populate_dir_12391 populate_dir 0 12391 &btrfs_file_extent_ram_bytes_12391 nohasharray
109525 +write_file_dump_12391 write_file_dump 3 12391 &populate_dir_12391
109526 +hbucket_elem_add_12416 hbucket_elem_add 3 12416 NULL
109527 +ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
109528 +ptlrpc_set_wait_12426 ptlrpc_set_wait 0 12426 NULL
109529 +cfs_array_alloc_12441 cfs_array_alloc 2 12441 NULL
109530 +skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
109531 +x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
109532 +fnic_trace_ctrl_read_12497 fnic_trace_ctrl_read 3 12497 NULL
109533 +qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
109534 +xfs_get_extsz_hint_12531 xfs_get_extsz_hint 0 12531 NULL
109535 +hvc_alloc_12579 hvc_alloc 4 12579 NULL
109536 +pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
109537 +tlbflush_write_file_12598 tlbflush_write_file 3 12598 NULL
109538 +vhci_put_user_12604 vhci_put_user 4 12604 NULL
109539 +sdhci_pltfm_init_12627 sdhci_pltfm_init 3 12627 NULL
109540 +pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
109541 +pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
109542 +dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
109543 +nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
109544 +rtw_android_get_link_speed_12655 rtw_android_get_link_speed 0 12655 NULL
109545 +ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
109546 +lustre_pack_request_v2_12665 lustre_pack_request_v2 0 12665 NULL
109547 +sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
109548 +sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
109549 +ext4_writepage_trans_blocks_12674 ext4_writepage_trans_blocks 0 12674 NULL
109550 +iwl_dbgfs_calib_disabled_write_12707 iwl_dbgfs_calib_disabled_write 3 12707 NULL
109551 +ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
109552 +ivtv_write_12721 ivtv_write 3 12721 NULL
109553 +key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
109554 +__mei_cl_async_send_12737 __mei_cl_async_send 3 12737 NULL
109555 +ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
109556 +listxattr_12769 listxattr 3 12769 NULL
109557 +sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
109558 +scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
109559 +xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
109560 +readq_12825 readq 0 12825 NULL
109561 +SyS_add_key_12834 SyS_add_key 4 12834 NULL
109562 +TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
109563 +spidev_sync_12842 spidev_sync 0 12842 NULL
109564 +spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
109565 +ath9k_dump_4k_modal_eeprom_12883 ath9k_dump_4k_modal_eeprom 3-2 12883 NULL
109566 +get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
109567 +get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
109568 +rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
109569 +do_inode_permission_12946 do_inode_permission 0 12946 NULL
109570 +bm_status_write_12964 bm_status_write 3 12964 NULL
109571 +raid56_parity_recover_12987 raid56_parity_recover 5 12987 NULL
109572 +TransmitTcb_12989 TransmitTcb 4 12989 NULL
109573 +sk_peek_offset_12991 sk_peek_offset 0 12991 NULL
109574 +subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
109575 +generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
109576 +ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
109577 +__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL nohasharray
109578 +ptlrpc_lprocfs_threads_min_seq_write_13060 ptlrpc_lprocfs_threads_min_seq_write 3 13060 &__dn_setsockopt_13060
109579 +biovec_create_pool_13079 biovec_create_pool 2 13079 NULL
109580 +xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
109581 +ttm_dma_pool_alloc_new_pages_13105 ttm_dma_pool_alloc_new_pages 3 13105 NULL
109582 +SyS_msgrcv_13109 SyS_msgrcv 3 13109 NULL
109583 +snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
109584 +bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
109585 +blk_update_request_13146 blk_update_request 3 13146 NULL
109586 +ocfs2_quota_trans_credits_13150 ocfs2_quota_trans_credits 0 13150 NULL
109587 +caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
109588 +pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
109589 +ucs2_strlen_13178 ucs2_strlen 0 13178 NULL
109590 +dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
109591 +create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
109592 +comedi_read_13199 comedi_read 3 13199 NULL
109593 +hash_ipport4_expire_13201 hash_ipport4_expire 4 13201 NULL
109594 +mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
109595 +svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
109596 +fnic_trace_ctrl_write_13229 fnic_trace_ctrl_write 3 13229 NULL
109597 +asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
109598 +init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
109599 +bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
109600 +simple_attr_write_13260 simple_attr_write 3 13260 NULL
109601 +pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
109602 +il4965_stats_flag_13281 il4965_stats_flag 3-0 13281 NULL
109603 +lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
109604 +sd_major_13294 sd_major 0-1 13294 NULL
109605 +module_param_sysfs_setup_13296 module_param_sysfs_setup 0 13296 NULL
109606 +__clone_and_map_data_bio_13334 __clone_and_map_data_bio 4-8 13334 NULL
109607 +kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
109608 +hscx_empty_fifo_13360 hscx_empty_fifo 2 13360 NULL
109609 +iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
109610 +wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
109611 +ext4_meta_trans_blocks_13380 ext4_meta_trans_blocks 0-3-2 13380 NULL
109612 +lov_mds_md_size_13388 lov_mds_md_size 0-1 13388 NULL nohasharray
109613 +dis_bypass_write_13388 dis_bypass_write 3 13388 &lov_mds_md_size_13388
109614 +netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
109615 +sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL
109616 +ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 8-9-7 13443 NULL
109617 +sb_init_dio_done_wq_13482 sb_init_dio_done_wq 0 13482 NULL
109618 +data_read_13494 data_read 3 13494 NULL
109619 +ioat_chansts_32_13506 ioat_chansts_32 0 13506 NULL
109620 +ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 0-2 13512 NULL
109621 +core_status_13515 core_status 4 13515 NULL
109622 +smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
109623 +bm_init_13529 bm_init 2 13529 NULL
109624 +llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
109625 +ieee80211_if_read_ap_power_level_13558 ieee80211_if_read_ap_power_level 3 13558 NULL
109626 +hash_net4_expire_13559 hash_net4_expire 4 13559 NULL
109627 +read_file_antenna_13574 read_file_antenna 3 13574 NULL
109628 +cache_write_13589 cache_write 3 13589 NULL
109629 +Rd_Indx_13602 Rd_Indx 3-2 13602 NULL
109630 +wm8994_bulk_write_13615 wm8994_bulk_write 2-3 13615 NULL
109631 +pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
109632 +packet_snd_13634 packet_snd 3 13634 NULL
109633 +blk_msg_write_13655 blk_msg_write 3 13655 NULL
109634 +cache_downcall_13666 cache_downcall 3 13666 NULL
109635 +ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0 13682 NULL
109636 +nv94_aux_13689 nv94_aux 2-5 13689 NULL
109637 +usb_get_string_13693 usb_get_string 0 13693 NULL
109638 +fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
109639 +audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
109640 +ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
109641 +fb_sys_read_13778 fb_sys_read 3 13778 NULL
109642 +ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
109643 +random_read_13815 random_read 3 13815 NULL
109644 +mutex_lock_interruptible_nested_13817 mutex_lock_interruptible_nested 0 13817 NULL
109645 +hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
109646 +___mei_cl_send_13821 ___mei_cl_send 3 13821 NULL
109647 +enc_pools_insert_13849 enc_pools_insert 3 13849 NULL
109648 +evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
109649 +compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL
109650 +qp_memcpy_to_queue_13886 qp_memcpy_to_queue 5-2 13886 NULL
109651 +snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
109652 +cfg80211_inform_bss_width_13933 cfg80211_inform_bss_width 9 13933 NULL
109653 +ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
109654 +ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL nohasharray
109655 +ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 &ieee80211_if_read_dot11MeshForwarding_13940
109656 +iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
109657 +ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
109658 +lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
109659 +osc_grant_shrink_interval_seq_write_13952 osc_grant_shrink_interval_seq_write 3 13952 NULL
109660 +ocfs2_refresh_slot_info_13957 ocfs2_refresh_slot_info 0 13957 NULL
109661 +snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
109662 +qcam_read_13977 qcam_read 3 13977 NULL
109663 +dsp_read_13980 dsp_read 2 13980 NULL
109664 +dvb_demux_read_13981 dvb_demux_read 3 13981 NULL
109665 +create_files_14003 create_files 0 14003 NULL
109666 +sddr09_write_data_14014 sddr09_write_data 3 14014 NULL
109667 +btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
109668 +dmi_format_ids_14018 dmi_format_ids 2 14018 NULL
109669 +iscsi_create_flashnode_conn_14022 iscsi_create_flashnode_conn 4 14022 NULL
109670 +dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
109671 +read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
109672 +ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
109673 +ovs_nla_alloc_flow_actions_14056 ovs_nla_alloc_flow_actions 1 14056 NULL
109674 +sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
109675 +lov_stripeoffset_seq_write_14078 lov_stripeoffset_seq_write 3 14078 NULL
109676 +do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
109677 +compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
109678 +ext4_journal_blocks_per_page_14127 ext4_journal_blocks_per_page 0 14127 NULL
109679 +isku_sysfs_read_light_14140 isku_sysfs_read_light 6 14140 NULL
109680 +em_canid_change_14150 em_canid_change 3 14150 NULL
109681 +gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
109682 +print_input_mask_14168 print_input_mask 3-0 14168 NULL
109683 +ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
109684 +datafab_read_data_14186 datafab_read_data 4 14186 NULL
109685 +hfsplus_brec_find_14200 hfsplus_brec_find 0 14200 NULL
109686 +alloc_async_14208 alloc_async 1 14208 NULL
109687 +ath6kl_regread_write_14220 ath6kl_regread_write 3 14220 NULL
109688 +ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
109689 +dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4 14244 NULL
109690 +ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
109691 +rr_status_14293 rr_status 5 14293 NULL
109692 +read_default_ldt_14302 read_default_ldt 2 14302 NULL
109693 +oo_objects_14319 oo_objects 0 14319 NULL
109694 +ll_get_user_pages_14328 ll_get_user_pages 3-2-0 14328 NULL
109695 +p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
109696 +alloc_tx_struct_14349 alloc_tx_struct 1 14349 NULL
109697 +hash_ipportnet4_expire_14354 hash_ipportnet4_expire 4 14354 NULL
109698 +snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
109699 +ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
109700 +smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
109701 +mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
109702 +get_kcore_size_14425 get_kcore_size 0 14425 NULL
109703 +block_size_14443 block_size 0 14443 NULL
109704 +lmv_user_md_size_14456 lmv_user_md_size 0-1 14456 NULL
109705 +snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
109706 +ath10k_write_htt_stats_mask_14458 ath10k_write_htt_stats_mask 3 14458 NULL
109707 +lustre_msg_size_v2_14470 lustre_msg_size_v2 0 14470 NULL
109708 +dma_transfer_size_14473 dma_transfer_size 0 14473 NULL
109709 +udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
109710 +ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
109711 +ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
109712 +ep0_write_14536 ep0_write 3 14536 NULL nohasharray
109713 +dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 &ep0_write_14536
109714 +picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
109715 +qp_host_alloc_queue_14566 qp_host_alloc_queue 1 14566 NULL
109716 +SyS_setdomainname_14569 SyS_setdomainname 2 14569 NULL
109717 +idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
109718 +ceph_osdc_alloc_request_14597 ceph_osdc_alloc_request 3 14597 NULL
109719 +dbJoin_14644 dbJoin 0 14644 NULL
109720 +profile_replace_14652 profile_replace 3 14652 NULL
109721 +min_bytes_needed_14675 min_bytes_needed 0 14675 NULL
109722 +nvme_trans_log_info_exceptions_14677 nvme_trans_log_info_exceptions 3 14677 NULL
109723 +pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
109724 +ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
109725 +SyS_fsetxattr_14702 SyS_fsetxattr 4 14702 NULL
109726 +persistent_ram_ecc_string_14704 persistent_ram_ecc_string 0 14704 NULL
109727 +u_audio_playback_14709 u_audio_playback 3 14709 NULL
109728 +rtw_cbuf_alloc_14710 rtw_cbuf_alloc 1 14710 NULL
109729 +cgroup_path_14713 cgroup_path 3 14713 NULL
109730 +vfd_write_14717 vfd_write 3 14717 NULL
109731 +__blk_end_request_14729 __blk_end_request 3 14729 NULL
109732 +raid1_resize_14740 raid1_resize 2 14740 NULL
109733 +i915_error_state_buf_init_14742 i915_error_state_buf_init 2 14742 NULL
109734 +btrfs_inode_extref_name_len_14752 btrfs_inode_extref_name_len 0 14752 NULL
109735 +rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
109736 +regmap_range_read_file_14775 regmap_range_read_file 3 14775 NULL
109737 +sta_dev_read_14782 sta_dev_read 3 14782 NULL
109738 +keys_proc_write_14792 keys_proc_write 3 14792 NULL
109739 +ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
109740 +__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
109741 +hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
109742 +snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
109743 +do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
109744 +mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
109745 +lcd_write_14857 lcd_write 3 14857 NULL
109746 +get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
109747 +gmux_index_read8_14890 gmux_index_read8 0 14890 NULL
109748 +acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
109749 +SYSC_readv_14901 SYSC_readv 3 14901 NULL
109750 +__arch_hweight64_14923 __arch_hweight64 0 14923 NULL nohasharray
109751 +qp_memcpy_to_queue_iov_14923 qp_memcpy_to_queue_iov 5-2 14923 &__arch_hweight64_14923
109752 +ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
109753 +queue_cnt_14951 queue_cnt 0 14951 NULL
109754 +unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
109755 +videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
109756 +mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
109757 +setkey_14987 setkey 3 14987 NULL nohasharray
109758 +gpio_twl4030_write_14987 gpio_twl4030_write 1 14987 &setkey_14987
109759 +xfs_dinode_size_14996 xfs_dinode_size 0 14996 NULL
109760 +blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
109761 +cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
109762 +ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
109763 +nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
109764 +ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5-0 15072 NULL
109765 +pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
109766 +ceph_calc_ceph_pg_15075 ceph_calc_ceph_pg 0 15075 NULL
109767 +smscore_load_firmware_family2_15086 smscore_load_firmware_family2 3 15086 NULL
109768 +compat_SyS_pwritev_15118 compat_SyS_pwritev 3 15118 NULL
109769 +hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
109770 +start_port_15124 start_port 0 15124 NULL
109771 +ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
109772 +iwl_dbgfs_sta_drain_write_15167 iwl_dbgfs_sta_drain_write 3 15167 NULL
109773 +SYSC_setdomainname_15180 SYSC_setdomainname 2 15180 NULL
109774 +iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
109775 +mtt_alloc_res_15211 mtt_alloc_res 5 15211 NULL
109776 +bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
109777 +iwl_dbgfs_sram_write_15239 iwl_dbgfs_sram_write 3 15239 NULL
109778 +il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
109779 +simple_strtol_15273 simple_strtol 0 15273 NULL
109780 +fw_realloc_buffer_15280 fw_realloc_buffer 2 15280 NULL
109781 +ocfs2_read_refcount_block_15305 ocfs2_read_refcount_block 0 15305 NULL
109782 +xlog_ticket_alloc_15335 xlog_ticket_alloc 2 15335 NULL
109783 +kovaplus_sysfs_read_15337 kovaplus_sysfs_read 6 15337 NULL
109784 +ioread16_15342 ioread16 0 15342 NULL
109785 +ept_prefetch_gpte_15348 ept_prefetch_gpte 4 15348 NULL
109786 +acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
109787 +ext4_direct_IO_15369 ext4_direct_IO 4 15369 NULL
109788 +graph_depth_read_15371 graph_depth_read 3 15371 NULL
109789 +compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
109790 +fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
109791 +alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
109792 +pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
109793 +get_modalias_15406 get_modalias 2 15406 NULL
109794 +blockdev_direct_IO_15408 blockdev_direct_IO 5 15408 NULL
109795 +__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4-0 15423 NULL
109796 +tcp_mtu_to_mss_15438 tcp_mtu_to_mss 2-0 15438 NULL
109797 +hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
109798 +memweight_15450 memweight 2 15450 NULL
109799 +zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
109800 +ifx_spi_write_15531 ifx_spi_write 3 15531 NULL
109801 +p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
109802 +xfrm_state_mtu_15548 xfrm_state_mtu 0-2 15548 NULL
109803 +persistent_status_15574 persistent_status 4 15574 NULL
109804 +bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
109805 +vme_user_write_15587 vme_user_write 3 15587 NULL
109806 +compat_fillonedir_15620 compat_fillonedir 3 15620 NULL
109807 +proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
109808 +tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
109809 +sk_memory_allocated_add_15642 sk_memory_allocated_add 2 15642 &tomoyo_scan_bprm_15642 nohasharray
109810 +pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &sk_memory_allocated_add_15642
109811 +joydev_handle_JSIOCSBTNMAP_15643 joydev_handle_JSIOCSBTNMAP 3 15643 NULL
109812 +fs_path_add_15648 fs_path_add 3 15648 NULL
109813 +xsd_read_15653 xsd_read 3 15653 NULL
109814 +unix_bind_15668 unix_bind 3 15668 NULL
109815 +dm_read_15674 dm_read 3 15674 NULL nohasharray
109816 +SyS_connect_15674 SyS_connect 3 15674 &dm_read_15674
109817 +tracing_snapshot_write_15719 tracing_snapshot_write 3 15719 NULL
109818 +HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
109819 +bio_map_15794 bio_map 3-0 15794 NULL
109820 +smk_read_direct_15803 smk_read_direct 3 15803 NULL
109821 +nameseq_list_15817 nameseq_list 3-0 15817 NULL nohasharray
109822 +gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
109823 +afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
109824 +brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
109825 +table_size_15851 table_size 0-1-2 15851 NULL
109826 +write_file_tx99_15856 write_file_tx99 3 15856 NULL
109827 +media_entity_init_15870 media_entity_init 2-4 15870 NULL
109828 +__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
109829 +nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
109830 +native_read_msr_15905 native_read_msr 0 15905 NULL
109831 +parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
109832 +power_read_15939 power_read 3 15939 NULL
109833 +lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
109834 +snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 5-2-4 15952 NULL
109835 +viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
109836 +dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
109837 +read_file_spectral_period_16057 read_file_spectral_period 3 16057 NULL
109838 +si5351_msynth_params_address_16062 si5351_msynth_params_address 0-1 16062 NULL
109839 +isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
109840 +isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 NULL nohasharray
109841 +dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 &isr_hw_pm_mode_changes_read_16110
109842 +snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
109843 +compat_sys_select_16131 compat_sys_select 1 16131 NULL
109844 +fsm_init_16134 fsm_init 2 16134 NULL
109845 +ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
109846 +optimal_reclaimed_pages_16172 optimal_reclaimed_pages 0 16172 NULL
109847 +mapping_level_16188 mapping_level 2-0 16188 NULL
109848 +i40e_allocate_virt_mem_d_16191 i40e_allocate_virt_mem_d 3 16191 NULL
109849 +ath10k_htt_rx_ring_size_16201 ath10k_htt_rx_ring_size 0 16201 NULL
109850 +cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
109851 +SyS_pselect6_16210 SyS_pselect6 1 16210 NULL
109852 +create_table_16213 create_table 2 16213 NULL
109853 +ath9k_hw_ar9287_dump_eeprom_16224 ath9k_hw_ar9287_dump_eeprom 5-4 16224 NULL
109854 +atomic_read_file_16227 atomic_read_file 3 16227 NULL
109855 +BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
109856 +lov_prep_brw_set_16246 lov_prep_brw_set 3 16246 NULL
109857 +btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 NULL nohasharray
109858 +i40e_dbg_dump_read_16247 i40e_dbg_dump_read 3 16247 &btrfs_dev_extent_chunk_offset_16247
109859 +il_dbgfs_disable_ht40_write_16249 il_dbgfs_disable_ht40_write 3 16249 NULL
109860 +SyS_fgetxattr_16254 SyS_fgetxattr 4 16254 NULL
109861 +reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
109862 +ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
109863 +nand_bch_init_16280 nand_bch_init 3-2 16280 NULL nohasharray
109864 +drbd_setsockopt_16280 drbd_setsockopt 5 16280 &nand_bch_init_16280
109865 +account_16283 account 0-4-2 16283 NULL nohasharray
109866 +mirror_status_16283 mirror_status 5 16283 &account_16283
109867 +jumpshot_read_data_16287 jumpshot_read_data 4 16287 NULL
109868 +mo_xattr_get_16288 mo_xattr_get 0 16288 NULL
109869 +stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
109870 +rbd_segment_offset_16293 rbd_segment_offset 0-2 16293 NULL
109871 +rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
109872 +kvm_handle_hva_range_16312 kvm_handle_hva_range 3-2 16312 NULL
109873 +sysfs_create_groups_16360 sysfs_create_groups 0 16360 NULL
109874 +total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
109875 +iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
109876 +ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
109877 +rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
109878 +__bio_add_page_16435 __bio_add_page 0-4 16435 NULL
109879 +cmdline_store_16442 cmdline_store 4 16442 NULL
109880 +btrfs_truncate_inode_items_16452 btrfs_truncate_inode_items 4 16452 NULL
109881 +netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
109882 +req_capsule_get_size_16467 req_capsule_get_size 0 16467 NULL
109883 +tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
109884 +KEY_OFFSET_16504 KEY_OFFSET 0 16504 NULL
109885 +snd_interval_max_16529 snd_interval_max 0 16529 NULL
109886 +raid10_resize_16537 raid10_resize 2 16537 NULL
109887 +lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
109888 +agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
109889 +lustre_msg_hdr_size_v2_16589 lustre_msg_hdr_size_v2 0 16589 NULL
109890 +gmux_index_read32_16604 gmux_index_read32 0 16604 NULL
109891 +rtw_set_wpa_ie_16633 rtw_set_wpa_ie 3 16633 NULL
109892 +btrfs_get_token_32_16651 btrfs_get_token_32 0 16651 NULL
109893 +__wa_populate_dto_urb_16699 __wa_populate_dto_urb 3-4 16699 NULL
109894 +__proc_lnet_buffers_16717 __proc_lnet_buffers 5 16717 NULL
109895 +__copy_to_user_swizzled_16748 __copy_to_user_swizzled 3-4 16748 NULL
109896 +arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
109897 +blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
109898 +i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
109899 +get_server_iovec_16804 get_server_iovec 2 16804 NULL
109900 +drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
109901 +scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
109902 +hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
109903 +alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
109904 +carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
109905 +st_write_16874 st_write 3 16874 NULL
109906 +__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
109907 +transport_init_session_tags_16878 transport_init_session_tags 1-2 16878 NULL
109908 +snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 NULL nohasharray
109909 +psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 &snd_gf1_mem_proc_dump_16926
109910 +_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
109911 +squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
109912 +keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
109913 +ocfs2_read_quota_phys_block_16990 ocfs2_read_quota_phys_block 0 16990 NULL
109914 +ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
109915 +copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
109916 +jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
109917 +__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
109918 +sddr55_read_data_17072 sddr55_read_data 4 17072 NULL
109919 +dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
109920 +simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
109921 +carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
109922 +entry_length_17093 entry_length 0 17093 NULL
109923 +ocfs2_get_refcount_cpos_end_17113 ocfs2_get_refcount_cpos_end 0 17113 NULL
109924 +write_mem_17114 write_mem 3 17114 NULL
109925 +pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
109926 +nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
109927 +jumpshot_write_data_17151 jumpshot_write_data 4 17151 NULL
109928 +sep_read_17161 sep_read 3 17161 NULL
109929 +befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
109930 +tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
109931 +UniStrnlen_17169 UniStrnlen 0 17169 NULL
109932 +access_remote_vm_17189 access_remote_vm 0 17189 NULL nohasharray
109933 +iwl_dbgfs_txfifo_flush_write_17189 iwl_dbgfs_txfifo_flush_write 3 17189 &access_remote_vm_17189 nohasharray
109934 +ocfs2_flock_handle_signal_17189 ocfs2_flock_handle_signal 0 17189 &iwl_dbgfs_txfifo_flush_write_17189
109935 +iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 NULL nohasharray
109936 +driver_state_read_17194 driver_state_read 3 17194 &iscsit_find_cmd_from_itt_or_dump_17194
109937 +sync_request_17208 sync_request 2 17208 NULL
109938 +dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
109939 +lprocfs_read_frac_helper_17261 lprocfs_read_frac_helper 0 17261 NULL
109940 +error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
109941 +alloc_ep_17269 alloc_ep 1 17269 NULL
109942 +pg_read_17276 pg_read 3 17276 NULL
109943 +raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
109944 +hmac_sha256_17278 hmac_sha256 2 17278 NULL
109945 +neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
109946 +minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
109947 +__ptlrpc_request_bufs_pack_17298 __ptlrpc_request_bufs_pack 0 17298 NULL
109948 +ieee80211_if_fmt_dot11MeshForwarding_17301 ieee80211_if_fmt_dot11MeshForwarding 3 17301 NULL
109949 +mb_cache_create_17307 mb_cache_create 2 17307 NULL
109950 +gnttab_map_frames_v2_17314 gnttab_map_frames_v2 2 17314 NULL
109951 +ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
109952 +ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
109953 +mdc_import_seq_write_17409 mdc_import_seq_write 3 17409 NULL
109954 +lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
109955 +compat_sys_ppoll_17430 compat_sys_ppoll 2 17430 NULL
109956 +sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
109957 +libcfs_ipif_enumerate_17445 libcfs_ipif_enumerate 0 17445 NULL
109958 +nla_get_u32_17455 nla_get_u32 0 17455 NULL
109959 +__ref_totlen_17461 __ref_totlen 0 17461 NULL
109960 +probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
109961 +TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
109962 +lbs_highrssi_write_17515 lbs_highrssi_write 3 17515 NULL
109963 +qp_free_res_17541 qp_free_res 5 17541 NULL
109964 +__copy_to_user_17551 __copy_to_user 3-0 17551 NULL
109965 +copy_from_user_17559 copy_from_user 0-3 17559 NULL
109966 +hash_netport4_expire_17573 hash_netport4_expire 4 17573 NULL
109967 +acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
109968 +neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
109969 +osst_execute_17607 osst_execute 7-6 17607 NULL
109970 +ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
109971 +dma_map_page_17628 dma_map_page 0 17628 NULL
109972 +ocfs2_rotate_subtree_left_17634 ocfs2_rotate_subtree_left 5 17634 NULL
109973 +twl4030_set_gpio_direction_17645 twl4030_set_gpio_direction 1 17645 NULL
109974 +SYSC_migrate_pages_17657 SYSC_migrate_pages 2 17657 NULL
109975 +packet_setsockopt_17662 packet_setsockopt 5 17662 NULL
109976 +pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
109977 +venus_rename_17707 venus_rename 4-5 17707 NULL nohasharray
109978 +__einj_error_trigger_17707 __einj_error_trigger 0 17707 &venus_rename_17707
109979 +exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
109980 +sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
109981 +dgap_do_fep_load_17765 dgap_do_fep_load 3 17765 NULL
109982 +brcmf_sdio_chip_verifynvram_17776 brcmf_sdio_chip_verifynvram 4 17776 NULL
109983 +shrink_slab_node_17794 shrink_slab_node 3 17794 NULL
109984 +gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
109985 +cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
109986 +dm_stats_message_17863 dm_stats_message 5 17863 NULL
109987 +sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
109988 +alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
109989 +virtio_cread32_17873 virtio_cread32 0 17873 NULL
109990 +ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
109991 +orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL nohasharray
109992 +i40e_align_l2obj_base_17878 i40e_align_l2obj_base 0-1 17878 &orinoco_set_key_17878
109993 +init_per_cpu_17880 init_per_cpu 1 17880 NULL
109994 +ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
109995 +ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
109996 +xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
109997 +scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
109998 +__mutex_lock_check_stamp_17947 __mutex_lock_check_stamp 0 17947 NULL
109999 +beacon_interval_write_17952 beacon_interval_write 3 17952 NULL
110000 +calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
110001 +ext4_ext_calc_credits_for_single_extent_17983 ext4_ext_calc_credits_for_single_extent 0-2 17983 NULL
110002 +smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
110003 +gnttab_max_grant_frames_17993 gnttab_max_grant_frames 0 17993 NULL
110004 +pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
110005 +alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
110006 +cpufreq_add_dev_symlink_18028 cpufreq_add_dev_symlink 0 18028 NULL
110007 +o2hb_highest_node_18034 o2hb_highest_node 0 18034 NULL
110008 +cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
110009 +ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
110010 +lua_sysfs_read_18062 lua_sysfs_read 6 18062 NULL
110011 +fpregs_get_18066 fpregs_get 4 18066 NULL
110012 +kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
110013 +SYSC_pselect6_18076 SYSC_pselect6 1 18076 NULL
110014 +SYSC_semtimedop_18091 SYSC_semtimedop 3 18091 NULL
110015 +mpi_alloc_18094 mpi_alloc 1 18094 NULL
110016 +hfs_direct_IO_18104 hfs_direct_IO 4 18104 NULL
110017 +dfs_file_read_18116 dfs_file_read 3 18116 NULL
110018 +svc_getnl_18120 svc_getnl 0 18120 NULL
110019 +paging32_gpte_to_gfn_lvl_18131 paging32_gpte_to_gfn_lvl 0-2-1 18131 NULL
110020 +selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
110021 +pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
110022 +orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
110023 +gsm_control_message_18209 gsm_control_message 4 18209 NULL
110024 +do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
110025 +gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
110026 +alloc_trace_uprobe_18247 alloc_trace_uprobe 3 18247 NULL
110027 +rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
110028 +qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
110029 +gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
110030 +alloc_ring_18278 alloc_ring 2-4 18278 NULL
110031 +bio_phys_segments_18281 bio_phys_segments 0 18281 NULL nohasharray
110032 +nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 &bio_phys_segments_18281
110033 +ext4_readpages_18283 ext4_readpages 4 18283 NULL
110034 +mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
110035 +um_idi_write_18293 um_idi_write 3 18293 NULL
110036 +nouveau_disp_create__18305 nouveau_disp_create_ 4-7 18305 NULL
110037 +vga_r_18310 vga_r 0 18310 NULL
110038 +class_add_profile_18315 class_add_profile 1-3-5 18315 NULL
110039 +csio_mem_read_18319 csio_mem_read 3 18319 NULL
110040 +alloc_and_copy_string_18321 alloc_and_copy_string 2 18321 NULL
110041 +ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
110042 +bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
110043 +lcd_proc_write_18351 lcd_proc_write 3 18351 NULL
110044 +pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
110045 +SyS_process_vm_readv_18366 SyS_process_vm_readv 3-5 18366 NULL
110046 +ep_io_18367 ep_io 0 18367 NULL
110047 +qib_user_sdma_num_pages_18371 qib_user_sdma_num_pages 0 18371 NULL
110048 +ci_role_write_18388 ci_role_write 3 18388 NULL
110049 +hdlc_empty_fifo_18397 hdlc_empty_fifo 2 18397 NULL
110050 +adis16136_show_serial_18402 adis16136_show_serial 3 18402 NULL
110051 +crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
110052 +iscsi_create_flashnode_sess_18433 iscsi_create_flashnode_sess 4 18433 NULL
110053 +snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
110054 +flash_dev_cache_miss_18454 flash_dev_cache_miss 4 18454 NULL
110055 +fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
110056 +regset_tls_set_18459 regset_tls_set 4 18459 NULL
110057 +pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL nohasharray
110058 +mite_bytes_in_transit_18479 mite_bytes_in_transit 0 18479 &pci_vpd_lrdt_size_18479
110059 +udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
110060 +btrfs_fiemap_18501 btrfs_fiemap 3 18501 NULL
110061 +__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
110062 +snd_vx_inb_18514 snd_vx_inb 0 18514 NULL
110063 +snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
110064 +nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 9 18530 NULL
110065 +seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
110066 +sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
110067 +smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
110068 +debug_output_18575 debug_output 3 18575 NULL
110069 +filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
110070 +slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
110071 +iowarrior_write_18604 iowarrior_write 3 18604 NULL
110072 +nvc0_ram_create__18624 nvc0_ram_create_ 4 18624 NULL nohasharray
110073 +audio_get_endpoint_req_18624 audio_get_endpoint_req 0 18624 &nvc0_ram_create__18624
110074 +from_buffer_18625 from_buffer 3 18625 NULL
110075 +snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
110076 +ieee80211_if_fmt_rssi_threshold_18664 ieee80211_if_fmt_rssi_threshold 3 18664 NULL
110077 +xfs_iext_insert_18667 xfs_iext_insert 3 18667 NULL
110078 +fnic_stats_debugfs_read_18688 fnic_stats_debugfs_read 3 18688 NULL
110079 +echo_client_prep_commit_18693 echo_client_prep_commit 8 18693 NULL
110080 +iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
110081 +ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
110082 +blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
110083 +snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
110084 +o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
110085 +__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
110086 +wep_packets_read_18751 wep_packets_read 3 18751 NULL
110087 +read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
110088 +ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
110089 +SyS_lsetxattr_18776 SyS_lsetxattr 4 18776 NULL
110090 +alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
110091 +prealloc_18800 prealloc 0 18800 NULL
110092 +dm_stats_print_18815 dm_stats_print 7 18815 NULL
110093 +sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
110094 +mtf_test_write_18844 mtf_test_write 3 18844 NULL
110095 +sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
110096 +ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
110097 +xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
110098 +ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
110099 +ieee80211_rx_mgmt_disassoc_18927 ieee80211_rx_mgmt_disassoc 3 18927 NULL
110100 +snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
110101 +__nla_reserve_18974 __nla_reserve 3 18974 NULL
110102 +__blockdev_direct_IO_18977 __blockdev_direct_IO 0-6 18977 NULL
110103 +layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
110104 +huge_page_size_19008 huge_page_size 0 19008 NULL
110105 +hash_netport6_expire_19013 hash_netport6_expire 4 19013 NULL
110106 +sysfs_create_dir_ns_19033 sysfs_create_dir_ns 0 19033 NULL
110107 +revalidate_19043 revalidate 2 19043 NULL
110108 +afs_vnode_store_data_19048 afs_vnode_store_data 2-3-4-5 19048 NULL
110109 +osc_pinger_recov_seq_write_19056 osc_pinger_recov_seq_write 3 19056 NULL
110110 +create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
110111 +ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
110112 +ceph_create_snap_context_19082 ceph_create_snap_context 1 19082 NULL
110113 +sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
110114 +cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
110115 +ATOMIC_SUB_RETURN_19115 ATOMIC_SUB_RETURN 2 19115 NULL
110116 +snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
110117 +alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
110118 +sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
110119 +smk_write_access2_19170 smk_write_access2 3 19170 NULL
110120 +iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
110121 +vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
110122 +__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3-0 19214 NULL
110123 +dev_counters_read_19216 dev_counters_read 3 19216 NULL
110124 +wbcir_tx_19219 wbcir_tx 3 19219 NULL
110125 +snd_mask_max_19224 snd_mask_max 0 19224 NULL
110126 +bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
110127 +ucma_query_19260 ucma_query 4 19260 NULL
110128 +il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
110129 +batadv_tt_save_orig_buffer_19288 batadv_tt_save_orig_buffer 4 19288 NULL nohasharray
110130 +cfg80211_rx_unprot_mlme_mgmt_19288 cfg80211_rx_unprot_mlme_mgmt 3 19288 &batadv_tt_save_orig_buffer_19288
110131 +qc_capture_19298 qc_capture 3 19298 NULL
110132 +ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 4-3 19303 NULL
110133 +event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
110134 +debug_read_19322 debug_read 3 19322 NULL
110135 +lbs_host_sleep_write_19332 lbs_host_sleep_write 3 19332 NULL nohasharray
110136 +cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 &lbs_host_sleep_write_19332
110137 +closure_sub_19359 closure_sub 2 19359 NULL
110138 +firmware_data_write_19360 firmware_data_write 6-5 19360 NULL
110139 +read_zero_19366 read_zero 3 19366 NULL
110140 +interpret_user_input_19393 interpret_user_input 2 19393 NULL
110141 +sync_fill_pt_info_19397 sync_fill_pt_info 0 19397 NULL
110142 +pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
110143 +dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
110144 +SyS_sched_getaffinity_19444 SyS_sched_getaffinity 2 19444 NULL
110145 +xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
110146 +gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
110147 +gp2ap020a00f_get_thresh_reg_19468 gp2ap020a00f_get_thresh_reg 0 19468 NULL
110148 +sky2_read16_19475 sky2_read16 0 19475 NULL
110149 +__read_status_pciv2_19492 __read_status_pciv2 0 19492 NULL
110150 +kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
110151 +ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
110152 +batadv_tvlv_container_register_19520 batadv_tvlv_container_register 5 19520 NULL
110153 +apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 NULL nohasharray
110154 +cfc_write_array_to_buffer_19529 cfc_write_array_to_buffer 3 19529 &apei_exec_pre_map_gars_19529
110155 +nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
110156 +howmany_64_19548 howmany_64 2 19548 NULL
110157 +gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
110158 +ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
110159 +ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
110160 +nfsd_read_19568 nfsd_read 5 19568 NULL
110161 +cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
110162 +bm_status_read_19583 bm_status_read 3 19583 NULL
110163 +load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
110164 +__mei_cl_recv_19636 __mei_cl_recv 3 19636 NULL
110165 +LoadBitmap_19658 LoadBitmap 2 19658 NULL
110166 +iwl_dbgfs_pm_params_write_19660 iwl_dbgfs_pm_params_write 3 19660 NULL
110167 +read_reg_19723 read_reg 0 19723 NULL
110168 +wm8350_block_write_19727 wm8350_block_write 2-3 19727 NULL
110169 +memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
110170 +snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
110171 +p9_client_read_19750 p9_client_read 5-0 19750 NULL
110172 +pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
110173 +ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
110174 +jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
110175 +readhscx_19769 readhscx 0 19769 NULL
110176 +__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
110177 +iwl_dbgfs_disable_power_off_write_19823 iwl_dbgfs_disable_power_off_write 3 19823 NULL
110178 +irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
110179 +vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
110180 +security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
110181 +crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
110182 +cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
110183 +__nla_put_19857 __nla_put 3 19857 NULL
110184 +mrp_request_join_19882 mrp_request_join 4 19882 NULL
110185 +aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
110186 +ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
110187 +mangle_name_19923 mangle_name 0 19923 NULL
110188 +cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
110189 +guest_read_tsc_19931 guest_read_tsc 0 19931 NULL
110190 +iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
110191 +cfg80211_rx_assoc_resp_19944 cfg80211_rx_assoc_resp 4 19944 NULL
110192 +get_jack_mode_name_19976 get_jack_mode_name 4 19976 NULL
110193 +attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
110194 +rtw_set_wps_probe_resp_19989 rtw_set_wps_probe_resp 3 19989 NULL
110195 +lustre_pack_request_19992 lustre_pack_request 0 19992 NULL
110196 +diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
110197 +lov_stripe_md_size_20009 lov_stripe_md_size 0-1 20009 NULL
110198 +tree_mod_log_eb_move_20011 tree_mod_log_eb_move 5 20011 NULL
110199 +SYSC_fgetxattr_20027 SYSC_fgetxattr 4 20027 NULL
110200 +split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
110201 +alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
110202 +iwl_mvm_power_mac_dbgfs_read_20067 iwl_mvm_power_mac_dbgfs_read 4 20067 NULL
110203 +target_message_20072 target_message 2 20072 NULL
110204 +rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
110205 +fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
110206 +aat2870_reg_write_file_20086 aat2870_reg_write_file 3 20086 NULL
110207 +team_options_register_20091 team_options_register 3 20091 NULL
110208 +qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
110209 +root_nfs_copy_20111 root_nfs_copy 3 20111 NULL
110210 +hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
110211 +tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
110212 +read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
110213 +wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
110214 +create_trace_probe_20175 create_trace_probe 1 20175 NULL
110215 +crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
110216 +pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
110217 +rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
110218 +tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
110219 +btrfs_header_nritems_20296 btrfs_header_nritems 0 20296 NULL
110220 +r10_sync_page_io_20307 r10_sync_page_io 3 20307 NULL
110221 +dm_get_reserved_bio_based_ios_20315 dm_get_reserved_bio_based_ios 0 20315 NULL
110222 +tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
110223 +vx_send_msg_nolock_20322 vx_send_msg_nolock 0 20322 NULL
110224 +snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
110225 +gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
110226 +handle_arr_calc_size_20355 handle_arr_calc_size 0-1 20355 NULL
110227 +smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
110228 +snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL nohasharray
110229 +read_7220_creg32_20394 read_7220_creg32 0 20394 &snd_nm256_readl_20394
110230 +__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL nohasharray
110231 +SyS_get_mempolicy_20399 SyS_get_mempolicy 3 20399 &__kfifo_from_user_20399
110232 +nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
110233 +compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
110234 +read_buf_20469 read_buf 2 20469 NULL
110235 +bio_trim_20472 bio_trim 2 20472 NULL
110236 +btrfs_get_32_20476 btrfs_get_32 0 20476 NULL
110237 +xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
110238 +drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
110239 +amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
110240 +scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
110241 +venus_create_20555 venus_create 4 20555 NULL
110242 +btrfs_super_log_root_20565 btrfs_super_log_root 0 20565 NULL
110243 +crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
110244 +ocfs2_cluster_lock_20588 ocfs2_cluster_lock 0 20588 NULL
110245 +kvm_test_age_hva_20593 kvm_test_age_hva 2 20593 NULL
110246 +sync_timeline_create_20601 sync_timeline_create 2 20601 NULL
110247 +lirc_write_20604 lirc_write 3 20604 NULL
110248 +qib_qsfp_write_20614 qib_qsfp_write 0-2-4 20614 NULL
110249 +snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
110250 +get_extent_skip_holes_20642 get_extent_skip_holes 2 20642 NULL
110251 +kfifo_copy_to_user_20646 kfifo_copy_to_user 3-4 20646 NULL
110252 +cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
110253 +oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
110254 +oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
110255 +snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
110256 +dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 NULL
110257 +cpumask_size_20683 cpumask_size 0 20683 NULL
110258 +btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
110259 +read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
110260 +__maestro_read_20700 __maestro_read 0 20700 NULL
110261 +cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
110262 +pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
110263 +ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
110264 +security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
110265 +fb_prepare_logo_20743 fb_prepare_logo 0 20743 NULL
110266 +vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
110267 +ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
110268 +brcmf_p2p_escan_20763 brcmf_p2p_escan 2 20763 NULL
110269 +fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
110270 +iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
110271 +strndup_user_20819 strndup_user 2 20819 NULL
110272 +tipc_msg_build_20825 tipc_msg_build 3 20825 NULL
110273 +wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
110274 +uvc_alloc_entity_20836 uvc_alloc_entity 3-4 20836 NULL
110275 +p9_tag_alloc_20845 p9_tag_alloc 3 20845 NULL
110276 +nvme_trans_supported_vpd_pages_20847 nvme_trans_supported_vpd_pages 4 20847 NULL
110277 +get_name_20855 get_name 4 20855 NULL
110278 +iwl_dbgfs_pm_params_read_20866 iwl_dbgfs_pm_params_read 3 20866 NULL
110279 +snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
110280 +srq_free_res_20868 srq_free_res 5 20868 NULL
110281 +cfs_cpt_table_create_20884 cfs_cpt_table_create 1 20884 NULL
110282 +rb_simple_write_20890 rb_simple_write 3 20890 NULL
110283 +sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
110284 +key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
110285 +vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
110286 +lbs_rdbbp_write_20918 lbs_rdbbp_write 3 20918 NULL
110287 +htable_bits_20933 htable_bits 0 20933 NULL
110288 +altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
110289 +rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
110290 +rsxx_cram_read_20957 rsxx_cram_read 3 20957 NULL
110291 +nfs_map_name_to_uid_20962 nfs_map_name_to_uid 3 20962 NULL
110292 +snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
110293 +alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
110294 +qib_verbs_send_20999 qib_verbs_send 5-3 20999 NULL
110295 +btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
110296 +rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
110297 +srp_change_queue_depth_21038 srp_change_queue_depth 2 21038 NULL
110298 +lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
110299 +reiserfs_direct_IO_21051 reiserfs_direct_IO 4 21051 NULL
110300 +proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
110301 +qdisc_get_default_21072 qdisc_get_default 2 21072 NULL
110302 +event_calibration_read_21083 event_calibration_read 3 21083 NULL
110303 +bl_add_page_to_bio_21094 bl_add_page_to_bio 2 21094 NULL nohasharray
110304 +multipath_status_21094 multipath_status 5 21094 &bl_add_page_to_bio_21094
110305 +rate_control_pid_events_read_21099 rate_control_pid_events_read 3 21099 NULL
110306 +ocfs2_extend_meta_needed_21104 ocfs2_extend_meta_needed 0 21104 NULL
110307 +ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
110308 +i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
110309 +cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
110310 +ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
110311 +scsi_execute_req_flags_21215 scsi_execute_req_flags 5 21215 NULL
110312 +get_numpages_21227 get_numpages 0-1-2 21227 NULL
110313 +input_ff_create_21240 input_ff_create 2 21240 NULL
110314 +cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
110315 +use_debug_keys_read_21251 use_debug_keys_read 3 21251 NULL
110316 +fru_length_21257 fru_length 0 21257 NULL
110317 +rtw_set_wps_beacon_21262 rtw_set_wps_beacon 3 21262 NULL
110318 +ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
110319 +do_msg_fill_21307 do_msg_fill 3 21307 NULL
110320 +add_res_range_21310 add_res_range 4 21310 NULL
110321 +get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
110322 +ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
110323 +read_file_bool_bmps_21344 read_file_bool_bmps 3 21344 NULL
110324 +ocfs2_find_subtree_root_21351 ocfs2_find_subtree_root 0 21351 NULL
110325 +gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
110326 +alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
110327 +SYSC_rt_sigpending_21379 SYSC_rt_sigpending 2 21379 NULL
110328 +video_ioctl2_21380 video_ioctl2 2 21380 NULL
110329 +insert_ptr_21386 insert_ptr 6 21386 NULL
110330 +diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
110331 +snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
110332 +snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
110333 +tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
110334 +tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
110335 +aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
110336 +__ertm_hdr_size_21450 __ertm_hdr_size 0 21450 NULL
110337 +ReadISAR_21453 ReadISAR 0 21453 NULL
110338 +mei_nfc_send_21477 mei_nfc_send 3 21477 NULL
110339 +read_file_xmit_21487 read_file_xmit 3 21487 NULL
110340 +mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
110341 +btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
110342 +il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
110343 +cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
110344 +rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
110345 +rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
110346 +xfs_buf_read_uncached_21585 xfs_buf_read_uncached 3 21585 NULL
110347 +snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
110348 +ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
110349 +filemap_get_page_21606 filemap_get_page 2 21606 NULL
110350 +ocfs2_refcount_cow_hunk_21630 ocfs2_refcount_cow_hunk 3-4 21630 NULL
110351 +__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
110352 +atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
110353 +ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
110354 +rtllib_alloc_txb_21687 rtllib_alloc_txb 1 21687 NULL
110355 +evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
110356 +unix_skb_len_21722 unix_skb_len 0 21722 NULL
110357 +lprocfs_wr_import_21728 lprocfs_wr_import 3 21728 NULL
110358 +mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
110359 +usbat_flash_read_data_21762 usbat_flash_read_data 4 21762 NULL
110360 +gen_pool_add_21776 gen_pool_add 3 21776 NULL
110361 +xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
110362 +dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
110363 +__ocfs2_cluster_lock_21812 __ocfs2_cluster_lock 0 21812 NULL
110364 +oom_adj_read_21847 oom_adj_read 3 21847 NULL
110365 +lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
110366 +brcms_debugfs_hardware_read_21867 brcms_debugfs_hardware_read 3 21867 NULL
110367 +sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL
110368 +ldlm_lock_create_21888 ldlm_lock_create 7 21888 NULL
110369 +dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
110370 +qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
110371 +SYSC_prctl_21980 SYSC_prctl 4 21980 NULL
110372 +compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 0-3 22001 NULL nohasharray
110373 +rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 &compat_rw_copy_check_uvector_22001
110374 +regcache_sync_block_raw_flush_22021 regcache_sync_block_raw_flush 3-4 22021 NULL
110375 +btrfs_get_16_22023 btrfs_get_16 0 22023 NULL
110376 +_sp2d_min_pg_22032 _sp2d_min_pg 0 22032 NULL
110377 +zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
110378 +ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
110379 +btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2-3 22077 NULL
110380 +mem_rw_22085 mem_rw 3 22085 NULL
110381 +kstrtos32_from_user_22087 kstrtos32_from_user 2 22087 NULL
110382 +rt2x00debug_read_crypto_stats_22109 rt2x00debug_read_crypto_stats 3 22109 NULL
110383 +snd_hda_codec_read_22130 snd_hda_codec_read 0 22130 NULL
110384 +SyS_sched_setaffinity_22148 SyS_sched_setaffinity 2 22148 NULL
110385 +do_tcp_sendpages_22155 do_tcp_sendpages 4 22155 NULL
110386 +__kfifo_alloc_22173 __kfifo_alloc 3 22173 NULL
110387 +rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 NULL
110388 +mem_write_22232 mem_write 3 22232 NULL
110389 +p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
110390 +prepare_to_wait_event_22247 prepare_to_wait_event 0 22247 NULL
110391 +compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
110392 +ping_common_sendmsg_22261 ping_common_sendmsg 5 22261 NULL
110393 +add_res_tree_22263 add_res_tree 7 22263 NULL
110394 +__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
110395 +queue_max_sectors_22280 queue_max_sectors 0 22280 NULL
110396 +__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL nohasharray
110397 +pci_vpd_srdt_size_22300 pci_vpd_srdt_size 0 22300 &__tun_chr_ioctl_22300
110398 +mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
110399 +lov_setstripe_22307 lov_setstripe 2 22307 NULL
110400 +udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
110401 +C_SYSC_msgrcv_22320 C_SYSC_msgrcv 3 22320 NULL
110402 +atomic_read_22342 atomic_read 0 22342 NULL
110403 +ll_lazystatfs_seq_write_22353 ll_lazystatfs_seq_write 3 22353 NULL
110404 +memcg_size_22360 memcg_size 0 22360 NULL
110405 +snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
110406 +evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
110407 +alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
110408 +zoran_write_22404 zoran_write 3 22404 NULL
110409 +ATOMIC_ADD_RETURN_22413 ATOMIC_ADD_RETURN 2 22413 NULL
110410 +queue_reply_22416 queue_reply 3 22416 NULL
110411 +__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
110412 +queue_max_segments_22441 queue_max_segments 0 22441 NULL
110413 +handle_received_packet_22457 handle_received_packet 3 22457 NULL
110414 +mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
110415 +source_sink_start_ep_22472 source_sink_start_ep 0 22472 NULL
110416 +ecryptfs_write_22488 ecryptfs_write 4-3 22488 NULL
110417 +qib_user_sdma_alloc_header_22490 qib_user_sdma_alloc_header 2 22490 NULL
110418 +cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
110419 +mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
110420 +trim_no_bitmap_22524 trim_no_bitmap 4-3 22524 NULL
110421 +ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
110422 +agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
110423 +dbFindCtl_22587 dbFindCtl 0 22587 NULL
110424 +snapshot_read_22601 snapshot_read 3 22601 NULL
110425 +sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
110426 +ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
110427 +wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
110428 +pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
110429 +iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
110430 +compat_SyS_msgrcv_22661 compat_SyS_msgrcv 3 22661 NULL
110431 +ext4_ext_direct_IO_22679 ext4_ext_direct_IO 4 22679 NULL
110432 +l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
110433 +bch_dump_read_22685 bch_dump_read 3 22685 NULL
110434 +reg_umr_22686 reg_umr 5 22686 NULL
110435 +alloc_libipw_22708 alloc_libipw 1 22708 NULL
110436 +cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4-0 22735 NULL
110437 +ceph_decode_32_22738 ceph_decode_32 0 22738 NULL nohasharray
110438 +__mei_cl_send_22738 __mei_cl_send 3 22738 &ceph_decode_32_22738
110439 +iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
110440 +qlcnic_sriov_init_22762 qlcnic_sriov_init 2 22762 NULL
110441 +print_frame_22769 print_frame 0 22769 NULL
110442 +ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
110443 +pla_ocp_write_22802 pla_ocp_write 4 22802 NULL
110444 +__generic_copy_to_user_intel_22806 __generic_copy_to_user_intel 0-3 22806 NULL
110445 +clone_bio_integrity_22842 clone_bio_integrity 4 22842 NULL
110446 +read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
110447 +create_attr_set_22861 create_attr_set 1 22861 NULL
110448 +hash_ip6_expire_22867 hash_ip6_expire 4 22867 NULL
110449 +vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
110450 +usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
110451 +mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
110452 +policy_emit_config_values_22900 policy_emit_config_values 3 22900 NULL
110453 +xstateregs_set_22932 xstateregs_set 4 22932 NULL
110454 +pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
110455 +alloc_sglist_22960 alloc_sglist 2-3 22960 NULL
110456 +caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
110457 +vme_get_size_22964 vme_get_size 0 22964 NULL
110458 +tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
110459 +usb_get_langid_22983 usb_get_langid 0 22983 NULL
110460 +remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
110461 +brcmf_sdio_chip_exit_download_23001 brcmf_sdio_chip_exit_download 4 23001 NULL
110462 +viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
110463 +cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
110464 +ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0-6-7 23029 NULL
110465 +st_status_23032 st_status 5 23032 NULL
110466 +nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
110467 +comedi_buf_write_n_available_23057 comedi_buf_write_n_available 0 23057 NULL
110468 +reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
110469 +unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
110470 +mei_cl_send_23068 mei_cl_send 3 23068 NULL
110471 +kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
110472 +raw_sendmsg_23078 raw_sendmsg 4 23078 NULL
110473 +get_user_hdr_len_23079 get_user_hdr_len 0 23079 NULL
110474 +isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
110475 +rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
110476 +ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
110477 +pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
110478 +dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
110479 +mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
110480 +nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
110481 +__clear_user_23118 __clear_user 0-2 23118 NULL
110482 +drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
110483 +ata_scsi_change_queue_depth_23126 ata_scsi_change_queue_depth 2 23126 NULL
110484 +read_file_ani_23161 read_file_ani 3 23161 NULL
110485 +usblp_write_23178 usblp_write 3 23178 NULL
110486 +gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
110487 +mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
110488 +tty_buffer_request_room_23228 tty_buffer_request_room 2-0 23228 NULL
110489 +xlog_get_bp_23229 xlog_get_bp 2 23229 NULL nohasharray
110490 +__read_status_pci_23229 __read_status_pci 0 23229 &xlog_get_bp_23229
110491 +ft1000_read_dpram_mag_32_23232 ft1000_read_dpram_mag_32 0 23232 NULL
110492 +rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
110493 +__gfn_to_rmap_23240 __gfn_to_rmap 2-1 23240 NULL
110494 +nv50_ram_create__23241 nv50_ram_create_ 4 23241 NULL
110495 +sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL
110496 +uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
110497 +diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
110498 +i2cdev_write_23310 i2cdev_write 3 23310 NULL
110499 +__aa_kvmalloc_23320 __aa_kvmalloc 1 23320 NULL
110500 +page_readlink_23346 page_readlink 3 23346 NULL
110501 +kmem_zalloc_large_23351 kmem_zalloc_large 1 23351 NULL
110502 +get_dst_timing_23358 get_dst_timing 0 23358 NULL
110503 +fd_setup_write_same_buf_23369 fd_setup_write_same_buf 3 23369 NULL
110504 +iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
110505 +vga_mm_r_23419 vga_mm_r 0 23419 NULL
110506 +ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 NULL
110507 +hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
110508 +__ata_change_queue_depth_23484 __ata_change_queue_depth 3 23484 NULL
110509 +linear_conf_23485 linear_conf 2 23485 NULL
110510 +event_filter_read_23494 event_filter_read 3 23494 NULL
110511 +lustre_acl_xattr_merge2ext_23502 lustre_acl_xattr_merge2ext 2 23502 NULL
110512 +devm_iio_device_alloc_23511 devm_iio_device_alloc 2 23511 NULL
110513 +__proc_cpt_table_23516 __proc_cpt_table 5 23516 NULL
110514 +ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
110515 +tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
110516 +btrfs_super_bytenr_23561 btrfs_super_bytenr 0 23561 NULL
110517 +venus_symlink_23570 venus_symlink 6-4 23570 NULL
110518 +iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
110519 +xfpregs_get_23586 xfpregs_get 4 23586 NULL
110520 +snd_interval_min_23590 snd_interval_min 0 23590 NULL
110521 +islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
110522 +ocfs2_journal_access_23616 ocfs2_journal_access 0 23616 NULL
110523 +__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
110524 +sInW_23663 sInW 0 23663 NULL
110525 +SyS_connect_23669 SyS_connect 3 23669 NULL
110526 +cx18_read_23699 cx18_read 3 23699 NULL
110527 +at_get_23708 at_get 0 23708 NULL
110528 +rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
110529 +__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
110530 +__build_packet_message_23778 __build_packet_message 4-10 23778 NULL
110531 +security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
110532 +cfg80211_inform_bss_width_frame_23782 cfg80211_inform_bss_width_frame 5 23782 NULL
110533 +mpt_free_res_23793 mpt_free_res 5 23793 NULL
110534 +map_write_23795 map_write 3 23795 NULL
110535 +rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
110536 +ocfs2_replace_cow_23803 ocfs2_replace_cow 0 23803 NULL
110537 +__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
110538 +lustre_msg_buflen_23827 lustre_msg_buflen 0 23827 NULL
110539 +ceph_copy_page_vector_to_user_23829 ceph_copy_page_vector_to_user 3-4 23829 NULL
110540 +pgdat_end_pfn_23842 pgdat_end_pfn 0 23842 NULL
110541 +iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
110542 +p54_init_common_23850 p54_init_common 1 23850 NULL
110543 +bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL
110544 +ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
110545 +ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
110546 +nouveau_clock_create__23881 nouveau_clock_create_ 5 23881 NULL
110547 +tipc_snprintf_23893 tipc_snprintf 2-0 23893 NULL
110548 +usbg_prepare_w_request_23895 usbg_prepare_w_request 0 23895 NULL
110549 +add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray
110550 +ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911
110551 +f2fs_getxattr_23917 f2fs_getxattr 0 23917 NULL
110552 +mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 NULL nohasharray
110553 +ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 &mpihelp_mul_karatsuba_case_23918
110554 +kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL
110555 +uvc_endpoint_max_bpi_23944 uvc_endpoint_max_bpi 0 23944 NULL
110556 +cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
110557 +size_roundup_power2_23958 size_roundup_power2 0-1 23958 NULL
110558 +sddr55_write_data_23983 sddr55_write_data 4 23983 NULL
110559 +zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
110560 +cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
110561 +give_pages_24021 give_pages 3 24021 NULL
110562 +adis16400_show_serial_number_24037 adis16400_show_serial_number 3 24037 NULL
110563 +hmac_setkey_24043 hmac_setkey 3 24043 NULL
110564 +afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
110565 +blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
110566 +vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
110567 +pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
110568 +request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
110569 +lov_brw_24122 lov_brw 4 24122 NULL
110570 +mpu401_read_24126 mpu401_read 3-0 24126 NULL
110571 +_picolcd_flash_write_24134 _picolcd_flash_write 4 24134 NULL
110572 +irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
110573 +SyS_sethostname_24150 SyS_sethostname 2 24150 NULL
110574 +trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
110575 +adu_read_24177 adu_read 3 24177 NULL
110576 +safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
110577 +irq_remapping_setup_msi_irqs_24194 irq_remapping_setup_msi_irqs 2 24194 NULL
110578 +ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
110579 +tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
110580 +pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL nohasharray
110581 +mei_amthif_read_24224 mei_amthif_read 4 24224 &pcpu_embed_first_chunk_24224
110582 +pci_num_vf_24235 pci_num_vf 0 24235 NULL
110583 +sel_read_bool_24236 sel_read_bool 3 24236 NULL
110584 +em28xx_alloc_urbs_24260 em28xx_alloc_urbs 4-6 24260 NULL
110585 +calculate_sizes_24273 calculate_sizes 2 24273 NULL
110586 +thin_status_24278 thin_status 5 24278 NULL
110587 +msg_size_24288 msg_size 0 24288 NULL
110588 +gserial_connect_24302 gserial_connect 0 24302 NULL
110589 +btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
110590 +ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
110591 +si476x_radio_read_acf_blob_24336 si476x_radio_read_acf_blob 3 24336 NULL
110592 +C_SYSC_pwritev_24345 C_SYSC_pwritev 3 24345 NULL
110593 +kzalloc_node_24352 kzalloc_node 1 24352 NULL
110594 +qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
110595 +cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
110596 +btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
110597 +igetword_24373 igetword 0 24373 NULL
110598 +max_io_len_24384 max_io_len 0-1 24384 NULL
110599 +mpt_alloc_res_24387 mpt_alloc_res 5 24387 NULL
110600 +osc_cur_grant_bytes_seq_write_24396 osc_cur_grant_bytes_seq_write 3 24396 NULL
110601 +getxattr_24398 getxattr 4 24398 NULL nohasharray
110602 +pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 &getxattr_24398
110603 +blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
110604 +nvme_trans_log_supp_pages_24418 nvme_trans_log_supp_pages 3 24418 NULL
110605 +b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
110606 +xenbus_file_read_24427 xenbus_file_read 3 24427 NULL
110607 +ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
110608 +copy_and_ioctl_24434 copy_and_ioctl 4 24434 NULL
110609 +ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL
110610 +smk_user_access_24440 smk_user_access 3 24440 NULL nohasharray
110611 +rtw_set_wps_assoc_resp_24440 rtw_set_wps_assoc_resp 3 24440 &smk_user_access_24440
110612 +evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
110613 +lbs_highsnr_write_24460 lbs_highsnr_write 3 24460 NULL
110614 +skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL
110615 +dut_mode_read_24489 dut_mode_read 3 24489 NULL
110616 +read_file_spec_scan_ctl_24491 read_file_spec_scan_ctl 3 24491 NULL
110617 +pd_video_read_24510 pd_video_read 3 24510 NULL
110618 +request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
110619 +xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
110620 +do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
110621 +write_cache_pages_24562 write_cache_pages 0 24562 NULL
110622 +SyS_pselect6_24582 SyS_pselect6 1 24582 NULL
110623 +udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
110624 +sensor_hub_get_physical_device_count_24605 sensor_hub_get_physical_device_count 0 24605 NULL nohasharray
110625 +lov_alloc_memmd_24605 lov_alloc_memmd 2 24605 &sensor_hub_get_physical_device_count_24605
110626 +SyS_poll_24620 SyS_poll 2 24620 NULL
110627 +context_alloc_24645 context_alloc 3 24645 NULL
110628 +blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
110629 +datafab_write_data_24696 datafab_write_data 4 24696 NULL
110630 +intelfbhw_get_p1p2_24703 intelfbhw_get_p1p2 2 24703 NULL
110631 +simple_attr_read_24738 simple_attr_read 3 24738 NULL
110632 +qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
110633 +get_dma_residue_24749 get_dma_residue 0 24749 NULL
110634 +ocfs2_cow_file_pos_24751 ocfs2_cow_file_pos 3 24751 NULL
110635 +kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
110636 +ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
110637 +datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
110638 +cache_read_24790 cache_read 3 24790 NULL
110639 +user_regset_copyout_24796 user_regset_copyout 7 24796 NULL
110640 +kvm_read_guest_virt_helper_24804 kvm_read_guest_virt_helper 3-1 24804 NULL
110641 +ath6kl_fwlog_mask_write_24810 ath6kl_fwlog_mask_write 3 24810 NULL
110642 +net2272_read_24825 net2272_read 0 24825 NULL
110643 +snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
110644 +snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
110645 +pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
110646 +l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
110647 +queues_read_24877 queues_read 3 24877 NULL
110648 +__vxge_hw_vp_initialize_24885 __vxge_hw_vp_initialize 2 24885 NULL
110649 +codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
110650 +v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL nohasharray
110651 +__btrfs_free_extent_24927 __btrfs_free_extent 7 24927 &v4l2_ctrl_new_24927
110652 +ocfs2_fiemap_24949 ocfs2_fiemap 4-3 24949 NULL
110653 +packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
110654 +ll_layout_fetch_24961 ll_layout_fetch 0 24961 NULL
110655 +twl_i2c_write_u8_24976 twl_i2c_write_u8 3 24976 NULL
110656 +llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
110657 +key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
110658 +il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
110659 +ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
110660 +nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
110661 +load_unaligned_zeropad_25050 load_unaligned_zeropad 0 25050 NULL
110662 +btrfs_stack_key_blockptr_25058 btrfs_stack_key_blockptr 0 25058 NULL
110663 +gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
110664 +ll_track_pid_seq_write_25068 ll_track_pid_seq_write 3 25068 NULL
110665 +SYSC_listxattr_25072 SYSC_listxattr 3 25072 NULL
110666 +iwl_dbgfs_tx_flush_write_25091 iwl_dbgfs_tx_flush_write 3 25091 NULL
110667 +ima_appraise_measurement_25093 ima_appraise_measurement 6 25093 NULL
110668 +blkg_path_25099 blkg_path 3 25099 NULL
110669 +snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
110670 +ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL
110671 +kvm_mmu_notifier_change_pte_25169 kvm_mmu_notifier_change_pte 3 25169 NULL
110672 +sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
110673 +mon_stat_read_25238 mon_stat_read 3 25238 NULL
110674 +stripe_status_25259 stripe_status 5 25259 NULL
110675 +snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
110676 +crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
110677 +vfs_writev_25278 vfs_writev 3 25278 NULL
110678 +l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
110679 +ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 NULL
110680 +rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
110681 +SYSC_kexec_load_25361 SYSC_kexec_load 2 25361 NULL
110682 +unix_mkname_25368 unix_mkname 0-2 25368 NULL
110683 +sel_read_mls_25369 sel_read_mls 3 25369 NULL
110684 +vsp1_entity_init_25407 vsp1_entity_init 3 25407 NULL
110685 +dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
110686 +generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
110687 +ipath_decode_err_25468 ipath_decode_err 3 25468 NULL
110688 +crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
110689 +ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4-0 25502 NULL
110690 +snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
110691 +sb_permission_25523 sb_permission 0 25523 NULL
110692 +ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
110693 +ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
110694 +wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
110695 +ht_print_chan_25556 ht_print_chan 3-4-0 25556 NULL
110696 +skb_tailroom_25567 skb_tailroom 0 25567 NULL
110697 +ping_recvmsg_25597 ping_recvmsg 4 25597 NULL
110698 +copy_user_generic_25611 copy_user_generic 0 25611 NULL
110699 +proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
110700 +befs_utf2nls_25628 befs_utf2nls 3 25628 NULL nohasharray
110701 +__get_user_pages_25628 __get_user_pages 0 25628 &befs_utf2nls_25628
110702 +__direct_map_25647 __direct_map 6-5 25647 NULL
110703 +aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
110704 +lpfc_idiag_cmd_get_25672 lpfc_idiag_cmd_get 2 25672 NULL
110705 +sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
110706 +rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL
110707 +ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
110708 +__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 NULL nohasharray
110709 +sel_write_context_25726 sel_write_context 3 25726 &__alloc_bootmem_low_node_25726
110710 +cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
110711 +event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
110712 +sg_read_25799 sg_read 3 25799 NULL
110713 +system_enable_read_25815 system_enable_read 3 25815 NULL
110714 +realloc_buffer_25816 realloc_buffer 2 25816 NULL
110715 +pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
110716 +parport_read_25855 parport_read 0 25855 NULL
110717 +xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
110718 +key_attr_size_25865 key_attr_size 0 25865 NULL
110719 +ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
110720 +run_delalloc_nocow_25896 run_delalloc_nocow 3-4 25896 NULL
110721 +sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
110722 +lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
110723 +nvme_trans_mode_page_create_25908 nvme_trans_mode_page_create 7-4 25908 NULL
110724 +do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
110725 +rcname_read_25919 rcname_read 3 25919 NULL
110726 +snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
110727 +key_flags_read_25931 key_flags_read 3 25931 NULL
110728 +copy_play_buf_25932 copy_play_buf 3 25932 NULL
110729 +flush_25957 flush 2 25957 NULL
110730 +udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
110731 +lustre_msg_buflen_v2_25997 lustre_msg_buflen_v2 0 25997 NULL
110732 +SyS_process_vm_readv_26019 SyS_process_vm_readv 3-5 26019 NULL
110733 +xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
110734 +mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
110735 +selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
110736 +tun_do_read_26047 tun_do_read 5 26047 NULL
110737 +keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
110738 +rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
110739 +read_sb_page_26119 read_sb_page 5 26119 NULL
110740 +ath9k_hw_name_26146 ath9k_hw_name 3 26146 NULL
110741 +copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL
110742 +gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray
110743 +ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166
110744 +disk_devt_26180 disk_devt 0 26180 NULL
110745 +cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
110746 +ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
110747 +xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
110748 +mce_write_26201 mce_write 3 26201 NULL
110749 +mwifiex_regrdwr_write_26225 mwifiex_regrdwr_write 3 26225 NULL
110750 +_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
110751 +rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 NULL
110752 +bio_split_26235 bio_split 2 26235 NULL
110753 +crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
110754 +apei_resources_request_26279 apei_resources_request 0 26279 NULL
110755 +wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
110756 +snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL nohasharray
110757 +pax_get_random_long_26309 pax_get_random_long 0 26309 &snd_pcm_plug_client_channels_buf_26309
110758 +pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
110759 +check_can_nocow_26336 check_can_nocow 2 26336 NULL
110760 +snd_vx_check_reg_bit_26344 snd_vx_check_reg_bit 0 26344 NULL
110761 +ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 6-3 26357 NULL
110762 +cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
110763 +invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
110764 +ntty_write_26404 ntty_write 3 26404 NULL
110765 +firmware_store_26408 firmware_store 4 26408 NULL
110766 +pagemap_read_26441 pagemap_read 3 26441 NULL
110767 +tower_read_26461 tower_read 3 26461 NULL nohasharray
110768 +enc_pools_add_pages_26461 enc_pools_add_pages 1 26461 &tower_read_26461
110769 +ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
110770 +ulong_write_file_26485 ulong_write_file 3 26485 NULL
110771 +dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
110772 +read_vmcore_26501 read_vmcore 3 26501 NULL
110773 +uhid_char_write_26502 uhid_char_write 3 26502 NULL
110774 +vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 4-3 26507 NULL
110775 +iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
110776 +SyS_rt_sigpending_26538 SyS_rt_sigpending 2 26538 NULL
110777 +__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
110778 +dio_new_bio_26562 dio_new_bio 0 26562 NULL
110779 +rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL
110780 +pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
110781 +irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
110782 +inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
110783 +nouveau_volt_create__26654 nouveau_volt_create_ 4 26654 NULL
110784 +cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
110785 +nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
110786 +pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
110787 +bos_desc_26752 bos_desc 0 26752 NULL
110788 +snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
110789 +dma_map_single_attrs_26779 dma_map_single_attrs 0 26779 NULL
110790 +qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
110791 +cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
110792 +iwl_trans_read_mem32_26825 iwl_trans_read_mem32 0 26825 NULL
110793 +smk_write_load_26829 smk_write_load 3 26829 NULL
110794 +scnprint_id_26842 scnprint_id 3-0 26842 NULL
110795 +ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
110796 +tipc_conn_sendmsg_26867 tipc_conn_sendmsg 5 26867 NULL
110797 +ath6kl_create_qos_write_26879 ath6kl_create_qos_write 3 26879 NULL
110798 +svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
110799 +cfg80211_process_auth_26916 cfg80211_process_auth 3 26916 NULL
110800 +x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
110801 +scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
110802 +sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 NULL nohasharray
110803 +pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 &sctp_setsockopt_adaptation_layer_26935
110804 +hecubafb_write_26942 hecubafb_write 3 26942 NULL
110805 +do_trimming_26952 do_trimming 3 26952 NULL nohasharray
110806 +extract_entropy_user_26952 extract_entropy_user 3 26952 &do_trimming_26952
110807 +do_direct_IO_26979 do_direct_IO 0 26979 NULL
110808 +__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
110809 +ext4_convert_unwritten_extents_27064 ext4_convert_unwritten_extents 4-3-0 27064 NULL
110810 +snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
110811 +paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
110812 +alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
110813 +btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL nohasharray
110814 +ath9k_hw_4k_dump_eeprom_27089 ath9k_hw_4k_dump_eeprom 5-4 27089 &btmrvl_hscmd_write_27089
110815 +__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
110816 +get_kernel_page_27133 get_kernel_page 0 27133 NULL
110817 +drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
110818 +pms_capture_27142 pms_capture 4 27142 NULL
110819 +btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
110820 +snd_compr_calc_avail_27165 snd_compr_calc_avail 0 27165 NULL
110821 +ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
110822 +__sg_alloc_table_27198 __sg_alloc_table 0 27198 NULL
110823 +write_kmem_27225 write_kmem 3 27225 NULL
110824 +dbAllocAG_27228 dbAllocAG 0 27228 NULL
110825 +rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
110826 +ll_track_gid_seq_write_27267 ll_track_gid_seq_write 3 27267 NULL
110827 +comedi_alloc_devpriv_27272 comedi_alloc_devpriv 2 27272 NULL
110828 +copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
110829 +virtqueue_add_inbuf_27312 virtqueue_add_inbuf 3 27312 NULL
110830 +snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
110831 +afs_cell_create_27346 afs_cell_create 2 27346 NULL
110832 +iwl_dbgfs_csr_write_27363 iwl_dbgfs_csr_write 3 27363 NULL
110833 +pcbit_stat_27364 pcbit_stat 2 27364 NULL
110834 +seq_read_27411 seq_read 3 27411 NULL
110835 +ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL
110836 +ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
110837 +ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0-3-4 27422 NULL
110838 +cypress_write_27423 cypress_write 4 27423 NULL
110839 +sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
110840 +v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
110841 +hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
110842 +ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
110843 +btrfs_get_64_27499 btrfs_get_64 0 27499 NULL
110844 +garmin_read_process_27509 garmin_read_process 3 27509 NULL
110845 +oti_alloc_cookies_27510 oti_alloc_cookies 2 27510 NULL
110846 +ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
110847 +snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
110848 +SyS_fgetxattr_27571 SyS_fgetxattr 4 27571 NULL
110849 +sco_sock_recvmsg_27572 sco_sock_recvmsg 4 27572 NULL
110850 +libipw_alloc_txb_27579 libipw_alloc_txb 1 27579 NULL
110851 +ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 NULL nohasharray
110852 +nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &ocfs2_xattr_ibody_get_27642 nohasharray
110853 +read_flush_procfs_27642 read_flush_procfs 3 27642 &nl80211_send_connect_result_27642 nohasharray
110854 +ocfs2_direct_IO_27642 ocfs2_direct_IO 4 27642 &read_flush_procfs_27642
110855 +add_new_gdb_27643 add_new_gdb 3 27643 NULL
110856 +btrfs_fallocate_27647 btrfs_fallocate 3-4 27647 NULL
110857 +qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
110858 +cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
110859 +ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
110860 +fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL
110861 +evm_write_key_27715 evm_write_key 3 27715 NULL
110862 +ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
110863 +xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
110864 +SyS_setsockopt_27759 SyS_setsockopt 5 27759 NULL
110865 +__lov_setstripe_27782 __lov_setstripe 2 27782 NULL
110866 +twl4030_set_gpio_dataout_27792 twl4030_set_gpio_dataout 1 27792 NULL
110867 +SyS_readv_27804 SyS_readv 3 27804 NULL
110868 +mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
110869 +fwtty_buffer_rx_27821 fwtty_buffer_rx 3 27821 NULL
110870 +hpt374_read_freq_27828 hpt374_read_freq 0 27828 NULL
110871 +init_header_complete_27833 init_header_complete 0 27833 NULL
110872 +read_profile_27859 read_profile 3 27859 NULL
110873 +sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
110874 +ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
110875 +unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
110876 +check_mapped_name_27943 check_mapped_name 3 27943 NULL
110877 +tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
110878 +tipc_media_addr_printf_27971 tipc_media_addr_printf 2 27971 NULL
110879 +device_register_27972 device_register 0 27972 NULL nohasharray
110880 +mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 &device_register_27972
110881 +pci_enable_device_flags_27977 pci_enable_device_flags 0 27977 NULL
110882 +f2fs_bio_alloc_27983 f2fs_bio_alloc 2 27983 NULL
110883 +edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
110884 +snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
110885 +powercap_register_zone_28028 powercap_register_zone 6 28028 NULL
110886 +sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
110887 +rts51x_xd_rw_28046 rts51x_xd_rw 3-4 28046 NULL
110888 +cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2-4 28053 NULL
110889 +pool_status_28055 pool_status 5 28055 NULL
110890 +init_rs_non_canonical_28059 init_rs_non_canonical 1 28059 NULL
110891 +lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
110892 +tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
110893 +mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
110894 +rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
110895 +vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
110896 +video_read_28148 video_read 3 28148 NULL
110897 +snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
110898 +stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
110899 +vread_28173 vread 0-3 28173 NULL
110900 +macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
110901 +counter_free_res_28187 counter_free_res 5 28187 NULL
110902 +read_disk_sb_28188 read_disk_sb 2 28188 NULL
110903 +nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
110904 +__qp_memcpy_from_queue_28220 __qp_memcpy_from_queue 3-4 28220 NULL
110905 +line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL
110906 +amd_nb_num_28228 amd_nb_num 0 28228 NULL
110907 +fuse_direct_IO_28275 fuse_direct_IO 4 28275 NULL
110908 +usemap_size_28281 usemap_size 0 28281 NULL
110909 +inline_xattr_size_28285 inline_xattr_size 0 28285 NULL
110910 +dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
110911 +SyS_ppoll_28290 SyS_ppoll 2 28290 NULL
110912 +kstrtos16_from_user_28300 kstrtos16_from_user 2 28300 NULL
110913 +nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 NULL
110914 +snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
110915 +bm_entry_write_28338 bm_entry_write 3 28338 NULL
110916 +tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
110917 +snapshot_write_28351 snapshot_write 3 28351 NULL
110918 +xfs_iomap_write_unwritten_28365 xfs_iomap_write_unwritten 3-2 28365 NULL
110919 +batadv_handle_tt_response_28370 batadv_handle_tt_response 4 28370 NULL
110920 +dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
110921 +tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
110922 +bypass_pwup_write_28416 bypass_pwup_write 3 28416 NULL
110923 +subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
110924 +ksocknal_alloc_tx_28426 ksocknal_alloc_tx 2 28426 NULL
110925 +mpage_readpages_28436 mpage_readpages 3 28436 NULL
110926 +snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
110927 +key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL
110928 +alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
110929 +ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
110930 +i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
110931 +sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
110932 +mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 NULL nohasharray
110933 +run_delalloc_range_28545 run_delalloc_range 3-4 28545 &mptctl_getiocinfo_28545 nohasharray
110934 +aio_read_events_28545 aio_read_events 3 28545 &run_delalloc_range_28545
110935 +sysfs_create_bin_file_28551 sysfs_create_bin_file 0 28551 NULL
110936 +b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
110937 +asymmetric_verify_28567 asymmetric_verify 3 28567 NULL
110938 +oxygen_read32_28582 oxygen_read32 0 28582 NULL
110939 +extract_entropy_28604 extract_entropy 5-3 28604 NULL
110940 +kfifo_unused_28612 kfifo_unused 0 28612 NULL
110941 +snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
110942 +setup_usemap_28636 setup_usemap 3-4 28636 NULL
110943 +qib_handle_6120_hwerrors_28642 qib_handle_6120_hwerrors 3 28642 NULL
110944 +p9_fcall_alloc_28652 p9_fcall_alloc 1 28652 NULL
110945 +read_nic_io_byte_28654 read_nic_io_byte 0 28654 NULL
110946 +blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
110947 +SyS_setgroups16_28686 SyS_setgroups16 1 28686 NULL
110948 +kvm_mmu_get_page_28692 kvm_mmu_get_page 2 28692 NULL
110949 +drm_plane_init_28731 drm_plane_init 6 28731 NULL
110950 +spi_execute_28736 spi_execute 5 28736 NULL
110951 +snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
110952 +read_file_btcoex_28743 read_file_btcoex 3 28743 NULL
110953 +max_hw_blocks_28748 max_hw_blocks 0 28748 NULL
110954 +rpc_pipe_generic_upcall_28766 rpc_pipe_generic_upcall 4 28766 NULL
110955 +ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
110956 +sel_write_member_28800 sel_write_member 3 28800 NULL
110957 +cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
110958 +iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
110959 +vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
110960 +ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
110961 +max_io_len_target_boundary_28879 max_io_len_target_boundary 0-1 28879 NULL
110962 +packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
110963 +da9055_group_write_28904 da9055_group_write 2-3 28904 NULL
110964 +ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
110965 +iwl_dbgfs_sleep_level_override_write_28925 iwl_dbgfs_sleep_level_override_write 3 28925 NULL
110966 +push_rx_28939 push_rx 3 28939 NULL
110967 +btrfs_trim_block_group_28963 btrfs_trim_block_group 3-4 28963 NULL
110968 +alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
110969 +hash_net6_expire_28979 hash_net6_expire 4 28979 NULL
110970 +hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
110971 +bin_uuid_28999 bin_uuid 3 28999 NULL
110972 +fd_execute_rw_29004 fd_execute_rw 3 29004 NULL
110973 +ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
110974 +rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL
110975 +btrfs_root_bytenr_29058 btrfs_root_bytenr 0 29058 NULL
110976 +iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
110977 +roundup_64_29066 roundup_64 2-0-1 29066 NULL
110978 +lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
110979 +sctp_getsockopt_assoc_stats_29074 sctp_getsockopt_assoc_stats 2 29074 NULL
110980 +iwl_dbgfs_log_event_write_29088 iwl_dbgfs_log_event_write 3 29088 NULL
110981 +i915_error_object_create_sized_29091 i915_error_object_create_sized 3 29091 NULL
110982 +isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
110983 +snprintf_29125 snprintf 0 29125 NULL
110984 +iov_shorten_29130 iov_shorten 0 29130 NULL
110985 +proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
110986 +kvm_mmu_notifier_clear_flush_young_29154 kvm_mmu_notifier_clear_flush_young 3 29154 NULL
110987 +drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
110988 +wusb_prf_256_29203 wusb_prf_256 7 29203 NULL
110989 +iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
110990 +nvme_trans_copy_from_user_29227 nvme_trans_copy_from_user 3 29227 NULL
110991 +irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
110992 +evdev_handle_get_val_29242 evdev_handle_get_val 5-6 29242 NULL
110993 +security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
110994 +prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
110995 +ext4_fiemap_29296 ext4_fiemap 4 29296 NULL
110996 +sn9c102_read_29305 sn9c102_read 3 29305 NULL
110997 +__fuse_get_req_29315 __fuse_get_req 2 29315 NULL
110998 +lprocfs_write_helper_29323 lprocfs_write_helper 2 29323 NULL
110999 +kvm_handle_hva_29326 kvm_handle_hva 2 29326 NULL
111000 +tun_put_user_29337 tun_put_user 5 29337 NULL
111001 +__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
111002 +l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
111003 +mwifiex_cfg80211_mgmt_tx_29387 mwifiex_cfg80211_mgmt_tx 7 29387 NULL
111004 +read_file_tx99_power_29405 read_file_tx99_power 3 29405 NULL
111005 +mempool_create_29437 mempool_create 1 29437 NULL
111006 +crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
111007 +p9_client_prepare_req_29448 p9_client_prepare_req 3 29448 NULL
111008 +validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
111009 +SyS_flistxattr_29474 SyS_flistxattr 3 29474 NULL
111010 +do_register_entry_29478 do_register_entry 4 29478 NULL
111011 +simple_strtoul_29480 simple_strtoul 0 29480 NULL
111012 +btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
111013 +btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
111014 +write_file_regidx_29517 write_file_regidx 3 29517 NULL
111015 +atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
111016 +ftrace_write_29551 ftrace_write 3 29551 NULL
111017 +idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
111018 +leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
111019 +kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL
111020 +lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
111021 +iwl_dbgfs_missed_beacon_write_29586 iwl_dbgfs_missed_beacon_write 3 29586 NULL
111022 +pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4-0 29589 NULL
111023 +dio_set_defer_completion_29599 dio_set_defer_completion 0 29599 NULL
111024 +slots_per_page_29601 slots_per_page 0 29601 NULL
111025 +osc_cached_mb_seq_write_29610 osc_cached_mb_seq_write 3 29610 NULL
111026 +nla_get_u16_29624 nla_get_u16 0 29624 NULL
111027 +tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
111028 +sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
111029 +sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
111030 +lustre_posix_acl_xattr_2ext_29693 lustre_posix_acl_xattr_2ext 2 29693 NULL
111031 +posix_acl_from_xattr_29708 posix_acl_from_xattr 3 29708 NULL
111032 +probes_write_29711 probes_write 3 29711 NULL
111033 +read_cis_cache_29735 read_cis_cache 4 29735 NULL
111034 +xfs_new_eof_29737 xfs_new_eof 2 29737 NULL
111035 +std_nic_write_29752 std_nic_write 3 29752 NULL
111036 +dbAlloc_29794 dbAlloc 0 29794 NULL
111037 +tcp_sendpage_29829 tcp_sendpage 4 29829 NULL
111038 +__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
111039 +kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
111040 +count_partial_29850 count_partial 0 29850 NULL
111041 +write_file_bool_bmps_29870 write_file_bool_bmps 3 29870 NULL
111042 +ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
111043 +scsi_end_request_29876 scsi_end_request 3 29876 NULL
111044 +crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
111045 +lov_ost_pool_extend_29914 lov_ost_pool_extend 2 29914 NULL
111046 +write_file_queue_29922 write_file_queue 3 29922 NULL
111047 +ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
111048 +__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
111049 +ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947
111050 +dev_mem_write_30028 dev_mem_write 3 30028 NULL
111051 +alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
111052 +scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
111053 +drp_wmove_30043 drp_wmove 4 30043 NULL
111054 +__pci_request_selected_regions_30058 __pci_request_selected_regions 0 30058 NULL
111055 +cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
111056 +snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
111057 +rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
111058 +defragment_dma_buffer_30113 defragment_dma_buffer 0 30113 NULL
111059 +spi_async_locked_30117 spi_async_locked 0 30117 NULL
111060 +recv_stream_30138 recv_stream 4 30138 NULL
111061 +u_memcpya_30139 u_memcpya 3-2 30139 NULL
111062 +elfcorehdr_read_30159 elfcorehdr_read 2 30159 NULL
111063 +alloc_switch_ctx_30165 alloc_switch_ctx 2 30165 NULL
111064 +expand_inode_data_30169 expand_inode_data 2-3 30169 NULL
111065 +mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
111066 +drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
111067 +usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
111068 +read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
111069 +SyS_semop_30227 SyS_semop 3 30227 NULL
111070 +bitmap_file_set_bit_30228 bitmap_file_set_bit 2 30228 NULL
111071 +ocfs2_calc_bg_discontig_credits_30230 ocfs2_calc_bg_discontig_credits 0 30230 NULL
111072 +rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
111073 +hfsplus_trusted_setxattr_30270 hfsplus_trusted_setxattr 4 30270 NULL
111074 +isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
111075 +compat_readv_30273 compat_readv 3 30273 NULL
111076 +skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
111077 +pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
111078 +tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
111079 +osc_contention_seconds_seq_write_30305 osc_contention_seconds_seq_write 3 30305 NULL
111080 +ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
111081 +i8254_read_30330 i8254_read 0 30330 NULL
111082 +resource_from_user_30341 resource_from_user 3 30341 NULL
111083 +o2nm_this_node_30342 o2nm_this_node 0 30342 NULL
111084 +kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL
111085 +C_SYSC_readv_30369 C_SYSC_readv 3 30369 NULL
111086 +blkdev_issue_zeroout_30392 blkdev_issue_zeroout 3 30392 NULL
111087 +c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
111088 +get_kernel_pages_30397 get_kernel_pages 0 30397 NULL
111089 +vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
111090 +tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
111091 +lstcon_session_info_30425 lstcon_session_info 6 30425 NULL
111092 +enable_write_30456 enable_write 3 30456 NULL
111093 +tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
111094 +urandom_read_30462 urandom_read 3 30462 NULL
111095 +zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
111096 +i2c_ctrl_read_30467 i2c_ctrl_read 0 30467 NULL
111097 +adu_write_30487 adu_write 3 30487 NULL
111098 +dtim_interval_write_30489 dtim_interval_write 3 30489 NULL
111099 +batadv_send_tt_request_30493 batadv_send_tt_request 5 30493 NULL
111100 +dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
111101 +set_config_30526 set_config 0 30526 NULL nohasharray
111102 +debug_debug2_read_30526 debug_debug2_read 3 30526 &set_config_30526
111103 +xfs_sb_version_hasftype_30559 xfs_sb_version_hasftype 0 30559 NULL
111104 +disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
111105 +set_le_30581 set_le 4 30581 NULL
111106 +blk_init_tags_30592 blk_init_tags 1 30592 NULL
111107 +i2c_hid_get_report_length_30598 i2c_hid_get_report_length 0 30598 NULL
111108 +sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL nohasharray
111109 +cpufreq_get_global_kobject_30610 cpufreq_get_global_kobject 0 30610 &sgl_map_user_pages_30610
111110 +SyS_msgrcv_30611 SyS_msgrcv 3 30611 NULL
111111 +macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
111112 +ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL
111113 +compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
111114 +mlx5_ib_alloc_fast_reg_page_list_30638 mlx5_ib_alloc_fast_reg_page_list 2 30638 NULL
111115 +SyS_listxattr_30647 SyS_listxattr 3 30647 NULL
111116 +jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
111117 +ni_ai_fifo_read_30681 ni_ai_fifo_read 3 30681 NULL
111118 +dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
111119 +lbs_wrbbp_write_30712 lbs_wrbbp_write 3 30712 NULL
111120 +lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
111121 +snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
111122 +snapshot_status_30744 snapshot_status 5 30744 NULL
111123 +fuse_conn_limit_write_30777 fuse_conn_limit_write 3 30777 NULL
111124 +smk_read_doi_30813 smk_read_doi 3 30813 NULL
111125 +get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
111126 +sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
111127 +wd_autoreset_write_30862 wd_autoreset_write 3 30862 NULL
111128 +ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
111129 +pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
111130 +sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
111131 +tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
111132 +huge_page_mask_30981 huge_page_mask 0 30981 NULL
111133 +read_file_bt_ant_diversity_30983 read_file_bt_ant_diversity 3 30983 NULL
111134 +lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
111135 +ima_eventsig_init_31022 ima_eventsig_init 5 31022 NULL
111136 +template_fmt_size_31033 template_fmt_size 0 31033 NULL
111137 +do_setup_msi_irqs_31043 do_setup_msi_irqs 2 31043 NULL
111138 +stride_pg_count_31053 stride_pg_count 0-2-1-4-3-5 31053 NULL
111139 +lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
111140 +sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
111141 +proc_gid_map_write_31093 proc_gid_map_write 3 31093 NULL
111142 +compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
111143 +depth_read_31112 depth_read 3 31112 NULL
111144 +hash_ipportnet6_expire_31118 hash_ipportnet6_expire 4 31118 NULL
111145 +kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
111146 +size_inside_page_31141 size_inside_page 0 31141 NULL
111147 +w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
111148 +ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
111149 +r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
111150 +mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
111151 +__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
111152 +cpumask_weight_31215 cpumask_weight 0 31215 NULL
111153 +__read_reg_31216 __read_reg 0 31216 NULL
111154 +atm_get_addr_31221 atm_get_addr 3 31221 NULL
111155 +tcp_recvmsg_31238 tcp_recvmsg 4 31238 NULL
111156 +cyy_readb_31240 cyy_readb 0 31240 NULL
111157 +_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
111158 +ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
111159 +hash_netportnet4_expire_31290 hash_netportnet4_expire 4 31290 NULL
111160 +uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
111161 +sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
111162 +command_file_write_31318 command_file_write 3 31318 NULL
111163 +hwerr_crcbits_31334 hwerr_crcbits 4 31334 NULL
111164 +em28xx_init_usb_xfer_31337 em28xx_init_usb_xfer 4-6 31337 NULL
111165 +outlen_write_31358 outlen_write 3 31358 NULL
111166 +ieee80211_rx_mgmt_auth_31366 ieee80211_rx_mgmt_auth 3 31366 NULL
111167 +xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
111168 +vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3-2 31374 NULL
111169 +trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
111170 +inb_31388 inb 0 31388 NULL
111171 +key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
111172 +_sp2d_max_pg_31422 _sp2d_max_pg 0 31422 NULL
111173 +TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
111174 +snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
111175 +transport_alloc_session_tags_31449 transport_alloc_session_tags 2-3 31449 NULL
111176 +opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
111177 +xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
111178 +alg_setkey_31485 alg_setkey 3 31485 NULL
111179 +rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
111180 +qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
111181 +__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
111182 +hidraw_write_31536 hidraw_write 3 31536 NULL
111183 +usbvision_read_31555 usbvision_read 3 31555 NULL
111184 +tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
111185 +get_max_inline_xattr_value_size_31578 get_max_inline_xattr_value_size 0 31578 NULL
111186 +osst_write_31581 osst_write 3 31581 NULL
111187 +snd_compr_get_avail_31584 snd_compr_get_avail 0 31584 NULL
111188 +iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
111189 +mtd_get_user_prot_info_31616 mtd_get_user_prot_info 0 31616 NULL
111190 +arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
111191 +videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
111192 +pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
111193 +xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
111194 +__lgread_31668 __lgread 4 31668 NULL
111195 +copy_from_user_nmi_31672 copy_from_user_nmi 3-0 31672 NULL
111196 +forced_ps_read_31685 forced_ps_read 3 31685 NULL
111197 +fst_recover_rx_error_31687 fst_recover_rx_error 3 31687 NULL
111198 +utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL nohasharray
111199 +lu_buf_check_and_grow_31735 lu_buf_check_and_grow 2 31735 &utf16s_to_utf8s_31735
111200 +shmem_pwrite_slow_31741 shmem_pwrite_slow 3-2 31741 NULL
111201 +input_abs_get_max_31742 input_abs_get_max 0 31742 NULL nohasharray
111202 +NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 &input_abs_get_max_31742
111203 +bcm_char_read_31750 bcm_char_read 3 31750 NULL
111204 +snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
111205 +SyS_lsetxattr_31766 SyS_lsetxattr 4 31766 NULL
111206 +usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
111207 +ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
111208 +isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
111209 +data_write_31805 data_write 3 31805 NULL
111210 +SyS_msgsnd_31814 SyS_msgsnd 3 31814 NULL
111211 +strnlen_user_31815 strnlen_user 0-2 31815 NULL
111212 +sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
111213 +SyS_ppoll_31855 SyS_ppoll 2 31855 NULL
111214 +iwl_dbgfs_disable_ht40_write_31876 iwl_dbgfs_disable_ht40_write 3 31876 NULL
111215 +drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
111216 +ddb_output_write_31902 ddb_output_write 3-0 31902 NULL
111217 +xattr_permission_31907 xattr_permission 0 31907 NULL
111218 +lu_buf_realloc_31915 lu_buf_realloc 2 31915 NULL
111219 +new_dir_31919 new_dir 3 31919 NULL
111220 +kmem_alloc_31920 kmem_alloc 1 31920 NULL
111221 +SYSC_sethostname_31940 SYSC_sethostname 2 31940 NULL
111222 +read_mem_31942 read_mem 3 31942 NULL nohasharray
111223 +iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4-0 31942 &read_mem_31942
111224 +vb2_write_31948 vb2_write 3 31948 NULL
111225 +pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
111226 +regcache_rbtree_sync_31964 regcache_rbtree_sync 2 31964 NULL
111227 +copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
111228 +iblock_execute_rw_31982 iblock_execute_rw 3 31982 NULL nohasharray
111229 +vx_read_status_31982 vx_read_status 0 31982 &iblock_execute_rw_31982
111230 +find_next_zero_bit_31990 find_next_zero_bit 0 31990 NULL
111231 +lustre_acl_xattr_merge2posix_31992 lustre_acl_xattr_merge2posix 2 31992 NULL
111232 +sysfs_create_file_31996 sysfs_create_file 0 31996 NULL
111233 +calc_hmac_32010 calc_hmac 3 32010 NULL
111234 +aead_len_32021 aead_len 0 32021 NULL
111235 +posix_acl_set_32037 posix_acl_set 4 32037 NULL
111236 +stk_read_32038 stk_read 3 32038 NULL
111237 +ocfs2_update_edge_lengths_32046 ocfs2_update_edge_lengths 3 32046 NULL
111238 +SYSC_llistxattr_32061 SYSC_llistxattr 3 32061 NULL
111239 +proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
111240 +cow_file_range_inline_32091 cow_file_range_inline 3 32091 NULL
111241 +bio_alloc_32095 bio_alloc 2 32095 NULL
111242 +ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
111243 +disk_status_32120 disk_status 4 32120 NULL
111244 +kobject_add_internal_32133 kobject_add_internal 0 32133 NULL
111245 +venus_link_32165 venus_link 5 32165 NULL
111246 +do_writepages_32173 do_writepages 0 32173 NULL
111247 +del_ptr_32197 del_ptr 4 32197 NULL
111248 +wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
111249 +riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
111250 +caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
111251 +lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
111252 +ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
111253 +kvm_set_spte_hva_32312 kvm_set_spte_hva 2 32312 NULL
111254 +cas_calc_tabort_32316 cas_calc_tabort 0 32316 NULL
111255 +SyS_select_32319 SyS_select 1 32319 NULL
111256 +nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
111257 +nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
111258 +t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
111259 +dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL nohasharray
111260 +rx_streaming_always_write_32357 rx_streaming_always_write 3 32357 &dispatch_ioctl_32357
111261 +ReadHDLCPCI_32362 ReadHDLCPCI 0 32362 NULL nohasharray
111262 +sel_read_initcon_32362 sel_read_initcon 3 32362 &ReadHDLCPCI_32362
111263 +ocfs2_cancel_convert_32392 ocfs2_cancel_convert 0 32392 NULL
111264 +ll_setxattr_common_32398 ll_setxattr_common 4 32398 NULL
111265 +xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
111266 +vmci_qp_alloc_32405 vmci_qp_alloc 5-3 32405 NULL
111267 +cache_status_32462 cache_status 5 32462 NULL
111268 +fill_readbuf_32464 fill_readbuf 3 32464 NULL
111269 +dgap_usertoboard_32490 dgap_usertoboard 4 32490 NULL
111270 +ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
111271 +bypass_pwoff_write_32499 bypass_pwoff_write 3 32499 NULL
111272 +mdc_pinger_recov_seq_write_32510 mdc_pinger_recov_seq_write 3 32510 NULL
111273 +ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
111274 +disconnect_32521 disconnect 4 32521 NULL
111275 +qsfp_read_32522 qsfp_read 0-2-4 32522 NULL
111276 +ocfs2_refresh_qinfo_32524 ocfs2_refresh_qinfo 0 32524 NULL nohasharray
111277 +audio_get_intf_req_32524 audio_get_intf_req 0 32524 &ocfs2_refresh_qinfo_32524
111278 +ilo_read_32531 ilo_read 3 32531 NULL
111279 +ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
111280 +format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
111281 +aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
111282 +osc_iocontrol_32565 osc_iocontrol 3 32565 NULL
111283 +mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
111284 +pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
111285 +read_file_beacon_32595 read_file_beacon 3 32595 NULL
111286 +ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
111287 +irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
111288 +cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
111289 +kvmalloc_32646 kvmalloc 1 32646 NULL
111290 +ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
111291 +generic_readlink_32654 generic_readlink 3 32654 NULL
111292 +move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
111293 +apei_res_add_32674 apei_res_add 0 32674 NULL
111294 +compat_SyS_preadv_32679 compat_SyS_preadv 3 32679 NULL
111295 +jfs_readpages_32702 jfs_readpages 4 32702 NULL
111296 +rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
111297 +i40e_pci_sriov_enable_32742 i40e_pci_sriov_enable 2 32742 NULL
111298 +megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
111299 +stats_read_ul_32751 stats_read_ul 3 32751 NULL
111300 +vmci_transport_dgram_dequeue_32775 vmci_transport_dgram_dequeue 4 32775 NULL
111301 +sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
111302 +rproc_name_read_32805 rproc_name_read 3 32805 NULL
111303 +new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
111304 +cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 NULL nohasharray
111305 +ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 &cifs_writedata_alloc_32880
111306 +vp702x_usb_inout_cmd_32884 vp702x_usb_inout_cmd 6-4 32884 NULL
111307 +il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
111308 +zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
111309 +rmap_recycle_32938 rmap_recycle 3 32938 NULL
111310 +xfs_log_reserve_32959 xfs_log_reserve 2 32959 NULL
111311 +ocfs2_check_dir_trailer_32968 ocfs2_check_dir_trailer 0 32968 NULL
111312 +compat_filldir_32999 compat_filldir 3 32999 NULL
111313 +SyS_syslog_33007 SyS_syslog 3 33007 NULL
111314 +br_multicast_set_hash_max_33012 br_multicast_set_hash_max 2 33012 NULL
111315 +write_file_bt_ant_diversity_33019 write_file_bt_ant_diversity 3 33019 NULL
111316 +mic_virtio_copy_to_user_33048 mic_virtio_copy_to_user 3 33048 NULL
111317 +SYSC_lgetxattr_33049 SYSC_lgetxattr 4 33049 NULL
111318 +pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
111319 +ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
111320 +bitmap_resize_33054 bitmap_resize 2 33054 NULL
111321 +stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
111322 +sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
111323 +alloc_tio_33077 alloc_tio 3 33077 NULL
111324 +acl_permission_check_33083 acl_permission_check 0 33083 NULL
111325 +fb_sys_write_33130 fb_sys_write 3 33130 NULL
111326 +__len_within_target_33132 __len_within_target 0 33132 NULL
111327 +SyS_poll_33152 SyS_poll 2 33152 NULL
111328 +debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
111329 +dataflash_read_fact_otp_33204 dataflash_read_fact_otp 3-2 33204 NULL
111330 +pp_read_33210 pp_read 3 33210 NULL
111331 +xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
111332 +snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
111333 +cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
111334 +sync_pt_create_33282 sync_pt_create 2 33282 NULL
111335 +mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
111336 +isku_sysfs_read_keys_easyzone_33318 isku_sysfs_read_keys_easyzone 6 33318 NULL
111337 +vx_send_irq_dsp_33329 vx_send_irq_dsp 0 33329 NULL
111338 +joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
111339 +lov_stripesize_seq_write_33353 lov_stripesize_seq_write 3 33353 NULL
111340 +create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
111341 +irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
111342 +read_file_regidx_33370 read_file_regidx 3 33370 NULL
111343 +ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
111344 +scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
111345 +ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 3-2 33394 NULL
111346 +cfs_trace_copyin_string_33396 cfs_trace_copyin_string 4 33396 NULL
111347 +snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 NULL
111348 +hash_netiface6_expire_33421 hash_netiface6_expire 4 33421 NULL
111349 +dis_tap_write_33426 dis_tap_write 3 33426 NULL
111350 +message_stats_list_33440 message_stats_list 5 33440 NULL
111351 +ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
111352 +create_entry_33479 create_entry 2 33479 NULL
111353 +ip_setsockopt_33487 ip_setsockopt 5 33487 NULL nohasharray
111354 +elf_map_33487 elf_map 0-2 33487 &ip_setsockopt_33487
111355 +res_counter_read_33499 res_counter_read 4 33499 NULL
111356 +hash_netnet4_expire_33500 hash_netnet4_expire 4 33500 NULL
111357 +fb_read_33506 fb_read 3 33506 NULL
111358 +musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
111359 +ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
111360 +nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
111361 +aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
111362 +tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL nohasharray
111363 +osc_max_rpcs_in_flight_seq_write_33539 osc_max_rpcs_in_flight_seq_write 3 33539 &tomoyo_read_self_33539
111364 +count_subheaders_33591 count_subheaders 0 33591 NULL
111365 +scsi_execute_33596 scsi_execute 5 33596 NULL
111366 +comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
111367 +xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL
111368 +usb_gstrings_attach_33615 usb_gstrings_attach 3 33615 NULL nohasharray
111369 +il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 &usb_gstrings_attach_33615
111370 +stride_page_count_33641 stride_page_count 2 33641 NULL
111371 +irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
111372 +inw_p_33668 inw_p 0 33668 NULL
111373 +arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
111374 +i2c_hid_alloc_buffers_33673 i2c_hid_alloc_buffers 2 33673 NULL
111375 +nv50_disp_dmac_create__33696 nv50_disp_dmac_create_ 6 33696 NULL
111376 +netlink_sendmsg_33708 netlink_sendmsg 4 33708 NULL
111377 +tipc_link_stats_33716 tipc_link_stats 3 33716 NULL
111378 +ext4_wb_update_i_disksize_33717 ext4_wb_update_i_disksize 2 33717 NULL
111379 +pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
111380 +write_file_spectral_count_33723 write_file_spectral_count 3 33723 NULL
111381 +__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
111382 +vifs_state_read_33762 vifs_state_read 3 33762 NULL
111383 +hashtab_create_33769 hashtab_create 3 33769 NULL
111384 +if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
111385 +filter_write_33819 filter_write 3 33819 NULL
111386 +sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
111387 +scrub_setup_recheck_block_33831 scrub_setup_recheck_block 5-4 33831 NULL
111388 +ext4_journal_extend_33835 ext4_journal_extend 2 33835 NULL
111389 +oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
111390 +get_user_pages_33908 get_user_pages 0 33908 NULL
111391 +ath6kl_roam_mode_write_33912 ath6kl_roam_mode_write 3 33912 NULL
111392 +queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
111393 +sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
111394 +lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
111395 +read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
111396 +hfsplus_osx_setxattr_33952 hfsplus_osx_setxattr 4 33952 NULL
111397 +__proc_dump_kernel_33954 __proc_dump_kernel 5 33954 NULL
111398 +vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
111399 +lbs_lowrssi_write_34025 lbs_lowrssi_write 3 34025 NULL
111400 +ppp_write_34034 ppp_write 3 34034 NULL
111401 +tty_insert_flip_string_34042 tty_insert_flip_string 3-0 34042 NULL
111402 +memcg_update_all_caches_34068 memcg_update_all_caches 1 34068 NULL
111403 +pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
111404 +__irq_domain_add_34101 __irq_domain_add 2 34101 NULL
111405 +proc_scsi_host_write_34107 proc_scsi_host_write 3 34107 NULL
111406 +islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
111407 +ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2-0 34135 NULL
111408 +ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
111409 +shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
111410 +ocfs2_xattr_list_entry_34165 ocfs2_xattr_list_entry 0 34165 NULL
111411 +skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
111412 +ext4_da_write_begin_34215 ext4_da_write_begin 3-4 34215 NULL
111413 +sysfs_bin_read_34228 sysfs_bin_read 3 34228 NULL
111414 +bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
111415 +ocfs2_dlm_lock_34265 ocfs2_dlm_lock 0 34265 NULL
111416 +device_private_init_34279 device_private_init 0 34279 NULL
111417 +ext4_get_groups_count_34324 ext4_get_groups_count 0 34324 NULL
111418 +pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 NULL nohasharray
111419 +iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 &pcpu_need_to_extend_34326
111420 +crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL nohasharray
111421 +sync_page_io_34363 sync_page_io 3 34363 &crypto_ablkcipher_ivsize_34363
111422 +rngapi_reset_34366 rngapi_reset 3 34366 NULL
111423 +ea_read_34378 ea_read 0 34378 NULL
111424 +fuse_send_read_34379 fuse_send_read 4 34379 NULL
111425 +av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
111426 +usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
111427 +read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
111428 +iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
111429 +ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
111430 +wd_exp_mode_write_34407 wd_exp_mode_write 3 34407 NULL
111431 +nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
111432 +usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
111433 +mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
111434 +skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
111435 +i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
111436 +ocfs2_mv_xattr_buckets_34484 ocfs2_mv_xattr_buckets 6 34484 NULL
111437 +security_inode_permission_34488 security_inode_permission 0 34488 NULL
111438 +SyS_pwritev_34494 SyS_pwritev 3 34494 NULL
111439 +qp_alloc_res_34496 qp_alloc_res 5 34496 NULL
111440 +lu_buf_check_and_alloc_34505 lu_buf_check_and_alloc 2 34505 NULL
111441 +ext4_fallocate_34537 ext4_fallocate 4-3 34537 NULL nohasharray
111442 +tracing_stats_read_34537 tracing_stats_read 3 34537 &ext4_fallocate_34537
111443 +hugetlbfs_read_actor_34547 hugetlbfs_read_actor 2-5-4-0 34547 NULL
111444 +dbBackSplit_34561 dbBackSplit 0 34561 NULL
111445 +alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
111446 +lov_stripecount_seq_write_34582 lov_stripecount_seq_write 3 34582 NULL
111447 +init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
111448 +inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
111449 +ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL nohasharray
111450 +cw1200_queue_init_34599 cw1200_queue_init 4 34599 &ceph_msgpool_init_34599
111451 +brcmf_cfg80211_mgmt_tx_34608 brcmf_cfg80211_mgmt_tx 7 34608 NULL
111452 +__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
111453 +apei_get_nvs_resources_34616 apei_get_nvs_resources 0 34616 NULL
111454 +__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
111455 +cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
111456 +kvm_set_spte_hva_34671 kvm_set_spte_hva 2 34671 NULL
111457 +sleep_auth_write_34676 sleep_auth_write 3 34676 NULL
111458 +isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
111459 +batadv_tvlv_realloc_packet_buff_34688 batadv_tvlv_realloc_packet_buff 3-4 34688 NULL
111460 +port_print_34704 port_print 3 34704 NULL
111461 +ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
111462 +platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
111463 +reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
111464 +lsm_alloc_plain_34755 lsm_alloc_plain 1 34755 NULL
111465 +bootmode_store_34762 bootmode_store 4 34762 NULL
111466 +device_add_34766 device_add 0 34766 NULL
111467 +qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
111468 +SYSC_keyctl_34800 SYSC_keyctl 4 34800 NULL
111469 +can_nocow_extent_34801 can_nocow_extent 2 34801 NULL
111470 +drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
111471 +ll_setxattr_34806 ll_setxattr 4 34806 NULL
111472 +file_page_index_34820 file_page_index 0-2 34820 NULL
111473 +b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
111474 +nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
111475 +acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
111476 +usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
111477 +ieee80211_if_read_txpower_34871 ieee80211_if_read_txpower 3 34871 NULL
111478 +msg_print_text_34889 msg_print_text 0 34889 NULL
111479 +ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
111480 +si476x_radio_read_rsq_primary_blob_34916 si476x_radio_read_rsq_primary_blob 3 34916 NULL
111481 +__inode_permission_34925 __inode_permission 0 34925 NULL nohasharray
111482 +btrfs_super_chunk_root_34925 btrfs_super_chunk_root 0 34925 &__inode_permission_34925
111483 +ceph_aio_write_34930 ceph_aio_write 4 34930 NULL
111484 +sec_flags2str_34933 sec_flags2str 3 34933 NULL
111485 +snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
111486 +i2c_transfer_34958 i2c_transfer 0 34958 NULL
111487 +do_add_page_to_bio_34974 do_add_page_to_bio 2-10 34974 NULL
111488 +rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
111489 +l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
111490 +sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
111491 +coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
111492 +brcmf_sdio_chip_writenvram_35042 brcmf_sdio_chip_writenvram 4 35042 NULL
111493 +pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
111494 +__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
111495 +capi_write_35104 capi_write 3 35104 NULL nohasharray
111496 +tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
111497 +ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
111498 +ceph_osdc_start_request_35122 ceph_osdc_start_request 0 35122 NULL
111499 +message_stats_print_35158 message_stats_print 6 35158 NULL
111500 +iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
111501 +ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
111502 +unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
111503 +security_key_getsecurity_35218 security_key_getsecurity 0 35218 NULL nohasharray
111504 +striped_read_35218 striped_read 0-2 35218 &security_key_getsecurity_35218
111505 +rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL
111506 +set_fd_set_35249 set_fd_set 1 35249 NULL
111507 +ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
111508 +jbd2_journal_get_write_access_35263 jbd2_journal_get_write_access 0 35263 NULL
111509 +dis_disc_write_35265 dis_disc_write 3 35265 NULL
111510 +dma_show_regs_35266 dma_show_regs 3 35266 NULL
111511 +irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
111512 +i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
111513 +isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
111514 +__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 NULL nohasharray
111515 +brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 &__btrfs_buffered_write_35311
111516 +tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
111517 +ieee80211_if_fmt_ap_power_level_35347 ieee80211_if_fmt_ap_power_level 3 35347 NULL
111518 +nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
111519 +ieee80211_rx_mgmt_deauth_35351 ieee80211_rx_mgmt_deauth 3 35351 NULL
111520 +compat_filldir64_35354 compat_filldir64 3 35354 NULL
111521 +read_kmem_35372 read_kmem 3 35372 NULL
111522 +SyS_getxattr_35408 SyS_getxattr 4 35408 NULL
111523 +rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
111524 +buffer_to_user_35439 buffer_to_user 3 35439 NULL
111525 +fiemap_prepare_and_copy_exts_35494 fiemap_prepare_and_copy_exts 5 35494 NULL
111526 +btrfs_prealloc_file_range_trans_35500 btrfs_prealloc_file_range_trans 4 35500 NULL
111527 +async_setkey_35521 async_setkey 3 35521 NULL
111528 +__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
111529 +iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
111530 +pstore_mkfile_35536 pstore_mkfile 7 35536 NULL
111531 +rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
111532 +ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
111533 +ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
111534 +sysfs_create_subdir_35567 sysfs_create_subdir 0 35567 NULL
111535 +ext4_blocks_for_truncate_35579 ext4_blocks_for_truncate 0 35579 NULL
111536 +ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
111537 +spk_msg_set_35586 spk_msg_set 3 35586 NULL
111538 +kernel_readv_35617 kernel_readv 3 35617 NULL
111539 +reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
111540 +pci_request_regions_35635 pci_request_regions 0 35635 NULL
111541 +ptlrpcd_steal_rqset_35637 ptlrpcd_steal_rqset 0 35637 NULL
111542 +spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
111543 +rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
111544 +compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
111545 +SYSC_pwritev_35690 SYSC_pwritev 3 35690 NULL
111546 +rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
111547 +md_super_write_35703 md_super_write 4 35703 NULL
111548 +ocfs2_extent_recs_per_gd_35710 ocfs2_extent_recs_per_gd 0 35710 NULL
111549 +iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
111550 +udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
111551 +pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
111552 +tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
111553 +vx_query_hbuffer_size_35859 vx_query_hbuffer_size 0 35859 NULL
111554 +mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
111555 +fls64_35862 fls64 0 35862 NULL
111556 +kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
111557 +ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
111558 +uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
111559 +SyS_set_mempolicy_35909 SyS_set_mempolicy 3 35909 NULL
111560 +kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
111561 +rbio_nr_pages_35916 rbio_nr_pages 0-1-2 35916 NULL
111562 +sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
111563 +rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
111564 +put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
111565 +ext_rts51x_sd_execute_write_data_35971 ext_rts51x_sd_execute_write_data 9 35971 NULL
111566 +ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL nohasharray
111567 +generic_ocp_read_35974 generic_ocp_read 3 35974 &ceph_buffer_new_35974
111568 +acl_alloc_35979 acl_alloc 1 35979 NULL
111569 +device_add_class_symlinks_35985 device_add_class_symlinks 0 35985 NULL
111570 +generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
111571 +write_file_antenna_35998 write_file_antenna 3 35998 NULL nohasharray
111572 +kuc_alloc_35998 kuc_alloc 1 35998 &write_file_antenna_35998
111573 +il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
111574 +__videobuf_alloc_36031 __videobuf_alloc 1 36031 NULL
111575 +account_shadowed_36048 account_shadowed 2 36048 NULL
111576 +gpio_power_read_36059 gpio_power_read 3 36059 NULL
111577 +write_emulate_36065 write_emulate 2-4 36065 NULL
111578 +stack_max_size_write_36068 stack_max_size_write 3 36068 NULL
111579 +radeon_vm_num_pdes_36070 radeon_vm_num_pdes 0 36070 NULL
111580 +ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
111581 +ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
111582 +snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
111583 +mtip_hw_read_device_status_36082 mtip_hw_read_device_status 3 36082 NULL
111584 +vga_arb_write_36112 vga_arb_write 3 36112 NULL
111585 +simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
111586 +ext3_readpages_36144 ext3_readpages 4 36144 NULL
111587 +twl_set_36154 twl_set 2 36154 NULL
111588 +b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
111589 +btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
111590 +snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
111591 +SyS_kexec_load_36176 SyS_kexec_load 2 36176 NULL
111592 +ramoops_init_przs_36199 ramoops_init_przs 4 36199 NULL
111593 +SYSC_sched_getaffinity_36208 SYSC_sched_getaffinity 2 36208 NULL
111594 +SYSC_process_vm_readv_36216 SYSC_process_vm_readv 3-5 36216 NULL
111595 +atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
111596 +viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
111597 +SYSC_getxattr_36242 SYSC_getxattr 4 36242 NULL
111598 +rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
111599 +scrub_stripe_36248 scrub_stripe 5-4 36248 NULL
111600 +compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
111601 +usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL nohasharray
111602 +cfs_hash_buckets_realloc_36276 cfs_hash_buckets_realloc 4 36276 &usb_buffer_alloc_36276
111603 +codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
111604 +crypto_shash_digestsize_36284 crypto_shash_digestsize 0 36284 NULL
111605 +nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
111606 +lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
111607 +cfg80211_rx_mlme_mgmt_36306 cfg80211_rx_mlme_mgmt 3 36306 NULL
111608 +ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
111609 +fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
111610 +lc_create_36332 lc_create 4 36332 NULL
111611 +jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
111612 +isku_sysfs_read_key_mask_36343 isku_sysfs_read_key_mask 6 36343 NULL
111613 +ath6kl_regwrite_write_36351 ath6kl_regwrite_write 3 36351 NULL
111614 +v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL
111615 +to_sector_36361 to_sector 0-1 36361 NULL
111616 +tunables_read_36385 tunables_read 3 36385 NULL
111617 +afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
111618 +sierra_write_36402 sierra_write 4 36402 NULL
111619 +iwl_dbgfs_d3_sram_write_36403 iwl_dbgfs_d3_sram_write 3 36403 NULL
111620 +SyS_sethostname_36417 SyS_sethostname 2 36417 NULL
111621 +ReadW6692B_36445 ReadW6692B 0 36445 NULL
111622 +sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
111623 +alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
111624 +SyS_process_vm_writev_36476 SyS_process_vm_writev 3-5 36476 NULL
111625 +b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
111626 +tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 NULL nohasharray
111627 +ip6_append_data_36490 ip6_append_data 4 36490 &tx_tx_checksum_result_read_36490
111628 +cmd_loop_36491 cmd_loop 0 36491 NULL
111629 +__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
111630 +mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
111631 +get_param_l_36518 get_param_l 0 36518 NULL
111632 +ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
111633 +crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
111634 +cpu_type_read_36540 cpu_type_read 3 36540 NULL
111635 +__kfifo_to_user_36555 __kfifo_to_user 3-0 36555 NULL nohasharray
111636 +macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
111637 +btrfs_get_token_64_36572 btrfs_get_token_64 0 36572 NULL
111638 +__erst_read_36579 __erst_read 0 36579 NULL
111639 +put_cmsg_36589 put_cmsg 4 36589 NULL
111640 +fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
111641 +vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
111642 +convert_extent_item_v0_36645 convert_extent_item_v0 4 36645 NULL
111643 +ced_ioctl_36647 ced_ioctl 2 36647 NULL
111644 +lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
111645 +osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
111646 +iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
111647 +xillybus_read_36678 xillybus_read 3 36678 NULL
111648 +gsmtty_write_36702 gsmtty_write 3 36702 NULL
111649 +snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4-0 36740 NULL
111650 +cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
111651 +ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
111652 +ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
111653 +i40e_init_lan_hmc_36796 i40e_init_lan_hmc 2-3-4-5 36796 NULL
111654 +proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
111655 +hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
111656 +int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
111657 +fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
111658 +keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
111659 +cm_write_36858 cm_write 3 36858 NULL
111660 +tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
111661 +svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
111662 +raid56_parity_write_36877 raid56_parity_write 5 36877 NULL
111663 +__btrfs_map_block_36883 __btrfs_map_block 3 36883 NULL
111664 +ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
111665 +selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
111666 +OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
111667 +audio_set_endpoint_req_36918 audio_set_endpoint_req 0 36918 NULL
111668 +crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
111669 +il4965_rs_sta_dbgfs_scale_table_write_36979 il4965_rs_sta_dbgfs_scale_table_write 3 36979 NULL
111670 +drbd_new_dev_size_36998 drbd_new_dev_size 0-3 36998 NULL
111671 +auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
111672 +setxattr_37006 setxattr 4 37006 NULL
111673 +ocfs2_dlm_unlock_37037 ocfs2_dlm_unlock 0 37037 NULL
111674 +command_file_read_37038 command_file_read 3 37038 NULL
111675 +figure_loop_size_37051 figure_loop_size 2-3 37051 NULL
111676 +ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL nohasharray
111677 +qp_broker_create_37053 qp_broker_create 6-5 37053 &ieee80211_if_read_drop_unencrypted_37053
111678 +SYSC_setxattr_37078 SYSC_setxattr 4 37078 NULL
111679 +parse_command_37079 parse_command 2 37079 NULL
111680 +pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
111681 +tun_get_user_37094 tun_get_user 5 37094 NULL
111682 +has_wrprotected_page_37123 has_wrprotected_page 3-2 37123 NULL
111683 +snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
111684 +mtt_free_res_37144 mtt_free_res 5 37144 NULL
111685 +msg_word_37164 msg_word 0 37164 NULL
111686 +f2fs_direct_IO_37167 f2fs_direct_IO 4 37167 NULL
111687 +can_set_xattr_37182 can_set_xattr 4 37182 NULL
111688 +vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
111689 +sysfs_add_file_37200 sysfs_add_file 0 37200 NULL
111690 +forced_ps_write_37209 forced_ps_write 3 37209 NULL
111691 +crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL nohasharray
111692 +ext4_ind_direct_IO_37212 ext4_ind_direct_IO 0-4 37212 &crypto_shash_descsize_37212
111693 +bchannel_get_rxbuf_37213 bchannel_get_rxbuf 2-0 37213 NULL
111694 +regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
111695 +__do_replace_37227 __do_replace 5 37227 NULL
111696 +iwl_dbgfs_d3_sram_read_37237 iwl_dbgfs_d3_sram_read 3 37237 NULL
111697 +rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
111698 +exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
111699 +ieee80211_if_read_power_mode_37305 ieee80211_if_read_power_mode 3 37305 NULL
111700 +ext3_direct_IO_37308 ext3_direct_IO 4 37308 NULL
111701 +ocfs2_calc_extend_credits_37310 ocfs2_calc_extend_credits 0 37310 NULL
111702 +jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
111703 +send_msg_37323 send_msg 4 37323 NULL
111704 +l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL nohasharray
111705 +bnx2x_vf_fill_fw_str_37327 bnx2x_vf_fill_fw_str 3 37327 &l2cap_create_connless_pdu_37327
111706 +scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
111707 +rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
111708 +security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
111709 +hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
111710 +acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL
111711 +tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4-0 37428 NULL
111712 +iwl_print_last_event_logs_37433 iwl_print_last_event_logs 7-9-0 37433 NULL
111713 +fru_alloc_37442 fru_alloc 1 37442 NULL
111714 +tcp_established_options_37450 tcp_established_options 0 37450 NULL nohasharray
111715 +tipc_send2port_37450 tipc_send2port 4 37450 &tcp_established_options_37450
111716 +brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
111717 +get_est_timing_37484 get_est_timing 0 37484 NULL
111718 +kmem_realloc_37489 kmem_realloc 2 37489 NULL
111719 +__hfsplus_setxattr_37499 __hfsplus_setxattr 4 37499 NULL
111720 +bitmap_dirty_bits_37503 bitmap_dirty_bits 2 37503 NULL
111721 +osc_active_seq_write_37514 osc_active_seq_write 3 37514 NULL
111722 +bdev_writeseg_37519 bdev_writeseg 2-3 37519 NULL
111723 +xz_dec_test_write_37527 xz_dec_test_write 3 37527 NULL
111724 +fault_inject_read_37534 fault_inject_read 3 37534 NULL
111725 +hdr_size_37536 hdr_size 0 37536 NULL
111726 +extent_map_end_37550 extent_map_end 0 37550 NULL
111727 +sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL
111728 +ioat_chansts_37558 ioat_chansts 0 37558 NULL
111729 +xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
111730 +qla2x00_debounce_register_37597 qla2x00_debounce_register 0 37597 NULL
111731 +kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
111732 +SYSC_mbind_37622 SYSC_mbind 5 37622 NULL
111733 +SyS_mbind_37638 SyS_mbind 5 37638 NULL
111734 +bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
111735 +rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
111736 +vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
111737 +SYSC_get_mempolicy_37664 SYSC_get_mempolicy 3 37664 NULL
111738 +__wa_seg_calculate_isoc_frame_count_37672 __wa_seg_calculate_isoc_frame_count 0 37672 NULL
111739 +ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
111740 +regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
111741 +page_chain_free_37697 page_chain_free 0 37697 NULL
111742 +nametbl_header_37698 nametbl_header 2-0 37698 NULL
111743 +__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
111744 +dynamic_ps_timeout_write_37713 dynamic_ps_timeout_write 3 37713 NULL
111745 +read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
111746 +ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
111747 +ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
111748 +dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
111749 +il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
111750 +smk_read_logging_37804 smk_read_logging 3 37804 NULL
111751 +ocrdma_alloc_frmr_page_list_37815 ocrdma_alloc_frmr_page_list 2 37815 NULL
111752 +rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
111753 +android_get_p2p_addr_37832 android_get_p2p_addr 0 37832 NULL
111754 +jbd2_journal_get_undo_access_37837 jbd2_journal_get_undo_access 0 37837 NULL
111755 +o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
111756 +xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
111757 +set_registers_37883 set_registers 4 37883 NULL
111758 +btrfs_stack_file_extent_disk_bytenr_37888 btrfs_stack_file_extent_disk_bytenr 0 37888 NULL
111759 +pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL nohasharray
111760 +_rtw_malloc_37928 _rtw_malloc 1 37928 &pkt_alloc_packet_data_37928
111761 +read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
111762 +write_file_bool_37957 write_file_bool 3 37957 NULL
111763 +fifo_alloc_37961 fifo_alloc 1 37961 NULL
111764 +rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
111765 +persistent_ram_old_size_37997 persistent_ram_old_size 0 37997 NULL
111766 +vfs_readv_38011 vfs_readv 3 38011 NULL
111767 +aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
111768 +il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 NULL nohasharray
111769 +klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 &il_dbgfs_chain_noise_read_38044
111770 +SyS_llistxattr_38048 SyS_llistxattr 3 38048 NULL
111771 +sysfs_do_create_link_38051 sysfs_do_create_link 0 38051 NULL
111772 +_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL
111773 +nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
111774 +alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
111775 +xfs_buf_readahead_map_38081 xfs_buf_readahead_map 3 38081 NULL nohasharray
111776 +wcn36xx_smd_rsp_process_38081 wcn36xx_smd_rsp_process 3 38081 &xfs_buf_readahead_map_38081
111777 +uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
111778 +request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
111779 +proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
111780 +ep0_read_38095 ep0_read 3 38095 NULL
111781 +sk_wmem_schedule_38096 sk_wmem_schedule 2 38096 NULL nohasharray
111782 +osc_checksum_seq_write_38096 osc_checksum_seq_write 3 38096 &sk_wmem_schedule_38096
111783 +o2hb_read_slots_38105 o2hb_read_slots 2 38105 NULL
111784 +snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
111785 +vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
111786 +__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 0-4-3 38153 NULL
111787 +btrfs_extent_same_38163 btrfs_extent_same 3-2 38163 NULL
111788 +kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
111789 +cdev_add_38176 cdev_add 2-3 38176 NULL
111790 +rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
111791 +get_ucode_user_38202 get_ucode_user 3 38202 NULL
111792 +osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
111793 +ceph_decode_16_38239 ceph_decode_16 0 38239 NULL
111794 +_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
111795 +mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 NULL nohasharray
111796 +ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268 nohasharray
111797 +SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 &ieee80211_if_read_auto_open_plinks_38268
111798 +xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
111799 +xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
111800 +ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
111801 +ucma_query_path_38305 ucma_query_path 3 38305 NULL
111802 +isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
111803 +ida_simple_get_38326 ida_simple_get 0 38326 NULL
111804 +__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
111805 +btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
111806 +xfs_free_file_space_38383 xfs_free_file_space 2-3 38383 NULL
111807 +dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
111808 +ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
111809 +pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
111810 +kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
111811 +blk_end_bidi_request_38482 blk_end_bidi_request 3-4 38482 NULL
111812 +dev_names_read_38509 dev_names_read 3 38509 NULL
111813 +iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
111814 +event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
111815 +ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
111816 +btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
111817 +kuc_len_38557 kuc_len 0-1 38557 NULL
111818 +irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
111819 +il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
111820 +_ipw_read32_38565 _ipw_read32 0 38565 NULL
111821 +snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
111822 +copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
111823 +icn_writecmd_38629 icn_writecmd 2 38629 NULL
111824 +write_enabled_file_bool_38630 write_enabled_file_bool 3 38630 NULL
111825 +ext2_readpages_38640 ext2_readpages 4 38640 NULL
111826 +audit_init_entry_38644 audit_init_entry 1 38644 NULL
111827 +qp_broker_alloc_38646 qp_broker_alloc 6-5 38646 NULL
111828 +mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
111829 +nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
111830 +snd_es1371_wait_src_ready_38673 snd_es1371_wait_src_ready 0 38673 NULL
111831 +iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
111832 +rbio_add_io_page_38700 rbio_add_io_page 6 38700 NULL
111833 +alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
111834 +w83977af_sir_interrupt_38738 w83977af_sir_interrupt 0 38738 NULL
111835 +udf_readpages_38761 udf_readpages 4 38761 NULL
111836 +iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
111837 +bcache_device_init_38781 bcache_device_init 3 38781 NULL
111838 +snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
111839 +slab_order_38794 slab_order 0 38794 NULL
111840 +do_pci_enable_device_38802 do_pci_enable_device 0 38802 NULL
111841 +err_decode_38804 err_decode 2 38804 NULL
111842 +ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
111843 +direct_entry_38836 direct_entry 3 38836 NULL
111844 +compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
111845 +read_nic_io_word_38853 read_nic_io_word 0 38853 NULL
111846 +interfaces_38859 interfaces 2 38859 NULL
111847 +pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
111848 +dbgfs_state_38894 dbgfs_state 3 38894 NULL
111849 +f2fs_xattr_set_acl_38895 f2fs_xattr_set_acl 4 38895 NULL
111850 +il_dbgfs_sram_write_38942 il_dbgfs_sram_write 3 38942 NULL
111851 +__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
111852 +usb_maxpacket_38977 usb_maxpacket 0 38977 NULL nohasharray
111853 +C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 &usb_maxpacket_38977
111854 +OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
111855 +lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
111856 +get_nodes_39012 get_nodes 3 39012 NULL
111857 +twl6030_interrupt_unmask_39013 twl6030_interrupt_unmask 2 39013 NULL
111858 +__blkdev_issue_zeroout_39020 __blkdev_issue_zeroout 3 39020 NULL
111859 +_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
111860 +do_write_kmem_39051 do_write_kmem 0-1-3 39051 NULL
111861 +ReadHFC_39104 ReadHFC 0 39104 NULL
111862 +tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
111863 +__kfifo_to_user_r_39123 __kfifo_to_user_r 5-3 39123 NULL
111864 +ea_foreach_39133 ea_foreach 0 39133 NULL
111865 +generic_permission_39150 generic_permission 0 39150 NULL
111866 +proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
111867 +ath9k_hw_ar9003_dump_eeprom_39156 ath9k_hw_ar9003_dump_eeprom 5-4 39156 NULL
111868 +echo_client_kbrw_39170 echo_client_kbrw 6 39170 NULL
111869 +ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
111870 +ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
111871 +qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
111872 +ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
111873 +posix_acl_to_xattr_39237 posix_acl_to_xattr 0 39237 NULL
111874 +snd_pcm_capture_forward_39248 snd_pcm_capture_forward 2 39248 NULL
111875 +r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
111876 +pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
111877 +i915_error_state_read_39254 i915_error_state_read 3 39254 NULL
111878 +rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
111879 +__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
111880 +insert_reserved_file_extent_39327 insert_reserved_file_extent 3 39327 NULL
111881 +wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
111882 +ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
111883 +do_write_log_from_user_39362 do_write_log_from_user 3-0 39362 NULL
111884 +vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
111885 +regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
111886 +fnic_trace_debugfs_read_39380 fnic_trace_debugfs_read 3 39380 NULL
111887 +ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
111888 +__send_to_port_39386 __send_to_port 3 39386 NULL
111889 +user_power_read_39414 user_power_read 3 39414 NULL
111890 +alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
111891 +mic_desc_size_39464 mic_desc_size 0 39464 NULL
111892 +apei_resources_add_39470 apei_resources_add 0 39470 NULL
111893 +setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
111894 +ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
111895 +cl_req_alloc_39523 cl_req_alloc 4 39523 NULL
111896 +int_proc_write_39542 int_proc_write 3 39542 NULL
111897 +mdc_unpack_capa_39553 mdc_unpack_capa 0 39553 NULL
111898 +pp_write_39554 pp_write 3 39554 NULL
111899 +datablob_format_39571 datablob_format 2 39571 NULL nohasharray
111900 +ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
111901 +ext_depth_39607 ext_depth 0 39607 NULL
111902 +batadv_tt_tvlv_generate_39615 batadv_tt_tvlv_generate 4 39615 NULL
111903 +nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
111904 +sdio_readb_39618 sdio_readb 0 39618 NULL
111905 +set_dev_class_39645 set_dev_class 4 39645 NULL
111906 +snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
111907 +tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL
111908 +kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
111909 +v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
111910 +hsc_msg_len_get_39673 hsc_msg_len_get 0 39673 NULL
111911 +do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
111912 +sd_completed_bytes_39705 sd_completed_bytes 0 39705 NULL
111913 +ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
111914 +adt7316_spi_multi_read_39765 adt7316_spi_multi_read 3 39765 NULL
111915 +security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
111916 +snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
111917 +get_priv_size_39828 get_priv_size 0-1 39828 NULL
111918 +pkt_add_39897 pkt_add 3 39897 NULL
111919 +read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
111920 +gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
111921 +dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
111922 +aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
111923 +exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
111924 +oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
111925 +__spi_async_39932 __spi_async 0 39932 NULL
111926 +__get_order_39935 __get_order 0 39935 NULL
111927 +error_error_frame_read_39947 error_error_frame_read 3 39947 NULL
111928 +tty_prepare_flip_string_39955 tty_prepare_flip_string 3-0 39955 NULL
111929 +lstcon_group_list_39958 lstcon_group_list 2 39958 NULL
111930 +dma_push_rx_39973 dma_push_rx 2 39973 NULL
111931 +broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
111932 +mthca_array_init_39987 mthca_array_init 2 39987 NULL
111933 +fw_device_op_read_39990 fw_device_op_read 3 39990 NULL
111934 +server_name2svname_39998 server_name2svname 4 39998 NULL
111935 +xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
111936 +ivtvfb_write_40023 ivtvfb_write 3 40023 NULL
111937 +disc_pwup_write_40027 disc_pwup_write 3 40027 NULL
111938 +ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
111939 +datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
111940 +l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 NULL nohasharray
111941 +add_tty_40055 add_tty 1 40055 &l2cap_create_iframe_pdu_40055
111942 +atomic_xchg_40070 atomic_xchg 0 40070 NULL
111943 +sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
111944 +dwc2_max_desc_num_40132 dwc2_max_desc_num 0 40132 NULL
111945 +rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
111946 +ath10k_write_simulate_fw_crash_40143 ath10k_write_simulate_fw_crash 3 40143 NULL
111947 +iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
111948 +pt_write_40159 pt_write 3 40159 NULL
111949 +scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
111950 +ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL
111951 +allocate_probes_40204 allocate_probes 1 40204 NULL
111952 +au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
111953 +compress_file_range_40225 compress_file_range 3-4 40225 NULL
111954 +osst_read_40237 osst_read 3 40237 NULL
111955 +lpage_info_slot_40243 lpage_info_slot 3-1 40243 NULL
111956 +ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4-3 40248 NULL
111957 +ptlrpc_queue_wait_40252 ptlrpc_queue_wait 0 40252 NULL
111958 +rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
111959 +ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
111960 +usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
111961 +rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
111962 +SyS_bind_40303 SyS_bind 3 40303 NULL
111963 +ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
111964 +mmio_read_40348 mmio_read 4 40348 NULL
111965 +event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
111966 +ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 3-2 40365 NULL
111967 +get_chars_40373 get_chars 3 40373 NULL
111968 +fb_prepare_extra_logos_40429 fb_prepare_extra_logos 0-2 40429 NULL
111969 +tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
111970 +zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
111971 +gp2ap020a00f_write_event_threshold_40461 gp2ap020a00f_write_event_threshold 2 40461 NULL
111972 +SyS_writev_40467 SyS_writev 3 40467 NULL
111973 +SyS_select_40473 SyS_select 1 40473 NULL
111974 +afs_fs_store_data_40484 afs_fs_store_data 3-4-5-6 40484 NULL
111975 +batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
111976 +devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
111977 +__ethtool_get_sset_count_40511 __ethtool_get_sset_count 0 40511 NULL
111978 +TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
111979 +ixgbe_dbg_reg_ops_read_40540 ixgbe_dbg_reg_ops_read 3 40540 NULL
111980 +ima_write_policy_40548 ima_write_policy 3 40548 NULL
111981 +esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
111982 +b1_get_byte_40597 b1_get_byte 0 40597 NULL
111983 +get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
111984 +twl4030_kpwrite_u8_40665 twl4030_kpwrite_u8 3 40665 NULL
111985 +__cfg80211_roamed_40668 __cfg80211_roamed 4-6 40668 NULL
111986 +pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
111987 +fops_read_40672 fops_read 3 40672 NULL
111988 +idr_get_empty_slot_40674 idr_get_empty_slot 0 40674 NULL
111989 +alloc_rbio_40676 alloc_rbio 4 40676 NULL
111990 +videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 4-3 40678 NULL
111991 +pci_enable_resources_40680 pci_enable_resources 0 40680 NULL
111992 +nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
111993 +__seq_open_private_40715 __seq_open_private 3 40715 NULL
111994 +fuse_readpages_40737 fuse_readpages 4 40737 NULL
111995 +xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL
111996 +security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
111997 +card_send_command_40757 card_send_command 3 40757 NULL
111998 +ad1889_readl_40765 ad1889_readl 0 40765 NULL
111999 +pg_write_40766 pg_write 3 40766 NULL
112000 +show_list_40775 show_list 3-0 40775 NULL
112001 +kfifo_out_copy_r_40784 kfifo_out_copy_r 3-0 40784 NULL
112002 +bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
112003 +pyra_sysfs_read_40795 pyra_sysfs_read 6 40795 NULL
112004 +add_action_40811 add_action 4 40811 NULL
112005 +nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
112006 +SyS_mbind_40828 SyS_mbind 5 40828 NULL
112007 +nilfs_mdt_init_40849 nilfs_mdt_init 3 40849 NULL
112008 +v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
112009 +read_file_queue_40895 read_file_queue 3 40895 NULL
112010 +waiters_read_40902 waiters_read 3 40902 NULL
112011 +isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
112012 +gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
112013 +vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
112014 +snd_vx_create_40948 snd_vx_create 4 40948 NULL nohasharray
112015 +sg_alloc_table_40948 sg_alloc_table 0 40948 &snd_vx_create_40948
112016 +rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
112017 +il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
112018 +iwl_dbgfs_scan_ant_rxchain_read_40999 iwl_dbgfs_scan_ant_rxchain_read 3 40999 NULL
112019 +mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
112020 +__proc_dobitmasks_41029 __proc_dobitmasks 5 41029 NULL
112021 +_req_append_segment_41031 _req_append_segment 2 41031 NULL
112022 +mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
112023 +ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
112024 +lprocfs_write_frac_helper_41050 lprocfs_write_frac_helper 2 41050 NULL
112025 +calculate_order_41061 calculate_order 0 41061 NULL
112026 +vfs_listxattr_41062 vfs_listxattr 0 41062 NULL nohasharray
112027 +beacon_filtering_write_41062 beacon_filtering_write 3 41062 &vfs_listxattr_41062
112028 +cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
112029 +nvme_map_user_pages_41093 nvme_map_user_pages 4-3 41093 NULL nohasharray
112030 +roccat_read_41093 roccat_read 3 41093 &nvme_map_user_pages_41093
112031 +dma_attach_41094 dma_attach 5-6 41094 NULL
112032 +provide_user_output_41105 provide_user_output 3 41105 NULL
112033 +f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
112034 +ath10k_read_wmi_services_41112 ath10k_read_wmi_services 3 41112 NULL
112035 +ocfs2_extend_trans_41116 ocfs2_extend_trans 2 41116 NULL
112036 +v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
112037 +tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
112038 +dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
112039 +dgap_driver_kzmalloc_41189 dgap_driver_kzmalloc 1 41189 NULL
112040 +compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
112041 +dfs_file_write_41196 dfs_file_write 3 41196 NULL
112042 +nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
112043 +cfg80211_process_disassoc_41231 cfg80211_process_disassoc 3 41231 NULL
112044 +hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
112045 +erst_read_41260 erst_read 0 41260 NULL
112046 +alloc_context_41283 alloc_context 1 41283 NULL
112047 +o2hb_setup_one_bio_41341 o2hb_setup_one_bio 4 41341 NULL
112048 +twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
112049 +rtw_android_set_block_41347 rtw_android_set_block 0 41347 NULL
112050 +cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
112051 +kmp_init_41373 kmp_init 2 41373 NULL
112052 +isr_commands_read_41398 isr_commands_read 3 41398 NULL
112053 +rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
112054 +xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
112055 +isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
112056 +lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
112057 +iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
112058 +ntfs_file_buffered_write_41442 ntfs_file_buffered_write 6-4 41442 NULL
112059 +pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
112060 +se_io_cb_41461 se_io_cb 3 41461 NULL
112061 +layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
112062 +rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
112063 +bl_alloc_init_bio_41478 bl_alloc_init_bio 1 41478 NULL
112064 +kvm_unmap_hva_range_41484 kvm_unmap_hva_range 3-2 41484 NULL
112065 +wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
112066 +SyS_get_mempolicy_41495 SyS_get_mempolicy 3 41495 NULL
112067 +hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
112068 +xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
112069 +SyS_preadv_41523 SyS_preadv 3 41523 NULL
112070 +dm_get_reserved_rq_based_ios_41529 dm_get_reserved_rq_based_ios 0 41529 NULL
112071 +tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
112072 +ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
112073 +nr_status_frames_41559 nr_status_frames 0-1 41559 NULL nohasharray
112074 +si476x_radio_fops_read_41559 si476x_radio_fops_read 3 41559 &nr_status_frames_41559
112075 +rng_dev_read_41581 rng_dev_read 3 41581 NULL
112076 +batadv_tvlv_container_ogm_append_41588 batadv_tvlv_container_ogm_append 4 41588 NULL
112077 +vga_io_r_41609 vga_io_r 0 41609 NULL
112078 +tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
112079 +lbs_bcnmiss_write_41613 lbs_bcnmiss_write 3 41613 NULL nohasharray
112080 +usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 &lbs_bcnmiss_write_41613
112081 +a2mp_send_41615 a2mp_send 4 41615 NULL
112082 +lstcon_batch_list_41627 lstcon_batch_list 2 41627 NULL
112083 +mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
112084 +rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
112085 +get_std_timing_41654 get_std_timing 0 41654 NULL
112086 +ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
112087 +fill_pcm_stream_name_41685 fill_pcm_stream_name 2 41685 NULL
112088 +lov_unpackmd_41701 lov_unpackmd 4 41701 NULL
112089 +apei_exec_for_each_entry_41717 apei_exec_for_each_entry 0 41717 NULL
112090 +fillonedir_41746 fillonedir 3 41746 NULL
112091 +iwl_dbgfs_bt_notif_read_41794 iwl_dbgfs_bt_notif_read 3 41794 NULL
112092 +hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
112093 +rtw_android_get_macaddr_41812 rtw_android_get_macaddr 0 41812 NULL
112094 +sco_send_frame_41815 sco_send_frame 3 41815 NULL
112095 +ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
112096 +do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
112097 +keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
112098 +pci_map_single_41869 pci_map_single 0 41869 NULL
112099 +usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
112100 +v_APCI3120_InterruptDmaMoveBlock16bit_41914 v_APCI3120_InterruptDmaMoveBlock16bit 4 41914 NULL
112101 +get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
112102 +nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
112103 +ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 3-4 41935 NULL
112104 +sci_rxfill_41945 sci_rxfill 0 41945 NULL
112105 +read_gssp_41947 read_gssp 3 41947 NULL
112106 +ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
112107 +portnames_read_41958 portnames_read 3 41958 NULL
112108 +dst_mtu_41969 dst_mtu 0 41969 NULL
112109 +cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
112110 +pool_allocate_42012 pool_allocate 3 42012 NULL
112111 +spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
112112 +rs_sta_dbgfs_scale_table_write_42017 rs_sta_dbgfs_scale_table_write 3 42017 NULL
112113 +create_dir_42025 create_dir 0 42025 NULL
112114 +acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
112115 +__btrfs_drop_extents_42032 __btrfs_drop_extents 5 42032 NULL
112116 +__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
112117 +irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
112118 +jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
112119 +ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
112120 +InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
112121 +scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
112122 +sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
112123 +submit_inquiry_42108 submit_inquiry 3 42108 NULL
112124 +dw_dma_cyclic_prep_42113 dw_dma_cyclic_prep 3-4 42113 NULL
112125 +obd_get_info_42156 obd_get_info 0 42156 NULL
112126 +blk_ioctl_zeroout_42160 blk_ioctl_zeroout 3 42160 NULL
112127 +mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
112128 +read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
112129 +oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
112130 +write_file_beacon_42185 write_file_beacon 3 42185 NULL
112131 +get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
112132 +pla_ocp_read_42235 pla_ocp_read 3 42235 NULL
112133 +rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
112134 +find_last_bit_42260 find_last_bit 0 42260 NULL
112135 +__pcpu_size_to_slot_42271 __pcpu_size_to_slot 0 42271 NULL
112136 +snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
112137 +__cpus_weight_42299 __cpus_weight 2-0 42299 NULL
112138 +sel_read_perm_42302 sel_read_perm 3 42302 NULL
112139 +sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
112140 +ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
112141 +xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
112142 +hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
112143 +tcp_sync_mss_42330 tcp_sync_mss 2-0 42330 NULL
112144 +ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
112145 +tipc_send_42374 tipc_send 3 42374 NULL
112146 +drbd_md_last_sector_42378 drbd_md_last_sector 0 42378 NULL
112147 +il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
112148 +msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
112149 +krng_get_random_42420 krng_get_random 3 42420 NULL
112150 +gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
112151 +key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
112152 +alloc_request_42448 alloc_request 0 42448 NULL
112153 +snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
112154 +tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
112155 +kuc_free_42455 kuc_free 2 42455 NULL
112156 +__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
112157 +omfs_readpages_42490 omfs_readpages 4 42490 NULL
112158 +bypass_write_42498 bypass_write 3 42498 NULL
112159 +SyS_mincore_42511 SyS_mincore 1-2 42511 NULL
112160 +kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
112161 +dio_bio_complete_42524 dio_bio_complete 0 42524 NULL
112162 +smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
112163 +dbAllocNear_42546 dbAllocNear 0 42546 NULL
112164 +ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 NULL
112165 +udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
112166 +iwl_print_event_log_42566 iwl_print_event_log 7-5-0 42566 NULL
112167 +xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
112168 +oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
112169 +ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
112170 +scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
112171 +br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
112172 +parport_pc_compat_write_block_pio_42644 parport_pc_compat_write_block_pio 3 42644 NULL
112173 +_regmap_raw_write_42652 _regmap_raw_write 4-2 42652 NULL
112174 +request_key_and_link_42693 request_key_and_link 4 42693 NULL
112175 +vb2_read_42703 vb2_read 3 42703 NULL
112176 +read_status_42722 read_status 0 42722 NULL
112177 +dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
112178 +set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
112179 +ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
112180 +dpm_sysfs_add_42756 dpm_sysfs_add 0 42756 NULL
112181 +x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
112182 +snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
112183 +cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL nohasharray
112184 +isku_sysfs_read_info_42781 isku_sysfs_read_info 6 42781 &cryptd_hash_setkey_42781
112185 +elfcorehdr_read_notes_42786 elfcorehdr_read_notes 2 42786 NULL
112186 +koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
112187 +ptlrpc_request_bufs_pack_42793 ptlrpc_request_bufs_pack 0 42793 NULL
112188 +ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0 42796 NULL
112189 +fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
112190 +drm_ioctl_42813 drm_ioctl 2 42813 NULL
112191 +iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
112192 +set_arg_42824 set_arg 3 42824 NULL
112193 +si476x_radio_read_rsq_blob_42827 si476x_radio_read_rsq_blob 3 42827 NULL
112194 +ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
112195 +nvme_trans_unit_serial_page_42879 nvme_trans_unit_serial_page 4 42879 NULL
112196 +xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
112197 +hd_end_request_42904 hd_end_request 2 42904 NULL
112198 +sta_last_rx_rate_read_42909 sta_last_rx_rate_read 3 42909 NULL
112199 +sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL nohasharray
112200 +mdc_unpack_acl_42941 mdc_unpack_acl 0 42941 &sctp_getsockopt_maxburst_42941
112201 +vx_reset_chk_42946 vx_reset_chk 0 42946 NULL
112202 +blkdev_direct_IO_42962 blkdev_direct_IO 4 42962 NULL
112203 +read_file_node_stat_42964 read_file_node_stat 3 42964 NULL
112204 +compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
112205 +nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL nohasharray
112206 +rtw_os_xmit_resource_alloc_42990 rtw_os_xmit_resource_alloc 3 42990 &nfs_idmap_get_desc_42990
112207 +isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
112208 +wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
112209 +nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
112210 +nfs_map_group_to_gid_43082 nfs_map_group_to_gid 3 43082 NULL
112211 +_xfer_secondary_pool_43089 _xfer_secondary_pool 2 43089 NULL
112212 +sysfs_create_file_ns_43103 sysfs_create_file_ns 0 43103 NULL
112213 +ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
112214 +calculate_node_totalpages_43118 calculate_node_totalpages 2-3 43118 NULL
112215 +read_file_dfs_43145 read_file_dfs 3 43145 NULL
112216 +cfs_cpt_table_alloc_43159 cfs_cpt_table_alloc 1 43159 NULL
112217 +usb_string_sub_43164 usb_string_sub 0 43164 NULL
112218 +il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
112219 +ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
112220 +ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
112221 +uio_write_43202 uio_write 3 43202 NULL
112222 +iso_callback_43208 iso_callback 3 43208 NULL
112223 +ath10k_p2p_calc_noa_ie_len_43209 ath10k_p2p_calc_noa_ie_len 0 43209 NULL
112224 +f2fs_acl_from_disk_43210 f2fs_acl_from_disk 2 43210 NULL
112225 +atomic_long_add_return_43217 atomic_long_add_return 1-0 43217 NULL
112226 +batadv_tt_tvlv_unicast_handler_v1_43239 batadv_tt_tvlv_unicast_handler_v1 5 43239 NULL
112227 +vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
112228 +ide_end_rq_43269 ide_end_rq 4 43269 NULL
112229 +nilfs_direct_IO_43271 nilfs_direct_IO 4 43271 NULL
112230 +parport_pc_ecp_write_block_pio_43278 parport_pc_ecp_write_block_pio 3 43278 NULL nohasharray
112231 +evtchn_write_43278 evtchn_write 3 43278 &parport_pc_ecp_write_block_pio_43278
112232 +filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
112233 +mpage_alloc_43299 mpage_alloc 3 43299 NULL
112234 +mmu_set_spte_43327 mmu_set_spte 7-6 43327 NULL
112235 +__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
112236 +xenfb_write_43412 xenfb_write 3 43412 NULL
112237 +__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
112238 +usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
112239 +ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
112240 +usb_string_43443 usb_string 0 43443 NULL nohasharray
112241 +usemap_size_43443 usemap_size 0-2-1 43443 &usb_string_43443
112242 +get_vm_area_size_43444 get_vm_area_size 0 43444 NULL
112243 +nvme_trans_device_id_page_43466 nvme_trans_device_id_page 4 43466 NULL
112244 +tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
112245 +ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
112246 +do_readlink_43518 do_readlink 2 43518 NULL
112247 +dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
112248 +read_events_43534 read_events 3 43534 NULL
112249 +cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
112250 +tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL
112251 +request_resource_43548 request_resource 0 43548 NULL
112252 +rpc_malloc_43573 rpc_malloc 2 43573 NULL
112253 +handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
112254 +lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
112255 +proc_read_43614 proc_read 3 43614 NULL
112256 +disable_dma_on_even_43618 disable_dma_on_even 0 43618 NULL
112257 +alloc_thread_groups_43625 alloc_thread_groups 2 43625 NULL
112258 +random_write_43656 random_write 3 43656 NULL
112259 +bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
112260 +ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
112261 +write_file_tx99_power_43670 write_file_tx99_power 3 43670 NULL
112262 +dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4 43679 NULL
112263 +max77693_bulk_write_43698 max77693_bulk_write 2-3 43698 NULL
112264 +drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
112265 +snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
112266 +fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
112267 +gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
112268 +sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
112269 +ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
112270 +byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
112271 +btrfs_copy_from_user_43806 btrfs_copy_from_user 0-3-1 43806 NULL
112272 +ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
112273 +ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
112274 +read_flush_43851 read_flush 3 43851 NULL
112275 +pm860x_bulk_write_43875 pm860x_bulk_write 2-3 43875 NULL
112276 +SendString_43928 SendString 3 43928 NULL
112277 +stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
112278 +__get_required_blob_size_43980 __get_required_blob_size 0-3-2 43980 NULL
112279 +nla_reserve_43984 nla_reserve 3 43984 NULL
112280 +__clkdev_alloc_43990 __clkdev_alloc 1 43990 NULL
112281 +scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
112282 +kvm_read_guest_virt_43992 kvm_read_guest_virt 4-2 43992 &scsi_command_size_43992 nohasharray
112283 +bcm_recvmsg_43992 bcm_recvmsg 4 43992 &kvm_read_guest_virt_43992
112284 +emit_flags_44006 emit_flags 4-3 44006 NULL
112285 +write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
112286 +fru_strlen_44046 fru_strlen 0 44046 NULL
112287 +ath9k_def_dump_modal_eeprom_44078 ath9k_def_dump_modal_eeprom 3-2-0 44078 NULL
112288 +SYSC_add_key_44079 SYSC_add_key 4 44079 NULL
112289 +__vxge_hw_vpath_tim_configure_44093 __vxge_hw_vpath_tim_configure 2 44093 NULL
112290 +xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
112291 +skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL
112292 +tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
112293 +SyS_process_vm_writev_44129 SyS_process_vm_writev 3-5 44129 NULL
112294 +ttm_get_pages_44142 ttm_get_pages 2 44142 NULL
112295 +scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
112296 +ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
112297 +SYSC_set_mempolicy_44176 SYSC_set_mempolicy 3 44176 NULL
112298 +readreg_ipac_44186 readreg_ipac 0 44186 NULL
112299 +handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
112300 +srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
112301 +scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
112302 +sigma_action_write_regmap_44240 sigma_action_write_regmap 3 44240 NULL
112303 +apei_resources_sub_44252 apei_resources_sub 0 44252 NULL
112304 +device_create_file_44285 device_create_file 0 44285 NULL
112305 +ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
112306 +bitmap_scnprintf_44318 bitmap_scnprintf 0-2 44318 NULL
112307 +dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
112308 +rs_init_44327 rs_init 1 44327 NULL
112309 +blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 NULL nohasharray
112310 +nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 &blk_queue_init_tags_44355
112311 +alloc_requests_44372 alloc_requests 0 44372 NULL
112312 +rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
112313 +mtip_hw_read_flags_44396 mtip_hw_read_flags 3 44396 NULL
112314 +aoedev_flush_44398 aoedev_flush 2 44398 NULL
112315 +strlcpy_44400 strlcpy 3 44400 NULL
112316 +drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
112317 +osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
112318 +ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
112319 +iwl_dbgfs_bf_params_write_44450 iwl_dbgfs_bf_params_write 3 44450 NULL
112320 +write_file_debug_44476 write_file_debug 3 44476 NULL
112321 +btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
112322 +sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
112323 +bio_advance_44496 bio_advance 2 44496 NULL
112324 +ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
112325 +ac_register_board_44504 ac_register_board 3 44504 NULL
112326 +security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
112327 +iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
112328 +spidev_write_44510 spidev_write 3 44510 NULL
112329 +SyS_io_getevents_44519 SyS_io_getevents 3 44519 NULL
112330 +ieee80211_rx_mgmt_assoc_resp_44525 ieee80211_rx_mgmt_assoc_resp 3 44525 NULL
112331 +comm_write_44537 comm_write 3 44537 NULL
112332 +xfs_log_calc_unit_res_44540 xfs_log_calc_unit_res 0-2 44540 NULL
112333 +dgrp_config_proc_write_44571 dgrp_config_proc_write 3 44571 NULL
112334 +nouveau_perfmon_create__44602 nouveau_perfmon_create_ 4 44602 NULL
112335 +alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
112336 +mpi_resize_44674 mpi_resize 2 44674 NULL
112337 +sysfs_create_link_44685 sysfs_create_link 0 44685 NULL
112338 +ts_read_44687 ts_read 3 44687 NULL
112339 +lov_emerg_alloc_44698 lov_emerg_alloc 1 44698 NULL
112340 +__ocfs2_rotate_tree_left_44705 __ocfs2_rotate_tree_left 3 44705 NULL
112341 +xfer_to_user_44713 xfer_to_user 3 44713 NULL nohasharray
112342 +__generic_block_fiemap_44713 __generic_block_fiemap 4 44713 &xfer_to_user_44713
112343 +_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
112344 +clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
112345 +fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
112346 +key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
112347 +tnode_new_44757 tnode_new 3 44757 NULL nohasharray
112348 +pty_write_44757 pty_write 3 44757 &tnode_new_44757
112349 +__videobuf_copy_stream_44769 __videobuf_copy_stream 4-0 44769 NULL
112350 +handsfree_ramp_44777 handsfree_ramp 2 44777 NULL
112351 +irq_domain_add_legacy_44781 irq_domain_add_legacy 4-2 44781 NULL
112352 +sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
112353 +rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
112354 +qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
112355 +mei_cl_read_start_44824 mei_cl_read_start 2 44824 NULL
112356 +rmap_write_protect_44833 rmap_write_protect 2 44833 NULL
112357 +sisusb_write_44834 sisusb_write 3 44834 NULL
112358 +kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
112359 +qib_verbs_send_dma_44850 qib_verbs_send_dma 6 44850 NULL
112360 +copydesc_user_44855 copydesc_user 3 44855 NULL
112361 +set_advertising_44870 set_advertising 4 44870 NULL
112362 +init_rs_44873 init_rs 1 44873 NULL
112363 +skb_availroom_44883 skb_availroom 0 44883 NULL
112364 +ocfs2_wait_for_mask_44893 ocfs2_wait_for_mask 0 44893 NULL
112365 +do_tty_write_44896 do_tty_write 5 44896 NULL
112366 +regmap_spi_read_44921 regmap_spi_read 3-5 44921 NULL
112367 +tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
112368 +bytepos_delta_45017 bytepos_delta 0-2 45017 NULL
112369 +ptrace_writedata_45021 ptrace_writedata 4 45021 NULL
112370 +dm_kvzalloc_45025 dm_kvzalloc 1 45025 NULL
112371 +vhci_get_user_45039 vhci_get_user 3 45039 NULL
112372 +sysfs_do_create_link_sd_45057 sysfs_do_create_link_sd 0 45057 NULL
112373 +sel_write_user_45060 sel_write_user 3 45060 NULL
112374 +snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
112375 +kvm_mmu_page_get_gfn_45110 kvm_mmu_page_get_gfn 0-2 45110 NULL
112376 +pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
112377 +usbdev_read_45114 usbdev_read 3 45114 NULL
112378 +send_to_tty_45141 send_to_tty 3 45141 NULL
112379 +cfs_trace_daemon_command_usrstr_45147 cfs_trace_daemon_command_usrstr 2 45147 NULL
112380 +gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
112381 +device_write_45156 device_write 3 45156 NULL nohasharray
112382 +ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 &device_write_45156
112383 +tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
112384 +sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
112385 +snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL nohasharray
112386 +sctp_pack_cookie_45190 sctp_pack_cookie 6 45190 &snd_sb_csp_load_user_45190
112387 +iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
112388 +spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
112389 +ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
112390 +event_enable_write_45238 event_enable_write 3 45238 NULL
112391 +prism2_pda_proc_read_45246 prism2_pda_proc_read 3 45246 NULL
112392 +input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
112393 +gfs2_fiemap_45282 gfs2_fiemap 4 45282 NULL
112394 +snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
112395 +e1000_tx_map_45309 e1000_tx_map 5 45309 NULL
112396 +copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
112397 +null_alloc_repbuf_45375 null_alloc_repbuf 3 45375 NULL
112398 +sock_recv_errqueue_45412 sock_recv_errqueue 3 45412 NULL
112399 +ieee80211_if_fmt_dot11MeshHWMProotInterval_45421 ieee80211_if_fmt_dot11MeshHWMProotInterval 3 45421 NULL
112400 +ll_iocontrol_register_45430 ll_iocontrol_register 2 45430 NULL
112401 +tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
112402 +__node_remap_45458 __node_remap 4 45458 NULL
112403 +rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
112404 +tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
112405 +rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
112406 +i40e_alloc_vfs_45511 i40e_alloc_vfs 2 45511 NULL
112407 +cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
112408 +copy_macs_45534 copy_macs 4 45534 NULL
112409 +nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
112410 +v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
112411 +cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
112412 +stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
112413 +_regmap_bus_raw_write_45559 _regmap_bus_raw_write 2 45559 NULL
112414 +posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
112415 +venus_rmdir_45564 venus_rmdir 4 45564 NULL
112416 +ath6kl_keepalive_write_45600 ath6kl_keepalive_write 3 45600 NULL
112417 +hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
112418 +compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
112419 +dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
112420 +smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
112421 +unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
112422 +bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699
112423 +sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL
112424 +snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL nohasharray
112425 +task_cgroup_path_45734 task_cgroup_path 3 45734 &snd_cs46xx_io_read_45734
112426 +rw_copy_check_uvector_45748 rw_copy_check_uvector 3-0 45748 NULL nohasharray
112427 +v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
112428 +lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
112429 +alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
112430 +osc_checksum_type_seq_write_45785 osc_checksum_type_seq_write 3 45785 NULL
112431 +raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
112432 +rds_tcp_inc_copy_to_user_45804 rds_tcp_inc_copy_to_user 3 45804 NULL
112433 +lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
112434 +pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
112435 +ll_max_readahead_mb_seq_write_45815 ll_max_readahead_mb_seq_write 3 45815 NULL
112436 +memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL
112437 +ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL
112438 +x509_process_extension_45854 x509_process_extension 5 45854 NULL
112439 +efx_tx_queue_insert_45859 efx_tx_queue_insert 2 45859 NULL
112440 +isdn_write_45863 isdn_write 3 45863 NULL
112441 +tpm_config_in_45880 tpm_config_in 0 45880 NULL
112442 +get_rdac_req_45882 get_rdac_req 3 45882 NULL
112443 +ima_eventdigest_init_common_45889 ima_eventdigest_init_common 2 45889 NULL
112444 +ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
112445 +cfs_cpt_weight_45903 cfs_cpt_weight 0 45903 NULL
112446 +wm_adsp_region_to_reg_45915 wm_adsp_region_to_reg 0-2 45915 NULL
112447 +dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
112448 +alloc_mr_45935 alloc_mr 1 45935 NULL
112449 +copy_to_45969 copy_to 3 45969 NULL
112450 +rb_simple_read_45972 rb_simple_read 3 45972 NULL
112451 +ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
112452 +kobject_init_and_add_46003 kobject_init_and_add 0 46003 NULL
112453 +sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
112454 +fnic_reset_stats_read_46030 fnic_reset_stats_read 3 46030 NULL nohasharray
112455 +get_free_entries_46030 get_free_entries 1 46030 &fnic_reset_stats_read_46030
112456 +__access_remote_vm_46031 __access_remote_vm 0 46031 NULL
112457 +snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
112458 +__ocfs2_move_extent_46060 __ocfs2_move_extent 3-4 46060 NULL nohasharray
112459 +dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
112460 +sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
112461 +il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 NULL nohasharray
112462 +memcg_update_array_size_46111 memcg_update_array_size 1 46111 &il3945_ucode_general_stats_read_46111
112463 +C_SYSC_writev_46113 C_SYSC_writev 3 46113 NULL
112464 +mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
112465 +rtw_buf_update_46138 rtw_buf_update 4 46138 NULL
112466 +vb2_dma_sg_get_userptr_46146 vb2_dma_sg_get_userptr 3-2 46146 NULL
112467 +__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
112468 +twl_direction_out_46182 twl_direction_out 2 46182 NULL
112469 +vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
112470 +fq_resize_46195 fq_resize 2 46195 NULL
112471 +add_conn_list_46197 add_conn_list 3-0 46197 NULL
112472 +i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
112473 +tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
112474 +dsp_write_46218 dsp_write 2 46218 NULL
112475 +hash_netiface4_expire_46226 hash_netiface4_expire 4 46226 NULL
112476 +xen_setup_msi_irqs_46245 xen_setup_msi_irqs 2 46245 NULL
112477 +mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
112478 +ReadReg_46277 ReadReg 0 46277 NULL
112479 +sptlrpc_req_get_ctx_46303 sptlrpc_req_get_ctx 0 46303 NULL
112480 +sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL
112481 +__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL nohasharray
112482 +compat_SyS_readv_46328 compat_SyS_readv 3 46328 &__hwahc_dev_set_key_46328
112483 +iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
112484 +smk_write_direct_46363 smk_write_direct 3 46363 NULL
112485 +fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
112486 +crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
112487 +ttm_page_pool_get_pages_46431 ttm_page_pool_get_pages 0-5 46431 NULL
112488 +cfs_power2_roundup_46433 cfs_power2_roundup 0-1 46433 NULL
112489 +cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
112490 +parport_pc_fifo_write_block_46455 parport_pc_fifo_write_block 3 46455 NULL
112491 +il_dbgfs_clear_traffic_stats_write_46458 il_dbgfs_clear_traffic_stats_write 3 46458 NULL
112492 +filldir64_46469 filldir64 3 46469 NULL
112493 +fill_in_write_vector_46498 fill_in_write_vector 0 46498 NULL
112494 +pin_code_reply_46510 pin_code_reply 4 46510 NULL
112495 +mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
112496 +kmsg_read_46514 kmsg_read 3 46514 NULL nohasharray
112497 +nouveau_drm_ioctl_46514 nouveau_drm_ioctl 2 46514 &kmsg_read_46514
112498 +nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
112499 +dn_current_mss_46574 dn_current_mss 0 46574 NULL
112500 +serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
112501 +snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
112502 +il3945_stats_flag_46606 il3945_stats_flag 3-0 46606 NULL
112503 +vscnprintf_46617 vscnprintf 0-2 46617 NULL
112504 +__kfifo_out_r_46623 __kfifo_out_r 3-0 46623 NULL
112505 +request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
112506 +pci_enable_device_46642 pci_enable_device 0 46642 NULL
112507 +vfs_getxattr_alloc_46649 vfs_getxattr_alloc 0 46649 NULL
112508 +e1000_tx_map_46672 e1000_tx_map 4 46672 NULL
112509 +alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
112510 +__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
112511 +erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
112512 +wl1271_rx_filter_alloc_field_46721 wl1271_rx_filter_alloc_field 5 46721 NULL
112513 +irq_domain_add_simple_46734 irq_domain_add_simple 2 46734 NULL
112514 +read_file_tx99_46741 read_file_tx99 3 46741 NULL
112515 +ext4_count_free_46754 ext4_count_free 2 46754 NULL
112516 +hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
112517 +int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
112518 +_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
112519 +xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
112520 +readreg_46845 readreg 0 46845 NULL
112521 +spi_async_46857 spi_async 0 46857 NULL
112522 +SyS_move_pages_46863 SyS_move_pages 2 46863 NULL nohasharray
112523 +vsnprintf_46863 vsnprintf 0 46863 &SyS_move_pages_46863
112524 +nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
112525 +qp_memcpy_from_queue_iov_46874 qp_memcpy_from_queue_iov 5-4 46874 NULL
112526 +lov_iocontrol_46876 lov_iocontrol 3 46876 NULL
112527 +ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
112528 +sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
112529 +ieee80211_if_fmt_power_mode_46906 ieee80211_if_fmt_power_mode 3 46906 NULL
112530 +wlcore_alloc_hw_46917 wlcore_alloc_hw 1-3 46917 NULL
112531 +fb_write_46924 fb_write 3 46924 NULL
112532 +__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
112533 +qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
112534 +SYSC_poll_46965 SYSC_poll 2 46965 NULL
112535 +crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
112536 +mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
112537 +strlcat_46985 strlcat 3 46985 NULL
112538 +bitmap_file_clear_bit_46990 bitmap_file_clear_bit 2 46990 NULL
112539 +sel_write_bool_46996 sel_write_bool 3 46996 NULL nohasharray
112540 +gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 &sel_write_bool_46996
112541 +blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
112542 +cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2-4 47024 NULL
112543 +fs_path_len_47060 fs_path_len 0 47060 NULL
112544 +ext4_xattr_list_entries_47070 ext4_xattr_list_entries 0 47070 NULL
112545 +pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
112546 +scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
112547 +iwl_dump_nic_event_log_47089 iwl_dump_nic_event_log 0 47089 NULL
112548 +ptlrpc_lprocfs_threads_max_seq_write_47104 ptlrpc_lprocfs_threads_max_seq_write 3 47104 NULL
112549 +mousedev_read_47123 mousedev_read 3 47123 NULL
112550 +acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 NULL nohasharray
112551 +ses_recv_diag_47143 ses_recv_diag 4 47143 &acpi_ut_initialize_buffer_47143
112552 +mxms_headerlen_47161 mxms_headerlen 0 47161 NULL
112553 +rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
112554 +rts51x_ms_rw_47171 rts51x_ms_rw 3-4 47171 NULL
112555 +can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
112556 +options_write_47243 options_write 3 47243 NULL
112557 +portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
112558 +ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
112559 +gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
112560 +vsnprintf_47291 vsnprintf 0 47291 NULL
112561 +SYSC_semop_47292 SYSC_semop 3 47292 NULL
112562 +tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
112563 +nouveau_fb_create__47316 nouveau_fb_create_ 4 47316 NULL
112564 +ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
112565 +avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
112566 +kvm_arch_create_memslot_47364 kvm_arch_create_memslot 3 47364 NULL nohasharray
112567 +__output_copy_user_47364 __output_copy_user 3 47364 &kvm_arch_create_memslot_47364
112568 +__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
112569 +trace_options_core_read_47390 trace_options_core_read 3 47390 NULL nohasharray
112570 +nv_rd32_47390 nv_rd32 0 47390 &trace_options_core_read_47390
112571 +nametbl_list_47391 nametbl_list 2 47391 NULL
112572 +dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
112573 +pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
112574 +lbs_wrmac_write_47400 lbs_wrmac_write 3 47400 NULL
112575 +sta_vht_capa_read_47409 sta_vht_capa_read 3 47409 NULL
112576 +crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
112577 +lbs_wrrf_write_47418 lbs_wrrf_write 3 47418 NULL
112578 +posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
112579 +nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL
112580 +newpart_47485 newpart 6-4 47485 NULL
112581 +mcp23s17_read_regs_47491 mcp23s17_read_regs 4 47491 NULL
112582 +core_sys_select_47494 core_sys_select 1 47494 NULL
112583 +alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
112584 +unlink_simple_47506 unlink_simple 3 47506 NULL
112585 +pstore_decompress_47510 pstore_decompress 0 47510 NULL
112586 +__proc_lnet_portal_rotor_47529 __proc_lnet_portal_rotor 5 47529 NULL
112587 +process_vm_rw_47533 process_vm_rw 3-5 47533 NULL nohasharray
112588 +vscnprintf_47533 vscnprintf 0-2 47533 &process_vm_rw_47533
112589 +einj_check_trigger_header_47534 einj_check_trigger_header 0 47534 NULL
112590 +ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
112591 +read_ldt_47570 read_ldt 2 47570 NULL
112592 +isku_sysfs_read_last_set_47572 isku_sysfs_read_last_set 6 47572 NULL
112593 +btrfs_stack_header_bytenr_47589 btrfs_stack_header_bytenr 0 47589 NULL
112594 +ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
112595 +sctp_ssnmap_new_47608 sctp_ssnmap_new 2-1 47608 NULL
112596 +cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
112597 +twl4030_clear_set_47624 twl4030_clear_set 4 47624 NULL
112598 +get_size_47644 get_size 1-2 47644 NULL
112599 +packet_recvmsg_47700 packet_recvmsg 4 47700 NULL nohasharray
112600 +ipath_format_hwmsg_47700 ipath_format_hwmsg 2 47700 &packet_recvmsg_47700
112601 +save_microcode_47717 save_microcode 3 47717 NULL
112602 +bits_to_user_47733 bits_to_user 2-3 47733 NULL
112603 +carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
112604 +ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
112605 +mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
112606 +alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
112607 +uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
112608 +SyS_setgroups16_47780 SyS_setgroups16 1 47780 NULL
112609 +error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
112610 +posix_acl_fix_xattr_from_user_47793 posix_acl_fix_xattr_from_user 2 47793 NULL
112611 +W6692_empty_Bfifo_47804 W6692_empty_Bfifo 2 47804 NULL
112612 +lov_packmd_47810 lov_packmd 0 47810 NULL
112613 +tree_mod_log_insert_move_47823 tree_mod_log_insert_move 5 47823 NULL
112614 +pinconf_dbg_config_write_47835 pinconf_dbg_config_write 3 47835 NULL
112615 +KEY_SIZE_47855 KEY_SIZE 0 47855 NULL
112616 +vhci_read_47878 vhci_read 3 47878 NULL
112617 +keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
112618 +cfs_percpt_alloc_47918 cfs_percpt_alloc 2 47918 NULL
112619 +comedi_write_47926 comedi_write 3 47926 NULL
112620 +nvme_trans_get_blk_desc_len_47946 nvme_trans_get_blk_desc_len 0-2 47946 NULL
112621 +gether_get_ifname_47972 gether_get_ifname 3 47972 NULL
112622 +mempool_resize_47983 mempool_resize 2 47983 NULL nohasharray
112623 +iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 &mempool_resize_47983
112624 +dbg_port_buf_47990 dbg_port_buf 2 47990 NULL
112625 +ib_umad_write_47993 ib_umad_write 3 47993 NULL
112626 +lustre_cfg_len_48002 lustre_cfg_len 0 48002 NULL
112627 +gdm_tty_recv_complete_48011 gdm_tty_recv_complete 2 48011 NULL
112628 +ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
112629 +bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
112630 +pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
112631 +SYSC_writev_48040 SYSC_writev 3 48040 NULL
112632 +wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
112633 +posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
112634 +palmas_bulk_write_48068 palmas_bulk_write 2-3-5 48068 NULL
112635 +disc_write_48070 disc_write 3 48070 NULL
112636 +mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
112637 +skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
112638 +vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
112639 +set_discoverable_48141 set_discoverable 4 48141 NULL
112640 +dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
112641 +_add_to_r4w_48152 _add_to_r4w 4 48152 NULL
112642 +isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
112643 +c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
112644 +rbd_obj_method_sync_48170 rbd_obj_method_sync 8 48170 NULL
112645 +alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
112646 +brcmf_sdio_chip_cm3_exitdl_48192 brcmf_sdio_chip_cm3_exitdl 4 48192 NULL
112647 +cfg80211_process_deauth_48200 cfg80211_process_deauth 3 48200 NULL
112648 +ext4_index_trans_blocks_48205 ext4_index_trans_blocks 0-2 48205 NULL
112649 +snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
112650 +ll_direct_IO_26_48216 ll_direct_IO_26 4 48216 NULL
112651 +uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
112652 +nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
112653 +read_file_recv_48232 read_file_recv 3 48232 NULL
112654 +unaccount_shadowed_48233 unaccount_shadowed 2 48233 NULL
112655 +nouveau_i2c_port_create__48240 nouveau_i2c_port_create_ 7 48240 NULL
112656 +nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
112657 +batadv_socket_read_48257 batadv_socket_read 3 48257 NULL
112658 +cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
112659 +trace_options_write_48275 trace_options_write 3 48275 NULL
112660 +send_set_info_48288 send_set_info 7 48288 NULL
112661 +lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
112662 +timblogiw_read_48305 timblogiw_read 3 48305 NULL
112663 +hash_setkey_48310 hash_setkey 3 48310 NULL
112664 +audio_set_intf_req_48319 audio_set_intf_req 0 48319 NULL
112665 +kvm_mmu_pte_write_48340 kvm_mmu_pte_write 2 48340 NULL
112666 +skb_add_data_48363 skb_add_data 3 48363 NULL
112667 +tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
112668 +lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
112669 +uhid_event_from_user_48417 uhid_event_from_user 2 48417 NULL
112670 +div64_u64_rem_48418 div64_u64_rem 0-1-2 48418 NULL
112671 +pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
112672 +print_filtered_48442 print_filtered 2-0 48442 NULL
112673 +tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
112674 +compat_SyS_preadv64_48469 compat_SyS_preadv64 3 48469 NULL
112675 +ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
112676 +r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
112677 +ocfs2_refcount_cow_48495 ocfs2_refcount_cow 3 48495 NULL
112678 +send_control_msg_48498 send_control_msg 6 48498 NULL
112679 +count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
112680 +diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
112681 +brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
112682 +phantom_get_free_48514 phantom_get_free 0 48514 NULL
112683 +drbd_bm_capacity_48530 drbd_bm_capacity 0 48530 NULL
112684 +raid10_size_48571 raid10_size 0-2-3 48571 NULL
112685 +llog_data_len_48607 llog_data_len 1 48607 NULL
112686 +do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
112687 +ll_rw_extents_stats_pp_seq_write_48651 ll_rw_extents_stats_pp_seq_write 3 48651 NULL
112688 +mtd_read_48655 mtd_read 0 48655 NULL
112689 +aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
112690 +sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL
112691 +hysdn_log_write_48694 hysdn_log_write 3 48694 NULL
112692 +altera_drscan_48698 altera_drscan 2 48698 NULL
112693 +kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
112694 +recv_msg_48709 recv_msg 4 48709 NULL
112695 +lpfc_idiag_drbacc_write_48712 lpfc_idiag_drbacc_write 3 48712 NULL
112696 +SyS_lgetxattr_48719 SyS_lgetxattr 4 48719 NULL
112697 +ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
112698 +ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
112699 +l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL
112700 +gfs2_direct_IO_48774 gfs2_direct_IO 4 48774 NULL
112701 +il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
112702 +twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
112703 +atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
112704 +azx_get_position_48841 azx_get_position 0 48841 NULL
112705 +vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
112706 +comedi_buf_write_alloc_48846 comedi_buf_write_alloc 0-2 48846 NULL
112707 +suspend_dtim_interval_write_48854 suspend_dtim_interval_write 3 48854 NULL
112708 +sptlrpc_cli_alloc_reqbuf_48855 sptlrpc_cli_alloc_reqbuf 0 48855 NULL
112709 +C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 NULL nohasharray
112710 +viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 &C_SYSC_pwritev64_48864
112711 +__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
112712 +crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
112713 +joydev_handle_JSIOCSAXMAP_48898 joydev_handle_JSIOCSAXMAP 3 48898 NULL
112714 +xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
112715 +msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
112716 +sep_crypto_dma_48937 sep_crypto_dma 0 48937 NULL
112717 +si5351_write_parameters_48940 si5351_write_parameters 2 48940 NULL
112718 +event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
112719 +nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
112720 +vmci_handle_arr_create_48971 vmci_handle_arr_create 1 48971 NULL
112721 +rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
112722 +sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
112723 +null_alloc_rs_49019 null_alloc_rs 2 49019 NULL
112724 +filemap_check_errors_49022 filemap_check_errors 0 49022 NULL
112725 +aic_inb_49023 aic_inb 0 49023 NULL
112726 +transient_status_49027 transient_status 4 49027 NULL
112727 +iwl_mvm_power_legacy_dbgfs_read_49038 iwl_mvm_power_legacy_dbgfs_read 4 49038 NULL
112728 +aic7xxx_rem_scb_from_disc_list_49041 aic7xxx_rem_scb_from_disc_list 0 49041 NULL
112729 +scsi_register_49094 scsi_register 2 49094 NULL
112730 +compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
112731 +xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL
112732 +ll_max_cached_mb_seq_write_49122 ll_max_cached_mb_seq_write 3 49122 NULL
112733 +pt_read_49136 pt_read 3 49136 NULL
112734 +ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
112735 +f2fs_acl_count_49155 f2fs_acl_count 0-1 49155 NULL
112736 +ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
112737 +__jfs_setxattr_49175 __jfs_setxattr 5 49175 NULL
112738 +ath6kl_bgscan_int_write_49178 ath6kl_bgscan_int_write 3 49178 NULL
112739 +dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
112740 +print_queue_49191 print_queue 4-0 49191 NULL
112741 +root_nfs_cat_49192 root_nfs_cat 3 49192 NULL
112742 +iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
112743 +il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
112744 +do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
112745 +nouveau_therm_create__49228 nouveau_therm_create_ 4 49228 NULL
112746 +hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
112747 +ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
112748 +isku_sysfs_read_keys_media_49268 isku_sysfs_read_keys_media 6 49268 NULL
112749 +ptlrpc_check_set_49277 ptlrpc_check_set 0 49277 NULL
112750 +rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
112751 +viafb_dfph_proc_write_49288 viafb_dfph_proc_write 3 49288 NULL
112752 +uio_read_49300 uio_read 3 49300 NULL
112753 +isku_sysfs_read_keys_macro_49312 isku_sysfs_read_keys_macro 6 49312 NULL
112754 +SYSC_mincore_49319 SYSC_mincore 2-1 49319 NULL
112755 +fwtty_port_handler_49327 fwtty_port_handler 9 49327 NULL
112756 +srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-4-3 49330 NULL
112757 +joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
112758 +iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
112759 +ext4_ext_index_trans_blocks_49396 ext4_ext_index_trans_blocks 0 49396 NULL
112760 +rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
112761 +tnode_alloc_49407 tnode_alloc 1 49407 NULL
112762 +samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
112763 +compat_do_msg_fill_49440 compat_do_msg_fill 3 49440 NULL
112764 +__hfsplus_getxattr_49460 __hfsplus_getxattr 0 49460 NULL
112765 +agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
112766 +xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
112767 +isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
112768 +iwl_dbgfs_disable_power_off_read_49517 iwl_dbgfs_disable_power_off_read 3 49517 NULL
112769 +SyS_listxattr_49519 SyS_listxattr 3 49519 NULL
112770 +emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL
112771 +smk_write_access_49561 smk_write_access 3 49561 NULL
112772 +alloc_chunk_49575 alloc_chunk 1 49575 NULL
112773 +sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
112774 +ptlrpc_request_pack_49581 ptlrpc_request_pack 0 49581 NULL
112775 +readfifo_49583 readfifo 1 49583 NULL
112776 +tap_write_49595 tap_write 3 49595 NULL
112777 +isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
112778 +btrfs_mksubvol_49616 btrfs_mksubvol 3 49616 NULL
112779 +heap_init_49617 heap_init 2 49617 NULL
112780 +smk_write_doi_49621 smk_write_doi 3 49621 NULL
112781 +port_fops_read_49626 port_fops_read 3 49626 NULL
112782 +btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
112783 +aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 4-3 49683 NULL
112784 +SyS_pwritev_49688 SyS_pwritev 3 49688 NULL
112785 +__copy_from_user_nocheck_49699 __copy_from_user_nocheck 0-3 49699 NULL
112786 +cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
112787 +write_pool_49718 write_pool 3 49718 NULL
112788 +kvm_mmu_notifier_invalidate_page_49723 kvm_mmu_notifier_invalidate_page 3 49723 NULL
112789 +sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
112790 +zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
112791 +btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
112792 +fuse_wr_pages_49753 fuse_wr_pages 0-1-2 49753 NULL
112793 +key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
112794 +fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
112795 +w83977af_fir_interrupt_49775 w83977af_fir_interrupt 0 49775 NULL
112796 +ceph_osdc_readpages_49789 ceph_osdc_readpages 0 49789 NULL
112797 +nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
112798 +ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
112799 +add_uuid_49831 add_uuid 4 49831 NULL
112800 +iraw_loop_49842 iraw_loop 0-1 49842 NULL
112801 +twl4030_write_49846 twl4030_write 2 49846 NULL
112802 +scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
112803 +timeradd_entry_49850 timeradd_entry 3 49850 NULL
112804 +fiemap_count_to_size_49869 fiemap_count_to_size 0-1 49869 NULL
112805 +sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
112806 +ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
112807 +osc_brw_49896 osc_brw 4 49896 NULL
112808 +config_ep_by_speed_49939 config_ep_by_speed 0 49939 NULL
112809 +ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
112810 +drm_buffer_copy_from_user_49990 drm_buffer_copy_from_user 3 49990 NULL
112811 +l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
112812 +dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
112813 +isdn_read_50021 isdn_read 3 50021 NULL
112814 +mdc_rename_pack_50023 mdc_rename_pack 4-6 50023 NULL
112815 +ioread8_50049 ioread8 0 50049 NULL
112816 +fuse_conn_max_background_write_50061 fuse_conn_max_background_write 3 50061 NULL
112817 +__kfifo_dma_in_prepare_50081 __kfifo_dma_in_prepare 4 50081 NULL
112818 +dev_set_alias_50084 dev_set_alias 3 50084 NULL
112819 +libcfs_ioctl_popdata_50087 libcfs_ioctl_popdata 3 50087 NULL
112820 +sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
112821 +altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
112822 +android_set_cntry_50100 android_set_cntry 0 50100 NULL
112823 +read_file_slot_50111 read_file_slot 3 50111 NULL
112824 +rx_streaming_interval_write_50120 rx_streaming_interval_write 3 50120 NULL
112825 +jfs_direct_IO_50125 jfs_direct_IO 4 50125 NULL
112826 +SYSC_preadv_50134 SYSC_preadv 3 50134 NULL
112827 +copy_items_50140 copy_items 6 50140 NULL
112828 +tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
112829 +kmalloc_node_50163 kmalloc_node 1 50163 NULL
112830 +rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
112831 +ahd_probe_stack_size_50168 ahd_probe_stack_size 0 50168 NULL
112832 +odev_update_50169 odev_update 2 50169 NULL
112833 +ubi_resize_volume_50172 ubi_resize_volume 2 50172 NULL nohasharray
112834 +ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 &ubi_resize_volume_50172
112835 +cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL
112836 +cyttsp4_probe_50201 cyttsp4_probe 4 50201 NULL
112837 +rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
112838 +mthca_buddy_init_50206 mthca_buddy_init 2 50206 NULL
112839 +l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
112840 +mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
112841 +sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
112842 +rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
112843 +soc_codec_reg_show_50302 soc_codec_reg_show 0-3 50302 NULL
112844 +SYSC_flistxattr_50307 SYSC_flistxattr 3 50307 NULL
112845 +SYSC_sched_setaffinity_50310 SYSC_sched_setaffinity 2 50310 NULL
112846 +soc_camera_read_50319 soc_camera_read 3 50319 NULL
112847 +do_launder_page_50329 do_launder_page 0 50329 NULL
112848 +nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
112849 +lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
112850 +snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 NULL
112851 +tpm_read_50344 tpm_read 3 50344 NULL
112852 +isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
112853 +iwl_dbgfs_echo_test_write_50362 iwl_dbgfs_echo_test_write 3 50362 NULL
112854 +xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
112855 +roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
112856 +sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
112857 +l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
112858 +iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
112859 +validate_acl_mac_addrs_50429 validate_acl_mac_addrs 0 50429 NULL
112860 +btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL
112861 +pgctrl_write_50453 pgctrl_write 3 50453 NULL
112862 +device_create_sys_dev_entry_50458 device_create_sys_dev_entry 0 50458 NULL
112863 +cfs_size_round_50472 cfs_size_round 0-1 50472 NULL
112864 +cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
112865 +mei_io_cb_alloc_req_buf_50493 mei_io_cb_alloc_req_buf 2 50493 NULL
112866 +pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
112867 +ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
112868 +usbat_flash_write_data_50553 usbat_flash_write_data 4 50553 NULL
112869 +fat_readpages_50582 fat_readpages 4 50582 NULL
112870 +iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
112871 +xillybus_write_50605 xillybus_write 3 50605 NULL
112872 +rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
112873 +sparse_early_usemaps_alloc_node_50623 sparse_early_usemaps_alloc_node 4 50623 NULL
112874 +simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
112875 +ath6kl_tm_rx_event_50664 ath6kl_tm_rx_event 3 50664 NULL
112876 +bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
112877 +prism2_read_fid_reg_50689 prism2_read_fid_reg 0 50689 NULL
112878 +xfs_growfs_get_hdr_buf_50697 xfs_growfs_get_hdr_buf 3 50697 NULL
112879 +dev_mem_read_50706 dev_mem_read 3 50706 NULL
112880 +blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
112881 +__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
112882 +ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
112883 +tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL
112884 +bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
112885 +tpm_write_50798 tpm_write 3 50798 NULL
112886 +write_flush_50803 write_flush 3 50803 NULL
112887 +dvb_play_50814 dvb_play 3 50814 NULL
112888 +dpcm_show_state_50827 dpcm_show_state 0 50827 NULL
112889 +SetArea_50835 SetArea 4 50835 NULL
112890 +videobuf_dma_init_user_50839 videobuf_dma_init_user 4-3 50839 NULL
112891 +carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
112892 +SyS_lgetxattr_50889 SyS_lgetxattr 4 50889 NULL
112893 +netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
112894 +__bdev_writeseg_50903 __bdev_writeseg 4 50903 NULL
112895 +xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
112896 +blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
112897 +hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
112898 +chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
112899 +show_device_status_50947 show_device_status 0 50947 NULL
112900 +irq_timeout_write_50950 irq_timeout_write 3 50950 NULL
112901 +virtio_cread16_50951 virtio_cread16 0 50951 NULL
112902 +sdio_uart_write_50954 sdio_uart_write 3 50954 NULL
112903 +SyS_setxattr_50957 SyS_setxattr 4 50957 NULL
112904 +iwl_statistics_flag_50981 iwl_statistics_flag 3-0 50981 NULL
112905 +timeout_write_50991 timeout_write 3 50991 NULL
112906 +proc_write_51003 proc_write 3 51003 NULL
112907 +jbd2_journal_extend_51012 jbd2_journal_extend 2 51012 NULL
112908 +lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
112909 +fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 NULL
112910 +BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
112911 +dump_midi_51040 dump_midi 3 51040 NULL
112912 +usb_get_descriptor_51041 usb_get_descriptor 0 51041 NULL
112913 +srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
112914 +do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
112915 +wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
112916 +jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
112917 +__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
112918 +ti_recv_51110 ti_recv 3 51110 NULL
112919 +uasp_prepare_r_request_51124 uasp_prepare_r_request 0 51124 NULL
112920 +nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
112921 +alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
112922 +simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
112923 +xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
112924 +nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
112925 +snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
112926 +drm_property_create_51239 drm_property_create 4 51239 NULL
112927 +__mxt_read_reg_51249 __mxt_read_reg 0 51249 NULL
112928 +st_read_51251 st_read 3 51251 NULL
112929 +compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
112930 +target_alloc_sgl_51264 target_alloc_sgl 3 51264 NULL
112931 +dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
112932 +ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
112933 +pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
112934 +bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
112935 +init_map_ipmac_51317 init_map_ipmac 5 51317 NULL
112936 +alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
112937 +ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
112938 +alloc_smp_req_51337 alloc_smp_req 1 51337 NULL
112939 +ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
112940 +ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
112941 +radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
112942 +ceph_sync_read_51410 ceph_sync_read 3-0 51410 NULL
112943 +blk_register_region_51424 blk_register_region 1-2 51424 NULL
112944 +mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
112945 +hfsplus_brec_read_51436 hfsplus_brec_read 0 51436 NULL
112946 +ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
112947 +print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
112948 +____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
112949 +xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
112950 +kvm_fetch_guest_virt_51493 kvm_fetch_guest_virt 4-2 51493 NULL
112951 +ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
112952 +__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
112953 +batadv_tt_prepare_tvlv_local_data_51568 batadv_tt_prepare_tvlv_local_data 0 51568 NULL
112954 +ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
112955 +aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
112956 +table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
112957 +extent_fiemap_51621 extent_fiemap 3 51621 NULL
112958 +sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
112959 +iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
112960 +ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
112961 +sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
112962 +host_mapping_level_51696 host_mapping_level 0 51696 NULL
112963 +sel_write_access_51704 sel_write_access 3 51704 NULL
112964 +tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
112965 +v9fs_alloc_rdir_buf_51716 v9fs_alloc_rdir_buf 2 51716 NULL
112966 +drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
112967 +sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
112968 +cm4040_read_51732 cm4040_read 3 51732 NULL
112969 +get_user_pages_fast_51751 get_user_pages_fast 0 51751 NULL
112970 +ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
112971 +if_write_51756 if_write 3 51756 NULL
112972 +qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
112973 +buffer_from_user_51826 buffer_from_user 3 51826 NULL
112974 +ioread32_51847 ioread32 0 51847 NULL nohasharray
112975 +read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
112976 +do_readv_writev_51849 do_readv_writev 4 51849 NULL
112977 +SYSC_sendto_51852 SYSC_sendto 6 51852 NULL
112978 +bm_page_io_async_51858 bm_page_io_async 2 51858 NULL
112979 +pointer_size_read_51863 pointer_size_read 3 51863 NULL
112980 +get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
112981 +user_read_51881 user_read 3 51881 NULL
112982 +dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
112983 +SyS_mq_timedsend_51896 SyS_mq_timedsend 3 51896 NULL
112984 +wmi_set_ie_51919 wmi_set_ie 3 51919 NULL
112985 +dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
112986 +__tcp_mtu_to_mss_51938 __tcp_mtu_to_mss 0-2 51938 NULL
112987 +xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
112988 +scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
112989 +snd_mask_min_51969 snd_mask_min 0 51969 NULL
112990 +__blkdev_get_51972 __blkdev_get 0 51972 NULL
112991 +get_zone_51981 get_zone 0-1 51981 NULL
112992 +ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
112993 +_c4iw_write_mem_dma_51991 _c4iw_write_mem_dma 3 51991 NULL
112994 +dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
112995 +skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
112996 +rdmalt_52022 rdmalt 0 52022 NULL
112997 +override_release_52032 override_release 2 52032 NULL
112998 +end_port_52042 end_port 0 52042 NULL
112999 +dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
113000 +msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
113001 +dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
113002 +__fuse_request_alloc_52060 __fuse_request_alloc 1 52060 NULL
113003 +isofs_readpages_52067 isofs_readpages 4 52067 NULL
113004 +nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
113005 +o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
113006 +split_scan_timeout_write_52128 split_scan_timeout_write 3 52128 NULL
113007 +retry_count_read_52129 retry_count_read 3 52129 NULL
113008 +gdm_usb_hci_send_52138 gdm_usb_hci_send 3 52138 NULL
113009 +sub_alloc_52140 sub_alloc 0 52140 NULL
113010 +hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL
113011 +htable_size_52148 htable_size 0-1 52148 NULL
113012 +smk_write_load2_52155 smk_write_load2 3 52155 NULL
113013 +ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
113014 +mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
113015 +print_prefix_52176 print_prefix 0 52176 NULL
113016 +proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
113017 +vmci_qp_broker_alloc_52216 vmci_qp_broker_alloc 6-5 52216 NULL
113018 +fuse_request_alloc_52243 fuse_request_alloc 1 52243 NULL nohasharray
113019 +xfs_iomap_eof_align_last_fsb_52243 xfs_iomap_eof_align_last_fsb 3 52243 &fuse_request_alloc_52243
113020 +mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
113021 +shrink_slab_52261 shrink_slab 2 52261 NULL
113022 +sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
113023 +handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL
113024 +kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL
113025 +read_file_reset_52310 read_file_reset 3 52310 NULL
113026 +request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL
113027 +hwflags_read_52318 hwflags_read 3 52318 NULL
113028 +test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
113029 +hur_len_52339 hur_len 0 52339 NULL
113030 +bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
113031 +copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
113032 +iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL
113033 +hfsplus_find_attr_52374 hfsplus_find_attr 0 52374 NULL
113034 +mq_emit_config_values_52378 mq_emit_config_values 3 52378 NULL
113035 +isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
113036 +jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
113037 +aer_inject_write_52399 aer_inject_write 3 52399 NULL
113038 +cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
113039 +line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
113040 +hso_serial_common_create_52428 hso_serial_common_create 4 52428 NULL
113041 +delay_status_52431 delay_status 5 52431 NULL
113042 +ath6kl_delete_qos_write_52435 ath6kl_delete_qos_write 3 52435 NULL
113043 +ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
113044 +acpi_nvs_for_each_region_52448 acpi_nvs_for_each_region 0 52448 NULL
113045 +alauda_read_data_52452 alauda_read_data 3 52452 NULL
113046 +ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1 52477 NULL
113047 +usb_tranzport_write_52479 usb_tranzport_write 3 52479 NULL
113048 +ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
113049 +fd_do_rw_52495 fd_do_rw 3 52495 NULL
113050 +int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
113051 +lmv_get_easize_52504 lmv_get_easize 0 52504 NULL
113052 +pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
113053 +bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
113054 +dup_variable_bug_52525 dup_variable_bug 3 52525 NULL
113055 +raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
113056 +dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
113057 +debug_level_proc_write_52572 debug_level_proc_write 3 52572 NULL
113058 +isku_sysfs_read_macro_52587 isku_sysfs_read_macro 6 52587 NULL
113059 +SyS_setsockopt_52610 SyS_setsockopt 5 52610 NULL
113060 +ll_sa_entry_alloc_52611 ll_sa_entry_alloc 4 52611 NULL
113061 +tps80031_writes_52638 tps80031_writes 3-4 52638 NULL
113062 +brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
113063 +nvme_queue_extra_52661 nvme_queue_extra 0-1 52661 NULL
113064 +SYSC_gethostname_52677 SYSC_gethostname 2 52677 NULL
113065 +nvd0_disp_pioc_create__52693 nvd0_disp_pioc_create_ 5 52693 NULL
113066 +nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
113067 +__dm_stat_bio_52722 __dm_stat_bio 3 52722 NULL
113068 +cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
113069 +blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
113070 +relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
113071 +hfcsusb_rx_frame_52745 hfcsusb_rx_frame 3 52745 NULL
113072 +carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
113073 +ieee80211_if_read_beacon_timeout_52756 ieee80211_if_read_beacon_timeout 3 52756 NULL
113074 +nvme_trans_ext_inq_page_52776 nvme_trans_ext_inq_page 3 52776 NULL
113075 +pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
113076 +ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
113077 +mon_bin_get_event_52863 mon_bin_get_event 4-6 52863 NULL
113078 +twl6030_gpadc_write_52867 twl6030_gpadc_write 1 52867 NULL
113079 +qib_decode_6120_err_52876 qib_decode_6120_err 3 52876 NULL
113080 +twlreg_write_52880 twlreg_write 3 52880 NULL
113081 +pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
113082 +cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
113083 +kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
113084 +dio_bio_reap_52913 dio_bio_reap 0 52913 NULL
113085 +__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
113086 +iblock_get_bio_52936 iblock_get_bio 3 52936 NULL
113087 +__nodes_remap_52951 __nodes_remap 5 52951 NULL
113088 +send_packet_52960 send_packet 4 52960 NULL
113089 +ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
113090 +tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
113091 +num_node_state_52989 num_node_state 0 52989 NULL
113092 +efivarfs_file_write_53000 efivarfs_file_write 3 53000 NULL
113093 +uasp_alloc_stream_res_53015 uasp_alloc_stream_res 0 53015 NULL
113094 +btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
113095 +tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
113096 +bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
113097 +regcache_lzo_block_count_53056 regcache_lzo_block_count 0 53056 NULL
113098 +cfi_read_query_53066 cfi_read_query 0 53066 NULL
113099 +iwl_dbgfs_interrupt_write_53069 iwl_dbgfs_interrupt_write 3 53069 NULL
113100 +mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
113101 +mic_virtio_copy_from_user_53107 mic_virtio_copy_from_user 3 53107 NULL
113102 +verity_status_53120 verity_status 5 53120 NULL
113103 +brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
113104 +ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
113105 +ieee80211_bss_info_update_53170 ieee80211_bss_info_update 4 53170 NULL
113106 +btrfs_io_bio_alloc_53179 btrfs_io_bio_alloc 2 53179 NULL
113107 +clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
113108 +tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
113109 +ptlrpc_lprocfs_req_history_max_seq_write_53243 ptlrpc_lprocfs_req_history_max_seq_write 3 53243 NULL
113110 +hfsplus_xattr_set_posix_acl_53249 hfsplus_xattr_set_posix_acl 4 53249 NULL
113111 +xfs_trans_read_buf_map_53258 xfs_trans_read_buf_map 5 53258 NULL
113112 +wil_write_file_ssid_53266 wil_write_file_ssid 3 53266 NULL
113113 +btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
113114 +ftrace_profile_write_53327 ftrace_profile_write 3 53327 NULL
113115 +find_nr_power_limit_53330 find_nr_power_limit 0 53330 NULL
113116 +gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
113117 +vm_mmap_53339 vm_mmap 0 53339 NULL
113118 +read_6120_creg32_53363 read_6120_creg32 0 53363 NULL
113119 +sock_setbindtodevice_53369 sock_setbindtodevice 3 53369 NULL
113120 +get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
113121 +isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
113122 +mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
113123 +apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
113124 +paging64_prefetch_gpte_53468 paging64_prefetch_gpte 4 53468 NULL
113125 +ima_write_template_field_data_53475 ima_write_template_field_data 2 53475 NULL
113126 +iowarrior_read_53483 iowarrior_read 3 53483 NULL
113127 +osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
113128 +do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
113129 +snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
113130 +dbAllocNext_53506 dbAllocNext 0 53506 NULL
113131 +ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
113132 +check_acl_53512 check_acl 0 53512 NULL
113133 +nft_data_dump_53549 nft_data_dump 5 53549 NULL
113134 +SYSC_bind_53582 SYSC_bind 3 53582 NULL
113135 +cifs_utf16_bytes_53593 cifs_utf16_bytes 0 53593 NULL
113136 +proc_uid_map_write_53596 proc_uid_map_write 3 53596 NULL
113137 +pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
113138 +___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
113139 +xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
113140 +ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
113141 +nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
113142 +fuse_fill_write_pages_53682 fuse_fill_write_pages 0-4 53682 NULL
113143 +v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
113144 +bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL nohasharray
113145 +igb_alloc_q_vector_53690 igb_alloc_q_vector 4-6 53690 &bdev_logical_block_size_53690
113146 +find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
113147 +bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
113148 +__proc_debug_mb_53732 __proc_debug_mb 5 53732 NULL
113149 +wdm_write_53735 wdm_write 3 53735 NULL
113150 +amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 NULL nohasharray
113151 +lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 &amdtp_out_stream_get_max_payload_53755
113152 +ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
113153 +__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
113154 +qp_alloc_host_work_53798 qp_alloc_host_work 5-3 53798 NULL
113155 +regmap_raw_write_53803 regmap_raw_write 2-4 53803 NULL
113156 +lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
113157 +nls_nullsize_53815 nls_nullsize 0 53815 NULL
113158 +setup_data_read_53822 setup_data_read 3 53822 NULL
113159 +pms_read_53873 pms_read 3 53873 NULL
113160 +ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
113161 +SyS_setgroups_53900 SyS_setgroups 1 53900 NULL
113162 +batadv_tt_tvlv_ogm_handler_v1_53909 batadv_tt_tvlv_ogm_handler_v1 5 53909 NULL
113163 +usb_serial_generic_write_53927 usb_serial_generic_write 4 53927 NULL
113164 +ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 0 53938 NULL
113165 +idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
113166 +__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
113167 +ieee80211_if_fmt_dot11MeshHWMPperrMinInterval_53998 ieee80211_if_fmt_dot11MeshHWMPperrMinInterval 3 53998 NULL
113168 +hfsplus_attr_build_key_54013 hfsplus_attr_build_key 0 54013 NULL
113169 +snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 5-2-4 54018 NULL
113170 +mdc_kuc_write_54019 mdc_kuc_write 3 54019 NULL
113171 +ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
113172 +batadv_tt_update_orig_54049 batadv_tt_update_orig 6-4 54049 NULL
113173 +pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
113174 +nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
113175 +rproc_state_read_54057 rproc_state_read 3 54057 NULL
113176 +_malloc_54077 _malloc 1 54077 NULL
113177 +bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
113178 +altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL nohasharray
113179 +lustre_posix_acl_xattr_filter_54103 lustre_posix_acl_xattr_filter 2 54103 &altera_set_ir_pre_54103
113180 +__comedi_buf_write_alloc_54112 __comedi_buf_write_alloc 0-2 54112 NULL
113181 +strn_len_54122 strn_len 0 54122 NULL
113182 +isku_receive_54130 isku_receive 4 54130 NULL
113183 +isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
113184 +irq_blk_threshold_write_54138 irq_blk_threshold_write 3 54138 NULL
113185 +memcpy_toiovec_54166 memcpy_toiovec 3 54166 NULL
113186 +nouveau_falcon_create__54169 nouveau_falcon_create_ 8 54169 NULL
113187 +p9_client_prepare_req_54175 p9_client_prepare_req 3 54175 NULL
113188 +do_sys_poll_54221 do_sys_poll 2 54221 NULL
113189 +__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
113190 +pi_read_regr_54231 pi_read_regr 0 54231 NULL
113191 +mcp23s08_read_regs_54246 mcp23s08_read_regs 4 54246 NULL
113192 +reada_add_block_54247 reada_add_block 2 54247 NULL
113193 +xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
113194 +ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
113195 +audio_write_54261 audio_write 4 54261 NULL nohasharray
113196 +wusb_prf_54261 wusb_prf 7 54261 &audio_write_54261
113197 +mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
113198 +kstrtou16_from_user_54274 kstrtou16_from_user 2 54274 NULL
113199 +tipc_multicast_54285 tipc_multicast 4 54285 NULL
113200 +altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
113201 +dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
113202 +reclaim_pages_54301 reclaim_pages 3 54301 NULL
113203 +sprintf_54306 sprintf 0 54306 NULL
113204 +bio_add_pc_page_54319 bio_add_pc_page 4 54319 NULL
113205 +br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
113206 +__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
113207 +__get_free_pages_54352 __get_free_pages 0 54352 NULL
113208 +tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
113209 +read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
113210 +vfs_readlink_54368 vfs_readlink 3 54368 NULL
113211 +do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
113212 +intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
113213 +ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
113214 +snd_pcm_oss_read2_54387 snd_pcm_oss_read2 3-0 54387 NULL
113215 +iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
113216 +ll_ra_count_get_54410 ll_ra_count_get 3 54410 NULL
113217 +copy_gadget_strings_54417 copy_gadget_strings 2-3 54417 NULL
113218 +sparse_early_mem_maps_alloc_node_54485 sparse_early_mem_maps_alloc_node 4 54485 NULL
113219 +simple_strtoull_54493 simple_strtoull 0 54493 NULL
113220 +btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
113221 +cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
113222 +rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
113223 +vmci_transport_dgram_enqueue_54525 vmci_transport_dgram_enqueue 4 54525 NULL
113224 +viacam_read_54526 viacam_read 3 54526 NULL
113225 +unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
113226 +setsockopt_54539 setsockopt 5 54539 NULL
113227 +lbs_lowsnr_write_54549 lbs_lowsnr_write 3 54549 NULL
113228 +SYSC_setsockopt_54561 SYSC_setsockopt 5 54561 NULL
113229 +nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
113230 +fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
113231 +nvme_npages_54601 nvme_npages 0-1 54601 NULL
113232 +irq_pkt_threshold_write_54605 irq_pkt_threshold_write 3 54605 NULL
113233 +port_fops_write_54627 port_fops_write 3 54627 NULL
113234 +irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
113235 +dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
113236 +twl6030_interrupt_mask_54659 twl6030_interrupt_mask 2 54659 NULL
113237 +tdp_page_fault_54663 tdp_page_fault 2 54663 NULL
113238 +bus_add_device_54665 bus_add_device 0 54665 NULL
113239 +cw1200_queue_stats_init_54670 cw1200_queue_stats_init 2 54670 NULL
113240 +bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
113241 +evm_read_key_54674 evm_read_key 3 54674 NULL
113242 +tipc_link_send_sections_fast_54689 tipc_link_send_sections_fast 3 54689 NULL
113243 +__btrfs_inc_extent_ref_54706 __btrfs_inc_extent_ref 7 54706 NULL
113244 +rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
113245 +ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
113246 +kzalloc_54740 kzalloc 1 54740 NULL
113247 +wep_iv_read_54744 wep_iv_read 3 54744 NULL
113248 +lpfc_idiag_pcicfg_write_54749 lpfc_idiag_pcicfg_write 3 54749 NULL
113249 +iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
113250 +adis16480_show_firmware_date_54762 adis16480_show_firmware_date 3 54762 NULL
113251 +ldsem_atomic_update_54774 ldsem_atomic_update 1 54774 NULL
113252 +flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
113253 +nfsd_write_54809 nfsd_write 6 54809 NULL
113254 +ar9287_dump_modal_eeprom_54814 ar9287_dump_modal_eeprom 3-2 54814 NULL
113255 +crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 NULL nohasharray
113256 +kvzalloc_54815 kvzalloc 1 54815 &crypto_tfm_ctx_alignment_54815 nohasharray
113257 +aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 &kvzalloc_54815
113258 +generic_perform_write_54832 generic_perform_write 3 54832 NULL
113259 +write_rio_54837 write_rio 3 54837 NULL
113260 +ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 NULL nohasharray
113261 +nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 &ext3_acl_from_disk_54839
113262 +ll_layout_conf_54841 ll_layout_conf 0 54841 NULL
113263 +ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
113264 +printer_read_54851 printer_read 3 54851 NULL
113265 +alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
113266 +broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
113267 +prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
113268 +iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL nohasharray
113269 +kobject_set_name_vargs_54913 kobject_set_name_vargs 0 54913 &iscsi_pool_init_54913
113270 +btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
113271 +bio_add_page_54933 bio_add_page 0-3 54933 NULL
113272 +mxms_structlen_54939 mxms_structlen 0 54939 NULL
113273 +add_port_54941 add_port 2 54941 NULL
113274 +ath9k_dump_btcoex_54949 ath9k_dump_btcoex 3-0 54949 NULL
113275 +alauda_write_data_54967 alauda_write_data 3 54967 NULL
113276 +c4_add_card_54968 c4_add_card 3 54968 NULL
113277 +ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
113278 +cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
113279 +error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
113280 +dgap_do_bios_load_55025 dgap_do_bios_load 3 55025 NULL
113281 +apei_exec_run_55075 apei_exec_run 0 55075 NULL
113282 +bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
113283 +read_dma_55086 read_dma 3 55086 NULL
113284 +rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
113285 +crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
113286 +filldir_55137 filldir 3 55137 NULL
113287 +ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
113288 +npages_to_npools_55149 npages_to_npools 0-1 55149 NULL
113289 +ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
113290 +mtd_get_fact_prot_info_55186 mtd_get_fact_prot_info 0 55186 NULL
113291 +sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
113292 +sched_feat_write_55202 sched_feat_write 3 55202 NULL
113293 +ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL
113294 +__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL
113295 +do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
113296 +qxl_alloc_client_monitors_config_55216 qxl_alloc_client_monitors_config 2 55216 NULL
113297 +nouveau_mc_create__55217 nouveau_mc_create_ 4 55217 NULL
113298 +dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
113299 +memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
113300 +lbs_failcount_write_55276 lbs_failcount_write 3 55276 NULL
113301 +persistent_ram_new_55286 persistent_ram_new 2-1 55286 NULL
113302 +rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
113303 +lov_get_stripecnt_55297 lov_get_stripecnt 0-3 55297 NULL
113304 +gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
113305 +wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
113306 +qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 NULL
113307 +__vxge_hw_vpath_initialize_55328 __vxge_hw_vpath_initialize 2 55328 NULL
113308 +vme_user_read_55338 vme_user_read 3 55338 NULL
113309 +__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 NULL nohasharray
113310 +sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 &__wa_xfer_setup_sizes_55342
113311 +tipc_send2name_55373 tipc_send2name 5 55373 NULL
113312 +cw1200_sdio_align_size_55391 cw1200_sdio_align_size 2 55391 NULL
113313 +iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
113314 +sysfs_chmod_file_55408 sysfs_chmod_file 0 55408 NULL
113315 +si476x_radio_read_rds_blckcnt_blob_55427 si476x_radio_read_rds_blckcnt_blob 3 55427 NULL
113316 +sysfs_sd_setattr_55437 sysfs_sd_setattr 0 55437 NULL
113317 +__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
113318 +cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
113319 +snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
113320 +i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
113321 +batadv_tt_entries_55487 batadv_tt_entries 0-1 55487 NULL
113322 +ras_stride_increase_window_55501 ras_stride_increase_window 3 55501 NULL
113323 +tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 NULL
113324 +ea_get_55522 ea_get 3-0 55522 NULL
113325 +buffer_size_55534 buffer_size 0 55534 NULL
113326 +set_msr_interception_55538 set_msr_interception 2 55538 NULL
113327 +tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
113328 +dgap_do_config_load_55548 dgap_do_config_load 2 55548 NULL
113329 +hash_ipport6_expire_55549 hash_ipport6_expire 4 55549 NULL
113330 +dm_stats_list_55551 dm_stats_list 4 55551 NULL
113331 +__vdev_disk_physio_55568 __vdev_disk_physio 4 55568 NULL
113332 +add_partition_55588 add_partition 2 55588 NULL
113333 +kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL
113334 +SyS_keyctl_55602 SyS_keyctl 4 55602 NULL nohasharray
113335 +config_buf_55602 config_buf 0 55602 &SyS_keyctl_55602
113336 +macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
113337 +selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
113338 +edge_tty_recv_55622 edge_tty_recv 3 55622 NULL
113339 +pktgen_if_write_55628 pktgen_if_write 3 55628 NULL nohasharray
113340 +reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 &pktgen_if_write_55628
113341 +osc_obd_max_pages_per_rpc_seq_write_55636 osc_obd_max_pages_per_rpc_seq_write 3 55636 NULL
113342 +xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
113343 +lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
113344 +il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
113345 +get_info_55681 get_info 3 55681 NULL
113346 +iwl_dbgfs_plcp_delta_write_55682 iwl_dbgfs_plcp_delta_write 3 55682 NULL
113347 +echo_big_lmm_get_55690 echo_big_lmm_get 0 55690 NULL
113348 +genl_allocate_reserve_groups_55705 genl_allocate_reserve_groups 1 55705 NULL
113349 +pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
113350 +ocfs2_lock_refcount_tree_55719 ocfs2_lock_refcount_tree 0 55719 NULL
113351 +tap_pwup_write_55723 tap_pwup_write 3 55723 NULL
113352 +__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2 55738 NULL
113353 +set_local_name_55757 set_local_name 4 55757 NULL
113354 +strlen_55778 strlen 0 55778 NULL
113355 +set_spte_55783 set_spte 4-5 55783 NULL
113356 +req_bio_endio_55786 req_bio_endio 3 55786 NULL nohasharray
113357 +conf_read_55786 conf_read 3 55786 &req_bio_endio_55786
113358 +uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
113359 +sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
113360 +ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
113361 +hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
113362 +shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
113363 +hsc_write_55875 hsc_write 3 55875 NULL
113364 +ramdisk_store_55885 ramdisk_store 4 55885 NULL
113365 +pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
113366 +hash_ip4_expire_55911 hash_ip4_expire 4 55911 NULL
113367 +snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
113368 +ext2_direct_IO_55932 ext2_direct_IO 4 55932 NULL
113369 +kvm_write_guest_virt_system_55944 kvm_write_guest_virt_system 4-2 55944 NULL
113370 +sel_read_policy_55947 sel_read_policy 3 55947 NULL
113371 +ceph_get_direct_page_vector_55956 ceph_get_direct_page_vector 2 55956 NULL
113372 +simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
113373 +tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
113374 +btrfs_clone_55977 btrfs_clone 5-3 55977 NULL
113375 +wa_xfer_create_subset_sg_55992 wa_xfer_create_subset_sg 3-2 55992 NULL
113376 +nvme_alloc_iod_56027 nvme_alloc_iod 1-2 56027 NULL
113377 +dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
113378 +pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
113379 +add_sysfs_param_56108 add_sysfs_param 0 56108 NULL
113380 +usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
113381 +sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
113382 +write_file_frameerrors_56145 write_file_frameerrors 3 56145 NULL
113383 +__i2c_transfer_56162 __i2c_transfer 0 56162 NULL
113384 +rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
113385 +ath9k_dump_legacy_btcoex_56194 ath9k_dump_legacy_btcoex 3-0 56194 NULL
113386 +vring_add_indirect_56222 vring_add_indirect 4 56222 NULL
113387 +ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
113388 +do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
113389 +fd_copyin_56247 fd_copyin 3 56247 NULL
113390 +sk_rmem_schedule_56255 sk_rmem_schedule 3 56255 NULL
113391 +il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
113392 +ieee80211_if_fmt_user_power_level_56283 ieee80211_if_fmt_user_power_level 3 56283 NULL
113393 +RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
113394 +dvb_aplay_56296 dvb_aplay 3 56296 NULL
113395 +btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
113396 +speakup_file_write_56310 speakup_file_write 3 56310 NULL
113397 +pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
113398 +journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
113399 +snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
113400 +vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
113401 +mite_device_bytes_transferred_56355 mite_device_bytes_transferred 0 56355 NULL
113402 +iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4-0 56368 NULL
113403 +dev_read_56369 dev_read 3 56369 NULL
113404 +ath10k_read_simulate_fw_crash_56371 ath10k_read_simulate_fw_crash 3 56371 NULL
113405 +write_gssp_56404 write_gssp 3 56404 NULL
113406 +ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
113407 +do_get_write_access_56410 do_get_write_access 0 56410 NULL
113408 +store_msg_56417 store_msg 3 56417 NULL
113409 +pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
113410 +fl_create_56435 fl_create 5 56435 NULL
113411 +gnttab_map_56439 gnttab_map 2 56439 NULL
113412 +cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2-4 56453 NULL
113413 +set_connectable_56458 set_connectable 4 56458 NULL
113414 +osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
113415 +putused_user_56467 putused_user 3 56467 NULL
113416 +lbs_rdmac_write_56471 lbs_rdmac_write 3 56471 NULL
113417 +calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
113418 +crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
113419 +ieee80211_rx_mgmt_probe_beacon_56491 ieee80211_rx_mgmt_probe_beacon 3 56491 NULL
113420 +init_map_ip_56508 init_map_ip 5 56508 NULL
113421 +lustre_posix_acl_xattr_reduce_space_56512 lustre_posix_acl_xattr_reduce_space 3 56512 NULL
113422 +cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
113423 +ip_options_get_56538 ip_options_get 4 56538 NULL
113424 +ll_wr_track_id_56544 ll_wr_track_id 2 56544 NULL
113425 +alloc_apertures_56561 alloc_apertures 1 56561 NULL
113426 +rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
113427 +portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
113428 +event_filter_write_56609 event_filter_write 3 56609 NULL
113429 +nvme_trans_log_temperature_56613 nvme_trans_log_temperature 3 56613 NULL
113430 +edac_device_create_block_56619 edac_device_create_block 0 56619 NULL
113431 +gather_array_56641 gather_array 3 56641 NULL
113432 +lookup_extent_backref_56644 lookup_extent_backref 9 56644 NULL
113433 +uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
113434 +tg3_nvram_write_block_56666 tg3_nvram_write_block 3 56666 NULL
113435 +snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL
113436 +dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3-0 56702 NULL
113437 +sta_flags_read_56710 sta_flags_read 3 56710 NULL
113438 +ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
113439 +__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
113440 +__copy_from_user_ll_56738 __copy_from_user_ll 0-3 56738 NULL
113441 +drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
113442 +btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
113443 +ttm_alloc_new_pages_56792 ttm_alloc_new_pages 5 56792 NULL
113444 +do_syslog_56807 do_syslog 3 56807 NULL
113445 +mtdchar_write_56831 mtdchar_write 3 56831 NULL
113446 +snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4-0 56847 NULL
113447 +si476x_radio_read_agc_blob_56849 si476x_radio_read_agc_blob 3 56849 NULL
113448 +ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
113449 +pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
113450 +debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
113451 +batadv_tt_update_changes_56895 batadv_tt_update_changes 3 56895 NULL
113452 +hfsplus_find_cat_56899 hfsplus_find_cat 0 56899 NULL
113453 +hfsplus_setxattr_56902 hfsplus_setxattr 4 56902 NULL
113454 +strcspn_56913 strcspn 0 56913 NULL
113455 +__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
113456 +journal_init_revoke_56933 journal_init_revoke 2 56933 NULL
113457 +nouveau_xtensa_create__56952 nouveau_xtensa_create_ 8 56952 NULL
113458 +diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
113459 +nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
113460 +sptlrpc_secflags2str_56995 sptlrpc_secflags2str 3 56995 NULL
113461 +vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
113462 +btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
113463 +aircable_process_packet_57027 aircable_process_packet 4 57027 NULL
113464 +ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 NULL nohasharray
113465 +skb_network_offset_57043 skb_network_offset 0 57043 &ieee80211_if_fmt_state_57043
113466 +bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
113467 +xfs_buf_read_map_57053 xfs_buf_read_map 3 57053 NULL
113468 +cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
113469 +sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
113470 +pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
113471 +tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
113472 +altera_get_note_57099 altera_get_note 6 57099 NULL
113473 +hpfs_readpages_57106 hpfs_readpages 4 57106 NULL
113474 +crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
113475 +sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
113476 +cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
113477 +rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 NULL nohasharray
113478 +nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 &rds_ib_sub_signaled_57136 nohasharray
113479 +ima_show_htable_value_57136 ima_show_htable_value 2 57136 &nl80211_send_deauth_57136
113480 +snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
113481 +udl_prime_create_57159 udl_prime_create 2 57159 NULL
113482 +stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
113483 +rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
113484 +hash_netnet6_expire_57191 hash_netnet6_expire 4 57191 NULL
113485 +tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
113486 +dma_fifo_alloc_57236 dma_fifo_alloc 5-3-2 57236 NULL
113487 +rsxx_cram_write_57244 rsxx_cram_write 3 57244 NULL
113488 +ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
113489 +oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
113490 +alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
113491 +lbs_sleepparams_write_57283 lbs_sleepparams_write 3 57283 NULL
113492 +pstore_file_read_57288 pstore_file_read 3 57288 NULL
113493 +snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
113494 +ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
113495 +write_file_regval_57313 write_file_regval 3 57313 NULL
113496 +__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
113497 +usblp_read_57342 usblp_read 3 57342 NULL
113498 +print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
113499 +dio_send_cur_page_57348 dio_send_cur_page 0 57348 NULL
113500 +tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL
113501 +tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
113502 +read_file_blob_57406 read_file_blob 3 57406 NULL
113503 +enclosure_register_57412 enclosure_register 3 57412 NULL
113504 +compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL
113505 +copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
113506 +__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL
113507 +sisusb_clear_vram_57466 sisusb_clear_vram 2-3 57466 NULL
113508 +ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL
113509 +tipc_port_reject_sections_57478 tipc_port_reject_sections 4 57478 NULL
113510 +bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
113511 +skb_headlen_57501 skb_headlen 0 57501 NULL
113512 +copy_in_user_57502 copy_in_user 3 57502 NULL
113513 +ckhdid_printf_57505 ckhdid_printf 2 57505 NULL
113514 +init_tag_map_57515 init_tag_map 3 57515 NULL
113515 +il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 NULL nohasharray
113516 +wil_read_file_ssid_57517 wil_read_file_ssid 3 57517 &il_dbgfs_force_reset_read_57517
113517 +cmm_read_57520 cmm_read 3 57520 NULL
113518 +inode_permission_57531 inode_permission 0 57531 NULL
113519 +acpi_dev_get_resources_57534 acpi_dev_get_resources 0 57534 NULL
113520 +ptlrpc_lprocfs_hp_ratio_seq_write_57537 ptlrpc_lprocfs_hp_ratio_seq_write 3 57537 NULL
113521 +ReadHDLCPnP_57559 ReadHDLCPnP 0 57559 NULL
113522 +obd_unpackmd_57563 obd_unpackmd 0 57563 NULL
113523 +snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
113524 +get_bridge_ifindices_57579 get_bridge_ifindices 0 57579 NULL
113525 +ldlm_cli_enqueue_local_57582 ldlm_cli_enqueue_local 11 57582 NULL
113526 +il_dbgfs_interrupt_write_57591 il_dbgfs_interrupt_write 3 57591 NULL
113527 +read_file_spectral_fft_period_57593 read_file_spectral_fft_period 3 57593 NULL
113528 +tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL
113529 +sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 NULL
113530 +mem_read_57631 mem_read 3 57631 NULL
113531 +r3964_write_57662 r3964_write 4 57662 NULL
113532 +proc_ns_readlink_57664 proc_ns_readlink 3 57664 NULL
113533 +__lgwrite_57669 __lgwrite 4 57669 NULL
113534 +f1x_match_to_this_node_57695 f1x_match_to_this_node 3 57695 NULL
113535 +i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
113536 +ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
113537 +nouveau_gpio_create__57735 nouveau_gpio_create_ 4-5 57735 NULL
113538 +pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 NULL nohasharray
113539 +compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 &pppol2tp_recvmsg_57742
113540 +ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
113541 +SYSC_process_vm_writev_57776 SYSC_process_vm_writev 3-5 57776 NULL
113542 +apei_exec_collect_resources_57788 apei_exec_collect_resources 0 57788 NULL
113543 +ld2_57794 ld2 0 57794 NULL
113544 +ivtv_read_57796 ivtv_read 3 57796 NULL
113545 +bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
113546 +copy_to_user_57835 copy_to_user 3-0 57835 NULL
113547 +flash_read_57843 flash_read 3 57843 NULL
113548 +kiblnd_create_tx_pool_57846 kiblnd_create_tx_pool 2 57846 NULL
113549 +xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
113550 +iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL
113551 +memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL
113552 +twl_i2c_write_57923 twl_i2c_write 3-4 57923 NULL
113553 +__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
113554 +sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
113555 +xfs_mru_cache_create_57943 xfs_mru_cache_create 3 57943 NULL
113556 +key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
113557 +ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
113558 +ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
113559 +do_rx_dma_57996 do_rx_dma 5 57996 NULL
113560 +rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
113561 +iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
113562 +io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
113563 +mce_async_out_58056 mce_async_out 3 58056 NULL
113564 +ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
113565 +dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
113566 +cm4040_write_58079 cm4040_write 3 58079 NULL
113567 +ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
113568 +slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
113569 +garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
113570 +ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
113571 +btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
113572 +read_file_debug_58256 read_file_debug 3 58256 NULL
113573 +osc_max_dirty_mb_seq_write_58263 osc_max_dirty_mb_seq_write 3 58263 NULL
113574 +cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
113575 +profile_load_58267 profile_load 3 58267 NULL
113576 +kstrtos8_from_user_58268 kstrtos8_from_user 2 58268 NULL
113577 +acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
113578 +iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
113579 +ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
113580 +tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
113581 +ext4_ext_truncate_extend_restart_58331 ext4_ext_truncate_extend_restart 3 58331 NULL
113582 +__copy_from_user_swizzled_58337 __copy_from_user_swizzled 2-4 58337 NULL
113583 +SyS_migrate_pages_58348 SyS_migrate_pages 2 58348 NULL
113584 +brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
113585 +il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
113586 +_drbd_md_sync_page_io_58403 _drbd_md_sync_page_io 6 58403 NULL
113587 +kvm_mmu_write_protect_pt_masked_58406 kvm_mmu_write_protect_pt_masked 3 58406 NULL nohasharray
113588 +idetape_pad_zeros_58406 idetape_pad_zeros 2 58406 &kvm_mmu_write_protect_pt_masked_58406
113589 +i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
113590 +capabilities_read_58457 capabilities_read 3 58457 NULL
113591 +lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
113592 +compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
113593 +nv_rd08_58472 nv_rd08 0 58472 NULL
113594 +acpi_tables_sysfs_init_58477 acpi_tables_sysfs_init 0 58477 NULL
113595 +snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
113596 +snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
113597 +btrfs_cont_expand_58498 btrfs_cont_expand 2-3 58498 NULL
113598 +rndis_add_response_58544 rndis_add_response 2 58544 NULL
113599 +ldlm_srv_pool_shrink_58554 ldlm_srv_pool_shrink 0 58554 NULL
113600 +wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
113601 +scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
113602 +get_rhf_errstring_58582 get_rhf_errstring 3 58582 NULL
113603 +ea_read_inline_58589 ea_read_inline 0 58589 NULL
113604 +isku_sysfs_read_keys_thumbster_58590 isku_sysfs_read_keys_thumbster 6 58590 NULL
113605 +xip_file_read_58592 xip_file_read 3 58592 NULL
113606 +ecryptfs_write_end_58594 ecryptfs_write_end 5-3 58594 NULL
113607 +radeon_bo_size_58606 radeon_bo_size 0 58606 NULL
113608 +skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
113609 +tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
113610 +iwl_dbgfs_rx_handlers_write_58655 iwl_dbgfs_rx_handlers_write 3 58655 NULL
113611 +find_zero_58685 find_zero 0-1 58685 NULL
113612 +uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
113613 +tps6586x_writes_58689 tps6586x_writes 2-3 58689 NULL
113614 +vx_send_msg_58711 vx_send_msg 0 58711 NULL
113615 +csum_exist_in_range_58730 csum_exist_in_range 2-3 58730 NULL
113616 +frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
113617 +ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
113618 +agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
113619 +regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
113620 +raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
113621 +isku_sysfs_read_58806 isku_sysfs_read 5 58806 NULL
113622 +ep_read_58813 ep_read 3 58813 NULL
113623 +command_write_58841 command_write 3 58841 NULL
113624 +ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
113625 +gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
113626 +esas2r_change_queue_depth_58886 esas2r_change_queue_depth 2 58886 NULL
113627 +lprocfs_wr_pinger_recov_58914 lprocfs_wr_pinger_recov 3 58914 NULL
113628 +print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
113629 +pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
113630 +sptlrpc_import_sec_adapt_58948 sptlrpc_import_sec_adapt 0 58948 NULL
113631 +wait_table_hash_nr_entries_58962 wait_table_hash_nr_entries 0 58962 NULL
113632 +ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout_58965 ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout 3 58965 NULL
113633 +crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
113634 +init_list_set_59005 init_list_set 3 59005 NULL
113635 +ep_write_59008 ep_write 3 59008 NULL
113636 +lpfc_idiag_baracc_write_59014 lpfc_idiag_baracc_write 3 59014 NULL
113637 +SyS_preadv_59029 SyS_preadv 3 59029 NULL
113638 +init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
113639 +selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
113640 +crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
113641 +regmap_bulk_write_59049 regmap_bulk_write 2-4 59049 NULL
113642 +sysfs_link_sibling_59078 sysfs_link_sibling 0 59078 NULL
113643 +mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
113644 +scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
113645 +nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
113646 +print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
113647 +framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
113648 +radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
113649 +pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
113650 +md_getxattr_59161 md_getxattr 0 59161 NULL
113651 +ksize_59176 ksize 0 59176 NULL
113652 +setup_window_59178 setup_window 4-2-5-7 59178 NULL
113653 +ocfs2_move_extent_59187 ocfs2_move_extent 2-5 59187 NULL
113654 +xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
113655 +check_mapped_selector_name_59216 check_mapped_selector_name 5 59216 NULL
113656 +dt3155_read_59226 dt3155_read 3 59226 NULL
113657 +paging64_gpte_to_gfn_lvl_59229 paging64_gpte_to_gfn_lvl 0-1-2 59229 NULL
113658 +tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
113659 +nla_len_59258 nla_len 0 59258 NULL
113660 +drbd_bm_write_page_59290 drbd_bm_write_page 2 59290 NULL
113661 +btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
113662 +fd_copyout_59323 fd_copyout 3 59323 NULL
113663 +read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
113664 +rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
113665 +xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
113666 +__map_request_59350 __map_request 0 59350 NULL
113667 +f2fs_fallocate_59377 f2fs_fallocate 3-4 59377 NULL
113668 +pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
113669 +journal_init_dev_59384 journal_init_dev 5 59384 NULL
113670 +__net_get_random_once_59389 __net_get_random_once 2 59389 NULL
113671 +isku_sysfs_read_keys_function_59412 isku_sysfs_read_keys_function 6 59412 NULL
113672 +pci_ctrl_read_59424 pci_ctrl_read 0 59424 NULL
113673 +vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
113674 +SyS_sched_setaffinity_59442 SyS_sched_setaffinity 2 59442 NULL
113675 +fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL
113676 +ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
113677 +mic_vringh_copy_59523 mic_vringh_copy 4 59523 NULL
113678 +mpi_get_nbits_59551 mpi_get_nbits 0 59551 NULL
113679 +tunables_write_59563 tunables_write 3 59563 NULL
113680 +__copy_from_user_ll_nozero_59571 __copy_from_user_ll_nozero 0-3 59571 NULL
113681 +write_pbl_59583 write_pbl 4 59583 NULL
113682 +memdup_user_59590 memdup_user 2 59590 NULL
113683 +xrcdn_free_res_59616 xrcdn_free_res 5 59616 NULL nohasharray
113684 +mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 &xrcdn_free_res_59616
113685 +ath6kl_endpoint_stats_write_59621 ath6kl_endpoint_stats_write 3 59621 NULL
113686 +mtrr_write_59622 mtrr_write 3 59622 NULL
113687 +find_first_zero_bit_59636 find_first_zero_bit 0 59636 NULL
113688 +SyS_setdomainname_59646 SyS_setdomainname 2 59646 NULL
113689 +hidraw_read_59650 hidraw_read 3 59650 NULL
113690 +v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
113691 +__devcgroup_check_permission_59665 __devcgroup_check_permission 0 59665 NULL
113692 +iwl_dbgfs_mac_params_read_59666 iwl_dbgfs_mac_params_read 3 59666 NULL
113693 +alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
113694 +mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
113695 +ioperm_get_59701 ioperm_get 4-3 59701 NULL
113696 +prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
113697 +ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL
113698 +fat_direct_IO_59741 fat_direct_IO 4 59741 NULL
113699 +qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
113700 +strnlen_59746 strnlen 0 59746 NULL
113701 +ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
113702 +long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
113703 +venus_remove_59781 venus_remove 4 59781 NULL
113704 +mei_nfc_recv_59784 mei_nfc_recv 3 59784 NULL
113705 +C_SYSC_preadv_59801 C_SYSC_preadv 3 59801 NULL
113706 +ipw_write_59807 ipw_write 3 59807 NULL
113707 +scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
113708 +ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
113709 +gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
113710 +regmap_raw_write_async_59849 regmap_raw_write_async 2-4 59849 NULL
113711 +pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
113712 +l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
113713 +ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
113714 +ocfs2_extend_rotate_transaction_59894 ocfs2_extend_rotate_transaction 2-3 59894 NULL
113715 +aic7xxx_abort_waiting_scb_59932 aic7xxx_abort_waiting_scb 0 59932 NULL
113716 +kvm_mmu_notifier_invalidate_range_start_59944 kvm_mmu_notifier_invalidate_range_start 3-4 59944 NULL
113717 +dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 NULL nohasharray
113718 +il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 &dapm_widget_power_read_file_59950
113719 +il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL
113720 +__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
113721 +osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
113722 +ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
113723 +ieee80211_if_fmt_dot11MeshAwakeWindowDuration_60006 ieee80211_if_fmt_dot11MeshAwakeWindowDuration 3 60006 NULL
113724 +rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
113725 +mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
113726 +osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
113727 +xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
113728 +bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
113729 +do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
113730 +vcs_size_60050 vcs_size 0 60050 NULL
113731 +gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 NULL
113732 +compat_writev_60063 compat_writev 3 60063 NULL
113733 +ath6kl_listen_int_write_60066 ath6kl_listen_int_write 3 60066 NULL
113734 +c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
113735 +rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
113736 +ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
113737 +SYSC_msgsnd_60113 SYSC_msgsnd 3 60113 NULL
113738 +nfs_idmap_request_key_60124 nfs_idmap_request_key 2 60124 NULL
113739 +__mutex_lock_common_60134 __mutex_lock_common 0 60134 NULL
113740 +ld_usb_read_60156 ld_usb_read 3 60156 NULL
113741 +jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
113742 +init_state_60165 init_state 2 60165 NULL
113743 +jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 NULL nohasharray
113744 +sg_build_sgat_60179 sg_build_sgat 3 60179 &jffs2_alloc_full_dirent_60179
113745 +fuse_async_req_send_60183 fuse_async_req_send 0-3 60183 NULL
113746 +rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
113747 +qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 NULL
113748 +btrfs_get_token_16_60220 btrfs_get_token_16 0 60220 NULL
113749 +irq_alloc_domain_generic_chips_60264 irq_alloc_domain_generic_chips 2-3 60264 NULL
113750 +display_crc_ctl_write_60273 display_crc_ctl_write 3 60273 NULL
113751 +printer_write_60276 printer_write 3 60276 NULL
113752 +do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
113753 +getDataLength_60301 getDataLength 0 60301 NULL
113754 +usb_alphatrack_write_60341 usb_alphatrack_write 3 60341 NULL
113755 +__kfifo_from_user_r_60345 __kfifo_from_user_r 5-3 60345 NULL
113756 +dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
113757 +mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
113758 +ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
113759 +driver_names_read_60399 driver_names_read 3 60399 NULL
113760 +simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
113761 +excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
113762 +kmalloc_60432 kmalloc 1 60432 NULL nohasharray
113763 +tstats_write_60432 tstats_write 3 60432 &kmalloc_60432
113764 +snd_hda_get_num_raw_conns_60462 snd_hda_get_num_raw_conns 0 60462 NULL
113765 +crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
113766 +lustre_msg_early_size_60496 lustre_msg_early_size 0 60496 NULL
113767 +v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
113768 +nonpaging_map_60551 nonpaging_map 4 60551 NULL
113769 +osc_lockless_truncate_seq_write_60553 osc_lockless_truncate_seq_write 3 60553 NULL
113770 +tracing_entries_write_60563 tracing_entries_write 3 60563 NULL
113771 +skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
113772 +wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
113773 +acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
113774 +__proc_lnet_stats_60647 __proc_lnet_stats 5 60647 NULL
113775 +if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
113776 +ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
113777 +vga_rcrt_60731 vga_rcrt 0 60731 NULL
113778 +snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
113779 +raid_status_60755 raid_status 5 60755 NULL
113780 +sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
113781 +opticon_write_60775 opticon_write 4 60775 NULL
113782 +acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
113783 +snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
113784 +pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
113785 +alloc_buf_60864 alloc_buf 3-2 60864 NULL
113786 +generic_writepages_60871 generic_writepages 0 60871 NULL
113787 +ext4_update_inline_data_60888 ext4_update_inline_data 3 60888 NULL
113788 +iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
113789 +libcfs_sock_ioctl_60915 libcfs_sock_ioctl 0 60915 NULL
113790 +mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
113791 +scrub_chunk_60926 scrub_chunk 5 60926 NULL
113792 +submit_extent_page_60928 submit_extent_page 5 60928 NULL
113793 +pti_char_write_60960 pti_char_write 3 60960 NULL
113794 +mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
113795 +__a2mp_build_60987 __a2mp_build 3 60987 NULL
113796 +hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
113797 +ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
113798 +graph_depth_write_61024 graph_depth_write 3 61024 NULL
113799 +sdhci_pltfm_register_61031 sdhci_pltfm_register 3 61031 NULL
113800 +lpfc_idiag_queacc_write_61043 lpfc_idiag_queacc_write 3 61043 NULL
113801 +symtab_init_61050 symtab_init 2 61050 NULL
113802 +fuse_send_write_61053 fuse_send_write 0-4 61053 NULL
113803 +bitmap_scnlistprintf_61062 bitmap_scnlistprintf 0-2 61062 NULL
113804 +ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
113805 +get_derived_key_61100 get_derived_key 4 61100 NULL
113806 +i40e_calculate_l2fpm_size_61104 i40e_calculate_l2fpm_size 0-1-2-3-4 61104 NULL
113807 +alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
113808 +__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL
113809 +vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
113810 +afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
113811 +brcmf_sdio_chip_cr4_exitdl_61143 brcmf_sdio_chip_cr4_exitdl 4 61143 NULL
113812 +osl_malloc_61156 osl_malloc 2 61156 NULL
113813 +pair_device_61175 pair_device 4 61175 NULL nohasharray
113814 +event_oom_late_read_61175 event_oom_late_read 3 61175 &pair_device_61175
113815 +dio_bio_add_page_61178 dio_bio_add_page 0 61178 NULL
113816 +SyS_prctl_61202 SyS_prctl 4 61202 NULL
113817 +arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
113818 +smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
113819 +btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL nohasharray
113820 +find_get_pages_tag_61270 find_get_pages_tag 0 61270 &btrfs_bio_alloc_61270 nohasharray
113821 +ifalias_store_61270 ifalias_store 4 61270 &find_get_pages_tag_61270
113822 +vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL nohasharray
113823 +hfsplus_getxattr_finder_info_61283 hfsplus_getxattr_finder_info 0 61283 &vortex_adbdma_getlinearpos_61283
113824 +nvme_trans_copy_to_user_61288 nvme_trans_copy_to_user 3 61288 NULL
113825 +xfer_from_user_61307 xfer_from_user 3 61307 NULL
113826 +xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
113827 +C_SYSC_msgsnd_61330 C_SYSC_msgsnd 3 61330 NULL
113828 +write_file_spectral_short_repeat_61335 write_file_spectral_short_repeat 3 61335 NULL
113829 +st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
113830 +rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL
113831 +__dm_get_reserved_ios_61342 __dm_get_reserved_ios 0-3-2 61342 NULL
113832 +f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
113833 +debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
113834 +system_enable_write_61396 system_enable_write 3 61396 NULL
113835 +xfs_zero_remaining_bytes_61423 xfs_zero_remaining_bytes 3 61423 NULL
113836 +unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
113837 +snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
113838 +btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
113839 +ocfs2_get_refcount_rec_61514 ocfs2_get_refcount_rec 0 61514 NULL
113840 +erst_errno_61526 erst_errno 0 61526 NULL
113841 +trace_options_core_write_61551 trace_options_core_write 3 61551 NULL
113842 +dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
113843 +parport_pc_fifo_write_block_dma_61568 parport_pc_fifo_write_block_dma 3 61568 NULL
113844 +fan_proc_write_61569 fan_proc_write 3 61569 NULL
113845 +ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
113846 +ldlm_pool_rw_atomic_seq_write_61572 ldlm_pool_rw_atomic_seq_write 3 61572 NULL
113847 +seq_open_private_61589 seq_open_private 3 61589 NULL
113848 +ept_gpte_to_gfn_lvl_61591 ept_gpte_to_gfn_lvl 0-1-2 61591 NULL
113849 +netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
113850 +nfs4_init_uniform_client_string_61601 nfs4_init_uniform_client_string 3 61601 NULL
113851 +configfs_write_file_61621 configfs_write_file 3 61621 NULL
113852 +ieee80211_if_fmt_hw_queues_61629 ieee80211_if_fmt_hw_queues 3 61629 NULL
113853 +i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
113854 +snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
113855 +resize_stripes_61650 resize_stripes 2 61650 NULL
113856 +ttm_page_pool_free_61661 ttm_page_pool_free 2-0 61661 NULL
113857 +insert_one_name_61668 insert_one_name 7 61668 NULL
113858 +qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
113859 +lock_loop_61681 lock_loop 1 61681 NULL
113860 +__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
113861 +filter_read_61692 filter_read 3 61692 NULL
113862 +iov_length_61716 iov_length 0 61716 NULL
113863 +fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
113864 +null_alloc_reqbuf_61719 null_alloc_reqbuf 3 61719 NULL
113865 +read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
113866 +read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
113867 +SyS_sendto_61763 SyS_sendto 6 61763 NULL
113868 +mls_compute_context_len_61812 mls_compute_context_len 0 61812 NULL
113869 +bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
113870 +regcache_sync_block_61846 regcache_sync_block 5-4 61846 NULL
113871 +ath9k_hw_def_dump_eeprom_61853 ath9k_hw_def_dump_eeprom 5-4 61853 NULL
113872 +fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2 61854 NULL
113873 +evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
113874 +SYSC_lsetxattr_61869 SYSC_lsetxattr 4 61869 NULL
113875 +get_fw_name_61874 get_fw_name 3 61874 NULL
113876 +btrfs_ioctl_clone_61886 btrfs_ioctl_clone 3-4-5 61886 NULL
113877 +lprocfs_write_frac_u64_helper_61897 lprocfs_write_frac_u64_helper 2 61897 NULL
113878 +lov_mds_md_stripecnt_61899 lov_mds_md_stripecnt 0-1 61899 NULL
113879 +clear_refs_write_61904 clear_refs_write 3 61904 NULL nohasharray
113880 +import_sec_check_expire_61904 import_sec_check_expire 0 61904 &clear_refs_write_61904
113881 +rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
113882 +au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL
113883 +sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
113884 +edac_device_create_instance_61940 edac_device_create_instance 0 61940 NULL
113885 +SyS_kexec_load_61946 SyS_kexec_load 2 61946 NULL
113886 +il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
113887 +squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
113888 +fix_read_error_61965 fix_read_error 4 61965 NULL
113889 +ocfs2_quota_write_61972 ocfs2_quota_write 4-5 61972 NULL
113890 +fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
113891 +cow_file_range_61979 cow_file_range 3 61979 NULL
113892 +dequeue_event_62000 dequeue_event 3 62000 NULL
113893 +xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
113894 +SyS_setxattr_62019 SyS_setxattr 4 62019 NULL
113895 +jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
113896 +SYSC_select_62024 SYSC_select 1 62024 NULL
113897 +pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
113898 +sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
113899 +do_pselect_62061 do_pselect 1 62061 NULL
113900 +pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
113901 +jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
113902 +btrfs_direct_IO_62114 btrfs_direct_IO 4 62114 NULL
113903 +ip_recv_error_62117 ip_recv_error 3 62117 NULL
113904 +generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL
113905 +llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
113906 +kobject_add_varg_62133 kobject_add_varg 0 62133 NULL nohasharray
113907 +qib_diag_write_62133 qib_diag_write 3 62133 &kobject_add_varg_62133
113908 +device_add_attrs_62135 device_add_attrs 0 62135 NULL nohasharray
113909 +ql_status_62135 ql_status 5 62135 &device_add_attrs_62135
113910 +video_usercopy_62151 video_usercopy 2 62151 NULL
113911 +SyS_getxattr_62166 SyS_getxattr 4 62166 NULL
113912 +prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
113913 +write_file_dfs_62180 write_file_dfs 3 62180 NULL
113914 +alloc_upcall_62186 alloc_upcall 2 62186 NULL
113915 +btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
113916 +sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
113917 +SYSC_setgroups16_62232 SYSC_setgroups16 1 62232 NULL
113918 +nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
113919 +subtract_dirty_62242 subtract_dirty 2-3 62242 NULL
113920 +get_random_int_62279 get_random_int 0 62279 NULL
113921 +il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
113922 +sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
113923 +subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
113924 +Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
113925 +ocfs2_xattr_buckets_per_cluster_62330 ocfs2_xattr_buckets_per_cluster 0 62330 NULL
113926 +subseq_list_62332 subseq_list 3-0 62332 NULL
113927 +ll_statahead_max_seq_write_62333 ll_statahead_max_seq_write 3 62333 NULL
113928 +flash_write_62354 flash_write 3 62354 NULL
113929 +xfpregs_set_62363 xfpregs_set 4 62363 NULL
113930 +rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
113931 +altera_irscan_62396 altera_irscan 2 62396 NULL
113932 +set_ssp_62411 set_ssp 4 62411 NULL
113933 +ext_rts51x_sd_execute_read_data_62501 ext_rts51x_sd_execute_read_data 9 62501 NULL
113934 +pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
113935 +test_iso_queue_62534 test_iso_queue 5 62534 NULL
113936 +debugfs_read_62535 debugfs_read 3 62535 NULL
113937 +sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
113938 +qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
113939 +link_send_sections_long_62557 link_send_sections_long 3 62557 NULL
113940 +xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
113941 +compat_SyS_rt_sigpending_62580 compat_SyS_rt_sigpending 2 62580 NULL
113942 +get_subdir_62581 get_subdir 3 62581 NULL
113943 +nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 NULL
113944 +tipc_port_recv_sections_62609 tipc_port_recv_sections 3 62609 NULL
113945 +dut_mode_write_62630 dut_mode_write 3 62630 NULL
113946 +vfs_fsync_range_62635 vfs_fsync_range 0 62635 NULL
113947 +lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
113948 +ocfs2_wait_for_mask_interruptible_62675 ocfs2_wait_for_mask_interruptible 0 62675 NULL
113949 +printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
113950 +bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
113951 +gfs2_log_write_62717 gfs2_log_write 3 62717 NULL
113952 +rdm_62719 rdm 0 62719 NULL
113953 +obd_ioctl_popdata_62741 obd_ioctl_popdata 3 62741 NULL
113954 +key_replays_read_62746 key_replays_read 3 62746 NULL
113955 +lov_verify_lmm_62747 lov_verify_lmm 2 62747 NULL
113956 +mwifiex_rdeeprom_write_62754 mwifiex_rdeeprom_write 3 62754 NULL
113957 +ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
113958 +C_SYSC_ipc_62776 C_SYSC_ipc 3 62776 NULL
113959 +SyS_sched_getaffinity_62786 SyS_sched_getaffinity 2 62786 NULL
113960 +dm_stats_account_io_62787 dm_stats_account_io 3 62787 NULL
113961 +tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
113962 +__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL
113963 +bio_get_nr_vecs_62838 bio_get_nr_vecs 0 62838 NULL
113964 +xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
113965 +rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
113966 +read_nic_io_dword_62859 read_nic_io_dword 0 62859 NULL
113967 +l2tp_ip6_recvmsg_62874 l2tp_ip6_recvmsg 4 62874 NULL
113968 +aoechr_write_62883 aoechr_write 3 62883 NULL
113969 +if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
113970 +ocfs2_validate_gd_parent_62905 ocfs2_validate_gd_parent 0 62905 NULL
113971 +mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
113972 +getdqbuf_62908 getdqbuf 1 62908 NULL
113973 +ll_statahead_agl_seq_write_62928 ll_statahead_agl_seq_write 3 62928 NULL
113974 +agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
113975 +kstrtoull_from_user_63026 kstrtoull_from_user 2 63026 NULL
113976 +__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL
113977 +pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
113978 +scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
113979 +unlink1_63059 unlink1 3 63059 NULL
113980 +xen_set_nslabs_63066 xen_set_nslabs 0 63066 NULL
113981 +iwl_dbgfs_fw_rx_stats_read_63070 iwl_dbgfs_fw_rx_stats_read 3 63070 NULL
113982 +sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5 63087 NULL
113983 +iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
113984 +ext4_chunk_trans_blocks_63123 ext4_chunk_trans_blocks 0-2 63123 NULL
113985 +smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
113986 +SyS_syslog_63178 SyS_syslog 3 63178 NULL
113987 +vme_master_read_63221 vme_master_read 0 63221 NULL
113988 +SyS_gethostname_63227 SyS_gethostname 2 63227 NULL
113989 +ptp_read_63251 ptp_read 4 63251 NULL
113990 +xfs_dir2_leaf_getdents_63262 xfs_dir2_leaf_getdents 3 63262 NULL
113991 +raid5_resize_63306 raid5_resize 2 63306 NULL
113992 +proc_info_read_63344 proc_info_read 3 63344 NULL
113993 +ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
113994 +idmouse_read_63374 idmouse_read 3 63374 NULL
113995 +usbnet_read_cmd_nopm_63388 usbnet_read_cmd_nopm 7 63388 NULL nohasharray
113996 +edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 &usbnet_read_cmd_nopm_63388
113997 +rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
113998 +nouveau_event_create_63411 nouveau_event_create 1 63411 NULL
113999 +l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
114000 +nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
114001 +si5351_bulk_write_63468 si5351_bulk_write 2-3 63468 NULL
114002 +snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
114003 +reada_find_extent_63486 reada_find_extent 2 63486 NULL
114004 +read_kcore_63488 read_kcore 3 63488 NULL
114005 +snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
114006 +efx_mcdi_rpc_async_63529 efx_mcdi_rpc_async 4-5 63529 NULL
114007 +ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
114008 +write_file_spectral_period_63536 write_file_spectral_period 3 63536 NULL
114009 +if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
114010 +append_to_buffer_63550 append_to_buffer 3 63550 NULL
114011 +kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 NULL
114012 +rproc_alloc_63577 rproc_alloc 5 63577 NULL
114013 +write_debug_level_63613 write_debug_level 3 63613 NULL
114014 +symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
114015 +proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
114016 +ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
114017 +ldlm_cli_enqueue_63657 ldlm_cli_enqueue 8 63657 NULL
114018 +hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
114019 +vbi_read_63673 vbi_read 3 63673 NULL
114020 +write_file_spectral_fft_period_63696 write_file_spectral_fft_period 3 63696 NULL
114021 +nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
114022 +btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
114023 +selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
114024 +snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
114025 +snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
114026 +spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
114027 +mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
114028 +copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
114029 +prepare_copy_63826 prepare_copy 2 63826 NULL
114030 +sel_write_load_63830 sel_write_load 3 63830 NULL
114031 +ll_readlink_63836 ll_readlink 3 63836 NULL
114032 +proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
114033 +xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL
114034 +uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-2-3 63922 NULL
114035 +snd_compr_write_63923 snd_compr_write 3 63923 NULL
114036 +afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
114037 +__team_options_register_63941 __team_options_register 3 63941 NULL
114038 +macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
114039 +sysfs_add_one_63969 sysfs_add_one 0 63969 NULL
114040 +set_bredr_63975 set_bredr 4 63975 NULL
114041 +construct_key_and_link_63985 construct_key_and_link 3 63985 NULL
114042 +rs_extent_to_bm_page_63996 rs_extent_to_bm_page 0-1 63996 NULL
114043 +read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
114044 +hfsplus_security_setxattr_64009 hfsplus_security_setxattr 4 64009 NULL
114045 +SyS_rt_sigpending_64018 SyS_rt_sigpending 2 64018 NULL
114046 +dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
114047 +SyS_fsetxattr_64039 SyS_fsetxattr 4 64039 NULL
114048 +get_u8_64076 get_u8 0 64076 NULL
114049 +xilly_malloc_64077 xilly_malloc 2 64077 NULL
114050 +sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
114051 +vmci_handle_arr_get_size_64088 vmci_handle_arr_get_size 0 64088 NULL
114052 +lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
114053 +SyS_set_mempolicy_64096 SyS_set_mempolicy 3 64096 NULL
114054 +SyS_mq_timedsend_64107 SyS_mq_timedsend 3 64107 NULL
114055 +rdma_addr_size_64116 rdma_addr_size 0 64116 NULL
114056 +do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
114057 +bypass_wd_write_64120 bypass_wd_write 3 64120 NULL
114058 +ext4_prepare_inline_data_64124 ext4_prepare_inline_data 3 64124 NULL
114059 +init_bch_64130 init_bch 1-2 64130 NULL
114060 +ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
114061 +read_div_64147 read_div 0 64147 NULL
114062 +dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
114063 +cpumask_scnprintf_64170 cpumask_scnprintf 0-2 64170 NULL
114064 +xfs_vm_direct_IO_64223 xfs_vm_direct_IO 4 64223 NULL
114065 +read_pulse_64227 read_pulse 0-3 64227 NULL
114066 +ea_len_64229 ea_len 0 64229 NULL
114067 +io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
114068 +btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
114069 +sta_current_tx_rate_read_64286 sta_current_tx_rate_read 3 64286 NULL
114070 +xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 NULL nohasharray
114071 +event_id_read_64288 event_id_read 3 64288 &xfs_dir_cilookup_result_64288
114072 +ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
114073 +snd_hda_get_sub_nodes_64304 snd_hda_get_sub_nodes 0 64304 NULL
114074 +error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
114075 +sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
114076 +ts_write_64336 ts_write 3 64336 NULL
114077 +usbtmc_write_64340 usbtmc_write 3 64340 NULL
114078 +bnx2x_vfop_mcast_cmd_64354 bnx2x_vfop_mcast_cmd 5 64354 NULL
114079 +user_regset_copyin_64360 user_regset_copyin 7 64360 NULL
114080 +wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
114081 +reg_create_64372 reg_create 5 64372 NULL
114082 +ilo_write_64378 ilo_write 3 64378 NULL
114083 +btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
114084 +vmcs_readl_64381 vmcs_readl 0 64381 NULL
114085 +nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
114086 +ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
114087 +pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
114088 +rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
114089 +snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
114090 +keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
114091 +oom_adj_write_64428 oom_adj_write 3 64428 NULL
114092 +read_file_spectral_short_repeat_64431 read_file_spectral_short_repeat 3 64431 NULL
114093 +ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
114094 +single_open_size_64483 single_open_size 4 64483 NULL
114095 +p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
114096 +msg_data_sz_64503 msg_data_sz 0 64503 NULL
114097 +remove_uuid_64505 remove_uuid 4 64505 NULL
114098 +crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
114099 +opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
114100 +iwl_dbgfs_ucode_tracing_write_64524 iwl_dbgfs_ucode_tracing_write 3 64524 NULL
114101 +ses_send_diag_64527 ses_send_diag 4 64527 NULL
114102 +prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
114103 +SyS_bind_64544 SyS_bind 3 64544 NULL
114104 +rbd_obj_read_sync_64554 rbd_obj_read_sync 4-3 64554 NULL
114105 +__btrfs_prealloc_file_range_64557 __btrfs_prealloc_file_range 3 64557 NULL
114106 +__spi_sync_64561 __spi_sync 0 64561 NULL nohasharray
114107 +ll_max_rw_chunk_seq_write_64561 ll_max_rw_chunk_seq_write 3 64561 &__spi_sync_64561
114108 +__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
114109 +kstrtoul_from_user_64569 kstrtoul_from_user 2 64569 NULL
114110 +do_erase_64574 do_erase 4 64574 NULL
114111 +fanotify_write_64623 fanotify_write 3 64623 NULL
114112 +regmap_read_debugfs_64658 regmap_read_debugfs 5 64658 NULL
114113 +ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL nohasharray
114114 +tlbflush_read_file_64661 tlbflush_read_file 3 64661 &ocfs2_read_xattr_block_64661
114115 +efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
114116 +rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
114117 +nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
114118 +sec_bulk_write_64691 sec_bulk_write 2-3 64691 NULL
114119 +snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
114120 +dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
114121 +atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
114122 +i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
114123 +AscGetChipVersion_64737 AscGetChipVersion 0 64737 NULL
114124 +squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
114125 +bio_map_kern_64751 bio_map_kern 3 64751 NULL
114126 +rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
114127 +message_for_md_64777 message_for_md 5 64777 NULL
114128 +isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
114129 +regmap_reg_ranges_read_file_64798 regmap_reg_ranges_read_file 3 64798 NULL
114130 +nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
114131 +rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL
114132 +proc_projid_map_write_64810 proc_projid_map_write 3 64810 NULL
114133 +megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
114134 +ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
114135 +do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
114136 +altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
114137 +lprocfs_write_u64_helper_64880 lprocfs_write_u64_helper 2 64880 NULL
114138 +ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
114139 +ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
114140 +ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
114141 +traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
114142 +suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
114143 +crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
114144 +insert_dent_65034 insert_dent 7 65034 NULL
114145 +snd_hda_get_pin_label_65035 snd_hda_get_pin_label 5 65035 NULL
114146 +ext4_ind_trans_blocks_65053 ext4_ind_trans_blocks 0-2 65053 NULL
114147 +pcibios_enable_device_65059 pcibios_enable_device 0 65059 NULL
114148 +__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
114149 +batadv_socket_write_65083 batadv_socket_write 3 65083 NULL
114150 +ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
114151 +ath9k_dump_mci_btcoex_65090 ath9k_dump_mci_btcoex 3-0 65090 NULL
114152 +uasp_alloc_cmd_65097 uasp_alloc_cmd 0 65097 NULL
114153 +generic_ocp_write_65107 generic_ocp_write 4 65107 NULL
114154 +rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
114155 +print_endpoint_stat_65232 print_endpoint_stat 3-4-0 65232 NULL
114156 +whci_n_caps_65247 whci_n_caps 0 65247 NULL
114157 +kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
114158 +compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
114159 +mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
114160 +redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
114161 +get_var_len_65304 get_var_len 0 65304 NULL
114162 +unpack_array_65318 unpack_array 0 65318 NULL
114163 +pci_vpd_find_tag_65325 pci_vpd_find_tag 0-2 65325 NULL
114164 +dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
114165 +dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
114166 +alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
114167 +SyS_writev_65372 SyS_writev 3 65372 NULL
114168 +__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
114169 +trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
114170 +__read_vmcore_65402 __read_vmcore 2 65402 NULL
114171 +usb_ep_enable_65405 usb_ep_enable 0 65405 NULL
114172 +ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 3-4 65410 NULL
114173 +device_add_groups_65423 device_add_groups 0 65423 NULL
114174 +xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
114175 +usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
114176 +il_dbgfs_wd_timeout_write_65464 il_dbgfs_wd_timeout_write 3 65464 NULL
114177 +clear_user_65470 clear_user 2 65470 NULL
114178 +dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL
114179 +lookup_inline_extent_backref_65493 lookup_inline_extent_backref 9 65493 NULL
114180 +nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
114181 +tree_mod_log_eb_copy_65535 tree_mod_log_eb_copy 6 65535 NULL
114182 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
114183 new file mode 100644
114184 index 0000000..fa0524c
114185 --- /dev/null
114186 +++ b/tools/gcc/size_overflow_plugin.c
114187 @@ -0,0 +1,4101 @@
114188 +/*
114189 + * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
114190 + * Licensed under the GPL v2, or (at your option) v3
114191 + *
114192 + * Homepage:
114193 + * http://www.grsecurity.net/~ephox/overflow_plugin/
114194 + *
114195 + * Documentation:
114196 + * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
114197 + *
114198 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
114199 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
114200 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
114201 + *
114202 + * Usage:
114203 + * $ # for 4.5/4.6/C based 4.7
114204 + * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu99 -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
114205 + * $ # for C++ based 4.7/4.8+
114206 + * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu++98 -fno-rtti -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
114207 + *
114208 + * $ gcc -fplugin=./size_overflow_plugin.so test.c -O2
114209 + */
114210 +
114211 +#include "gcc-common.h"
114212 +
114213 +int plugin_is_GPL_compatible;
114214 +
114215 +static struct plugin_info size_overflow_plugin_info = {
114216 + .version = "20140317",
114217 + .help = "no-size-overflow\tturn off size overflow checking\n",
114218 +};
114219 +
114220 +#define BEFORE_STMT true
114221 +#define AFTER_STMT false
114222 +#define CREATE_NEW_VAR NULL_TREE
114223 +#define CODES_LIMIT 32
114224 +#define MAX_PARAM 31
114225 +#define VEC_LEN 128
114226 +#define RET_CHECK NULL_TREE
114227 +#define CANNOT_FIND_ARG 32
114228 +#define WRONG_NODE 32
114229 +#define NOT_INTENTIONAL_ASM NULL
114230 +#define MIN_CHECK true
114231 +#define MAX_CHECK false
114232 +
114233 +#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
114234 +#define YES_ASM_STR "# size_overflow MARK_YES "
114235 +#define OK_ASM_STR "# size_overflow "
114236 +
114237 +struct size_overflow_hash {
114238 + const struct size_overflow_hash * const next;
114239 + const char * const name;
114240 + const unsigned int param;
114241 +};
114242 +
114243 +#include "size_overflow_hash.h"
114244 +
114245 +enum mark {
114246 + MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
114247 +};
114248 +
114249 +static unsigned int call_count;
114250 +
114251 +enum stmt_flags {
114252 + MY_STMT, NO_CAST_CHECK, VISITED_STMT, NO_FLAGS
114253 +};
114254 +
114255 +struct visited {
114256 + struct visited *next;
114257 + const_tree fndecl;
114258 + unsigned int num;
114259 +};
114260 +
114261 +struct next_cgraph_node {
114262 + struct next_cgraph_node *next;
114263 + struct cgraph_node *current_function;
114264 + tree callee_fndecl;
114265 + unsigned int num;
114266 +};
114267 +
114268 +struct interesting_node {
114269 + struct interesting_node *next;
114270 + gimple first_stmt;
114271 + const_tree fndecl;
114272 + tree node;
114273 +#if BUILDING_GCC_VERSION <= 4007
114274 + VEC(tree, gc) *last_nodes;
114275 +#else
114276 + vec<tree, va_gc> *last_nodes;
114277 +#endif
114278 + unsigned int num;
114279 + enum mark intentional_attr_decl;
114280 + enum mark intentional_attr_cur_fndecl;
114281 + gimple intentional_mark_from_gimple;
114282 +};
114283 +
114284 +static tree report_size_overflow_decl;
114285 +
114286 +static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
114287 +static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
114288 +static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
114289 +static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
114290 +static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
114291 +
114292 +static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
114293 +static tree get_size_overflow_type(gimple stmt, const_tree node);
114294 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
114295 +
114296 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
114297 +{
114298 + unsigned int arg_count;
114299 + enum tree_code code = TREE_CODE(*node);
114300 +
114301 + switch (code) {
114302 + case FUNCTION_DECL:
114303 + arg_count = type_num_arguments(TREE_TYPE(*node));
114304 + break;
114305 + case FUNCTION_TYPE:
114306 + case METHOD_TYPE:
114307 + arg_count = type_num_arguments(*node);
114308 + break;
114309 + default:
114310 + *no_add_attrs = true;
114311 + error("%s: %qE attribute only applies to functions", __func__, name);
114312 + return NULL_TREE;
114313 + }
114314 +
114315 + for (; args; args = TREE_CHAIN(args)) {
114316 + tree position = TREE_VALUE(args);
114317 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
114318 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
114319 + *no_add_attrs = true;
114320 + }
114321 + }
114322 + return NULL_TREE;
114323 +}
114324 +
114325 +static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
114326 +{
114327 + unsigned int arg_count;
114328 + enum tree_code code = TREE_CODE(*node);
114329 +
114330 + switch (code) {
114331 + case FUNCTION_DECL:
114332 + arg_count = type_num_arguments(TREE_TYPE(*node));
114333 + break;
114334 + case FUNCTION_TYPE:
114335 + case METHOD_TYPE:
114336 + arg_count = type_num_arguments(*node);
114337 + break;
114338 + case FIELD_DECL:
114339 + return NULL_TREE;
114340 + default:
114341 + *no_add_attrs = true;
114342 + error("%qE attribute only applies to functions", name);
114343 + return NULL_TREE;
114344 + }
114345 +
114346 + if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
114347 + return NULL_TREE;
114348 +
114349 + for (; args; args = TREE_CHAIN(args)) {
114350 + tree position = TREE_VALUE(args);
114351 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
114352 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
114353 + *no_add_attrs = true;
114354 + }
114355 + }
114356 + return NULL_TREE;
114357 +}
114358 +
114359 +static struct attribute_spec size_overflow_attr = {
114360 + .name = "size_overflow",
114361 + .min_length = 1,
114362 + .max_length = -1,
114363 + .decl_required = true,
114364 + .type_required = false,
114365 + .function_type_required = false,
114366 + .handler = handle_size_overflow_attribute,
114367 +#if BUILDING_GCC_VERSION >= 4007
114368 + .affects_type_identity = false
114369 +#endif
114370 +};
114371 +
114372 +static struct attribute_spec intentional_overflow_attr = {
114373 + .name = "intentional_overflow",
114374 + .min_length = 1,
114375 + .max_length = -1,
114376 + .decl_required = true,
114377 + .type_required = false,
114378 + .function_type_required = false,
114379 + .handler = handle_intentional_overflow_attribute,
114380 +#if BUILDING_GCC_VERSION >= 4007
114381 + .affects_type_identity = false
114382 +#endif
114383 +};
114384 +
114385 +static void register_attributes(void __unused *event_data, void __unused *data)
114386 +{
114387 + register_attribute(&size_overflow_attr);
114388 + register_attribute(&intentional_overflow_attr);
114389 +}
114390 +
114391 +static enum stmt_flags get_stmt_flag(gimple stmt)
114392 +{
114393 + bool bit_1, bit_2;
114394 +
114395 + bit_1 = gimple_plf(stmt, GF_PLF_1);
114396 + bit_2 = gimple_plf(stmt, GF_PLF_2);
114397 +
114398 + if (!bit_1 && !bit_2)
114399 + return NO_FLAGS;
114400 + if (bit_1 && bit_2)
114401 + return MY_STMT;
114402 + if (!bit_1 && bit_2)
114403 + return VISITED_STMT;
114404 + return NO_CAST_CHECK;
114405 +}
114406 +
114407 +static void set_stmt_flag(gimple stmt, enum stmt_flags new_flag)
114408 +{
114409 + bool bit_1, bit_2;
114410 +
114411 + switch (new_flag) {
114412 + case NO_FLAGS:
114413 + bit_1 = bit_2 = false;
114414 + break;
114415 + case MY_STMT:
114416 + bit_1 = bit_2 = true;
114417 + break;
114418 + case VISITED_STMT:
114419 + bit_1 = false;
114420 + bit_2 = true;
114421 + break;
114422 + case NO_CAST_CHECK:
114423 + bit_1 = true;
114424 + bit_2 = false;
114425 + break;
114426 + default:
114427 + gcc_unreachable();
114428 + }
114429 +
114430 + gimple_set_plf(stmt, GF_PLF_1, bit_1);
114431 + gimple_set_plf(stmt, GF_PLF_2, bit_2);
114432 +}
114433 +
114434 +static bool is_bool(const_tree node)
114435 +{
114436 + const_tree type;
114437 +
114438 + if (node == NULL_TREE)
114439 + return false;
114440 +
114441 + type = TREE_TYPE(node);
114442 + if (!INTEGRAL_TYPE_P(type))
114443 + return false;
114444 + if (TREE_CODE(type) == BOOLEAN_TYPE)
114445 + return true;
114446 + if (TYPE_PRECISION(type) == 1)
114447 + return true;
114448 + return false;
114449 +}
114450 +
114451 +static bool skip_types(const_tree var)
114452 +{
114453 + tree type;
114454 + enum tree_code code;
114455 +
114456 + if (is_gimple_constant(var))
114457 + return true;
114458 +
114459 + switch (TREE_CODE(var)) {
114460 + case ADDR_EXPR:
114461 +#if BUILDING_GCC_VERSION >= 4006
114462 + case MEM_REF:
114463 +#endif
114464 + case ARRAY_REF:
114465 + case BIT_FIELD_REF:
114466 + case INDIRECT_REF:
114467 + case TARGET_MEM_REF:
114468 + case COMPONENT_REF:
114469 + case VAR_DECL:
114470 + case VIEW_CONVERT_EXPR:
114471 + return true;
114472 + default:
114473 + break;
114474 + }
114475 +
114476 + code = TREE_CODE(var);
114477 + gcc_assert(code == SSA_NAME || code == PARM_DECL);
114478 +
114479 + type = TREE_TYPE(var);
114480 + switch (TREE_CODE(type)) {
114481 + case INTEGER_TYPE:
114482 + case ENUMERAL_TYPE:
114483 + return false;
114484 + case BOOLEAN_TYPE:
114485 + return is_bool(var);
114486 + default:
114487 + return true;
114488 + }
114489 +}
114490 +
114491 +static inline gimple get_def_stmt(const_tree node)
114492 +{
114493 + gcc_assert(node != NULL_TREE);
114494 +
114495 + if (skip_types(node))
114496 + return NULL;
114497 +
114498 + if (TREE_CODE(node) != SSA_NAME)
114499 + return NULL;
114500 + return SSA_NAME_DEF_STMT(node);
114501 +}
114502 +
114503 +static unsigned char get_tree_code(const_tree type)
114504 +{
114505 + switch (TREE_CODE(type)) {
114506 + case ARRAY_TYPE:
114507 + return 0;
114508 + case BOOLEAN_TYPE:
114509 + return 1;
114510 + case ENUMERAL_TYPE:
114511 + return 2;
114512 + case FUNCTION_TYPE:
114513 + return 3;
114514 + case INTEGER_TYPE:
114515 + return 4;
114516 + case POINTER_TYPE:
114517 + return 5;
114518 + case RECORD_TYPE:
114519 + return 6;
114520 + case UNION_TYPE:
114521 + return 7;
114522 + case VOID_TYPE:
114523 + return 8;
114524 + case REAL_TYPE:
114525 + return 9;
114526 + case VECTOR_TYPE:
114527 + return 10;
114528 + case REFERENCE_TYPE:
114529 + return 11;
114530 + case OFFSET_TYPE:
114531 + return 12;
114532 + case COMPLEX_TYPE:
114533 + return 13;
114534 + default:
114535 + debug_tree((tree)type);
114536 + gcc_unreachable();
114537 + }
114538 +}
114539 +
114540 +struct function_hash {
114541 + size_t tree_codes_len;
114542 + unsigned char tree_codes[CODES_LIMIT];
114543 + const_tree fndecl;
114544 + unsigned int hash;
114545 +};
114546 +
114547 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
114548 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
114549 +{
114550 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
114551 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
114552 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
114553 +
114554 + unsigned int m = 0x57559429;
114555 + unsigned int n = 0x5052acdb;
114556 + const unsigned int *key4 = (const unsigned int *)key;
114557 + unsigned int h = len;
114558 + unsigned int k = len + seed + n;
114559 + unsigned long long p;
114560 +
114561 + while (len >= 8) {
114562 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
114563 + len -= 8;
114564 + }
114565 + if (len >= 4) {
114566 + cwmixb(key4[0]) key4 += 1;
114567 + len -= 4;
114568 + }
114569 + if (len)
114570 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
114571 + cwmixb(h ^ (k + n));
114572 + return k ^ h;
114573 +
114574 +#undef cwfold
114575 +#undef cwmixa
114576 +#undef cwmixb
114577 +}
114578 +
114579 +static void set_hash(const char *fn_name, struct function_hash *fn_hash_data)
114580 +{
114581 + unsigned int fn, codes, seed = 0;
114582 +
114583 + fn = CrapWow(fn_name, strlen(fn_name), seed) & 0xffff;
114584 + codes = CrapWow((const char*)fn_hash_data->tree_codes, fn_hash_data->tree_codes_len, seed) & 0xffff;
114585 +
114586 + fn_hash_data->hash = fn ^ codes;
114587 +}
114588 +
114589 +static void set_node_codes(const_tree type, struct function_hash *fn_hash_data)
114590 +{
114591 + gcc_assert(type != NULL_TREE);
114592 + gcc_assert(TREE_CODE_CLASS(TREE_CODE(type)) == tcc_type);
114593 +
114594 + while (type && fn_hash_data->tree_codes_len < CODES_LIMIT) {
114595 + fn_hash_data->tree_codes[fn_hash_data->tree_codes_len] = get_tree_code(type);
114596 + fn_hash_data->tree_codes_len++;
114597 + type = TREE_TYPE(type);
114598 + }
114599 +}
114600 +
114601 +static void set_result_codes(const_tree node, struct function_hash *fn_hash_data)
114602 +{
114603 + const_tree result;
114604 +
114605 + gcc_assert(node != NULL_TREE);
114606 +
114607 + if (DECL_P(node)) {
114608 + result = DECL_RESULT(node);
114609 + if (result != NULL_TREE)
114610 + return set_node_codes(TREE_TYPE(result), fn_hash_data);
114611 + return set_result_codes(TREE_TYPE(node), fn_hash_data);
114612 + }
114613 +
114614 + gcc_assert(TYPE_P(node));
114615 +
114616 + if (TREE_CODE(node) == FUNCTION_TYPE)
114617 + return set_result_codes(TREE_TYPE(node), fn_hash_data);
114618 +
114619 + return set_node_codes(node, fn_hash_data);
114620 +}
114621 +
114622 +static void set_function_codes(struct function_hash *fn_hash_data)
114623 +{
114624 + const_tree arg, type = TREE_TYPE(fn_hash_data->fndecl);
114625 + enum tree_code code = TREE_CODE(type);
114626 +
114627 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
114628 +
114629 + set_result_codes(fn_hash_data->fndecl, fn_hash_data);
114630 +
114631 + for (arg = TYPE_ARG_TYPES(type); arg != NULL_TREE && fn_hash_data->tree_codes_len < CODES_LIMIT; arg = TREE_CHAIN(arg))
114632 + set_node_codes(TREE_VALUE(arg), fn_hash_data);
114633 +}
114634 +
114635 +static const struct size_overflow_hash *get_function_hash(const_tree fndecl)
114636 +{
114637 + const struct size_overflow_hash *entry;
114638 + struct function_hash fn_hash_data;
114639 + const char *func_name;
114640 +
114641 + // skip builtins __builtin_constant_p
114642 + if (DECL_BUILT_IN(fndecl))
114643 + return NULL;
114644 +
114645 + fn_hash_data.fndecl = fndecl;
114646 + fn_hash_data.tree_codes_len = 0;
114647 +
114648 + set_function_codes(&fn_hash_data);
114649 + gcc_assert(fn_hash_data.tree_codes_len != 0);
114650 +
114651 + func_name = DECL_NAME_POINTER(fn_hash_data.fndecl);
114652 + set_hash(func_name, &fn_hash_data);
114653 +
114654 + entry = size_overflow_hash[fn_hash_data.hash];
114655 +
114656 + while (entry) {
114657 + if (!strcmp(entry->name, func_name))
114658 + return entry;
114659 + entry = entry->next;
114660 + }
114661 + return NULL;
114662 +}
114663 +
114664 +static void print_missing_msg(const_tree func, unsigned int argnum)
114665 +{
114666 + location_t loc;
114667 + const char *curfunc;
114668 + struct function_hash fn_hash_data;
114669 +
114670 + fn_hash_data.fndecl = DECL_ORIGIN(func);
114671 + fn_hash_data.tree_codes_len = 0;
114672 +
114673 + loc = DECL_SOURCE_LOCATION(fn_hash_data.fndecl);
114674 + curfunc = DECL_NAME_POINTER(fn_hash_data.fndecl);
114675 +
114676 + set_function_codes(&fn_hash_data);
114677 + set_hash(curfunc, &fn_hash_data);
114678 +
114679 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
114680 +}
114681 +
114682 +static unsigned int find_arg_number_tree(const_tree arg, const_tree func)
114683 +{
114684 + tree var;
114685 + unsigned int argnum = 1;
114686 +
114687 + if (TREE_CODE(arg) == SSA_NAME)
114688 + arg = SSA_NAME_VAR(arg);
114689 +
114690 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
114691 + if (!operand_equal_p(arg, var, 0) && strcmp(DECL_NAME_POINTER(var), DECL_NAME_POINTER(arg)))
114692 + continue;
114693 + if (!skip_types(var))
114694 + return argnum;
114695 + }
114696 +
114697 + return CANNOT_FIND_ARG;
114698 +}
114699 +
114700 +static tree create_new_var(tree type)
114701 +{
114702 + tree new_var = create_tmp_var(type, "cicus");
114703 +
114704 + add_referenced_var(new_var);
114705 + return new_var;
114706 +}
114707 +
114708 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
114709 +{
114710 + gimple assign;
114711 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
114712 + tree type = TREE_TYPE(rhs1);
114713 + tree lhs = create_new_var(type);
114714 +
114715 + gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
114716 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
114717 + gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
114718 +
114719 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
114720 + update_stmt(assign);
114721 + set_stmt_flag(assign, MY_STMT);
114722 + return assign;
114723 +}
114724 +
114725 +static tree cast_a_tree(tree type, tree var)
114726 +{
114727 + gcc_assert(type != NULL_TREE);
114728 + gcc_assert(var != NULL_TREE);
114729 + gcc_assert(fold_convertible_p(type, var));
114730 +
114731 + return fold_convert(type, var);
114732 +}
114733 +
114734 +static tree get_lhs(const_gimple stmt)
114735 +{
114736 + switch (gimple_code(stmt)) {
114737 + case GIMPLE_ASSIGN:
114738 + case GIMPLE_CALL:
114739 + return gimple_get_lhs(stmt);
114740 + case GIMPLE_PHI:
114741 + return gimple_phi_result(stmt);
114742 + default:
114743 + return NULL_TREE;
114744 + }
114745 +}
114746 +
114747 +static bool skip_cast(tree dst_type, const_tree rhs, bool force)
114748 +{
114749 + const_gimple def_stmt = get_def_stmt(rhs);
114750 +
114751 + if (force)
114752 + return false;
114753 +
114754 + if (is_gimple_constant(rhs))
114755 + return false;
114756 +
114757 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
114758 + return false;
114759 +
114760 + if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
114761 + return false;
114762 +
114763 + // DI type can be on 32 bit (from create_assign) but overflow type stays DI
114764 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
114765 + return false;
114766 +
114767 + return true;
114768 +}
114769 +
114770 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
114771 +{
114772 + gimple assign, def_stmt;
114773 +
114774 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
114775 + if (gsi_end_p(*gsi) && before == AFTER_STMT)
114776 + gcc_unreachable();
114777 +
114778 + def_stmt = get_def_stmt(rhs);
114779 + if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && get_stmt_flag(def_stmt) == MY_STMT)
114780 + return def_stmt;
114781 +
114782 + if (lhs == CREATE_NEW_VAR)
114783 + lhs = create_new_var(dst_type);
114784 +
114785 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
114786 +
114787 + if (!gsi_end_p(*gsi)) {
114788 + location_t loc = gimple_location(gsi_stmt(*gsi));
114789 + gimple_set_location(assign, loc);
114790 + }
114791 +
114792 + gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
114793 +
114794 + if (before)
114795 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
114796 + else
114797 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
114798 + update_stmt(assign);
114799 + return assign;
114800 +}
114801 +
114802 +static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
114803 +{
114804 + gimple_stmt_iterator gsi;
114805 + tree lhs;
114806 + gimple new_stmt;
114807 +
114808 + if (rhs == NULL_TREE)
114809 + return NULL_TREE;
114810 +
114811 + gsi = gsi_for_stmt(stmt);
114812 + new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
114813 + set_stmt_flag(new_stmt, MY_STMT);
114814 +
114815 + lhs = get_lhs(new_stmt);
114816 + gcc_assert(lhs != NULL_TREE);
114817 + return lhs;
114818 +}
114819 +
114820 +static tree cast_to_TI_type(gimple stmt, tree node)
114821 +{
114822 + gimple_stmt_iterator gsi;
114823 + gimple cast_stmt;
114824 + tree type = TREE_TYPE(node);
114825 +
114826 + if (types_compatible_p(type, intTI_type_node))
114827 + return node;
114828 +
114829 + gsi = gsi_for_stmt(stmt);
114830 + cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
114831 + set_stmt_flag(cast_stmt, MY_STMT);
114832 + return gimple_assign_lhs(cast_stmt);
114833 +}
114834 +
114835 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
114836 +{
114837 + tree lhs, new_lhs;
114838 + gimple_stmt_iterator gsi;
114839 +
114840 + if (rhs1 == NULL_TREE) {
114841 + debug_gimple_stmt(oldstmt);
114842 + error("%s: rhs1 is NULL_TREE", __func__);
114843 + gcc_unreachable();
114844 + }
114845 +
114846 + switch (gimple_code(oldstmt)) {
114847 + case GIMPLE_ASM:
114848 + lhs = rhs1;
114849 + break;
114850 + case GIMPLE_CALL:
114851 + case GIMPLE_ASSIGN:
114852 + lhs = gimple_get_lhs(oldstmt);
114853 + break;
114854 + default:
114855 + debug_gimple_stmt(oldstmt);
114856 + gcc_unreachable();
114857 + }
114858 +
114859 + gsi = gsi_for_stmt(oldstmt);
114860 + pointer_set_insert(visited, oldstmt);
114861 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
114862 + basic_block next_bb, cur_bb;
114863 + const_edge e;
114864 +
114865 + gcc_assert(before == false);
114866 + gcc_assert(stmt_can_throw_internal(oldstmt));
114867 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
114868 + gcc_assert(!gsi_end_p(gsi));
114869 +
114870 + cur_bb = gimple_bb(oldstmt);
114871 + next_bb = cur_bb->next_bb;
114872 + e = find_edge(cur_bb, next_bb);
114873 + gcc_assert(e != NULL);
114874 + gcc_assert(e->flags & EDGE_FALLTHRU);
114875 +
114876 + gsi = gsi_after_labels(next_bb);
114877 + gcc_assert(!gsi_end_p(gsi));
114878 +
114879 + before = true;
114880 + oldstmt = gsi_stmt(gsi);
114881 + }
114882 +
114883 + new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
114884 + return new_lhs;
114885 +}
114886 +
114887 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
114888 +{
114889 + gimple stmt;
114890 + gimple_stmt_iterator gsi;
114891 + tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
114892 +
114893 + if (get_stmt_flag(oldstmt) == MY_STMT)
114894 + return lhs;
114895 +
114896 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
114897 + rhs1 = gimple_assign_rhs1(oldstmt);
114898 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
114899 + }
114900 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
114901 + rhs2 = gimple_assign_rhs2(oldstmt);
114902 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
114903 + }
114904 +
114905 + stmt = gimple_copy(oldstmt);
114906 + gimple_set_location(stmt, gimple_location(oldstmt));
114907 + set_stmt_flag(stmt, MY_STMT);
114908 +
114909 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
114910 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
114911 +
114912 + size_overflow_type = get_size_overflow_type(oldstmt, node);
114913 +
114914 + new_var = create_new_var(size_overflow_type);
114915 + new_var = make_ssa_name(new_var, stmt);
114916 + gimple_assign_set_lhs(stmt, new_var);
114917 +
114918 + if (rhs1 != NULL_TREE)
114919 + gimple_assign_set_rhs1(stmt, rhs1);
114920 +
114921 + if (rhs2 != NULL_TREE)
114922 + gimple_assign_set_rhs2(stmt, rhs2);
114923 +#if BUILDING_GCC_VERSION >= 4006
114924 + if (rhs3 != NULL_TREE)
114925 + gimple_assign_set_rhs3(stmt, rhs3);
114926 +#endif
114927 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
114928 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
114929 +
114930 + gsi = gsi_for_stmt(oldstmt);
114931 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
114932 + update_stmt(stmt);
114933 + pointer_set_insert(visited, oldstmt);
114934 + return gimple_assign_lhs(stmt);
114935 +}
114936 +
114937 +static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
114938 +{
114939 + gimple assign;
114940 + gimple_stmt_iterator gsi;
114941 + basic_block first_bb;
114942 +
114943 + gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
114944 +
114945 + if (bb->index == 0) {
114946 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
114947 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
114948 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
114949 + bb = first_bb;
114950 + }
114951 +
114952 + gsi = gsi_after_labels(bb);
114953 + assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
114954 + set_stmt_flag(assign, MY_STMT);
114955 +
114956 + return gimple_assign_lhs(assign);
114957 +}
114958 +
114959 +static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg)
114960 +{
114961 + gimple_stmt_iterator gsi;
114962 + gimple assign, def_stmt = get_def_stmt(new_arg);
114963 +
114964 + if (gimple_code(def_stmt) == GIMPLE_PHI) {
114965 + gsi = gsi_after_labels(gimple_bb(def_stmt));
114966 + assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
114967 + } else {
114968 + gsi = gsi_for_stmt(def_stmt);
114969 + assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
114970 + }
114971 +
114972 + set_stmt_flag(assign, MY_STMT);
114973 + return gimple_assign_lhs(assign);
114974 +}
114975 +
114976 +static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type)
114977 +{
114978 + basic_block bb;
114979 + gimple_stmt_iterator gsi;
114980 + const_gimple def_stmt;
114981 + gimple assign;
114982 +
114983 + def_stmt = get_def_stmt(arg);
114984 + bb = gimple_bb(def_stmt);
114985 + gcc_assert(bb->index != 0);
114986 + gsi = gsi_after_labels(bb);
114987 +
114988 + assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
114989 + set_stmt_flag(assign, MY_STMT);
114990 + return gimple_assign_lhs(assign);
114991 +}
114992 +
114993 +static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
114994 +{
114995 + tree size_overflow_type;
114996 + tree arg;
114997 + const_gimple def_stmt;
114998 +
114999 + if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
115000 + return new_arg;
115001 +
115002 + arg = gimple_phi_arg_def(oldstmt, i);
115003 + def_stmt = get_def_stmt(arg);
115004 + gcc_assert(def_stmt != NULL);
115005 + size_overflow_type = get_size_overflow_type(oldstmt, arg);
115006 +
115007 + switch (gimple_code(def_stmt)) {
115008 + case GIMPLE_PHI:
115009 + return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type);
115010 + case GIMPLE_NOP: {
115011 + basic_block bb;
115012 +
115013 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
115014 + return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb);
115015 + }
115016 + case GIMPLE_ASM: {
115017 + gimple_stmt_iterator gsi;
115018 + gimple assign, stmt = get_def_stmt(arg);
115019 +
115020 + gsi = gsi_for_stmt(stmt);
115021 + assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
115022 + set_stmt_flag(assign, MY_STMT);
115023 + return gimple_assign_lhs(assign);
115024 + }
115025 + default:
115026 + gcc_assert(new_arg != NULL_TREE);
115027 + gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
115028 + return use_phi_ssa_name(ssa_name_var, new_arg);
115029 + }
115030 +}
115031 +
115032 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
115033 +{
115034 + basic_block bb;
115035 + gimple phi;
115036 + gimple_seq seq;
115037 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
115038 +
115039 + bb = gsi_bb(gsi);
115040 +
115041 + if (result == NULL_TREE) {
115042 + tree old_result = gimple_phi_result(oldstmt);
115043 + tree size_overflow_type = get_size_overflow_type(oldstmt, old_result);
115044 +
115045 + result = create_new_var(size_overflow_type);
115046 + }
115047 +
115048 + phi = create_phi_node(result, bb);
115049 + gimple_phi_set_result(phi, make_ssa_name(result, phi));
115050 + seq = phi_nodes(bb);
115051 + gsi = gsi_last(seq);
115052 + gsi_remove(&gsi, false);
115053 +
115054 + gsi = gsi_for_stmt(oldstmt);
115055 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
115056 + gimple_set_bb(phi, bb);
115057 + set_stmt_flag(phi, MY_STMT);
115058 + return phi;
115059 +}
115060 +
115061 +#if BUILDING_GCC_VERSION <= 4007
115062 +static tree create_new_phi_node(VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
115063 +#else
115064 +static tree create_new_phi_node(vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
115065 +#endif
115066 +{
115067 + gimple new_phi;
115068 + unsigned int i;
115069 + tree arg, result;
115070 + location_t loc = gimple_location(oldstmt);
115071 +
115072 +#if BUILDING_GCC_VERSION <= 4007
115073 + gcc_assert(!VEC_empty(tree, *args));
115074 +#else
115075 + gcc_assert(!args->is_empty());
115076 +#endif
115077 +
115078 + new_phi = overflow_create_phi_node(oldstmt, ssa_name_var);
115079 + result = gimple_phi_result(new_phi);
115080 + ssa_name_var = SSA_NAME_VAR(result);
115081 +
115082 +#if BUILDING_GCC_VERSION <= 4007
115083 + FOR_EACH_VEC_ELT(tree, *args, i, arg) {
115084 +#else
115085 + FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
115086 +#endif
115087 + arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i);
115088 + add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
115089 + }
115090 +
115091 +#if BUILDING_GCC_VERSION <= 4007
115092 + VEC_free(tree, heap, *args);
115093 +#else
115094 + vec_free(args);
115095 +#endif
115096 + update_stmt(new_phi);
115097 + return result;
115098 +}
115099 +
115100 +static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result)
115101 +{
115102 + tree ssa_name_var = NULL_TREE;
115103 +#if BUILDING_GCC_VERSION <= 4007
115104 + VEC(tree, heap) *args = NULL;
115105 +#else
115106 + vec<tree, va_heap, vl_embed> *args = NULL;
115107 +#endif
115108 + gimple oldstmt = get_def_stmt(orig_result);
115109 + unsigned int i, len = gimple_phi_num_args(oldstmt);
115110 +
115111 + pointer_set_insert(visited, oldstmt);
115112 + for (i = 0; i < len; i++) {
115113 + tree arg, new_arg;
115114 +
115115 + arg = gimple_phi_arg_def(oldstmt, i);
115116 + new_arg = expand(visited, caller_node, arg);
115117 +
115118 + if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
115119 + ssa_name_var = SSA_NAME_VAR(new_arg);
115120 +
115121 + if (is_gimple_constant(arg)) {
115122 + tree size_overflow_type = get_size_overflow_type(oldstmt, arg);
115123 +
115124 + new_arg = cast_a_tree(size_overflow_type, arg);
115125 + }
115126 +
115127 +#if BUILDING_GCC_VERSION <= 4007
115128 + VEC_safe_push(tree, heap, args, new_arg);
115129 +#else
115130 + vec_safe_push(args, new_arg);
115131 +#endif
115132 + }
115133 +
115134 +#if BUILDING_GCC_VERSION <= 4007
115135 + return create_new_phi_node(&args, ssa_name_var, oldstmt);
115136 +#else
115137 + return create_new_phi_node(args, ssa_name_var, oldstmt);
115138 +#endif
115139 +}
115140 +
115141 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
115142 +{
115143 + gimple assign;
115144 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
115145 + tree origtype = TREE_TYPE(orig_rhs);
115146 +
115147 + gcc_assert(is_gimple_assign(stmt));
115148 +
115149 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
115150 + set_stmt_flag(assign, MY_STMT);
115151 + return gimple_assign_lhs(assign);
115152 +}
115153 +
115154 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
115155 +{
115156 + const_tree rhs1, lhs, rhs1_type, lhs_type;
115157 + enum machine_mode lhs_mode, rhs_mode;
115158 + gimple def_stmt = get_def_stmt(no_const_rhs);
115159 +
115160 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
115161 + return false;
115162 +
115163 + rhs1 = gimple_assign_rhs1(def_stmt);
115164 + lhs = gimple_assign_lhs(def_stmt);
115165 + rhs1_type = TREE_TYPE(rhs1);
115166 + lhs_type = TREE_TYPE(lhs);
115167 + rhs_mode = TYPE_MODE(rhs1_type);
115168 + lhs_mode = TYPE_MODE(lhs_type);
115169 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
115170 + return false;
115171 +
115172 + return true;
115173 +}
115174 +
115175 +static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
115176 +{
115177 + tree rhs1 = gimple_assign_rhs1(stmt);
115178 + tree lhs = gimple_assign_lhs(stmt);
115179 + const_tree rhs1_type = TREE_TYPE(rhs1);
115180 + const_tree lhs_type = TREE_TYPE(lhs);
115181 +
115182 + if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
115183 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115184 +
115185 + return create_assign(visited, stmt, rhs1, AFTER_STMT);
115186 +}
115187 +
115188 +static bool no_uses(tree node)
115189 +{
115190 + imm_use_iterator imm_iter;
115191 + use_operand_p use_p;
115192 +
115193 + FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
115194 + const_gimple use_stmt = USE_STMT(use_p);
115195 +
115196 + if (use_stmt == NULL)
115197 + return true;
115198 + if (is_gimple_debug(use_stmt))
115199 + continue;
115200 + return false;
115201 + }
115202 + return true;
115203 +}
115204 +
115205 +// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
115206 +static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
115207 +{
115208 + tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
115209 + gimple def_stmt = get_def_stmt(lhs);
115210 +
115211 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
115212 + return false;
115213 +
115214 + rhs1 = gimple_assign_rhs1(def_stmt);
115215 + rhs_type = TREE_TYPE(rhs1);
115216 + lhs_type = TREE_TYPE(lhs);
115217 + if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
115218 + return false;
115219 + if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
115220 + return false;
115221 +
115222 + def_stmt = get_def_stmt(rhs1);
115223 + if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
115224 + return false;
115225 +
115226 + if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
115227 + return false;
115228 +
115229 + rhs1 = gimple_assign_rhs1(def_stmt);
115230 + rhs2 = gimple_assign_rhs2(def_stmt);
115231 + if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
115232 + return false;
115233 +
115234 + if (is_gimple_constant(rhs2))
115235 + not_const_rhs = rhs1;
115236 + else
115237 + not_const_rhs = rhs2;
115238 +
115239 + return no_uses(not_const_rhs);
115240 +}
115241 +
115242 +static bool skip_lhs_cast_check(const_gimple stmt)
115243 +{
115244 + const_tree rhs = gimple_assign_rhs1(stmt);
115245 + const_gimple def_stmt = get_def_stmt(rhs);
115246 +
115247 + // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
115248 + if (gimple_code(def_stmt) == GIMPLE_ASM)
115249 + return true;
115250 +
115251 + if (is_const_plus_unsigned_signed_truncation(rhs))
115252 + return true;
115253 +
115254 + return false;
115255 +}
115256 +
115257 +static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
115258 +{
115259 + bool cast_lhs, cast_rhs;
115260 + tree lhs = gimple_assign_lhs(stmt);
115261 + tree rhs = gimple_assign_rhs1(stmt);
115262 + const_tree lhs_type = TREE_TYPE(lhs);
115263 + const_tree rhs_type = TREE_TYPE(rhs);
115264 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
115265 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
115266 + unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
115267 + unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
115268 +
115269 + static bool check_lhs[3][4] = {
115270 + // ss su us uu
115271 + { false, true, true, false }, // lhs > rhs
115272 + { false, false, false, false }, // lhs = rhs
115273 + { true, true, true, true }, // lhs < rhs
115274 + };
115275 +
115276 + static bool check_rhs[3][4] = {
115277 + // ss su us uu
115278 + { true, false, true, true }, // lhs > rhs
115279 + { true, false, true, true }, // lhs = rhs
115280 + { true, false, true, true }, // lhs < rhs
115281 + };
115282 +
115283 + // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
115284 + if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
115285 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115286 +
115287 + if (lhs_size > rhs_size) {
115288 + cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
115289 + cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
115290 + } else if (lhs_size == rhs_size) {
115291 + cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
115292 + cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
115293 + } else {
115294 + cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
115295 + cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
115296 + }
115297 +
115298 + if (!cast_lhs && !cast_rhs)
115299 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
115300 +
115301 + if (cast_lhs && !skip_lhs_cast_check(stmt))
115302 + check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
115303 +
115304 + if (cast_rhs)
115305 + check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
115306 +
115307 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
115308 +}
115309 +
115310 +static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
115311 +{
115312 + tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
115313 +
115314 + if (get_stmt_flag(stmt) == MY_STMT)
115315 + return lhs;
115316 +
115317 + rhs1 = gimple_assign_rhs1(stmt);
115318 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
115319 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115320 +
115321 + new_rhs1 = expand(visited, caller_node, rhs1);
115322 +
115323 + if (new_rhs1 == NULL_TREE)
115324 + return create_cast_assign(visited, stmt);
115325 +
115326 + if (get_stmt_flag(stmt) == NO_CAST_CHECK)
115327 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
115328 +
115329 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
115330 + tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
115331 +
115332 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
115333 + check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
115334 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115335 + }
115336 +
115337 + if (!gimple_assign_cast_p(stmt))
115338 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
115339 +
115340 + return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
115341 +}
115342 +
115343 +static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
115344 +{
115345 + tree rhs1, lhs = gimple_assign_lhs(stmt);
115346 + gimple def_stmt = get_def_stmt(lhs);
115347 +
115348 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
115349 + rhs1 = gimple_assign_rhs1(def_stmt);
115350 +
115351 + if (is_gimple_constant(rhs1))
115352 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
115353 +
115354 + switch (TREE_CODE(rhs1)) {
115355 + case SSA_NAME:
115356 + return handle_unary_rhs(visited, caller_node, def_stmt);
115357 + case ARRAY_REF:
115358 + case BIT_FIELD_REF:
115359 + case ADDR_EXPR:
115360 + case COMPONENT_REF:
115361 + case INDIRECT_REF:
115362 +#if BUILDING_GCC_VERSION >= 4006
115363 + case MEM_REF:
115364 +#endif
115365 + case TARGET_MEM_REF:
115366 + case VIEW_CONVERT_EXPR:
115367 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
115368 + case PARM_DECL:
115369 + case VAR_DECL:
115370 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115371 +
115372 + default:
115373 + debug_gimple_stmt(def_stmt);
115374 + debug_tree(rhs1);
115375 + gcc_unreachable();
115376 + }
115377 +}
115378 +
115379 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
115380 +{
115381 + gimple cond_stmt;
115382 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
115383 +
115384 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
115385 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
115386 + update_stmt(cond_stmt);
115387 +}
115388 +
115389 +static tree create_string_param(tree string)
115390 +{
115391 + tree i_type, a_type;
115392 + const int length = TREE_STRING_LENGTH(string);
115393 +
115394 + gcc_assert(length > 0);
115395 +
115396 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
115397 + a_type = build_array_type(char_type_node, i_type);
115398 +
115399 + TREE_TYPE(string) = a_type;
115400 + TREE_CONSTANT(string) = 1;
115401 + TREE_READONLY(string) = 1;
115402 +
115403 + return build1(ADDR_EXPR, ptr_type_node, string);
115404 +}
115405 +
115406 +static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
115407 +{
115408 + gimple func_stmt;
115409 + const_gimple def_stmt;
115410 + const_tree loc_line;
115411 + tree loc_file, ssa_name, current_func;
115412 + expanded_location xloc;
115413 + char *ssa_name_buf;
115414 + int len;
115415 + struct cgraph_edge *edge;
115416 + struct cgraph_node *callee_node;
115417 + int frequency;
115418 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
115419 +
115420 + def_stmt = get_def_stmt(arg);
115421 + xloc = expand_location(gimple_location(def_stmt));
115422 +
115423 + if (!gimple_has_location(def_stmt)) {
115424 + xloc = expand_location(gimple_location(stmt));
115425 + if (!gimple_has_location(stmt))
115426 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
115427 + }
115428 +
115429 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
115430 +
115431 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
115432 + loc_file = create_string_param(loc_file);
115433 +
115434 + current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
115435 + current_func = create_string_param(current_func);
115436 +
115437 + gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
115438 + call_count++;
115439 + len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
115440 + gcc_assert(len > 0);
115441 + ssa_name = build_string(len + 1, ssa_name_buf);
115442 + free(ssa_name_buf);
115443 + ssa_name = create_string_param(ssa_name);
115444 +
115445 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
115446 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
115447 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
115448 +
115449 + callee_node = cgraph_get_create_node(report_size_overflow_decl);
115450 + frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
115451 +
115452 + edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
115453 + gcc_assert(edge != NULL);
115454 +}
115455 +
115456 +static void __unused print_the_code_insertions(const_gimple stmt)
115457 +{
115458 + location_t loc = gimple_location(stmt);
115459 +
115460 + inform(loc, "Integer size_overflow check applied here.");
115461 +}
115462 +
115463 +static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
115464 +{
115465 + basic_block cond_bb, join_bb, bb_true;
115466 + edge e;
115467 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
115468 +
115469 + cond_bb = gimple_bb(stmt);
115470 + if (before)
115471 + gsi_prev(&gsi);
115472 + if (gsi_end_p(gsi))
115473 + e = split_block_after_labels(cond_bb);
115474 + else
115475 + e = split_block(cond_bb, gsi_stmt(gsi));
115476 + cond_bb = e->src;
115477 + join_bb = e->dest;
115478 + e->flags = EDGE_FALSE_VALUE;
115479 + e->probability = REG_BR_PROB_BASE;
115480 +
115481 + bb_true = create_empty_bb(cond_bb);
115482 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
115483 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
115484 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
115485 +
115486 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
115487 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
115488 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
115489 +
115490 + if (current_loops != NULL) {
115491 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
115492 + add_bb_to_loop(bb_true, cond_bb->loop_father);
115493 + }
115494 +
115495 + insert_cond(cond_bb, arg, cond_code, type_value);
115496 + insert_cond_result(caller_node, bb_true, stmt, arg, min);
115497 +
115498 +// print_the_code_insertions(stmt);
115499 +}
115500 +
115501 +static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
115502 +{
115503 + const_tree rhs_type = TREE_TYPE(rhs);
115504 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
115505 +
115506 + gcc_assert(rhs_type != NULL_TREE);
115507 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
115508 + return;
115509 +
115510 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
115511 +
115512 + if (is_const_plus_unsigned_signed_truncation(rhs))
115513 + return;
115514 +
115515 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
115516 + // typemax (-1) < typemin (0)
115517 + if (TREE_OVERFLOW(type_max))
115518 + return;
115519 +
115520 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
115521 +
115522 + cast_rhs_type = TREE_TYPE(cast_rhs);
115523 + type_max_type = TREE_TYPE(type_max);
115524 + gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
115525 +
115526 + insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
115527 +
115528 + // special case: get_size_overflow_type(), 32, u64->s
115529 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
115530 + return;
115531 +
115532 + type_min_type = TREE_TYPE(type_min);
115533 + gcc_assert(types_compatible_p(type_max_type, type_min_type));
115534 + insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
115535 +}
115536 +
115537 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
115538 +{
115539 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
115540 + return false;
115541 + if (!is_gimple_constant(rhs))
115542 + return false;
115543 + return true;
115544 +}
115545 +
115546 +static tree get_def_stmt_rhs(const_tree var)
115547 +{
115548 + tree rhs1, def_stmt_rhs1;
115549 + gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
115550 +
115551 + def_stmt = get_def_stmt(var);
115552 + if (!gimple_assign_cast_p(def_stmt))
115553 + return NULL_TREE;
115554 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && get_stmt_flag(def_stmt) == MY_STMT && gimple_assign_cast_p(def_stmt));
115555 +
115556 + rhs1 = gimple_assign_rhs1(def_stmt);
115557 + rhs1_def_stmt = get_def_stmt(rhs1);
115558 + if (!gimple_assign_cast_p(rhs1_def_stmt))
115559 + return rhs1;
115560 +
115561 + def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
115562 + def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
115563 +
115564 + switch (gimple_code(def_stmt_rhs1_def_stmt)) {
115565 + case GIMPLE_CALL:
115566 + case GIMPLE_NOP:
115567 + case GIMPLE_ASM:
115568 + case GIMPLE_PHI:
115569 + return def_stmt_rhs1;
115570 + case GIMPLE_ASSIGN:
115571 + return rhs1;
115572 + default:
115573 + debug_gimple_stmt(def_stmt_rhs1_def_stmt);
115574 + gcc_unreachable();
115575 + }
115576 +}
115577 +
115578 +static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
115579 +{
115580 + tree new_rhs, orig_rhs;
115581 + void (*gimple_assign_set_rhs)(gimple, tree);
115582 + tree rhs1 = gimple_assign_rhs1(stmt);
115583 + tree rhs2 = gimple_assign_rhs2(stmt);
115584 + tree lhs = gimple_assign_lhs(stmt);
115585 +
115586 + if (!check_overflow)
115587 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115588 +
115589 + if (change_rhs == NULL_TREE)
115590 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115591 +
115592 + if (new_rhs2 == NULL_TREE) {
115593 + orig_rhs = rhs1;
115594 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
115595 + } else {
115596 + orig_rhs = rhs2;
115597 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
115598 + }
115599 +
115600 + check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
115601 +
115602 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
115603 + gimple_assign_set_rhs(stmt, new_rhs);
115604 + update_stmt(stmt);
115605 +
115606 + return create_assign(visited, stmt, lhs, AFTER_STMT);
115607 +}
115608 +
115609 +static bool is_subtraction_special(const_gimple stmt)
115610 +{
115611 + gimple rhs1_def_stmt, rhs2_def_stmt;
115612 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
115613 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
115614 + const_tree rhs1 = gimple_assign_rhs1(stmt);
115615 + const_tree rhs2 = gimple_assign_rhs2(stmt);
115616 +
115617 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
115618 + return false;
115619 +
115620 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
115621 +
115622 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
115623 + return false;
115624 +
115625 + rhs1_def_stmt = get_def_stmt(rhs1);
115626 + rhs2_def_stmt = get_def_stmt(rhs2);
115627 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
115628 + return false;
115629 +
115630 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
115631 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
115632 + rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
115633 + rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
115634 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
115635 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
115636 + rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
115637 + rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
115638 + if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
115639 + return false;
115640 + if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
115641 + return false;
115642 +
115643 + set_stmt_flag(rhs1_def_stmt, NO_CAST_CHECK);
115644 + set_stmt_flag(rhs2_def_stmt, NO_CAST_CHECK);
115645 + return true;
115646 +}
115647 +
115648 +static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs)
115649 +{
115650 + tree new_rhs1, new_rhs2;
115651 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
115652 + gimple assign, stmt = get_def_stmt(lhs);
115653 + tree rhs1 = gimple_assign_rhs1(stmt);
115654 + tree rhs2 = gimple_assign_rhs2(stmt);
115655 +
115656 + if (!is_subtraction_special(stmt))
115657 + return NULL_TREE;
115658 +
115659 + new_rhs1 = expand(visited, caller_node, rhs1);
115660 + new_rhs2 = expand(visited, caller_node, rhs2);
115661 +
115662 + new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
115663 + new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
115664 +
115665 + if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
115666 + return NULL_TREE;
115667 +
115668 + if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
115669 + new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
115670 + new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
115671 + }
115672 +
115673 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
115674 + new_lhs = gimple_assign_lhs(assign);
115675 + check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
115676 +
115677 + return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
115678 +}
115679 +
115680 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
115681 +{
115682 + const_gimple def_stmt;
115683 +
115684 + if (TREE_CODE(rhs) != SSA_NAME)
115685 + return false;
115686 +
115687 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
115688 + return false;
115689 +
115690 + def_stmt = get_def_stmt(rhs);
115691 + if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
115692 + return false;
115693 +
115694 + return true;
115695 +}
115696 +
115697 +static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
115698 +{
115699 + tree rhs1, rhs2, new_lhs;
115700 + gimple def_stmt = get_def_stmt(lhs);
115701 + tree new_rhs1 = NULL_TREE;
115702 + tree new_rhs2 = NULL_TREE;
115703 +
115704 + rhs1 = gimple_assign_rhs1(def_stmt);
115705 + rhs2 = gimple_assign_rhs2(def_stmt);
115706 +
115707 + /* no DImode/TImode division in the 32/64 bit kernel */
115708 + switch (gimple_assign_rhs_code(def_stmt)) {
115709 + case RDIV_EXPR:
115710 + case TRUNC_DIV_EXPR:
115711 + case CEIL_DIV_EXPR:
115712 + case FLOOR_DIV_EXPR:
115713 + case ROUND_DIV_EXPR:
115714 + case TRUNC_MOD_EXPR:
115715 + case CEIL_MOD_EXPR:
115716 + case FLOOR_MOD_EXPR:
115717 + case ROUND_MOD_EXPR:
115718 + case EXACT_DIV_EXPR:
115719 + case POINTER_PLUS_EXPR:
115720 + case BIT_AND_EXPR:
115721 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
115722 + default:
115723 + break;
115724 + }
115725 +
115726 + new_lhs = handle_integer_truncation(visited, caller_node, lhs);
115727 + if (new_lhs != NULL_TREE)
115728 + return new_lhs;
115729 +
115730 + if (TREE_CODE(rhs1) == SSA_NAME)
115731 + new_rhs1 = expand(visited, caller_node, rhs1);
115732 + if (TREE_CODE(rhs2) == SSA_NAME)
115733 + new_rhs2 = expand(visited, caller_node, rhs2);
115734 +
115735 + if (is_a_neg_overflow(def_stmt, rhs2))
115736 + return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
115737 + if (is_a_neg_overflow(def_stmt, rhs1))
115738 + return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
115739 +
115740 +
115741 + if (is_a_constant_overflow(def_stmt, rhs2))
115742 + return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
115743 + if (is_a_constant_overflow(def_stmt, rhs1))
115744 + return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
115745 +
115746 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
115747 +}
115748 +
115749 +#if BUILDING_GCC_VERSION >= 4006
115750 +static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
115751 +{
115752 + if (is_gimple_constant(rhs))
115753 + return cast_a_tree(size_overflow_type, rhs);
115754 + if (TREE_CODE(rhs) != SSA_NAME)
115755 + return NULL_TREE;
115756 + return expand(visited, caller_node, rhs);
115757 +}
115758 +
115759 +static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
115760 +{
115761 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
115762 + gimple def_stmt = get_def_stmt(lhs);
115763 +
115764 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
115765 +
115766 + rhs1 = gimple_assign_rhs1(def_stmt);
115767 + rhs2 = gimple_assign_rhs2(def_stmt);
115768 + rhs3 = gimple_assign_rhs3(def_stmt);
115769 + new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
115770 + new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
115771 + new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
115772 +
115773 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
115774 +}
115775 +#endif
115776 +
115777 +static tree get_size_overflow_type(gimple stmt, const_tree node)
115778 +{
115779 + const_tree type;
115780 + tree new_type;
115781 +
115782 + gcc_assert(node != NULL_TREE);
115783 +
115784 + type = TREE_TYPE(node);
115785 +
115786 + if (get_stmt_flag(stmt) == MY_STMT)
115787 + return TREE_TYPE(node);
115788 +
115789 + switch (TYPE_MODE(type)) {
115790 + case QImode:
115791 + new_type = intHI_type_node;
115792 + break;
115793 + case HImode:
115794 + new_type = intSI_type_node;
115795 + break;
115796 + case SImode:
115797 + new_type = intDI_type_node;
115798 + break;
115799 + case DImode:
115800 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
115801 + new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
115802 + else
115803 + new_type = intTI_type_node;
115804 + break;
115805 + case TImode:
115806 + gcc_assert(!TYPE_UNSIGNED(type));
115807 + new_type = intTI_type_node;
115808 + break;
115809 + default:
115810 + debug_tree((tree)node);
115811 + error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
115812 + gcc_unreachable();
115813 + }
115814 +
115815 + if (TYPE_QUALS(type) != 0)
115816 + return build_qualified_type(new_type, TYPE_QUALS(type));
115817 + return new_type;
115818 +}
115819 +
115820 +static tree expand_visited(gimple def_stmt)
115821 +{
115822 + const_gimple next_stmt;
115823 + gimple_stmt_iterator gsi;
115824 + enum gimple_code code = gimple_code(def_stmt);
115825 +
115826 + if (code == GIMPLE_ASM)
115827 + return NULL_TREE;
115828 +
115829 + gsi = gsi_for_stmt(def_stmt);
115830 + gsi_next(&gsi);
115831 +
115832 + if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
115833 + return NULL_TREE;
115834 + gcc_assert(!gsi_end_p(gsi));
115835 + next_stmt = gsi_stmt(gsi);
115836 +
115837 + if (gimple_code(def_stmt) == GIMPLE_PHI && get_stmt_flag((gimple)next_stmt) != MY_STMT)
115838 + return NULL_TREE;
115839 + gcc_assert(get_stmt_flag((gimple)next_stmt) == MY_STMT);
115840 +
115841 + return get_lhs(next_stmt);
115842 +}
115843 +
115844 +static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
115845 +{
115846 + gimple def_stmt;
115847 +
115848 + def_stmt = get_def_stmt(lhs);
115849 +
115850 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
115851 + return NULL_TREE;
115852 +
115853 + if (get_stmt_flag(def_stmt) == MY_STMT)
115854 + return lhs;
115855 +
115856 + if (pointer_set_contains(visited, def_stmt))
115857 + return expand_visited(def_stmt);
115858 +
115859 + switch (gimple_code(def_stmt)) {
115860 + case GIMPLE_PHI:
115861 + return handle_phi(visited, caller_node, lhs);
115862 + case GIMPLE_CALL:
115863 + case GIMPLE_ASM:
115864 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
115865 + case GIMPLE_ASSIGN:
115866 + switch (gimple_num_ops(def_stmt)) {
115867 + case 2:
115868 + return handle_unary_ops(visited, caller_node, def_stmt);
115869 + case 3:
115870 + return handle_binary_ops(visited, caller_node, lhs);
115871 +#if BUILDING_GCC_VERSION >= 4006
115872 + case 4:
115873 + return handle_ternary_ops(visited, caller_node, lhs);
115874 +#endif
115875 + }
115876 + default:
115877 + debug_gimple_stmt(def_stmt);
115878 + error("%s: unknown gimple code", __func__);
115879 + gcc_unreachable();
115880 + }
115881 +}
115882 +
115883 +static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
115884 +{
115885 + const_gimple assign;
115886 + tree orig_type = TREE_TYPE(orig_node);
115887 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
115888 +
115889 + assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
115890 + return gimple_assign_lhs(assign);
115891 +}
115892 +
115893 +static void change_orig_node(struct interesting_node *cur_node, tree new_node)
115894 +{
115895 + void (*set_rhs)(gimple, tree);
115896 + gimple stmt = cur_node->first_stmt;
115897 + const_tree orig_node = cur_node->node;
115898 +
115899 + switch (gimple_code(stmt)) {
115900 + case GIMPLE_RETURN:
115901 + gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
115902 + break;
115903 + case GIMPLE_CALL:
115904 + gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
115905 + break;
115906 + case GIMPLE_ASSIGN:
115907 + switch (cur_node->num) {
115908 + case 1:
115909 + set_rhs = &gimple_assign_set_rhs1;
115910 + break;
115911 + case 2:
115912 + set_rhs = &gimple_assign_set_rhs2;
115913 + break;
115914 +#if BUILDING_GCC_VERSION >= 4006
115915 + case 3:
115916 + set_rhs = &gimple_assign_set_rhs3;
115917 + break;
115918 +#endif
115919 + default:
115920 + gcc_unreachable();
115921 + }
115922 +
115923 + set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
115924 + break;
115925 + default:
115926 + debug_gimple_stmt(stmt);
115927 + gcc_unreachable();
115928 + }
115929 +
115930 + update_stmt(stmt);
115931 +}
115932 +
115933 +static unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
115934 +{
115935 + const struct size_overflow_hash *hash;
115936 + unsigned int new_argnum;
115937 + tree arg;
115938 + const_tree origarg;
115939 +
115940 + if (argnum == 0)
115941 + return argnum;
115942 +
115943 + hash = get_function_hash(fndecl);
115944 + if (hash && hash->param & (1U << argnum))
115945 + return argnum;
115946 +
115947 + if (DECL_EXTERNAL(fndecl))
115948 + return argnum;
115949 +
115950 + origarg = DECL_ARGUMENTS(DECL_ORIGIN(fndecl));
115951 + argnum--;
115952 + while (origarg && argnum) {
115953 + origarg = TREE_CHAIN(origarg);
115954 + argnum--;
115955 + }
115956 + gcc_assert(argnum == 0);
115957 + gcc_assert(origarg != NULL_TREE);
115958 +
115959 + for (arg = DECL_ARGUMENTS(fndecl), new_argnum = 1; arg; arg = TREE_CHAIN(arg), new_argnum++)
115960 + if (operand_equal_p(origarg, arg, 0) || !strcmp(DECL_NAME_POINTER(origarg), DECL_NAME_POINTER(arg)))
115961 + return new_argnum;
115962 +
115963 + return CANNOT_FIND_ARG;
115964 +}
115965 +
115966 +// Don't want to duplicate entries in next_cgraph_node
115967 +static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
115968 +{
115969 + const_tree new_callee_fndecl;
115970 + struct next_cgraph_node *cur_node;
115971 +
115972 + if (fndecl == RET_CHECK)
115973 + new_callee_fndecl = NODE_DECL(node);
115974 + else
115975 + new_callee_fndecl = fndecl;
115976 +
115977 + for (cur_node = head; cur_node; cur_node = cur_node->next) {
115978 + if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
115979 + continue;
115980 + if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
115981 + continue;
115982 + if (num == cur_node->num)
115983 + return true;
115984 + }
115985 + return false;
115986 +}
115987 +
115988 +/* Add a next_cgraph_node into the list for handle_function().
115989 + * handle_function() iterates over all the next cgraph nodes and
115990 + * starts the overflow check insertion process.
115991 + */
115992 +static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
115993 +{
115994 + struct next_cgraph_node *new_node;
115995 +
115996 + if (is_in_next_cgraph_node(head, node, fndecl, num))
115997 + return head;
115998 +
115999 + new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
116000 + new_node->current_function = node;
116001 + new_node->next = NULL;
116002 + new_node->num = num;
116003 + if (fndecl == RET_CHECK)
116004 + new_node->callee_fndecl = NODE_DECL(node);
116005 + else
116006 + new_node->callee_fndecl = fndecl;
116007 +
116008 + if (!head)
116009 + return new_node;
116010 +
116011 + new_node->next = head;
116012 + return new_node;
116013 +}
116014 +
116015 +static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
116016 +{
116017 + struct cgraph_edge *e;
116018 +
116019 + if (num == 0)
116020 + return create_new_next_cgraph_node(head, node, RET_CHECK, num);
116021 +
116022 + for (e = node->callers; e; e = e->next_caller) {
116023 + tree fndecl = gimple_call_fndecl(e->call_stmt);
116024 +
116025 + gcc_assert(fndecl != NULL_TREE);
116026 + head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
116027 + }
116028 +
116029 + return head;
116030 +}
116031 +
116032 +static bool is_a_return_check(const_tree node)
116033 +{
116034 + if (TREE_CODE(node) == FUNCTION_DECL)
116035 + return true;
116036 +
116037 + gcc_assert(TREE_CODE(node) == PARM_DECL);
116038 + return false;
116039 +}
116040 +
116041 +static bool is_in_hash_table(const_tree fndecl, unsigned int num)
116042 +{
116043 + const struct size_overflow_hash *hash;
116044 +
116045 + hash = get_function_hash(fndecl);
116046 + if (hash && (hash->param & (1U << num)))
116047 + return true;
116048 + return false;
116049 +}
116050 +
116051 +struct missing_functions {
116052 + struct missing_functions *next;
116053 + const_tree node;
116054 + tree fndecl;
116055 +};
116056 +
116057 +static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
116058 +{
116059 + struct missing_functions *new_function;
116060 +
116061 + new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
116062 + new_function->node = node;
116063 + new_function->next = NULL;
116064 +
116065 + if (TREE_CODE(node) == FUNCTION_DECL)
116066 + new_function->fndecl = node;
116067 + else
116068 + new_function->fndecl = current_function_decl;
116069 + gcc_assert(new_function->fndecl);
116070 +
116071 + if (!missing_fn_head)
116072 + return new_function;
116073 +
116074 + new_function->next = missing_fn_head;
116075 + return new_function;
116076 +}
116077 +
116078 +/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table.
116079 + * If the function is missing everywhere then print the missing message into stderr.
116080 + */
116081 +static bool is_missing_function(const_tree orig_fndecl, unsigned int num)
116082 +{
116083 + switch (DECL_FUNCTION_CODE(orig_fndecl)) {
116084 +#if BUILDING_GCC_VERSION >= 4008
116085 + case BUILT_IN_BSWAP16:
116086 +#endif
116087 + case BUILT_IN_BSWAP32:
116088 + case BUILT_IN_BSWAP64:
116089 + case BUILT_IN_EXPECT:
116090 + case BUILT_IN_MEMCMP:
116091 + return false;
116092 + default:
116093 + break;
116094 + }
116095 +
116096 + // skip test.c
116097 + if (strcmp(DECL_NAME_POINTER(current_function_decl), "coolmalloc")) {
116098 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(orig_fndecl)))
116099 + warning(0, "unnecessary size_overflow attribute on: %s\n", DECL_NAME_POINTER(orig_fndecl));
116100 + }
116101 +
116102 + if (is_in_hash_table(orig_fndecl, num))
116103 + return false;
116104 +
116105 + print_missing_msg(orig_fndecl, num);
116106 + return true;
116107 +}
116108 +
116109 +// Get the argnum of a function decl, if node is a return then the argnum is 0
116110 +static unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
116111 +{
116112 + if (is_a_return_check(node))
116113 + return 0;
116114 + else
116115 + return find_arg_number_tree(node, orig_fndecl);
116116 +}
116117 +
116118 +/* If the function is missing from the hash table and it is a static function
116119 + * then create a next_cgraph_node from it for handle_function()
116120 + */
116121 +static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
116122 +{
116123 + unsigned int num;
116124 + const_tree orig_fndecl;
116125 + struct cgraph_node *next_node = NULL;
116126 +
116127 + orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
116128 +
116129 + num = get_function_num(missing_fn_head->node, orig_fndecl);
116130 + if (num == CANNOT_FIND_ARG)
116131 + return cnodes;
116132 +
116133 + if (!is_missing_function(orig_fndecl, num))
116134 + return cnodes;
116135 +
116136 + next_node = cgraph_get_node(missing_fn_head->fndecl);
116137 + if (next_node && next_node->local.local)
116138 + cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
116139 + return cnodes;
116140 +}
116141 +
116142 +/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
116143 + * into the next_cgraph_node list. They will be the next interesting returns or callees.
116144 + */
116145 +static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
116146 +{
116147 + unsigned int i;
116148 + tree node;
116149 + struct missing_functions *cur, *missing_fn_head = NULL;
116150 +
116151 +#if BUILDING_GCC_VERSION <= 4007
116152 + FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
116153 +#else
116154 + FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
116155 +#endif
116156 + switch (TREE_CODE(node)) {
116157 + case PARM_DECL:
116158 + if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
116159 + break;
116160 + case FUNCTION_DECL:
116161 + missing_fn_head = create_new_missing_function(missing_fn_head, node);
116162 + break;
116163 + default:
116164 + break;
116165 + }
116166 + }
116167 +
116168 + while (missing_fn_head) {
116169 + cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
116170 +
116171 + cur = missing_fn_head->next;
116172 + free(missing_fn_head);
116173 + missing_fn_head = cur;
116174 + }
116175 +
116176 + return cnodes;
116177 +}
116178 +
116179 +static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
116180 +{
116181 + gimple phi = get_def_stmt(result);
116182 + unsigned int i, n = gimple_phi_num_args(phi);
116183 +
116184 + pointer_set_insert(visited, phi);
116185 + for (i = 0; i < n; i++) {
116186 + const_tree arg = gimple_phi_arg_def(phi, i);
116187 +
116188 + set_conditions(visited, interesting_conditions, arg);
116189 + }
116190 +}
116191 +
116192 +enum conditions {
116193 + FROM_CONST, NOT_UNARY, CAST
116194 +};
116195 +
116196 +// Search for constants, cast assignments and binary/ternary assignments
116197 +static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
116198 +{
116199 + gimple def_stmt = get_def_stmt(lhs);
116200 +
116201 + if (is_gimple_constant(lhs)) {
116202 + interesting_conditions[FROM_CONST] = true;
116203 + return;
116204 + }
116205 +
116206 + if (!def_stmt)
116207 + return;
116208 +
116209 + if (pointer_set_contains(visited, def_stmt))
116210 + return;
116211 +
116212 + switch (gimple_code(def_stmt)) {
116213 + case GIMPLE_NOP:
116214 + case GIMPLE_CALL:
116215 + case GIMPLE_ASM:
116216 + return;
116217 + case GIMPLE_PHI:
116218 + return walk_phi_set_conditions(visited, interesting_conditions, lhs);
116219 + case GIMPLE_ASSIGN:
116220 + if (gimple_num_ops(def_stmt) == 2) {
116221 + const_tree rhs = gimple_assign_rhs1(def_stmt);
116222 +
116223 + if (gimple_assign_cast_p(def_stmt))
116224 + interesting_conditions[CAST] = true;
116225 +
116226 + return set_conditions(visited, interesting_conditions, rhs);
116227 + } else {
116228 + interesting_conditions[NOT_UNARY] = true;
116229 + return;
116230 + }
116231 + default:
116232 + debug_gimple_stmt(def_stmt);
116233 + gcc_unreachable();
116234 + }
116235 +}
116236 +
116237 +// determine whether duplication will be necessary or not.
116238 +static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
116239 +{
116240 + struct pointer_set_t *visited;
116241 +
116242 + if (gimple_assign_cast_p(cur_node->first_stmt))
116243 + interesting_conditions[CAST] = true;
116244 + else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
116245 + interesting_conditions[NOT_UNARY] = true;
116246 +
116247 + visited = pointer_set_create();
116248 + set_conditions(visited, interesting_conditions, cur_node->node);
116249 + pointer_set_destroy(visited);
116250 +}
116251 +
116252 +// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
116253 +static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
116254 +{
116255 + gimple assign;
116256 + gimple_stmt_iterator gsi;
116257 +
116258 + // already removed
116259 + if (gimple_bb(asm_stmt) == NULL)
116260 + return;
116261 + gsi = gsi_for_stmt(asm_stmt);
116262 +
116263 + assign = gimple_build_assign(lhs, rhs);
116264 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
116265 + SSA_NAME_DEF_STMT(lhs) = assign;
116266 +
116267 + gsi_remove(&gsi, true);
116268 +}
116269 +
116270 +// Get the field decl of a component ref for intentional_overflow checking
116271 +static const_tree search_field_decl(const_tree comp_ref)
116272 +{
116273 + const_tree field = NULL_TREE;
116274 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
116275 +
116276 + for (i = 0; i < len; i++) {
116277 + field = TREE_OPERAND(comp_ref, i);
116278 + if (TREE_CODE(field) == FIELD_DECL)
116279 + break;
116280 + }
116281 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
116282 + return field;
116283 +}
116284 +
116285 +/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting
116286 + * stmt is a return otherwise it is the callee function.
116287 + */
116288 +static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
116289 +{
116290 + const_tree fndecl;
116291 +
116292 + if (argnum == 0)
116293 + fndecl = current_function_decl;
116294 + else
116295 + fndecl = gimple_call_fndecl(stmt);
116296 +
116297 + if (fndecl == NULL_TREE)
116298 + return NULL_TREE;
116299 +
116300 + return DECL_ORIGIN(fndecl);
116301 +}
116302 +
116303 +/* Get the param of the intentional_overflow attribute.
116304 + * * 0: MARK_NOT_INTENTIONAL
116305 + * * 1..MAX_PARAM: MARK_YES
116306 + * * -1: MARK_TURN_OFF
116307 + */
116308 +static tree get_attribute_param(const_tree decl)
116309 +{
116310 + const_tree attr;
116311 +
116312 + if (decl == NULL_TREE)
116313 + return NULL_TREE;
116314 +
116315 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
116316 + if (!attr || !TREE_VALUE(attr))
116317 + return NULL_TREE;
116318 +
116319 + return TREE_VALUE(attr);
116320 +}
116321 +
116322 +// MARK_TURN_OFF
116323 +static bool is_turn_off_intentional_attr(const_tree decl)
116324 +{
116325 + const_tree param_head;
116326 +
116327 + param_head = get_attribute_param(decl);
116328 + if (param_head == NULL_TREE)
116329 + return false;
116330 +
116331 + if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
116332 + return true;
116333 + return false;
116334 +}
116335 +
116336 +// MARK_NOT_INTENTIONAL
116337 +static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
116338 +{
116339 + const_tree param_head;
116340 +
116341 + if (argnum == 0)
116342 + return false;
116343 +
116344 + param_head = get_attribute_param(decl);
116345 + if (param_head == NULL_TREE)
116346 + return false;
116347 +
116348 + if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
116349 + return true;
116350 + return false;
116351 +}
116352 +
116353 +// MARK_YES
116354 +static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
116355 +{
116356 + tree param, param_head;
116357 +
116358 + if (argnum == 0)
116359 + return false;
116360 +
116361 + param_head = get_attribute_param(decl);
116362 + for (param = param_head; param; param = TREE_CHAIN(param))
116363 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
116364 + return true;
116365 + return false;
116366 +}
116367 +
116368 +static const char *get_asm_string(const_gimple stmt)
116369 +{
116370 + if (!stmt)
116371 + return NULL;
116372 + if (gimple_code(stmt) != GIMPLE_ASM)
116373 + return NULL;
116374 +
116375 + return gimple_asm_string(stmt);
116376 +}
116377 +
116378 +static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
116379 +{
116380 + const char *str;
116381 +
116382 + str = get_asm_string(stmt);
116383 + if (!str)
116384 + return false;
116385 + return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
116386 +}
116387 +
116388 +static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
116389 +{
116390 + const char *str;
116391 +
116392 + str = get_asm_string(stmt);
116393 + if (!str)
116394 + return false;
116395 + return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
116396 +}
116397 +
116398 +static bool is_size_overflow_asm(const_gimple stmt)
116399 +{
116400 + const char *str;
116401 +
116402 + str = get_asm_string(stmt);
116403 + if (!str)
116404 + return false;
116405 + return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
116406 +}
116407 +
116408 +static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
116409 +{
116410 + location_t loc;
116411 +
116412 + if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
116413 + return;
116414 +
116415 + if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
116416 + return;
116417 +
116418 + loc = DECL_SOURCE_LOCATION(decl);
116419 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
116420 +}
116421 +
116422 +/* Get the type of the intentional_overflow attribute of a node
116423 + * * MARK_TURN_OFF
116424 + * * MARK_YES
116425 + * * MARK_NO
116426 + * * MARK_NOT_INTENTIONAL
116427 + */
116428 +static enum mark get_intentional_attr_type(const_tree node)
116429 +{
116430 + const_tree cur_decl;
116431 +
116432 + if (node == NULL_TREE)
116433 + return MARK_NO;
116434 +
116435 + switch (TREE_CODE(node)) {
116436 + case COMPONENT_REF:
116437 + cur_decl = search_field_decl(node);
116438 + if (is_turn_off_intentional_attr(cur_decl))
116439 + return MARK_TURN_OFF;
116440 + if (is_end_intentional_intentional_attr(cur_decl, 1))
116441 + return MARK_YES;
116442 + break;
116443 + case PARM_DECL: {
116444 + unsigned int argnum;
116445 +
116446 + cur_decl = DECL_ORIGIN(current_function_decl);
116447 + argnum = find_arg_number_tree(node, cur_decl);
116448 + if (argnum == CANNOT_FIND_ARG)
116449 + return MARK_NO;
116450 + if (is_yes_intentional_attr(cur_decl, argnum))
116451 + return MARK_YES;
116452 + if (is_end_intentional_intentional_attr(cur_decl, argnum))
116453 + return MARK_NOT_INTENTIONAL;
116454 + break;
116455 + }
116456 + case FUNCTION_DECL:
116457 + if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
116458 + return MARK_TURN_OFF;
116459 + break;
116460 + default:
116461 + break;
116462 + }
116463 + return MARK_NO;
116464 +}
116465 +
116466 +// Search for the intentional_overflow attribute on the last nodes
116467 +static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
116468 +{
116469 + unsigned int i;
116470 + tree last_node;
116471 + enum mark mark = MARK_NO;
116472 +
116473 +#if BUILDING_GCC_VERSION <= 4007
116474 + FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
116475 +#else
116476 + FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
116477 +#endif
116478 + mark = get_intentional_attr_type(last_node);
116479 + if (mark != MARK_NO)
116480 + break;
116481 + }
116482 + return mark;
116483 +}
116484 +
116485 +/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
116486 + * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
116487 + */
116488 +static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
116489 +{
116490 + if (!cur_node->intentional_mark_from_gimple)
116491 + return false;
116492 +
116493 + if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
116494 + cur_node->intentional_attr_cur_fndecl = MARK_YES;
116495 + else
116496 + cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
116497 +
116498 + // skip param decls
116499 + if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
116500 + return true;
116501 + return true;
116502 +}
116503 +
116504 +/* Search intentional_overflow attribute on caller and on callee too.
116505 + * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes
116506 + * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
116507 + * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
116508 +*/
116509 +static void check_intentional_attribute_ipa(struct interesting_node *cur_node)
116510 +{
116511 + const_tree fndecl;
116512 +
116513 + if (is_intentional_attribute_from_gimple(cur_node))
116514 + return;
116515 +
116516 + if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
116517 + cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
116518 + return;
116519 + }
116520 +
116521 + if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
116522 + cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
116523 + return;
116524 + }
116525 +
116526 + if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
116527 + return;
116528 +
116529 + fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
116530 + if (is_turn_off_intentional_attr(fndecl)) {
116531 + cur_node->intentional_attr_decl = MARK_TURN_OFF;
116532 + return;
116533 + }
116534 +
116535 + if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
116536 + cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
116537 + else if (is_yes_intentional_attr(fndecl, cur_node->num))
116538 + cur_node->intentional_attr_decl = MARK_YES;
116539 +
116540 + cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
116541 + print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
116542 +}
116543 +
116544 +// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
116545 +static bool skip_asm(const_tree arg)
116546 +{
116547 + gimple def_stmt = get_def_stmt(arg);
116548 +
116549 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
116550 + return false;
116551 +
116552 + def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
116553 + return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
116554 +}
116555 +
116556 +static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
116557 +{
116558 + gimple phi = get_def_stmt(result);
116559 + unsigned int i, n = gimple_phi_num_args(phi);
116560 +
116561 + pointer_set_insert(visited, phi);
116562 + for (i = 0; i < n; i++) {
116563 + tree arg = gimple_phi_arg_def(phi, i);
116564 +
116565 + walk_use_def(visited, cur_node, arg);
116566 + }
116567 +}
116568 +
116569 +static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
116570 +{
116571 + gimple def_stmt = get_def_stmt(lhs);
116572 + tree rhs1, rhs2;
116573 +
116574 + rhs1 = gimple_assign_rhs1(def_stmt);
116575 + rhs2 = gimple_assign_rhs2(def_stmt);
116576 +
116577 + walk_use_def(visited, cur_node, rhs1);
116578 + walk_use_def(visited, cur_node, rhs2);
116579 +}
116580 +
116581 +static void insert_last_node(struct interesting_node *cur_node, tree node)
116582 +{
116583 + unsigned int i;
116584 + tree element;
116585 + enum tree_code code;
116586 +
116587 + gcc_assert(node != NULL_TREE);
116588 +
116589 + if (is_gimple_constant(node))
116590 + return;
116591 +
116592 + code = TREE_CODE(node);
116593 + if (code == VAR_DECL) {
116594 + node = DECL_ORIGIN(node);
116595 + code = TREE_CODE(node);
116596 + }
116597 +
116598 + if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
116599 + return;
116600 +
116601 +#if BUILDING_GCC_VERSION <= 4007
116602 + FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
116603 +#else
116604 + FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
116605 +#endif
116606 + if (operand_equal_p(node, element, 0))
116607 + return;
116608 + }
116609 +
116610 +#if BUILDING_GCC_VERSION <= 4007
116611 + gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
116612 + VEC_safe_push(tree, gc, cur_node->last_nodes, node);
116613 +#else
116614 + gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
116615 + vec_safe_push(cur_node->last_nodes, node);
116616 +#endif
116617 +}
116618 +
116619 +// a size_overflow asm stmt in the control flow doesn't stop the recursion
116620 +static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
116621 +{
116622 + if (!is_size_overflow_asm(stmt))
116623 + walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
116624 +}
116625 +
116626 +/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
116627 + * and component refs (for checking the intentional_overflow attribute).
116628 + */
116629 +static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
116630 +{
116631 + const_gimple def_stmt;
116632 +
116633 + if (TREE_CODE(lhs) != SSA_NAME) {
116634 + insert_last_node(cur_node, lhs);
116635 + return;
116636 + }
116637 +
116638 + def_stmt = get_def_stmt(lhs);
116639 + if (!def_stmt)
116640 + return;
116641 +
116642 + if (pointer_set_insert(visited, def_stmt))
116643 + return;
116644 +
116645 + switch (gimple_code(def_stmt)) {
116646 + case GIMPLE_NOP:
116647 + return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
116648 + case GIMPLE_ASM:
116649 + return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
116650 + case GIMPLE_CALL: {
116651 + tree fndecl = gimple_call_fndecl(def_stmt);
116652 +
116653 + if (fndecl == NULL_TREE)
116654 + return;
116655 + insert_last_node(cur_node, fndecl);
116656 + return;
116657 + }
116658 + case GIMPLE_PHI:
116659 + return walk_use_def_phi(visited, cur_node, lhs);
116660 + case GIMPLE_ASSIGN:
116661 + switch (gimple_num_ops(def_stmt)) {
116662 + case 2:
116663 + return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
116664 + case 3:
116665 + return walk_use_def_binary(visited, cur_node, lhs);
116666 + }
116667 + default:
116668 + debug_gimple_stmt((gimple)def_stmt);
116669 + error("%s: unknown gimple code", __func__);
116670 + gcc_unreachable();
116671 + }
116672 +}
116673 +
116674 +// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
116675 +static void set_last_nodes(struct interesting_node *cur_node)
116676 +{
116677 + struct pointer_set_t *visited;
116678 +
116679 + visited = pointer_set_create();
116680 + walk_use_def(visited, cur_node, cur_node->node);
116681 + pointer_set_destroy(visited);
116682 +}
116683 +
116684 +enum precond {
116685 + NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
116686 +};
116687 +
116688 +/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
116689 + * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
116690 + * Stmt duplication is unnecessary if there are no binary/ternary assignements or if the unary assignment isn't a cast.
116691 + * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assigments then we assume that it is some kind of error code.
116692 + */
116693 +static enum precond check_preconditions(struct interesting_node *cur_node)
116694 +{
116695 + bool interesting_conditions[3] = {false, false, false};
116696 +
116697 + set_last_nodes(cur_node);
116698 +
116699 + check_intentional_attribute_ipa(cur_node);
116700 + if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
116701 + return NO_ATTRIBUTE_SEARCH;
116702 +
116703 + search_interesting_conditions(cur_node, interesting_conditions);
116704 +
116705 + // error code
116706 + if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
116707 + return NO_ATTRIBUTE_SEARCH;
116708 +
116709 + // unnecessary overflow check
116710 + if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
116711 + return NO_CHECK_INSERT;
116712 +
116713 + if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
116714 + return NO_CHECK_INSERT;
116715 +
116716 + return NONE;
116717 +}
116718 +
116719 +/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
116720 + * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
116721 + * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
116722 + */
116723 +static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
116724 +{
116725 + enum precond ret;
116726 + struct pointer_set_t *visited;
116727 + tree new_node, orig_node = cur_node->node;
116728 +
116729 + ret = check_preconditions(cur_node);
116730 + if (ret == NO_ATTRIBUTE_SEARCH)
116731 + return cnodes;
116732 +
116733 + cnodes = search_overflow_attribute(cnodes, cur_node);
116734 +
116735 + if (ret == NO_CHECK_INSERT)
116736 + return cnodes;
116737 +
116738 + visited = pointer_set_create();
116739 + new_node = expand(visited, caller_node, orig_node);
116740 + pointer_set_destroy(visited);
116741 +
116742 + if (new_node == NULL_TREE)
116743 + return cnodes;
116744 +
116745 + change_orig_node(cur_node, new_node);
116746 + check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
116747 +
116748 + return cnodes;
116749 +}
116750 +
116751 +// Check visited interesting nodes.
116752 +static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
116753 +{
116754 + struct interesting_node *cur;
116755 +
116756 + for (cur = head; cur; cur = cur->next) {
116757 + if (!operand_equal_p(node, cur->node, 0))
116758 + continue;
116759 + if (num != cur->num)
116760 + continue;
116761 + if (first_stmt == cur->first_stmt)
116762 + return true;
116763 + }
116764 + return false;
116765 +}
116766 +
116767 +/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
116768 + first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this
116769 + last_nodes: they are the last stmts in the recursion (they haven't a def_stmt). They are useful in the missing size_overflow attribute check and
116770 + the intentional_overflow attribute check. They are collected by set_last_nodes().
116771 + num: arg count of a call stmt or 0 when it is a ret
116772 + node: the recursion starts from here, it is a call arg or a return value
116773 + fndecl: the fndecl of the interesting node when the node is an arg. it is the fndecl of the callee function otherwise it is the fndecl of the caller (current_function_fndecl) function.
116774 + intentional_attr_decl: intentional_overflow attribute of the callee function
116775 + intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
116776 + intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
116777 + */
116778 +static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
116779 +{
116780 + struct interesting_node *new_node;
116781 + tree fndecl;
116782 + enum gimple_code code;
116783 +
116784 + gcc_assert(node != NULL_TREE);
116785 + code = gimple_code(first_stmt);
116786 + gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
116787 +
116788 + if (num == CANNOT_FIND_ARG)
116789 + return head;
116790 +
116791 + if (skip_types(node))
116792 + return head;
116793 +
116794 + if (skip_asm(node))
116795 + return head;
116796 +
116797 + if (is_gimple_call(first_stmt))
116798 + fndecl = gimple_call_fndecl(first_stmt);
116799 + else
116800 + fndecl = current_function_decl;
116801 +
116802 + if (fndecl == NULL_TREE)
116803 + return head;
116804 +
116805 + if (is_in_interesting_node(head, first_stmt, node, num))
116806 + return head;
116807 +
116808 + new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
116809 +
116810 + new_node->next = NULL;
116811 + new_node->first_stmt = first_stmt;
116812 +#if BUILDING_GCC_VERSION <= 4007
116813 + new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
116814 +#else
116815 + vec_alloc(new_node->last_nodes, VEC_LEN);
116816 +#endif
116817 + new_node->num = num;
116818 + new_node->node = node;
116819 + new_node->fndecl = fndecl;
116820 + new_node->intentional_attr_decl = MARK_NO;
116821 + new_node->intentional_attr_cur_fndecl = MARK_NO;
116822 + new_node->intentional_mark_from_gimple = asm_stmt;
116823 +
116824 + if (!head)
116825 + return new_node;
116826 +
116827 + new_node->next = head;
116828 + return new_node;
116829 +}
116830 +
116831 +/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
116832 + * If the ret stmt is in the next cgraph node list then it's an interesting ret.
116833 + */
116834 +static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
116835 +{
116836 + struct next_cgraph_node *cur_node;
116837 + tree ret = gimple_return_retval(stmt);
116838 +
116839 + if (ret == NULL_TREE)
116840 + return head;
116841 +
116842 + for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
116843 + if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
116844 + continue;
116845 + if (cur_node->num == 0)
116846 + head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
116847 + }
116848 +
116849 + return head;
116850 +}
116851 +
116852 +/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
116853 + * If the call stmt is in the next cgraph node list then it's an interesting call.
116854 + */
116855 +static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
116856 +{
116857 + unsigned int argnum;
116858 + tree arg;
116859 + const_tree fndecl;
116860 + struct next_cgraph_node *cur_node;
116861 +
116862 + fndecl = gimple_call_fndecl(stmt);
116863 + if (fndecl == NULL_TREE)
116864 + return head;
116865 +
116866 + for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
116867 + if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
116868 + continue;
116869 + argnum = get_correct_arg_count(cur_node->num, fndecl);
116870 + gcc_assert(argnum != CANNOT_FIND_ARG);
116871 + if (argnum == 0)
116872 + continue;
116873 +
116874 + arg = gimple_call_arg(stmt, argnum - 1);
116875 + head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
116876 + }
116877 +
116878 + return head;
116879 +}
116880 +
116881 +static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
116882 +{
116883 + if (!operand_equal_p(orig_node, node, 0))
116884 + return WRONG_NODE;
116885 + if (skip_types(node))
116886 + return WRONG_NODE;
116887 + return ret_count;
116888 +}
116889 +
116890 +// Get the index of the rhs node in an assignment
116891 +static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
116892 +{
116893 + const_tree rhs1, rhs2;
116894 + unsigned int ret;
116895 +
116896 + gcc_assert(stmt);
116897 + gcc_assert(is_gimple_assign(stmt));
116898 +
116899 + rhs1 = gimple_assign_rhs1(stmt);
116900 + gcc_assert(rhs1 != NULL_TREE);
116901 +
116902 + switch (gimple_num_ops(stmt)) {
116903 + case 2:
116904 + return check_ops(node, rhs1, 1);
116905 + case 3:
116906 + ret = check_ops(node, rhs1, 1);
116907 + if (ret != WRONG_NODE)
116908 + return ret;
116909 +
116910 + rhs2 = gimple_assign_rhs2(stmt);
116911 + gcc_assert(rhs2 != NULL_TREE);
116912 + return check_ops(node, rhs2, 2);
116913 + default:
116914 + gcc_unreachable();
116915 + }
116916 +}
116917 +
116918 +// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
116919 +static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
116920 +{
116921 + unsigned int i;
116922 +
116923 + if (gimple_call_fndecl(stmt) == NULL_TREE)
116924 + return CANNOT_FIND_ARG;
116925 +
116926 + for (i = 0; i < gimple_call_num_args(stmt); i++) {
116927 + tree node;
116928 +
116929 + node = gimple_call_arg(stmt, i);
116930 + if (!operand_equal_p(arg, node, 0))
116931 + continue;
116932 + if (!skip_types(node))
116933 + return i + 1;
116934 + }
116935 +
116936 + return CANNOT_FIND_ARG;
116937 +}
116938 +
116939 +/* starting from the size_overflow asm stmt collect interesting stmts. They can be
116940 + * any of return, call or assignment stmts (because of inlining).
116941 + */
116942 +static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
116943 +{
116944 + use_operand_p use_p;
116945 + imm_use_iterator imm_iter;
116946 + unsigned int argnum;
116947 +
116948 + gcc_assert(TREE_CODE(node) == SSA_NAME);
116949 +
116950 + if (pointer_set_insert(visited, node))
116951 + return head;
116952 +
116953 + FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
116954 + gimple stmt = USE_STMT(use_p);
116955 +
116956 + if (stmt == NULL)
116957 + return head;
116958 + if (is_gimple_debug(stmt))
116959 + continue;
116960 +
116961 + switch (gimple_code(stmt)) {
116962 + case GIMPLE_CALL:
116963 + argnum = find_arg_number_gimple(node, stmt);
116964 + head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
116965 + break;
116966 + case GIMPLE_RETURN:
116967 + head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
116968 + break;
116969 + case GIMPLE_ASSIGN:
116970 + argnum = get_assign_ops_count(stmt, node);
116971 + head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
116972 + break;
116973 + case GIMPLE_PHI: {
116974 + tree result = gimple_phi_result(stmt);
116975 + head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
116976 + break;
116977 + }
116978 + case GIMPLE_ASM:
116979 + if (gimple_asm_noutputs(stmt) != 0)
116980 + break;
116981 + if (!is_size_overflow_asm(stmt))
116982 + break;
116983 + head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
116984 + break;
116985 + case GIMPLE_COND:
116986 + case GIMPLE_SWITCH:
116987 + break;
116988 + default:
116989 + debug_gimple_stmt(stmt);
116990 + gcc_unreachable();
116991 + break;
116992 + }
116993 + }
116994 + return head;
116995 +}
116996 +
116997 +static void remove_size_overflow_asm(gimple stmt)
116998 +{
116999 + gimple_stmt_iterator gsi;
117000 + tree input, output;
117001 +
117002 + if (!is_size_overflow_asm(stmt))
117003 + return;
117004 +
117005 + if (gimple_asm_noutputs(stmt) == 0) {
117006 + gsi = gsi_for_stmt(stmt);
117007 + ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
117008 + gsi_remove(&gsi, true);
117009 + return;
117010 + }
117011 +
117012 + input = gimple_asm_input_op(stmt, 0);
117013 + output = gimple_asm_output_op(stmt, 0);
117014 + replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
117015 +}
117016 +
117017 +/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
117018 + * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
117019 + * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
117020 + */
117021 +static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
117022 +{
117023 + const_tree output;
117024 + struct pointer_set_t *visited;
117025 + gimple intentional_asm = NOT_INTENTIONAL_ASM;
117026 +
117027 + if (!is_size_overflow_asm(stmt))
117028 + return head;
117029 +
117030 + if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
117031 + intentional_asm = stmt;
117032 +
117033 + gcc_assert(gimple_asm_ninputs(stmt) == 1);
117034 +
117035 + if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
117036 + return head;
117037 +
117038 + if (gimple_asm_noutputs(stmt) == 0) {
117039 + const_tree input;
117040 +
117041 + if (!is_size_overflow_intentional_asm_turn_off(stmt))
117042 + return head;
117043 +
117044 + input = gimple_asm_input_op(stmt, 0);
117045 + remove_size_overflow_asm(stmt);
117046 + if (is_gimple_constant(TREE_VALUE(input)))
117047 + return head;
117048 + visited = pointer_set_create();
117049 + head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
117050 + pointer_set_destroy(visited);
117051 + return head;
117052 + }
117053 +
117054 + if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
117055 + remove_size_overflow_asm(stmt);
117056 +
117057 + visited = pointer_set_create();
117058 + output = gimple_asm_output_op(stmt, 0);
117059 + head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
117060 + pointer_set_destroy(visited);
117061 + return head;
117062 +}
117063 +
117064 +/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
117065 + * or a call stmt or a return stmt and store them in the interesting_node list
117066 + */
117067 +static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
117068 +{
117069 + basic_block bb;
117070 + struct interesting_node *head = NULL;
117071 +
117072 + FOR_ALL_BB_FN(bb, cfun) {
117073 + gimple_stmt_iterator gsi;
117074 +
117075 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
117076 + enum gimple_code code;
117077 + gimple stmt = gsi_stmt(gsi);
117078 +
117079 + code = gimple_code(stmt);
117080 +
117081 + if (code == GIMPLE_ASM)
117082 + head = handle_stmt_by_size_overflow_asm(stmt, head);
117083 +
117084 + if (!next_node)
117085 + continue;
117086 + if (code == GIMPLE_CALL)
117087 + head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
117088 + if (code == GIMPLE_RETURN)
117089 + head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
117090 + }
117091 + }
117092 + return head;
117093 +}
117094 +
117095 +static void set_current_function_decl(tree fndecl)
117096 +{
117097 + gcc_assert(fndecl != NULL_TREE);
117098 +
117099 + push_cfun(DECL_STRUCT_FUNCTION(fndecl));
117100 + calculate_dominance_info(CDI_DOMINATORS);
117101 + current_function_decl = fndecl;
117102 +}
117103 +
117104 +static void unset_current_function_decl(void)
117105 +{
117106 + free_dominance_info(CDI_DOMINATORS);
117107 + pop_cfun();
117108 + current_function_decl = NULL_TREE;
117109 +}
117110 +
117111 +static void free_interesting_node(struct interesting_node *head)
117112 +{
117113 + struct interesting_node *cur;
117114 +
117115 + while (head) {
117116 + cur = head->next;
117117 +#if BUILDING_GCC_VERSION <= 4007
117118 + VEC_free(tree, gc, head->last_nodes);
117119 +#else
117120 + vec_free(head->last_nodes);
117121 +#endif
117122 + free(head);
117123 + head = cur;
117124 + }
117125 +}
117126 +
117127 +static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node)
117128 +{
117129 + struct visited *new_visited;
117130 +
117131 + new_visited = (struct visited *)xmalloc(sizeof(*new_visited));
117132 + new_visited->fndecl = cur_node->fndecl;
117133 + new_visited->num = cur_node->num;
117134 + new_visited->next = NULL;
117135 +
117136 + if (!head)
117137 + return new_visited;
117138 +
117139 + new_visited->next = head;
117140 + return new_visited;
117141 +}
117142 +
117143 +/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are same then
117144 + * it is a visited function.
117145 + */
117146 +static bool is_visited_function(struct visited *head, struct interesting_node *cur_node)
117147 +{
117148 + struct visited *cur;
117149 +
117150 + if (!head)
117151 + return false;
117152 +
117153 + if (get_stmt_flag(cur_node->first_stmt) != VISITED_STMT)
117154 + return false;
117155 +
117156 + for (cur = head; cur; cur = cur->next) {
117157 + if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
117158 + continue;
117159 + if (cur_node->num == cur->num)
117160 + return true;
117161 + }
117162 + return false;
117163 +}
117164 +
117165 +static void free_next_cgraph_node(struct next_cgraph_node *head)
117166 +{
117167 + struct next_cgraph_node *cur;
117168 +
117169 + while (head) {
117170 + cur = head->next;
117171 + free(head);
117172 + head = cur;
117173 + }
117174 +}
117175 +
117176 +static void remove_all_size_overflow_asm(void)
117177 +{
117178 + basic_block bb;
117179 +
117180 + FOR_ALL_BB_FN(bb, cfun) {
117181 + gimple_stmt_iterator si;
117182 +
117183 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
117184 + remove_size_overflow_asm(gsi_stmt(si));
117185 + }
117186 +}
117187 +
117188 +/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
117189 + * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
117190 + * the newly collected interesting functions (they are interesting if there is control flow between
117191 + * the interesting stmts and them).
117192 + */
117193 +static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited)
117194 +{
117195 + struct interesting_node *head, *cur_node;
117196 + struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
117197 +
117198 + set_current_function_decl(NODE_DECL(node));
117199 + call_count = 0;
117200 +
117201 + head = collect_interesting_stmts(next_node);
117202 +
117203 + for (cur_node = head; cur_node; cur_node = cur_node->next) {
117204 + if (is_visited_function(visited, cur_node))
117205 + continue;
117206 + cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node);
117207 + set_stmt_flag(cur_node->first_stmt, VISITED_STMT);
117208 + visited = insert_visited_function(visited, cur_node);
117209 + }
117210 +
117211 + free_interesting_node(head);
117212 + remove_all_size_overflow_asm();
117213 + unset_current_function_decl();
117214 +
117215 + for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
117216 + visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited);
117217 +
117218 + free_next_cgraph_node(cnodes_head);
117219 + return visited;
117220 +}
117221 +
117222 +static void free_visited(struct visited *head)
117223 +{
117224 + struct visited *cur;
117225 +
117226 + while (head) {
117227 + cur = head->next;
117228 + free(head);
117229 + head = cur;
117230 + }
117231 +}
117232 +
117233 +// erase the local flag
117234 +static void set_plf_false(void)
117235 +{
117236 + basic_block bb;
117237 +
117238 + FOR_ALL_BB_FN(bb, cfun) {
117239 + gimple_stmt_iterator si;
117240 +
117241 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
117242 + set_stmt_flag(gsi_stmt(si), NO_FLAGS);
117243 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
117244 + set_stmt_flag(gsi_stmt(si), NO_FLAGS);
117245 + }
117246 +}
117247 +
117248 +// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
117249 +static unsigned int search_function(void)
117250 +{
117251 + struct cgraph_node *node;
117252 + struct visited *visited = NULL;
117253 +
117254 + FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
117255 + set_current_function_decl(NODE_DECL(node));
117256 + set_plf_false();
117257 + unset_current_function_decl();
117258 + }
117259 +
117260 + FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
117261 + gcc_assert(cgraph_function_flags_ready);
117262 +#if BUILDING_GCC_VERSION <= 4007
117263 + gcc_assert(node->reachable);
117264 +#endif
117265 +
117266 + visited = handle_function(node, NULL, visited);
117267 + }
117268 +
117269 + free_visited(visited);
117270 + return 0;
117271 +}
117272 +
117273 +#if BUILDING_GCC_VERSION >= 4009
117274 +static const struct pass_data ipa_pass_data = {
117275 +#else
117276 +static struct ipa_opt_pass_d ipa_pass = {
117277 + .pass = {
117278 +#endif
117279 + .type = SIMPLE_IPA_PASS,
117280 + .name = "size_overflow",
117281 +#if BUILDING_GCC_VERSION >= 4008
117282 + .optinfo_flags = OPTGROUP_NONE,
117283 +#endif
117284 +#if BUILDING_GCC_VERSION >= 4009
117285 + .has_gate = false,
117286 + .has_execute = true,
117287 +#else
117288 + .gate = NULL,
117289 + .execute = search_function,
117290 + .sub = NULL,
117291 + .next = NULL,
117292 + .static_pass_number = 0,
117293 +#endif
117294 + .tv_id = TV_NONE,
117295 + .properties_required = 0,
117296 + .properties_provided = 0,
117297 + .properties_destroyed = 0,
117298 + .todo_flags_start = 0,
117299 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
117300 +#if BUILDING_GCC_VERSION < 4009
117301 + },
117302 + .generate_summary = NULL,
117303 + .write_summary = NULL,
117304 + .read_summary = NULL,
117305 +#if BUILDING_GCC_VERSION >= 4006
117306 + .write_optimization_summary = NULL,
117307 + .read_optimization_summary = NULL,
117308 +#endif
117309 + .stmt_fixup = NULL,
117310 + .function_transform_todo_flags_start = 0,
117311 + .function_transform = NULL,
117312 + .variable_transform = NULL,
117313 +#endif
117314 +};
117315 +
117316 +#if BUILDING_GCC_VERSION >= 4009
117317 +namespace {
117318 +class ipa_pass : public ipa_opt_pass_d {
117319 +public:
117320 + ipa_pass() : ipa_opt_pass_d(ipa_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
117321 + unsigned int execute() { return search_function(); }
117322 +};
117323 +}
117324 +#endif
117325 +
117326 +static struct opt_pass *make_ipa_pass(void)
117327 +{
117328 +#if BUILDING_GCC_VERSION >= 4009
117329 + return new ipa_pass();
117330 +#else
117331 + return &ipa_pass.pass;
117332 +#endif
117333 +}
117334 +
117335 +// data for the size_overflow asm stmt
117336 +struct asm_data {
117337 + gimple def_stmt;
117338 + tree input;
117339 + tree output;
117340 +};
117341 +
117342 +#if BUILDING_GCC_VERSION <= 4007
117343 +static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
117344 +#else
117345 +static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
117346 +#endif
117347 +{
117348 + tree list;
117349 +#if BUILDING_GCC_VERSION <= 4007
117350 + VEC(tree, gc) *vec_list = NULL;
117351 +#else
117352 + vec<tree, va_gc> *vec_list = NULL;
117353 +#endif
117354 +
117355 + list = build_tree_list(NULL_TREE, string);
117356 + list = chainon(NULL_TREE, build_tree_list(list, io));
117357 +#if BUILDING_GCC_VERSION <= 4007
117358 + VEC_safe_push(tree, gc, vec_list, list);
117359 +#else
117360 + vec_safe_push(vec_list, list);
117361 +#endif
117362 + return vec_list;
117363 +}
117364 +
117365 +static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
117366 +{
117367 + gimple asm_stmt;
117368 + gimple_stmt_iterator gsi;
117369 +#if BUILDING_GCC_VERSION <= 4007
117370 + VEC(tree, gc) *input, *output = NULL;
117371 +#else
117372 + vec<tree, va_gc> *input, *output = NULL;
117373 +#endif
117374 +
117375 + input = create_asm_io_list(str_input, asm_data->input);
117376 +
117377 + if (asm_data->output)
117378 + output = create_asm_io_list(str_output, asm_data->output);
117379 +
117380 + asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
117381 + gsi = gsi_for_stmt(asm_data->def_stmt);
117382 + gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
117383 +
117384 + if (asm_data->output)
117385 + SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
117386 +}
117387 +
117388 +static void replace_call_lhs(const struct asm_data *asm_data)
117389 +{
117390 + gimple_set_lhs(asm_data->def_stmt, asm_data->input);
117391 + update_stmt(asm_data->def_stmt);
117392 + SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
117393 +}
117394 +
117395 +static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
117396 +{
117397 + enum mark cur_fndecl_attr;
117398 + gimple phi = get_def_stmt(result);
117399 + unsigned int i, n = gimple_phi_num_args(phi);
117400 +
117401 + pointer_set_insert(visited, phi);
117402 + for (i = 0; i < n; i++) {
117403 + tree arg = gimple_phi_arg_def(phi, i);
117404 +
117405 + cur_fndecl_attr = search_intentional(visited, arg);
117406 + if (cur_fndecl_attr != MARK_NO)
117407 + return cur_fndecl_attr;
117408 + }
117409 + return MARK_NO;
117410 +}
117411 +
117412 +static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
117413 +{
117414 + enum mark cur_fndecl_attr;
117415 + const_tree rhs1, rhs2;
117416 + gimple def_stmt = get_def_stmt(lhs);
117417 +
117418 + rhs1 = gimple_assign_rhs1(def_stmt);
117419 + rhs2 = gimple_assign_rhs2(def_stmt);
117420 +
117421 + cur_fndecl_attr = search_intentional(visited, rhs1);
117422 + if (cur_fndecl_attr != MARK_NO)
117423 + return cur_fndecl_attr;
117424 + return search_intentional(visited, rhs2);
117425 +}
117426 +
117427 +// Look up the intentional_overflow attribute on the caller and the callee functions.
117428 +static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
117429 +{
117430 + const_gimple def_stmt;
117431 +
117432 + if (TREE_CODE(lhs) != SSA_NAME)
117433 + return get_intentional_attr_type(lhs);
117434 +
117435 + def_stmt = get_def_stmt(lhs);
117436 + if (!def_stmt)
117437 + return MARK_NO;
117438 +
117439 + if (pointer_set_contains(visited, def_stmt))
117440 + return MARK_NO;
117441 +
117442 + switch (gimple_code(def_stmt)) {
117443 + case GIMPLE_NOP:
117444 + return search_intentional(visited, SSA_NAME_VAR(lhs));
117445 + case GIMPLE_ASM:
117446 + if (is_size_overflow_intentional_asm_turn_off(def_stmt))
117447 + return MARK_TURN_OFF;
117448 + return MARK_NO;
117449 + case GIMPLE_CALL:
117450 + return MARK_NO;
117451 + case GIMPLE_PHI:
117452 + return search_intentional_phi(visited, lhs);
117453 + case GIMPLE_ASSIGN:
117454 + switch (gimple_num_ops(def_stmt)) {
117455 + case 2:
117456 + return search_intentional(visited, gimple_assign_rhs1(def_stmt));
117457 + case 3:
117458 + return search_intentional_binary(visited, lhs);
117459 + }
117460 + case GIMPLE_RETURN:
117461 + return MARK_NO;
117462 + default:
117463 + debug_gimple_stmt((gimple)def_stmt);
117464 + error("%s: unknown gimple code", __func__);
117465 + gcc_unreachable();
117466 + }
117467 +}
117468 +
117469 +// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
117470 +static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
117471 +{
117472 + const_tree fndecl;
117473 + struct pointer_set_t *visited;
117474 + enum mark cur_fndecl_attr, decl_attr = MARK_NO;
117475 +
117476 + fndecl = get_interesting_orig_fndecl(stmt, argnum);
117477 + if (is_end_intentional_intentional_attr(fndecl, argnum))
117478 + decl_attr = MARK_NOT_INTENTIONAL;
117479 + else if (is_yes_intentional_attr(fndecl, argnum))
117480 + decl_attr = MARK_YES;
117481 + else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
117482 + return MARK_TURN_OFF;
117483 + }
117484 +
117485 + visited = pointer_set_create();
117486 + cur_fndecl_attr = search_intentional(visited, arg);
117487 + pointer_set_destroy(visited);
117488 +
117489 + switch (cur_fndecl_attr) {
117490 + case MARK_NO:
117491 + case MARK_TURN_OFF:
117492 + return cur_fndecl_attr;
117493 + default:
117494 + print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
117495 + return MARK_YES;
117496 + }
117497 +}
117498 +
117499 +static void check_missing_size_overflow_attribute(tree var)
117500 +{
117501 + tree orig_fndecl;
117502 + unsigned int num;
117503 +
117504 + if (is_a_return_check(var))
117505 + orig_fndecl = DECL_ORIGIN(var);
117506 + else
117507 + orig_fndecl = DECL_ORIGIN(current_function_decl);
117508 +
117509 + num = get_function_num(var, orig_fndecl);
117510 + if (num == CANNOT_FIND_ARG)
117511 + return;
117512 +
117513 + is_missing_function(orig_fndecl, num);
117514 +}
117515 +
117516 +static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
117517 +{
117518 + gimple phi = get_def_stmt(result);
117519 + unsigned int i, n = gimple_phi_num_args(phi);
117520 +
117521 + pointer_set_insert(visited, phi);
117522 + for (i = 0; i < n; i++) {
117523 + tree arg = gimple_phi_arg_def(phi, i);
117524 +
117525 + search_size_overflow_attribute(visited, arg);
117526 + }
117527 +}
117528 +
117529 +static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
117530 +{
117531 + const_gimple def_stmt = get_def_stmt(lhs);
117532 + tree rhs1, rhs2;
117533 +
117534 + rhs1 = gimple_assign_rhs1(def_stmt);
117535 + rhs2 = gimple_assign_rhs2(def_stmt);
117536 +
117537 + search_size_overflow_attribute(visited, rhs1);
117538 + search_size_overflow_attribute(visited, rhs2);
117539 +}
117540 +
117541 +static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
117542 +{
117543 + const_gimple def_stmt;
117544 +
117545 + if (TREE_CODE(lhs) == PARM_DECL) {
117546 + check_missing_size_overflow_attribute(lhs);
117547 + return;
117548 + }
117549 +
117550 + def_stmt = get_def_stmt(lhs);
117551 + if (!def_stmt)
117552 + return;
117553 +
117554 + if (pointer_set_insert(visited, def_stmt))
117555 + return;
117556 +
117557 + switch (gimple_code(def_stmt)) {
117558 + case GIMPLE_NOP:
117559 + return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
117560 + case GIMPLE_ASM:
117561 + return;
117562 + case GIMPLE_CALL: {
117563 + tree fndecl = gimple_call_fndecl(def_stmt);
117564 +
117565 + if (fndecl == NULL_TREE)
117566 + return;
117567 + check_missing_size_overflow_attribute(fndecl);
117568 + return;
117569 + }
117570 + case GIMPLE_PHI:
117571 + return search_size_overflow_attribute_phi(visited, lhs);
117572 + case GIMPLE_ASSIGN:
117573 + switch (gimple_num_ops(def_stmt)) {
117574 + case 2:
117575 + return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
117576 + case 3:
117577 + return search_size_overflow_attribute_binary(visited, lhs);
117578 + }
117579 + default:
117580 + debug_gimple_stmt((gimple)def_stmt);
117581 + error("%s: unknown gimple code", __func__);
117582 + gcc_unreachable();
117583 + }
117584 +}
117585 +
117586 +// Search missing entries in the hash table (invoked from the gimple pass)
117587 +static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
117588 +{
117589 + tree fndecl = NULL_TREE;
117590 + tree lhs;
117591 + struct pointer_set_t *visited;
117592 +
117593 + if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
117594 + return;
117595 +
117596 + if (num == 0) {
117597 + gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
117598 + lhs = gimple_return_retval(stmt);
117599 + } else {
117600 + gcc_assert(is_gimple_call(stmt));
117601 + lhs = gimple_call_arg(stmt, num - 1);
117602 + fndecl = gimple_call_fndecl(stmt);
117603 + }
117604 +
117605 + if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
117606 + return;
117607 +
117608 + visited = pointer_set_create();
117609 + search_size_overflow_attribute(visited, lhs);
117610 + pointer_set_destroy(visited);
117611 +}
117612 +
117613 +static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
117614 +{
117615 + gimple_stmt_iterator gsi;
117616 + gimple assign;
117617 +
117618 + assign = gimple_build_assign(asm_data->input, asm_data->output);
117619 + gsi = gsi_for_stmt(stmt);
117620 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
117621 + asm_data->def_stmt = assign;
117622 +
117623 + asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
117624 + asm_data->output = make_ssa_name(asm_data->output, stmt);
117625 + if (gimple_code(stmt) == GIMPLE_RETURN)
117626 + gimple_return_set_retval(stmt, asm_data->output);
117627 + else
117628 + gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
117629 + update_stmt(stmt);
117630 +}
117631 +
117632 +static char *create_asm_comment(unsigned int argnum, const_gimple stmt , const char *mark_str)
117633 +{
117634 + const char *fn_name;
117635 + char *asm_comment;
117636 + unsigned int len;
117637 +
117638 + if (argnum == 0)
117639 + fn_name = DECL_NAME_POINTER(current_function_decl);
117640 + else
117641 + fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
117642 +
117643 + len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
117644 + gcc_assert(len > 0);
117645 +
117646 + return asm_comment;
117647 +}
117648 +
117649 +static const char *convert_mark_to_str(enum mark mark)
117650 +{
117651 + switch (mark) {
117652 + case MARK_NO:
117653 + return OK_ASM_STR;
117654 + case MARK_YES:
117655 + case MARK_NOT_INTENTIONAL:
117656 + return YES_ASM_STR;
117657 + case MARK_TURN_OFF:
117658 + return TURN_OFF_ASM_STR;
117659 + }
117660 +
117661 + gcc_unreachable();
117662 +}
117663 +
117664 +/* Create the input of the size_overflow asm stmt.
117665 + * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
117666 + * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
117667 + * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
117668 + * otherwise create the input (for a phi stmt the output too) of the asm stmt.
117669 + */
117670 +static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
117671 +{
117672 + if (!asm_data->def_stmt) {
117673 + asm_data->input = NULL_TREE;
117674 + return;
117675 + }
117676 +
117677 + asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
117678 + asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
117679 +
117680 + switch (gimple_code(asm_data->def_stmt)) {
117681 + case GIMPLE_ASSIGN:
117682 + case GIMPLE_CALL:
117683 + replace_call_lhs(asm_data);
117684 + break;
117685 + case GIMPLE_PHI:
117686 + create_output_from_phi(stmt, argnum, asm_data);
117687 + break;
117688 + case GIMPLE_NOP: {
117689 + enum mark mark;
117690 + const char *mark_str;
117691 + char *asm_comment;
117692 +
117693 + mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
117694 +
117695 + asm_data->input = asm_data->output;
117696 + asm_data->output = NULL;
117697 + asm_data->def_stmt = stmt;
117698 +
117699 + mark_str = convert_mark_to_str(mark);
117700 + asm_comment = create_asm_comment(argnum, stmt, mark_str);
117701 +
117702 + create_asm_stmt(asm_comment, build_string(2, "rm"), NULL, asm_data);
117703 + free(asm_comment);
117704 + asm_data->input = NULL_TREE;
117705 + break;
117706 + }
117707 + case GIMPLE_ASM:
117708 + if (is_size_overflow_asm(asm_data->def_stmt)) {
117709 + asm_data->input = NULL_TREE;
117710 + break;
117711 + }
117712 + default:
117713 + debug_gimple_stmt(asm_data->def_stmt);
117714 + gcc_unreachable();
117715 + }
117716 +}
117717 +
117718 +/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
117719 + * is of the right kind create the appropriate size_overflow asm stmts:
117720 + * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
117721 + * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
117722 + */
117723 +static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
117724 +{
117725 + struct asm_data asm_data;
117726 + const char *mark_str;
117727 + char *asm_comment;
117728 + enum mark mark;
117729 +
117730 + if (is_gimple_constant(output_node))
117731 + return;
117732 +
117733 + asm_data.output = output_node;
117734 + mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
117735 + if (mark != MARK_TURN_OFF)
117736 + search_missing_size_overflow_attribute_gimple(stmt, argnum);
117737 +
117738 + asm_data.def_stmt = get_def_stmt(asm_data.output);
117739 + if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
117740 + return;
117741 +
117742 + create_asm_input(stmt, argnum, &asm_data);
117743 + if (asm_data.input == NULL_TREE)
117744 + return;
117745 +
117746 + mark_str = convert_mark_to_str(mark);
117747 + asm_comment = create_asm_comment(argnum, stmt, mark_str);
117748 + create_asm_stmt(asm_comment, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
117749 + free(asm_comment);
117750 +}
117751 +
117752 +// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
117753 +static bool create_mark_asm(gimple stmt, enum mark mark)
117754 +{
117755 + struct asm_data asm_data;
117756 + const char *asm_str;
117757 +
117758 + switch (mark) {
117759 + case MARK_TURN_OFF:
117760 + asm_str = TURN_OFF_ASM_STR;
117761 + break;
117762 + case MARK_NOT_INTENTIONAL:
117763 + case MARK_YES:
117764 + asm_str = YES_ASM_STR;
117765 + break;
117766 + default:
117767 + gcc_unreachable();
117768 + }
117769 +
117770 + asm_data.def_stmt = stmt;
117771 + asm_data.output = gimple_call_lhs(stmt);
117772 +
117773 + if (asm_data.output == NULL_TREE) {
117774 + asm_data.input = gimple_call_arg(stmt, 0);
117775 + if (is_gimple_constant(asm_data.input))
117776 + return false;
117777 + asm_data.output = NULL;
117778 + create_asm_stmt(asm_str, build_string(2, "rm"), NULL, &asm_data);
117779 + return true;
117780 + }
117781 +
117782 + create_asm_input(stmt, 0, &asm_data);
117783 + gcc_assert(asm_data.input != NULL_TREE);
117784 +
117785 + create_asm_stmt(asm_str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
117786 + return true;
117787 +}
117788 +
117789 +static bool is_from_cast(const_tree node)
117790 +{
117791 + gimple def_stmt = get_def_stmt(node);
117792 +
117793 + if (!def_stmt)
117794 + return false;
117795 +
117796 + if (gimple_assign_cast_p(def_stmt))
117797 + return true;
117798 +
117799 + return false;
117800 +}
117801 +
117802 +// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
117803 +static bool skip_ptr_minus(gimple stmt)
117804 +{
117805 + const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
117806 +
117807 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
117808 + return false;
117809 +
117810 + rhs1 = gimple_assign_rhs1(stmt);
117811 + if (!is_from_cast(rhs1))
117812 + return false;
117813 +
117814 + rhs2 = gimple_assign_rhs2(stmt);
117815 + if (!is_from_cast(rhs2))
117816 + return false;
117817 +
117818 + ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
117819 + ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
117820 +
117821 + if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
117822 + return false;
117823 +
117824 + create_mark_asm(stmt, MARK_YES);
117825 + return true;
117826 +}
117827 +
117828 +static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
117829 +{
117830 + gimple def_stmt;
117831 +
117832 + def_stmt = get_def_stmt(lhs);
117833 + if (!def_stmt)
117834 + return;
117835 +
117836 + if (pointer_set_insert(visited, def_stmt))
117837 + return;
117838 +
117839 + switch (gimple_code(def_stmt)) {
117840 + case GIMPLE_NOP:
117841 + case GIMPLE_ASM:
117842 + case GIMPLE_CALL:
117843 + break;
117844 + case GIMPLE_PHI: {
117845 + unsigned int i, n = gimple_phi_num_args(def_stmt);
117846 +
117847 + pointer_set_insert(visited, def_stmt);
117848 +
117849 + for (i = 0; i < n; i++) {
117850 + tree arg = gimple_phi_arg_def(def_stmt, i);
117851 +
117852 + walk_use_def_ptr(visited, arg);
117853 + }
117854 + }
117855 + case GIMPLE_ASSIGN:
117856 + switch (gimple_num_ops(def_stmt)) {
117857 + case 2:
117858 + walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
117859 + return;
117860 + case 3:
117861 + if (skip_ptr_minus(def_stmt))
117862 + return;
117863 +
117864 + walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
117865 + walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
117866 + return;
117867 + default:
117868 + return;
117869 + }
117870 + default:
117871 + debug_gimple_stmt((gimple)def_stmt);
117872 + error("%s: unknown gimple code", __func__);
117873 + gcc_unreachable();
117874 + }
117875 +}
117876 +
117877 +// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
117878 +static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
117879 +{
117880 + struct pointer_set_t *visited;
117881 +
117882 + visited = pointer_set_create();
117883 + walk_use_def_ptr(visited, arg);
117884 + pointer_set_destroy(visited);
117885 +}
117886 +
117887 +// Determine the return value and insert the asm stmt to mark the return stmt.
117888 +static void insert_asm_ret(gimple stmt)
117889 +{
117890 + tree ret;
117891 +
117892 + ret = gimple_return_retval(stmt);
117893 + create_size_overflow_asm(stmt, ret, 0);
117894 +}
117895 +
117896 +// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
117897 +static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
117898 +{
117899 + tree arg;
117900 + unsigned int argnum;
117901 +
117902 + argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
117903 + gcc_assert(argnum != 0);
117904 + if (argnum == CANNOT_FIND_ARG)
117905 + return;
117906 +
117907 + arg = gimple_call_arg(stmt, argnum - 1);
117908 + gcc_assert(arg != NULL_TREE);
117909 +
117910 + // skip all ptr - ptr expressions
117911 + insert_mark_not_intentional_asm_at_ptr(arg);
117912 +
117913 + create_size_overflow_asm(stmt, arg, argnum);
117914 +}
117915 +
117916 +// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
117917 +static void set_argnum_attribute(const_tree attr, bool *argnums)
117918 +{
117919 + unsigned int argnum;
117920 + tree attr_value;
117921 +
117922 + for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
117923 + argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
117924 + argnums[argnum] = true;
117925 + }
117926 +}
117927 +
117928 +// If a function arg or the return value is in the hash table then set its index in the array.
117929 +static void set_argnum_hash(tree fndecl, bool *argnums)
117930 +{
117931 + unsigned int num;
117932 + const struct size_overflow_hash *hash;
117933 +
117934 + hash = get_function_hash(DECL_ORIGIN(fndecl));
117935 + if (!hash)
117936 + return;
117937 +
117938 + for (num = 0; num <= MAX_PARAM; num++) {
117939 + if (!(hash->param & (1U << num)))
117940 + continue;
117941 +
117942 + argnums[num] = true;
117943 + }
117944 +}
117945 +
117946 +static bool is_all_the_argnums_empty(bool *argnums)
117947 +{
117948 + unsigned int i;
117949 +
117950 + for (i = 0; i <= MAX_PARAM; i++)
117951 + if (argnums[i])
117952 + return false;
117953 + return true;
117954 +}
117955 +
117956 +// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
117957 +static void search_interesting_args(tree fndecl, bool *argnums)
117958 +{
117959 + const_tree attr;
117960 +
117961 + set_argnum_hash(fndecl, argnums);
117962 + if (!is_all_the_argnums_empty(argnums))
117963 + return;
117964 +
117965 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
117966 + if (attr && TREE_VALUE(attr))
117967 + set_argnum_attribute(attr, argnums);
117968 +}
117969 +
117970 +/*
117971 + * Look up the intentional_overflow attribute that turns off ipa based duplication
117972 + * on the callee function.
117973 + */
117974 +static bool is_mark_turn_off_attribute(gimple stmt)
117975 +{
117976 + enum mark mark;
117977 + const_tree fndecl = gimple_call_fndecl(stmt);
117978 +
117979 + mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
117980 + if (mark == MARK_TURN_OFF)
117981 + return true;
117982 + return false;
117983 +}
117984 +
117985 +// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
117986 +static void handle_interesting_function(gimple stmt)
117987 +{
117988 + unsigned int argnum;
117989 + tree fndecl;
117990 + bool orig_argnums[MAX_PARAM + 1] = {false};
117991 +
117992 + if (gimple_call_num_args(stmt) == 0)
117993 + return;
117994 + fndecl = gimple_call_fndecl(stmt);
117995 + if (fndecl == NULL_TREE)
117996 + return;
117997 + fndecl = DECL_ORIGIN(fndecl);
117998 +
117999 + if (is_mark_turn_off_attribute(stmt)) {
118000 + create_mark_asm(stmt, MARK_TURN_OFF);
118001 + return;
118002 + }
118003 +
118004 + search_interesting_args(fndecl, orig_argnums);
118005 +
118006 + for (argnum = 1; argnum < MAX_PARAM; argnum++)
118007 + if (orig_argnums[argnum])
118008 + insert_asm_arg(stmt, argnum);
118009 +}
118010 +
118011 +// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
118012 +static void handle_interesting_ret(gimple stmt)
118013 +{
118014 + bool orig_argnums[MAX_PARAM + 1] = {false};
118015 +
118016 + search_interesting_args(current_function_decl, orig_argnums);
118017 +
118018 + if (orig_argnums[0])
118019 + insert_asm_ret(stmt);
118020 +}
118021 +
118022 +// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
118023 +static unsigned int search_interesting_functions(void)
118024 +{
118025 + basic_block bb;
118026 +
118027 + FOR_ALL_BB_FN(bb, cfun) {
118028 + gimple_stmt_iterator gsi;
118029 +
118030 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
118031 + gimple stmt = gsi_stmt(gsi);
118032 +
118033 + if (is_size_overflow_asm(stmt))
118034 + continue;
118035 +
118036 + if (is_gimple_call(stmt))
118037 + handle_interesting_function(stmt);
118038 + else if (gimple_code(stmt) == GIMPLE_RETURN)
118039 + handle_interesting_ret(stmt);
118040 + }
118041 + }
118042 + return 0;
118043 +}
118044 +
118045 +/*
118046 + * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
118047 + * this pass inserts asm stmts to mark the interesting args
118048 + * that the ipa pass will detect and insert the size overflow checks for.
118049 + */
118050 +#if BUILDING_GCC_VERSION >= 4009
118051 +static const struct pass_data insert_size_overflow_asm_pass_data = {
118052 +#else
118053 +static struct gimple_opt_pass insert_size_overflow_asm_pass = {
118054 + .pass = {
118055 +#endif
118056 + .type = GIMPLE_PASS,
118057 + .name = "insert_size_overflow_asm",
118058 +#if BUILDING_GCC_VERSION >= 4008
118059 + .optinfo_flags = OPTGROUP_NONE,
118060 +#endif
118061 +#if BUILDING_GCC_VERSION >= 4009
118062 + .has_gate = false,
118063 + .has_execute = true,
118064 +#else
118065 + .gate = NULL,
118066 + .execute = search_interesting_functions,
118067 + .sub = NULL,
118068 + .next = NULL,
118069 + .static_pass_number = 0,
118070 +#endif
118071 + .tv_id = TV_NONE,
118072 + .properties_required = PROP_cfg,
118073 + .properties_provided = 0,
118074 + .properties_destroyed = 0,
118075 + .todo_flags_start = 0,
118076 + .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
118077 +#if BUILDING_GCC_VERSION < 4009
118078 + }
118079 +#endif
118080 +};
118081 +
118082 +#if BUILDING_GCC_VERSION >= 4009
118083 +namespace {
118084 +class insert_size_overflow_asm_pass : public gimple_opt_pass {
118085 +public:
118086 + insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
118087 + unsigned int execute() { return search_interesting_functions(); }
118088 +};
118089 +}
118090 +#endif
118091 +
118092 +static struct opt_pass *make_insert_size_overflow_asm_pass(void)
118093 +{
118094 +#if BUILDING_GCC_VERSION >= 4009
118095 + return new insert_size_overflow_asm_pass();
118096 +#else
118097 + return &insert_size_overflow_asm_pass.pass;
118098 +#endif
118099 +}
118100 +
118101 +// Create the noreturn report_size_overflow() function decl.
118102 +static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
118103 +{
118104 + tree const_char_ptr_type_node;
118105 + tree fntype;
118106 +
118107 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
118108 +
118109 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
118110 + fntype = build_function_type_list(void_type_node,
118111 + const_char_ptr_type_node,
118112 + unsigned_type_node,
118113 + const_char_ptr_type_node,
118114 + const_char_ptr_type_node,
118115 + NULL_TREE);
118116 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
118117 +
118118 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
118119 + TREE_PUBLIC(report_size_overflow_decl) = 1;
118120 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
118121 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
118122 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
118123 +}
118124 +
118125 +static unsigned int dump_functions(void)
118126 +{
118127 + struct cgraph_node *node;
118128 +
118129 + FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
118130 + basic_block bb;
118131 +
118132 + push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
118133 + current_function_decl = NODE_DECL(node);
118134 +
118135 + fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
118136 +
118137 + FOR_ALL_BB_FN(bb, cfun) {
118138 + gimple_stmt_iterator si;
118139 +
118140 + fprintf(stderr, "<bb %u>:\n", bb->index);
118141 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
118142 + debug_gimple_stmt(gsi_stmt(si));
118143 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
118144 + debug_gimple_stmt(gsi_stmt(si));
118145 + fprintf(stderr, "\n");
118146 + }
118147 +
118148 + fprintf(stderr, "-------------------------------------------------------------------------\n");
118149 +
118150 + pop_cfun();
118151 + current_function_decl = NULL_TREE;
118152 + }
118153 +
118154 + fprintf(stderr, "###############################################################################\n");
118155 +
118156 + return 0;
118157 +}
118158 +
118159 +#if BUILDING_GCC_VERSION >= 4009
118160 +static const struct pass_data dump_pass_data = {
118161 +#else
118162 +static struct ipa_opt_pass_d dump_pass = {
118163 + .pass = {
118164 +#endif
118165 + .type = SIMPLE_IPA_PASS,
118166 + .name = "dump",
118167 +#if BUILDING_GCC_VERSION >= 4008
118168 + .optinfo_flags = OPTGROUP_NONE,
118169 +#endif
118170 +#if BUILDING_GCC_VERSION >= 4009
118171 + .has_gate = false,
118172 + .has_execute = true,
118173 +#else
118174 + .gate = NULL,
118175 + .execute = dump_functions,
118176 + .sub = NULL,
118177 + .next = NULL,
118178 + .static_pass_number = 0,
118179 +#endif
118180 + .tv_id = TV_NONE,
118181 + .properties_required = 0,
118182 + .properties_provided = 0,
118183 + .properties_destroyed = 0,
118184 + .todo_flags_start = 0,
118185 + .todo_flags_finish = 0,
118186 +#if BUILDING_GCC_VERSION < 4009
118187 + },
118188 + .generate_summary = NULL,
118189 + .write_summary = NULL,
118190 + .read_summary = NULL,
118191 +#if BUILDING_GCC_VERSION >= 4006
118192 + .write_optimization_summary = NULL,
118193 + .read_optimization_summary = NULL,
118194 +#endif
118195 + .stmt_fixup = NULL,
118196 + .function_transform_todo_flags_start = 0,
118197 + .function_transform = NULL,
118198 + .variable_transform = NULL,
118199 +#endif
118200 +};
118201 +
118202 +#if BUILDING_GCC_VERSION >= 4009
118203 +namespace {
118204 +class dump_pass : public ipa_opt_pass_d {
118205 +public:
118206 + dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
118207 + unsigned int execute() { return dump_functions(); }
118208 +};
118209 +}
118210 +#endif
118211 +
118212 +static struct opt_pass *make_dump_pass(void)
118213 +{
118214 +#if BUILDING_GCC_VERSION >= 4009
118215 + return new dump_pass();
118216 +#else
118217 + return &dump_pass.pass;
118218 +#endif
118219 +}
118220 +
118221 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
118222 +{
118223 + int i;
118224 + const char * const plugin_name = plugin_info->base_name;
118225 + const int argc = plugin_info->argc;
118226 + const struct plugin_argument * const argv = plugin_info->argv;
118227 + bool enable = true;
118228 + struct register_pass_info insert_size_overflow_asm_pass_info;
118229 + struct register_pass_info __unused dump_before_pass_info;
118230 + struct register_pass_info __unused dump_after_pass_info;
118231 + struct register_pass_info ipa_pass_info;
118232 + static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
118233 + {
118234 + .base = &report_size_overflow_decl,
118235 + .nelt = 1,
118236 + .stride = sizeof(report_size_overflow_decl),
118237 + .cb = &gt_ggc_mx_tree_node,
118238 + .pchw = &gt_pch_nx_tree_node
118239 + },
118240 + LAST_GGC_ROOT_TAB
118241 + };
118242 +
118243 + insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
118244 + insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
118245 + insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
118246 + insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
118247 +
118248 + dump_before_pass_info.pass = make_dump_pass();
118249 + dump_before_pass_info.reference_pass_name = "increase_alignment";
118250 + dump_before_pass_info.ref_pass_instance_number = 1;
118251 + dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
118252 +
118253 + ipa_pass_info.pass = make_ipa_pass();
118254 + ipa_pass_info.reference_pass_name = "increase_alignment";
118255 + ipa_pass_info.ref_pass_instance_number = 1;
118256 + ipa_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
118257 +
118258 + dump_after_pass_info.pass = make_dump_pass();
118259 + dump_after_pass_info.reference_pass_name = "increase_alignment";
118260 + dump_after_pass_info.ref_pass_instance_number = 1;
118261 + dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
118262 +
118263 + if (!plugin_default_version_check(version, &gcc_version)) {
118264 + error(G_("incompatible gcc/plugin versions"));
118265 + return 1;
118266 + }
118267 +
118268 + for (i = 0; i < argc; ++i) {
118269 + if (!strcmp(argv[i].key, "no-size-overflow")) {
118270 + enable = false;
118271 + continue;
118272 + }
118273 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
118274 + }
118275 +
118276 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
118277 + if (enable) {
118278 + register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
118279 + register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
118280 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
118281 +// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
118282 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
118283 +// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
118284 + }
118285 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
118286 +
118287 + return 0;
118288 +}
118289 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
118290 new file mode 100644
118291 index 0000000..5c0b937
118292 --- /dev/null
118293 +++ b/tools/gcc/stackleak_plugin.c
118294 @@ -0,0 +1,374 @@
118295 +/*
118296 + * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
118297 + * Licensed under the GPL v2
118298 + *
118299 + * Note: the choice of the license means that the compilation process is
118300 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
118301 + * but for the kernel it doesn't matter since it doesn't link against
118302 + * any of the gcc libraries
118303 + *
118304 + * gcc plugin to help implement various PaX features
118305 + *
118306 + * - track lowest stack pointer
118307 + *
118308 + * TODO:
118309 + * - initialize all local variables
118310 + *
118311 + * BUGS:
118312 + * - none known
118313 + */
118314 +
118315 +#include "gcc-common.h"
118316 +
118317 +int plugin_is_GPL_compatible;
118318 +
118319 +static int track_frame_size = -1;
118320 +static const char track_function[] = "pax_track_stack";
118321 +static const char check_function[] = "pax_check_alloca";
118322 +static tree track_function_decl, check_function_decl;
118323 +static bool init_locals;
118324 +
118325 +static struct plugin_info stackleak_plugin_info = {
118326 + .version = "201402131920",
118327 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
118328 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
118329 +};
118330 +
118331 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
118332 +{
118333 + gimple check_alloca;
118334 + tree alloca_size;
118335 +
118336 + // insert call to void pax_check_alloca(unsigned long size)
118337 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
118338 + check_alloca = gimple_build_call(check_function_decl, 1, alloca_size);
118339 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
118340 +}
118341 +
118342 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
118343 +{
118344 + gimple track_stack;
118345 +
118346 + // insert call to void pax_track_stack(void)
118347 + track_stack = gimple_build_call(track_function_decl, 0);
118348 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
118349 +}
118350 +
118351 +static bool is_alloca(gimple stmt)
118352 +{
118353 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
118354 + return true;
118355 +
118356 +#if BUILDING_GCC_VERSION >= 4007
118357 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
118358 + return true;
118359 +#endif
118360 +
118361 + return false;
118362 +}
118363 +
118364 +static unsigned int execute_stackleak_tree_instrument(void)
118365 +{
118366 + basic_block bb, entry_bb;
118367 + bool prologue_instrumented = false, is_leaf = true;
118368 +
118369 + entry_bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb;
118370 +
118371 + // 1. loop through BBs and GIMPLE statements
118372 + FOR_EACH_BB_FN(bb, cfun) {
118373 + gimple_stmt_iterator gsi;
118374 +
118375 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
118376 + gimple stmt;
118377 +
118378 + stmt = gsi_stmt(gsi);
118379 +
118380 + if (is_gimple_call(stmt))
118381 + is_leaf = false;
118382 +
118383 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
118384 + if (!is_alloca(stmt))
118385 + continue;
118386 +
118387 + // 2. insert stack overflow check before each __builtin_alloca call
118388 + stackleak_check_alloca(&gsi);
118389 +
118390 + // 3. insert track call after each __builtin_alloca call
118391 + stackleak_add_instrumentation(&gsi);
118392 + if (bb == entry_bb)
118393 + prologue_instrumented = true;
118394 + }
118395 + }
118396 +
118397 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
118398 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
118399 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
118400 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
118401 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
118402 + return 0;
118403 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
118404 + return 0;
118405 +
118406 + // 4. insert track call at the beginning
118407 + if (!prologue_instrumented) {
118408 + gimple_stmt_iterator gsi;
118409 +
118410 + bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
118411 + if (dom_info_available_p(CDI_DOMINATORS))
118412 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
118413 + gsi = gsi_start_bb(bb);
118414 + stackleak_add_instrumentation(&gsi);
118415 + }
118416 +
118417 + return 0;
118418 +}
118419 +
118420 +static unsigned int execute_stackleak_final(void)
118421 +{
118422 + rtx insn, next;
118423 +
118424 + if (cfun->calls_alloca)
118425 + return 0;
118426 +
118427 + // keep calls only if function frame is big enough
118428 + if (get_frame_size() >= track_frame_size)
118429 + return 0;
118430 +
118431 + // 1. find pax_track_stack calls
118432 + for (insn = get_insns(); insn; insn = next) {
118433 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
118434 + rtx body;
118435 +
118436 + next = NEXT_INSN(insn);
118437 + if (!CALL_P(insn))
118438 + continue;
118439 + body = PATTERN(insn);
118440 + if (GET_CODE(body) != CALL)
118441 + continue;
118442 + body = XEXP(body, 0);
118443 + if (GET_CODE(body) != MEM)
118444 + continue;
118445 + body = XEXP(body, 0);
118446 + if (GET_CODE(body) != SYMBOL_REF)
118447 + continue;
118448 +// if (strcmp(XSTR(body, 0), track_function))
118449 + if (SYMBOL_REF_DECL(body) != track_function_decl)
118450 + continue;
118451 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
118452 + // 2. delete call
118453 + delete_insn_and_edges(insn);
118454 +#if BUILDING_GCC_VERSION >= 4007
118455 + if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
118456 + insn = next;
118457 + next = NEXT_INSN(insn);
118458 + delete_insn_and_edges(insn);
118459 + }
118460 +#endif
118461 + }
118462 +
118463 +// print_simple_rtl(stderr, get_insns());
118464 +// print_rtl(stderr, get_insns());
118465 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
118466 +
118467 + return 0;
118468 +}
118469 +
118470 +static bool gate_stackleak_track_stack(void)
118471 +{
118472 + return track_frame_size >= 0;
118473 +}
118474 +
118475 +static void stackleak_start_unit(void *gcc_data, void *user_data)
118476 +{
118477 + tree fntype;
118478 +
118479 + // void pax_track_stack(void)
118480 + fntype = build_function_type_list(void_type_node, NULL_TREE);
118481 + track_function_decl = build_fn_decl(track_function, fntype);
118482 + DECL_ASSEMBLER_NAME(track_function_decl); // for LTO
118483 + TREE_PUBLIC(track_function_decl) = 1;
118484 + DECL_EXTERNAL(track_function_decl) = 1;
118485 + DECL_ARTIFICIAL(track_function_decl) = 1;
118486 +
118487 + // void pax_check_alloca(unsigned long)
118488 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
118489 + check_function_decl = build_fn_decl(check_function, fntype);
118490 + DECL_ASSEMBLER_NAME(check_function_decl); // for LTO
118491 + TREE_PUBLIC(check_function_decl) = 1;
118492 + DECL_EXTERNAL(check_function_decl) = 1;
118493 + DECL_ARTIFICIAL(check_function_decl) = 1;
118494 +}
118495 +
118496 +#if BUILDING_GCC_VERSION >= 4009
118497 +static const struct pass_data stackleak_tree_instrument_pass_data = {
118498 +#else
118499 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
118500 + .pass = {
118501 +#endif
118502 + .type = GIMPLE_PASS,
118503 + .name = "stackleak_tree_instrument",
118504 +#if BUILDING_GCC_VERSION >= 4008
118505 + .optinfo_flags = OPTGROUP_NONE,
118506 +#endif
118507 +#if BUILDING_GCC_VERSION >= 4009
118508 + .has_gate = true,
118509 + .has_execute = true,
118510 +#else
118511 + .gate = gate_stackleak_track_stack,
118512 + .execute = execute_stackleak_tree_instrument,
118513 + .sub = NULL,
118514 + .next = NULL,
118515 + .static_pass_number = 0,
118516 +#endif
118517 + .tv_id = TV_NONE,
118518 + .properties_required = PROP_gimple_leh | PROP_cfg,
118519 + .properties_provided = 0,
118520 + .properties_destroyed = 0,
118521 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
118522 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa | TODO_rebuild_cgraph_edges
118523 +#if BUILDING_GCC_VERSION < 4009
118524 + }
118525 +#endif
118526 +};
118527 +
118528 +#if BUILDING_GCC_VERSION >= 4009
118529 +static const struct pass_data stackleak_final_rtl_opt_pass_data = {
118530 +#else
118531 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
118532 + .pass = {
118533 +#endif
118534 + .type = RTL_PASS,
118535 + .name = "stackleak_final",
118536 +#if BUILDING_GCC_VERSION >= 4008
118537 + .optinfo_flags = OPTGROUP_NONE,
118538 +#endif
118539 +#if BUILDING_GCC_VERSION >= 4009
118540 + .has_gate = true,
118541 + .has_execute = true,
118542 +#else
118543 + .gate = gate_stackleak_track_stack,
118544 + .execute = execute_stackleak_final,
118545 + .sub = NULL,
118546 + .next = NULL,
118547 + .static_pass_number = 0,
118548 +#endif
118549 + .tv_id = TV_NONE,
118550 + .properties_required = 0,
118551 + .properties_provided = 0,
118552 + .properties_destroyed = 0,
118553 + .todo_flags_start = 0,
118554 + .todo_flags_finish = TODO_dump_func
118555 +#if BUILDING_GCC_VERSION < 4009
118556 + }
118557 +#endif
118558 +};
118559 +
118560 +#if BUILDING_GCC_VERSION >= 4009
118561 +namespace {
118562 +class stackleak_tree_instrument_pass : public gimple_opt_pass {
118563 +public:
118564 + stackleak_tree_instrument_pass() : gimple_opt_pass(stackleak_tree_instrument_pass_data, g) {}
118565 + bool gate() { return gate_stackleak_track_stack(); }
118566 + unsigned int execute() { return execute_stackleak_tree_instrument(); }
118567 +};
118568 +
118569 +class stackleak_final_rtl_opt_pass : public rtl_opt_pass {
118570 +public:
118571 + stackleak_final_rtl_opt_pass() : rtl_opt_pass(stackleak_final_rtl_opt_pass_data, g) {}
118572 + bool gate() { return gate_stackleak_track_stack(); }
118573 + unsigned int execute() { return execute_stackleak_final(); }
118574 +};
118575 +}
118576 +#endif
118577 +
118578 +static struct opt_pass *make_stackleak_tree_instrument_pass(void)
118579 +{
118580 +#if BUILDING_GCC_VERSION >= 4009
118581 + return new stackleak_tree_instrument_pass();
118582 +#else
118583 + return &stackleak_tree_instrument_pass.pass;
118584 +#endif
118585 +}
118586 +
118587 +static struct opt_pass *make_stackleak_final_rtl_opt_pass(void)
118588 +{
118589 +#if BUILDING_GCC_VERSION >= 4009
118590 + return new stackleak_final_rtl_opt_pass();
118591 +#else
118592 + return &stackleak_final_rtl_opt_pass.pass;
118593 +#endif
118594 +}
118595 +
118596 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
118597 +{
118598 + const char * const plugin_name = plugin_info->base_name;
118599 + const int argc = plugin_info->argc;
118600 + const struct plugin_argument * const argv = plugin_info->argv;
118601 + int i;
118602 + struct register_pass_info stackleak_tree_instrument_pass_info;
118603 + struct register_pass_info stackleak_final_pass_info;
118604 + static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
118605 + {
118606 + .base = &track_function_decl,
118607 + .nelt = 1,
118608 + .stride = sizeof(track_function_decl),
118609 + .cb = &gt_ggc_mx_tree_node,
118610 + .pchw = &gt_pch_nx_tree_node
118611 + },
118612 + {
118613 + .base = &check_function_decl,
118614 + .nelt = 1,
118615 + .stride = sizeof(check_function_decl),
118616 + .cb = &gt_ggc_mx_tree_node,
118617 + .pchw = &gt_pch_nx_tree_node
118618 + },
118619 + LAST_GGC_ROOT_TAB
118620 + };
118621 +
118622 + stackleak_tree_instrument_pass_info.pass = make_stackleak_tree_instrument_pass();
118623 +// stackleak_tree_instrument_pass_info.reference_pass_name = "tree_profile";
118624 + stackleak_tree_instrument_pass_info.reference_pass_name = "optimized";
118625 + stackleak_tree_instrument_pass_info.ref_pass_instance_number = 1;
118626 + stackleak_tree_instrument_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
118627 +
118628 + stackleak_final_pass_info.pass = make_stackleak_final_rtl_opt_pass();
118629 + stackleak_final_pass_info.reference_pass_name = "final";
118630 + stackleak_final_pass_info.ref_pass_instance_number = 1;
118631 + stackleak_final_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
118632 +
118633 + if (!plugin_default_version_check(version, &gcc_version)) {
118634 + error(G_("incompatible gcc/plugin versions"));
118635 + return 1;
118636 + }
118637 +
118638 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
118639 +
118640 + for (i = 0; i < argc; ++i) {
118641 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
118642 + if (!argv[i].value) {
118643 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
118644 + continue;
118645 + }
118646 + track_frame_size = atoi(argv[i].value);
118647 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
118648 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
118649 + continue;
118650 + }
118651 + if (!strcmp(argv[i].key, "initialize-locals")) {
118652 + if (argv[i].value) {
118653 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
118654 + continue;
118655 + }
118656 + init_locals = true;
118657 + continue;
118658 + }
118659 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
118660 + }
118661 +
118662 + register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
118663 + register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_stackleak);
118664 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
118665 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
118666 +
118667 + return 0;
118668 +}
118669 diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c
118670 new file mode 100644
118671 index 0000000..796569a
118672 --- /dev/null
118673 +++ b/tools/gcc/structleak_plugin.c
118674 @@ -0,0 +1,273 @@
118675 +/*
118676 + * Copyright 2013-2014 by PaX Team <pageexec@freemail.hu>
118677 + * Licensed under the GPL v2
118678 + *
118679 + * Note: the choice of the license means that the compilation process is
118680 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
118681 + * but for the kernel it doesn't matter since it doesn't link against
118682 + * any of the gcc libraries
118683 + *
118684 + * gcc plugin to forcibly initialize certain local variables that could
118685 + * otherwise leak kernel stack to userland if they aren't properly initialized
118686 + * by later code
118687 + *
118688 + * Homepage: http://pax.grsecurity.net/
118689 + *
118690 + * Usage:
118691 + * $ # for 4.5/4.6/C based 4.7
118692 + * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
118693 + * $ # for C++ based 4.7/4.8+
118694 + * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
118695 + * $ gcc -fplugin=./structleak_plugin.so test.c -O2
118696 + *
118697 + * TODO: eliminate redundant initializers
118698 + * increase type coverage
118699 + */
118700 +
118701 +#include "gcc-common.h"
118702 +
118703 +// unused C type flag in all versions 4.5-4.9
118704 +#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE)
118705 +
118706 +int plugin_is_GPL_compatible;
118707 +
118708 +static struct plugin_info structleak_plugin_info = {
118709 + .version = "201401260140",
118710 + .help = "disable\tdo not activate plugin\n",
118711 +};
118712 +
118713 +static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
118714 +{
118715 + *no_add_attrs = true;
118716 +
118717 + // check for types? for now accept everything linux has to offer
118718 + if (TREE_CODE(*node) != FIELD_DECL)
118719 + return NULL_TREE;
118720 +
118721 + *no_add_attrs = false;
118722 + return NULL_TREE;
118723 +}
118724 +
118725 +static struct attribute_spec user_attr = {
118726 + .name = "user",
118727 + .min_length = 0,
118728 + .max_length = 0,
118729 + .decl_required = false,
118730 + .type_required = false,
118731 + .function_type_required = false,
118732 + .handler = handle_user_attribute,
118733 +#if BUILDING_GCC_VERSION >= 4007
118734 + .affects_type_identity = true
118735 +#endif
118736 +};
118737 +
118738 +static void register_attributes(void *event_data, void *data)
118739 +{
118740 + register_attribute(&user_attr);
118741 +// register_attribute(&force_attr);
118742 +}
118743 +
118744 +static tree get_field_type(tree field)
118745 +{
118746 + return strip_array_types(TREE_TYPE(field));
118747 +}
118748 +
118749 +static bool is_userspace_type(tree type)
118750 +{
118751 + tree field;
118752 +
118753 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
118754 + tree fieldtype = get_field_type(field);
118755 + enum tree_code code = TREE_CODE(fieldtype);
118756 +
118757 + if (code == RECORD_TYPE || code == UNION_TYPE)
118758 + if (is_userspace_type(fieldtype))
118759 + return true;
118760 +
118761 + if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
118762 + return true;
118763 + }
118764 + return false;
118765 +}
118766 +
118767 +static void finish_type(void *event_data, void *data)
118768 +{
118769 + tree type = (tree)event_data;
118770 +
118771 + if (TYPE_USERSPACE(type))
118772 + return;
118773 +
118774 + if (is_userspace_type(type))
118775 + TYPE_USERSPACE(type) = 1;
118776 +}
118777 +
118778 +static void initialize(tree var)
118779 +{
118780 + basic_block bb;
118781 + gimple_stmt_iterator gsi;
118782 + tree initializer;
118783 + gimple init_stmt;
118784 +
118785 + // this is the original entry bb before the forced split
118786 + // TODO: check further BBs in case more splits occurred before us
118787 + bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb->next_bb;
118788 +
118789 + // first check if the variable is already initialized, warn otherwise
118790 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
118791 + gimple stmt = gsi_stmt(gsi);
118792 + tree rhs1;
118793 +
118794 + // we're looking for an assignment of a single rhs...
118795 + if (!gimple_assign_single_p(stmt))
118796 + continue;
118797 + rhs1 = gimple_assign_rhs1(stmt);
118798 +#if BUILDING_GCC_VERSION >= 4007
118799 + // ... of a non-clobbering expression...
118800 + if (TREE_CLOBBER_P(rhs1))
118801 + continue;
118802 +#endif
118803 + // ... to our variable...
118804 + if (gimple_get_lhs(stmt) != var)
118805 + continue;
118806 + // if it's an initializer then we're good
118807 + if (TREE_CODE(rhs1) == CONSTRUCTOR)
118808 + return;
118809 + }
118810 +
118811 + // these aren't the 0days you're looking for
118812 +// inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
118813 +
118814 + // build the initializer expression
118815 + initializer = build_constructor(TREE_TYPE(var), NULL);
118816 +
118817 + // build the initializer stmt
118818 + init_stmt = gimple_build_assign(var, initializer);
118819 + gsi = gsi_start_bb(ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb);
118820 + gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
118821 + update_stmt(init_stmt);
118822 +}
118823 +
118824 +static unsigned int handle_function(void)
118825 +{
118826 + basic_block bb;
118827 + unsigned int ret = 0;
118828 + tree var;
118829 + unsigned int i;
118830 +
118831 + // split the first bb where we can put the forced initializers
118832 + bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
118833 + if (dom_info_available_p(CDI_DOMINATORS))
118834 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
118835 +
118836 + // enumerate all local variables and forcibly initialize our targets
118837 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
118838 + tree type = TREE_TYPE(var);
118839 +
118840 + gcc_assert(DECL_P(var));
118841 + if (!auto_var_in_fn_p(var, current_function_decl))
118842 + continue;
118843 +
118844 + // only care about structure types
118845 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
118846 + continue;
118847 +
118848 + // if the type is of interest, examine the variable
118849 + if (TYPE_USERSPACE(type))
118850 + initialize(var);
118851 + }
118852 +
118853 + return ret;
118854 +}
118855 +
118856 +#if BUILDING_GCC_VERSION >= 4009
118857 +static const struct pass_data structleak_pass_data = {
118858 +#else
118859 +static struct gimple_opt_pass structleak_pass = {
118860 + .pass = {
118861 +#endif
118862 + .type = GIMPLE_PASS,
118863 + .name = "structleak",
118864 +#if BUILDING_GCC_VERSION >= 4008
118865 + .optinfo_flags = OPTGROUP_NONE,
118866 +#endif
118867 +#if BUILDING_GCC_VERSION >= 4009
118868 + .has_gate = false,
118869 + .has_execute = true,
118870 +#else
118871 + .gate = NULL,
118872 + .execute = handle_function,
118873 + .sub = NULL,
118874 + .next = NULL,
118875 + .static_pass_number = 0,
118876 +#endif
118877 + .tv_id = TV_NONE,
118878 + .properties_required = PROP_cfg,
118879 + .properties_provided = 0,
118880 + .properties_destroyed = 0,
118881 + .todo_flags_start = 0,
118882 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
118883 +#if BUILDING_GCC_VERSION < 4009
118884 + }
118885 +#endif
118886 +};
118887 +
118888 +#if BUILDING_GCC_VERSION >= 4009
118889 +namespace {
118890 +class structleak_pass : public gimple_opt_pass {
118891 +public:
118892 + structleak_pass() : gimple_opt_pass(structleak_pass_data, g) {}
118893 + unsigned int execute() { return handle_function(); }
118894 +};
118895 +}
118896 +#endif
118897 +
118898 +static struct opt_pass *make_structleak_pass(void)
118899 +{
118900 +#if BUILDING_GCC_VERSION >= 4009
118901 + return new structleak_pass();
118902 +#else
118903 + return &structleak_pass.pass;
118904 +#endif
118905 +}
118906 +
118907 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
118908 +{
118909 + int i;
118910 + const char * const plugin_name = plugin_info->base_name;
118911 + const int argc = plugin_info->argc;
118912 + const struct plugin_argument * const argv = plugin_info->argv;
118913 + bool enable = true;
118914 + struct register_pass_info structleak_pass_info;
118915 +
118916 + structleak_pass_info.pass = make_structleak_pass();
118917 + structleak_pass_info.reference_pass_name = "ssa";
118918 + structleak_pass_info.ref_pass_instance_number = 1;
118919 + structleak_pass_info.pos_op = PASS_POS_INSERT_AFTER;
118920 +
118921 + if (!plugin_default_version_check(version, &gcc_version)) {
118922 + error(G_("incompatible gcc/plugin versions"));
118923 + return 1;
118924 + }
118925 +
118926 + if (strcmp(lang_hooks.name, "GNU C")) {
118927 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
118928 + enable = false;
118929 + }
118930 +
118931 + for (i = 0; i < argc; ++i) {
118932 + if (!strcmp(argv[i].key, "disable")) {
118933 + enable = false;
118934 + continue;
118935 + }
118936 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
118937 + }
118938 +
118939 + register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
118940 + if (enable) {
118941 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
118942 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
118943 + }
118944 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
118945 +
118946 + return 0;
118947 +}
118948 diff --git a/tools/lib/lk/Makefile b/tools/lib/lk/Makefile
118949 index 3dba0a4..97175dc 100644
118950 --- a/tools/lib/lk/Makefile
118951 +++ b/tools/lib/lk/Makefile
118952 @@ -13,7 +13,7 @@ LIB_OBJS += $(OUTPUT)debugfs.o
118953
118954 LIBFILE = liblk.a
118955
118956 -CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
118957 +CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
118958 EXTLIBS = -lelf -lpthread -lrt -lm
118959 ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
118960 ALL_LDFLAGS = $(LDFLAGS)
118961 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
118962 index 6789d78..4afd019e 100644
118963 --- a/tools/perf/util/include/asm/alternative-asm.h
118964 +++ b/tools/perf/util/include/asm/alternative-asm.h
118965 @@ -5,4 +5,7 @@
118966
118967 #define altinstruction_entry #
118968
118969 + .macro pax_force_retaddr rip=0, reload=0
118970 + .endm
118971 +
118972 #endif
118973 diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
118974 index b003ad7..c0a02f8 100644
118975 --- a/tools/perf/util/include/linux/compiler.h
118976 +++ b/tools/perf/util/include/linux/compiler.h
118977 @@ -27,4 +27,12 @@
118978 # define __weak __attribute__((weak))
118979 #endif
118980
118981 +#ifndef __size_overflow
118982 +# define __size_overflow(...)
118983 +#endif
118984 +
118985 +#ifndef __intentional_overflow
118986 +# define __intentional_overflow(...)
118987 +#endif
118988 +
118989 #endif
118990 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
118991 index 4f588bc..a543c97 100644
118992 --- a/virt/kvm/kvm_main.c
118993 +++ b/virt/kvm/kvm_main.c
118994 @@ -76,12 +76,17 @@ LIST_HEAD(vm_list);
118995
118996 static cpumask_var_t cpus_hardware_enabled;
118997 static int kvm_usage_count = 0;
118998 -static atomic_t hardware_enable_failed;
118999 +static atomic_unchecked_t hardware_enable_failed;
119000
119001 struct kmem_cache *kvm_vcpu_cache;
119002 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
119003
119004 -static __read_mostly struct preempt_ops kvm_preempt_ops;
119005 +static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
119006 +static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
119007 +static struct preempt_ops kvm_preempt_ops = {
119008 + .sched_in = kvm_sched_in,
119009 + .sched_out = kvm_sched_out,
119010 +};
119011
119012 struct dentry *kvm_debugfs_dir;
119013
119014 @@ -751,7 +756,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
119015 /* We can read the guest memory with __xxx_user() later on. */
119016 if ((mem->slot < KVM_USER_MEM_SLOTS) &&
119017 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
119018 - !access_ok(VERIFY_WRITE,
119019 + !access_ok_noprefault(VERIFY_WRITE,
119020 (void __user *)(unsigned long)mem->userspace_addr,
119021 mem->memory_size)))
119022 goto out;
119023 @@ -1615,9 +1620,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
119024
119025 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
119026 {
119027 - const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
119028 + int r;
119029 + unsigned long addr;
119030
119031 - return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
119032 + addr = gfn_to_hva(kvm, gfn);
119033 + if (kvm_is_error_hva(addr))
119034 + return -EFAULT;
119035 + r = __clear_user((void __user *)addr + offset, len);
119036 + if (r)
119037 + return -EFAULT;
119038 + mark_page_dirty(kvm, gfn);
119039 + return 0;
119040 }
119041 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
119042
119043 @@ -1872,7 +1885,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
119044 return 0;
119045 }
119046
119047 -static struct file_operations kvm_vcpu_fops = {
119048 +static file_operations_no_const kvm_vcpu_fops __read_only = {
119049 .release = kvm_vcpu_release,
119050 .unlocked_ioctl = kvm_vcpu_ioctl,
119051 #ifdef CONFIG_COMPAT
119052 @@ -2532,7 +2545,7 @@ out:
119053 }
119054 #endif
119055
119056 -static struct file_operations kvm_vm_fops = {
119057 +static file_operations_no_const kvm_vm_fops __read_only = {
119058 .release = kvm_vm_release,
119059 .unlocked_ioctl = kvm_vm_ioctl,
119060 #ifdef CONFIG_COMPAT
119061 @@ -2632,7 +2645,7 @@ out:
119062 return r;
119063 }
119064
119065 -static struct file_operations kvm_chardev_ops = {
119066 +static file_operations_no_const kvm_chardev_ops __read_only = {
119067 .unlocked_ioctl = kvm_dev_ioctl,
119068 .compat_ioctl = kvm_dev_ioctl,
119069 .llseek = noop_llseek,
119070 @@ -2658,7 +2671,7 @@ static void hardware_enable_nolock(void *junk)
119071
119072 if (r) {
119073 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
119074 - atomic_inc(&hardware_enable_failed);
119075 + atomic_inc_unchecked(&hardware_enable_failed);
119076 printk(KERN_INFO "kvm: enabling virtualization on "
119077 "CPU%d failed\n", cpu);
119078 }
119079 @@ -2714,10 +2727,10 @@ static int hardware_enable_all(void)
119080
119081 kvm_usage_count++;
119082 if (kvm_usage_count == 1) {
119083 - atomic_set(&hardware_enable_failed, 0);
119084 + atomic_set_unchecked(&hardware_enable_failed, 0);
119085 on_each_cpu(hardware_enable_nolock, NULL, 1);
119086
119087 - if (atomic_read(&hardware_enable_failed)) {
119088 + if (atomic_read_unchecked(&hardware_enable_failed)) {
119089 hardware_disable_all_nolock();
119090 r = -EBUSY;
119091 }
119092 @@ -3148,7 +3161,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
119093 kvm_arch_vcpu_put(vcpu);
119094 }
119095
119096 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
119097 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
119098 struct module *module)
119099 {
119100 int r;
119101 @@ -3195,7 +3208,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
119102 if (!vcpu_align)
119103 vcpu_align = __alignof__(struct kvm_vcpu);
119104 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
119105 - 0, NULL);
119106 + SLAB_USERCOPY, NULL);
119107 if (!kvm_vcpu_cache) {
119108 r = -ENOMEM;
119109 goto out_free_3;
119110 @@ -3205,9 +3218,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
119111 if (r)
119112 goto out_free;
119113
119114 + pax_open_kernel();
119115 kvm_chardev_ops.owner = module;
119116 kvm_vm_fops.owner = module;
119117 kvm_vcpu_fops.owner = module;
119118 + pax_close_kernel();
119119
119120 r = misc_register(&kvm_dev);
119121 if (r) {
119122 @@ -3217,9 +3232,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
119123
119124 register_syscore_ops(&kvm_syscore_ops);
119125
119126 - kvm_preempt_ops.sched_in = kvm_sched_in;
119127 - kvm_preempt_ops.sched_out = kvm_sched_out;
119128 -
119129 r = kvm_init_debug();
119130 if (r) {
119131 printk(KERN_ERR "kvm: create debugfs files failed\n");