]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.9.2-201305172333.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.9.2-201305172333.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b89a739..b47493f 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125 -linux
126 +lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130 @@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134 -media
135 mconf
136 +mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143 +mkpiggy
144 mkprep
145 mkregtable
146 mktables
147 @@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151 +parse-events*
152 +pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156 @@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160 +pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164 @@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168 +realmode.lds
169 +realmode.relocs
170 recordmcount
171 +regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175 @@ -213,8 +238,12 @@ series
176 setup
177 setup.bin
178 setup.elf
179 +signing_key*
180 +size_overflow_hash.h
181 sImage
182 +slabinfo
183 sm_tbl*
184 +sortextable
185 split-include
186 syscalltab.h
187 tables.c
188 @@ -224,6 +253,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192 +user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196 @@ -235,13 +265,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200 +vdsox32.lds
201 +vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208 +vmlinux.bin.bz2
209 vmlinux.lds
210 +vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214 @@ -249,9 +283,12 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218 +utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222 +x509*
223 zImage*
224 zconf.hash.c
225 +zconf.lex.c
226 zoffset.h
227 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
228 index 8ccbf27..afffeb4 100644
229 --- a/Documentation/kernel-parameters.txt
230 +++ b/Documentation/kernel-parameters.txt
231 @@ -948,6 +948,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
232 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
233 Default: 1024
234
235 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
236 + ignore grsecurity's /proc restrictions
237 +
238 +
239 hashdist= [KNL,NUMA] Large hashes allocated during boot
240 are distributed across NUMA nodes. Defaults on
241 for 64-bit NUMA, off otherwise.
242 @@ -2147,6 +2151,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
243 the specified number of seconds. This is to be used if
244 your oopses keep scrolling off the screen.
245
246 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
247 + virtualization environments that don't cope well with the
248 + expand down segment used by UDEREF on X86-32 or the frequent
249 + page table updates on X86-64.
250 +
251 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
252 +
253 + pax_extra_latent_entropy
254 + Enable a very simple form of latent entropy extraction
255 + from the first 4GB of memory as the bootmem allocator
256 + passes the memory pages to the buddy allocator.
257 +
258 pcbit= [HW,ISDN]
259
260 pcd. [PARIDE]
261 diff --git a/Makefile b/Makefile
262 index 3e71511..8ff502e 100644
263 --- a/Makefile
264 +++ b/Makefile
265 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
266
267 HOSTCC = gcc
268 HOSTCXX = g++
269 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
270 -HOSTCXXFLAGS = -O2
271 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
272 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
273 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
274
275 # Decide whether to build built-in, modular, or both.
276 # Normally, just do built-in.
277 @@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
278 # Rules shared between *config targets and build targets
279
280 # Basic helpers built in scripts/
281 -PHONY += scripts_basic
282 -scripts_basic:
283 +PHONY += scripts_basic gcc-plugins
284 +scripts_basic: gcc-plugins
285 $(Q)$(MAKE) $(build)=scripts/basic
286 $(Q)rm -f .tmp_quiet_recordmcount
287
288 @@ -576,6 +577,65 @@ else
289 KBUILD_CFLAGS += -O2
290 endif
291
292 +ifndef DISABLE_PAX_PLUGINS
293 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
294 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
295 +else
296 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
297 +endif
298 +ifneq ($(PLUGINCC),)
299 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
300 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
301 +endif
302 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
303 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
304 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
305 +endif
306 +ifdef CONFIG_KALLOCSTAT_PLUGIN
307 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
308 +endif
309 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
310 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
311 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
312 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
313 +endif
314 +ifdef CONFIG_CHECKER_PLUGIN
315 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
316 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
317 +endif
318 +endif
319 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
320 +ifdef CONFIG_PAX_SIZE_OVERFLOW
321 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
322 +endif
323 +ifdef CONFIG_PAX_LATENT_ENTROPY
324 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
325 +endif
326 +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
327 +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
328 +endif
329 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
330 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
331 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
332 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
333 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
334 +ifeq ($(KBUILD_EXTMOD),)
335 +gcc-plugins:
336 + $(Q)$(MAKE) $(build)=tools/gcc
337 +else
338 +gcc-plugins: ;
339 +endif
340 +else
341 +gcc-plugins:
342 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
343 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
344 +else
345 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
346 +endif
347 + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
348 +endif
349 +endif
350 +
351 include $(srctree)/arch/$(SRCARCH)/Makefile
352
353 ifdef CONFIG_READABLE_ASM
354 @@ -733,7 +793,7 @@ export mod_sign_cmd
355
356
357 ifeq ($(KBUILD_EXTMOD),)
358 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
359 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
360
361 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
362 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
363 @@ -780,6 +840,8 @@ endif
364
365 # The actual objects are generated when descending,
366 # make sure no implicit rule kicks in
367 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
368 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
369 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
370
371 # Handle descending into subdirectories listed in $(vmlinux-dirs)
372 @@ -789,7 +851,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
373 # Error messages still appears in the original language
374
375 PHONY += $(vmlinux-dirs)
376 -$(vmlinux-dirs): prepare scripts
377 +$(vmlinux-dirs): gcc-plugins prepare scripts
378 $(Q)$(MAKE) $(build)=$@
379
380 # Store (new) KERNELRELASE string in include/config/kernel.release
381 @@ -833,6 +895,7 @@ prepare0: archprepare FORCE
382 $(Q)$(MAKE) $(build)=.
383
384 # All the preparing..
385 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
386 prepare: prepare0
387
388 # Generate some files
389 @@ -940,6 +1003,8 @@ all: modules
390 # using awk while concatenating to the final file.
391
392 PHONY += modules
393 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
394 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
395 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
396 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
397 @$(kecho) ' Building modules, stage 2.';
398 @@ -955,7 +1020,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
399
400 # Target to prepare building external modules
401 PHONY += modules_prepare
402 -modules_prepare: prepare scripts
403 +modules_prepare: gcc-plugins prepare scripts
404
405 # Target to install modules
406 PHONY += modules_install
407 @@ -1021,7 +1086,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
408 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
409 signing_key.priv signing_key.x509 x509.genkey \
410 extra_certificates signing_key.x509.keyid \
411 - signing_key.x509.signer
412 + signing_key.x509.signer tools/gcc/size_overflow_hash.h
413
414 # clean - Delete most, but leave enough to build external modules
415 #
416 @@ -1061,6 +1126,7 @@ distclean: mrproper
417 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
418 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
419 -o -name '.*.rej' \
420 + -o -name '.*.rej' -o -name '*.so' \
421 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
422 -type f -print | xargs rm -f
423
424 @@ -1221,6 +1287,8 @@ PHONY += $(module-dirs) modules
425 $(module-dirs): crmodverdir $(objtree)/Module.symvers
426 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
427
428 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
429 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
430 modules: $(module-dirs)
431 @$(kecho) ' Building modules, stage 2.';
432 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
433 @@ -1357,17 +1425,21 @@ else
434 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
435 endif
436
437 -%.s: %.c prepare scripts FORCE
438 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
439 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
440 +%.s: %.c gcc-plugins prepare scripts FORCE
441 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
442 %.i: %.c prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444 -%.o: %.c prepare scripts FORCE
445 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
446 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
447 +%.o: %.c gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.lst: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451 -%.s: %.S prepare scripts FORCE
452 +%.s: %.S gcc-plugins prepare scripts FORCE
453 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
454 -%.o: %.S prepare scripts FORCE
455 +%.o: %.S gcc-plugins prepare scripts FORCE
456 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
457 %.symtypes: %.c prepare scripts FORCE
458 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
459 @@ -1377,11 +1449,15 @@ endif
460 $(cmd_crmodverdir)
461 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
462 $(build)=$(build-dir)
463 -%/: prepare scripts FORCE
464 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
465 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
466 +%/: gcc-plugins prepare scripts FORCE
467 $(cmd_crmodverdir)
468 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
469 $(build)=$(build-dir)
470 -%.ko: prepare scripts FORCE
471 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
472 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
473 +%.ko: gcc-plugins prepare scripts FORCE
474 $(cmd_crmodverdir)
475 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
476 $(build)=$(build-dir) $(@:.ko=.o)
477 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
478 index c2cbe4f..f7264b4 100644
479 --- a/arch/alpha/include/asm/atomic.h
480 +++ b/arch/alpha/include/asm/atomic.h
481 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
482 #define atomic_dec(v) atomic_sub(1,(v))
483 #define atomic64_dec(v) atomic64_sub(1,(v))
484
485 +#define atomic64_read_unchecked(v) atomic64_read(v)
486 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
487 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
488 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
489 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
490 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
491 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
492 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
493 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
494 +
495 #define smp_mb__before_atomic_dec() smp_mb()
496 #define smp_mb__after_atomic_dec() smp_mb()
497 #define smp_mb__before_atomic_inc() smp_mb()
498 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
499 index ad368a9..fbe0f25 100644
500 --- a/arch/alpha/include/asm/cache.h
501 +++ b/arch/alpha/include/asm/cache.h
502 @@ -4,19 +4,19 @@
503 #ifndef __ARCH_ALPHA_CACHE_H
504 #define __ARCH_ALPHA_CACHE_H
505
506 +#include <linux/const.h>
507
508 /* Bytes per L1 (data) cache line. */
509 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
510 -# define L1_CACHE_BYTES 64
511 # define L1_CACHE_SHIFT 6
512 #else
513 /* Both EV4 and EV5 are write-through, read-allocate,
514 direct-mapped, physical.
515 */
516 -# define L1_CACHE_BYTES 32
517 # define L1_CACHE_SHIFT 5
518 #endif
519
520 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
521 #define SMP_CACHE_BYTES L1_CACHE_BYTES
522
523 #endif
524 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
525 index 968d999..d36b2df 100644
526 --- a/arch/alpha/include/asm/elf.h
527 +++ b/arch/alpha/include/asm/elf.h
528 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
529
530 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
531
532 +#ifdef CONFIG_PAX_ASLR
533 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
534 +
535 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
536 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
537 +#endif
538 +
539 /* $0 is set by ld.so to a pointer to a function which might be
540 registered using atexit. This provides a mean for the dynamic
541 linker to call DT_FINI functions for shared libraries that have
542 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
543 index bc2a0da..8ad11ee 100644
544 --- a/arch/alpha/include/asm/pgalloc.h
545 +++ b/arch/alpha/include/asm/pgalloc.h
546 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
547 pgd_set(pgd, pmd);
548 }
549
550 +static inline void
551 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
552 +{
553 + pgd_populate(mm, pgd, pmd);
554 +}
555 +
556 extern pgd_t *pgd_alloc(struct mm_struct *mm);
557
558 static inline void
559 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
560 index 81a4342..348b927 100644
561 --- a/arch/alpha/include/asm/pgtable.h
562 +++ b/arch/alpha/include/asm/pgtable.h
563 @@ -102,6 +102,17 @@ struct vm_area_struct;
564 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
565 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
566 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
567 +
568 +#ifdef CONFIG_PAX_PAGEEXEC
569 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
570 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
571 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
572 +#else
573 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
574 +# define PAGE_COPY_NOEXEC PAGE_COPY
575 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
576 +#endif
577 +
578 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
579
580 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
581 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
582 index 2fd00b7..cfd5069 100644
583 --- a/arch/alpha/kernel/module.c
584 +++ b/arch/alpha/kernel/module.c
585 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
586
587 /* The small sections were sorted to the end of the segment.
588 The following should definitely cover them. */
589 - gp = (u64)me->module_core + me->core_size - 0x8000;
590 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
591 got = sechdrs[me->arch.gotsecindex].sh_addr;
592
593 for (i = 0; i < n; i++) {
594 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
595 index b9e37ad..44c24e7 100644
596 --- a/arch/alpha/kernel/osf_sys.c
597 +++ b/arch/alpha/kernel/osf_sys.c
598 @@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
599 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
600
601 static unsigned long
602 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
603 - unsigned long limit)
604 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
605 + unsigned long limit, unsigned long flags)
606 {
607 struct vm_unmapped_area_info info;
608 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
609
610 info.flags = 0;
611 info.length = len;
612 @@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
613 info.high_limit = limit;
614 info.align_mask = 0;
615 info.align_offset = 0;
616 + info.threadstack_offset = offset;
617 return vm_unmapped_area(&info);
618 }
619
620 @@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
621 merely specific addresses, but regions of memory -- perhaps
622 this feature should be incorporated into all ports? */
623
624 +#ifdef CONFIG_PAX_RANDMMAP
625 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
626 +#endif
627 +
628 if (addr) {
629 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
630 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
631 if (addr != (unsigned long) -ENOMEM)
632 return addr;
633 }
634
635 /* Next, try allocating at TASK_UNMAPPED_BASE. */
636 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
637 - len, limit);
638 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
639 +
640 if (addr != (unsigned long) -ENOMEM)
641 return addr;
642
643 /* Finally, try allocating in low memory. */
644 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
645 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
646
647 return addr;
648 }
649 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
650 index 0c4132d..88f0d53 100644
651 --- a/arch/alpha/mm/fault.c
652 +++ b/arch/alpha/mm/fault.c
653 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
654 __reload_thread(pcb);
655 }
656
657 +#ifdef CONFIG_PAX_PAGEEXEC
658 +/*
659 + * PaX: decide what to do with offenders (regs->pc = fault address)
660 + *
661 + * returns 1 when task should be killed
662 + * 2 when patched PLT trampoline was detected
663 + * 3 when unpatched PLT trampoline was detected
664 + */
665 +static int pax_handle_fetch_fault(struct pt_regs *regs)
666 +{
667 +
668 +#ifdef CONFIG_PAX_EMUPLT
669 + int err;
670 +
671 + do { /* PaX: patched PLT emulation #1 */
672 + unsigned int ldah, ldq, jmp;
673 +
674 + err = get_user(ldah, (unsigned int *)regs->pc);
675 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
676 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
677 +
678 + if (err)
679 + break;
680 +
681 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
682 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
683 + jmp == 0x6BFB0000U)
684 + {
685 + unsigned long r27, addr;
686 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
687 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
688 +
689 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
690 + err = get_user(r27, (unsigned long *)addr);
691 + if (err)
692 + break;
693 +
694 + regs->r27 = r27;
695 + regs->pc = r27;
696 + return 2;
697 + }
698 + } while (0);
699 +
700 + do { /* PaX: patched PLT emulation #2 */
701 + unsigned int ldah, lda, br;
702 +
703 + err = get_user(ldah, (unsigned int *)regs->pc);
704 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
705 + err |= get_user(br, (unsigned int *)(regs->pc+8));
706 +
707 + if (err)
708 + break;
709 +
710 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
711 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
712 + (br & 0xFFE00000U) == 0xC3E00000U)
713 + {
714 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
715 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
716 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
717 +
718 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
719 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
720 + return 2;
721 + }
722 + } while (0);
723 +
724 + do { /* PaX: unpatched PLT emulation */
725 + unsigned int br;
726 +
727 + err = get_user(br, (unsigned int *)regs->pc);
728 +
729 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
730 + unsigned int br2, ldq, nop, jmp;
731 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
732 +
733 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
734 + err = get_user(br2, (unsigned int *)addr);
735 + err |= get_user(ldq, (unsigned int *)(addr+4));
736 + err |= get_user(nop, (unsigned int *)(addr+8));
737 + err |= get_user(jmp, (unsigned int *)(addr+12));
738 + err |= get_user(resolver, (unsigned long *)(addr+16));
739 +
740 + if (err)
741 + break;
742 +
743 + if (br2 == 0xC3600000U &&
744 + ldq == 0xA77B000CU &&
745 + nop == 0x47FF041FU &&
746 + jmp == 0x6B7B0000U)
747 + {
748 + regs->r28 = regs->pc+4;
749 + regs->r27 = addr+16;
750 + regs->pc = resolver;
751 + return 3;
752 + }
753 + }
754 + } while (0);
755 +#endif
756 +
757 + return 1;
758 +}
759 +
760 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
761 +{
762 + unsigned long i;
763 +
764 + printk(KERN_ERR "PAX: bytes at PC: ");
765 + for (i = 0; i < 5; i++) {
766 + unsigned int c;
767 + if (get_user(c, (unsigned int *)pc+i))
768 + printk(KERN_CONT "???????? ");
769 + else
770 + printk(KERN_CONT "%08x ", c);
771 + }
772 + printk("\n");
773 +}
774 +#endif
775
776 /*
777 * This routine handles page faults. It determines the address,
778 @@ -133,8 +251,29 @@ retry:
779 good_area:
780 si_code = SEGV_ACCERR;
781 if (cause < 0) {
782 - if (!(vma->vm_flags & VM_EXEC))
783 + if (!(vma->vm_flags & VM_EXEC)) {
784 +
785 +#ifdef CONFIG_PAX_PAGEEXEC
786 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
787 + goto bad_area;
788 +
789 + up_read(&mm->mmap_sem);
790 + switch (pax_handle_fetch_fault(regs)) {
791 +
792 +#ifdef CONFIG_PAX_EMUPLT
793 + case 2:
794 + case 3:
795 + return;
796 +#endif
797 +
798 + }
799 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
800 + do_group_exit(SIGKILL);
801 +#else
802 goto bad_area;
803 +#endif
804 +
805 + }
806 } else if (!cause) {
807 /* Allow reads even for write-only mappings */
808 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
809 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
810 index 1cacda4..2cef624 100644
811 --- a/arch/arm/Kconfig
812 +++ b/arch/arm/Kconfig
813 @@ -1850,7 +1850,7 @@ config ALIGNMENT_TRAP
814
815 config UACCESS_WITH_MEMCPY
816 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
817 - depends on MMU
818 + depends on MMU && !PAX_MEMORY_UDEREF
819 default y if CPU_FEROCEON
820 help
821 Implement faster copy_to_user and clear_user methods for CPU
822 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
823 index c79f61f..9ac0642 100644
824 --- a/arch/arm/include/asm/atomic.h
825 +++ b/arch/arm/include/asm/atomic.h
826 @@ -17,17 +17,35 @@
827 #include <asm/barrier.h>
828 #include <asm/cmpxchg.h>
829
830 +#ifdef CONFIG_GENERIC_ATOMIC64
831 +#include <asm-generic/atomic64.h>
832 +#endif
833 +
834 #define ATOMIC_INIT(i) { (i) }
835
836 #ifdef __KERNEL__
837
838 +#define _ASM_EXTABLE(from, to) \
839 +" .pushsection __ex_table,\"a\"\n"\
840 +" .align 3\n" \
841 +" .long " #from ", " #to"\n" \
842 +" .popsection"
843 +
844 /*
845 * On ARM, ordinary assignment (str instruction) doesn't clear the local
846 * strex/ldrex monitor on some implementations. The reason we can use it for
847 * atomic_set() is the clrex or dummy strex done on every exception return.
848 */
849 #define atomic_read(v) (*(volatile int *)&(v)->counter)
850 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
851 +{
852 + return v->counter;
853 +}
854 #define atomic_set(v,i) (((v)->counter) = (i))
855 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
856 +{
857 + v->counter = i;
858 +}
859
860 #if __LINUX_ARM_ARCH__ >= 6
861
862 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
863 int result;
864
865 __asm__ __volatile__("@ atomic_add\n"
866 +"1: ldrex %1, [%3]\n"
867 +" adds %0, %1, %4\n"
868 +
869 +#ifdef CONFIG_PAX_REFCOUNT
870 +" bvc 3f\n"
871 +"2: bkpt 0xf103\n"
872 +"3:\n"
873 +#endif
874 +
875 +" strex %1, %0, [%3]\n"
876 +" teq %1, #0\n"
877 +" bne 1b"
878 +
879 +#ifdef CONFIG_PAX_REFCOUNT
880 +"\n4:\n"
881 + _ASM_EXTABLE(2b, 4b)
882 +#endif
883 +
884 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
885 + : "r" (&v->counter), "Ir" (i)
886 + : "cc");
887 +}
888 +
889 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
890 +{
891 + unsigned long tmp;
892 + int result;
893 +
894 + __asm__ __volatile__("@ atomic_add_unchecked\n"
895 "1: ldrex %0, [%3]\n"
896 " add %0, %0, %4\n"
897 " strex %1, %0, [%3]\n"
898 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
899 smp_mb();
900
901 __asm__ __volatile__("@ atomic_add_return\n"
902 +"1: ldrex %1, [%3]\n"
903 +" adds %0, %1, %4\n"
904 +
905 +#ifdef CONFIG_PAX_REFCOUNT
906 +" bvc 3f\n"
907 +" mov %0, %1\n"
908 +"2: bkpt 0xf103\n"
909 +"3:\n"
910 +#endif
911 +
912 +" strex %1, %0, [%3]\n"
913 +" teq %1, #0\n"
914 +" bne 1b"
915 +
916 +#ifdef CONFIG_PAX_REFCOUNT
917 +"\n4:\n"
918 + _ASM_EXTABLE(2b, 4b)
919 +#endif
920 +
921 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
922 + : "r" (&v->counter), "Ir" (i)
923 + : "cc");
924 +
925 + smp_mb();
926 +
927 + return result;
928 +}
929 +
930 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
931 +{
932 + unsigned long tmp;
933 + int result;
934 +
935 + smp_mb();
936 +
937 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
938 "1: ldrex %0, [%3]\n"
939 " add %0, %0, %4\n"
940 " strex %1, %0, [%3]\n"
941 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
942 int result;
943
944 __asm__ __volatile__("@ atomic_sub\n"
945 +"1: ldrex %1, [%3]\n"
946 +" subs %0, %1, %4\n"
947 +
948 +#ifdef CONFIG_PAX_REFCOUNT
949 +" bvc 3f\n"
950 +"2: bkpt 0xf103\n"
951 +"3:\n"
952 +#endif
953 +
954 +" strex %1, %0, [%3]\n"
955 +" teq %1, #0\n"
956 +" bne 1b"
957 +
958 +#ifdef CONFIG_PAX_REFCOUNT
959 +"\n4:\n"
960 + _ASM_EXTABLE(2b, 4b)
961 +#endif
962 +
963 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
964 + : "r" (&v->counter), "Ir" (i)
965 + : "cc");
966 +}
967 +
968 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
969 +{
970 + unsigned long tmp;
971 + int result;
972 +
973 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
974 "1: ldrex %0, [%3]\n"
975 " sub %0, %0, %4\n"
976 " strex %1, %0, [%3]\n"
977 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
978 smp_mb();
979
980 __asm__ __volatile__("@ atomic_sub_return\n"
981 -"1: ldrex %0, [%3]\n"
982 -" sub %0, %0, %4\n"
983 +"1: ldrex %1, [%3]\n"
984 +" subs %0, %1, %4\n"
985 +
986 +#ifdef CONFIG_PAX_REFCOUNT
987 +" bvc 3f\n"
988 +" mov %0, %1\n"
989 +"2: bkpt 0xf103\n"
990 +"3:\n"
991 +#endif
992 +
993 " strex %1, %0, [%3]\n"
994 " teq %1, #0\n"
995 " bne 1b"
996 +
997 +#ifdef CONFIG_PAX_REFCOUNT
998 +"\n4:\n"
999 + _ASM_EXTABLE(2b, 4b)
1000 +#endif
1001 +
1002 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1003 : "r" (&v->counter), "Ir" (i)
1004 : "cc");
1005 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1006 return oldval;
1007 }
1008
1009 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1010 +{
1011 + unsigned long oldval, res;
1012 +
1013 + smp_mb();
1014 +
1015 + do {
1016 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1017 + "ldrex %1, [%3]\n"
1018 + "mov %0, #0\n"
1019 + "teq %1, %4\n"
1020 + "strexeq %0, %5, [%3]\n"
1021 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1022 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1023 + : "cc");
1024 + } while (res);
1025 +
1026 + smp_mb();
1027 +
1028 + return oldval;
1029 +}
1030 +
1031 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1032 {
1033 unsigned long tmp, tmp2;
1034 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1035
1036 return val;
1037 }
1038 +
1039 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1040 +{
1041 + return atomic_add_return(i, v);
1042 +}
1043 +
1044 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1045 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1046 +{
1047 + (void) atomic_add_return(i, v);
1048 +}
1049
1050 static inline int atomic_sub_return(int i, atomic_t *v)
1051 {
1052 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1053 return val;
1054 }
1055 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1056 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1057 +{
1058 + (void) atomic_sub_return(i, v);
1059 +}
1060
1061 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1062 {
1063 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1064 return ret;
1065 }
1066
1067 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1068 +{
1069 + return atomic_cmpxchg(v, old, new);
1070 +}
1071 +
1072 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1073 {
1074 unsigned long flags;
1075 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1076 #endif /* __LINUX_ARM_ARCH__ */
1077
1078 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1079 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1080 +{
1081 + return xchg(&v->counter, new);
1082 +}
1083
1084 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1085 {
1086 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1087 }
1088
1089 #define atomic_inc(v) atomic_add(1, v)
1090 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1091 +{
1092 + atomic_add_unchecked(1, v);
1093 +}
1094 #define atomic_dec(v) atomic_sub(1, v)
1095 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1096 +{
1097 + atomic_sub_unchecked(1, v);
1098 +}
1099
1100 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1101 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1102 +{
1103 + return atomic_add_return_unchecked(1, v) == 0;
1104 +}
1105 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1106 #define atomic_inc_return(v) (atomic_add_return(1, v))
1107 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1108 +{
1109 + return atomic_add_return_unchecked(1, v);
1110 +}
1111 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1112 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1113
1114 @@ -241,6 +428,14 @@ typedef struct {
1115 u64 __aligned(8) counter;
1116 } atomic64_t;
1117
1118 +#ifdef CONFIG_PAX_REFCOUNT
1119 +typedef struct {
1120 + u64 __aligned(8) counter;
1121 +} atomic64_unchecked_t;
1122 +#else
1123 +typedef atomic64_t atomic64_unchecked_t;
1124 +#endif
1125 +
1126 #define ATOMIC64_INIT(i) { (i) }
1127
1128 static inline u64 atomic64_read(const atomic64_t *v)
1129 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1130 return result;
1131 }
1132
1133 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1134 +{
1135 + u64 result;
1136 +
1137 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1138 +" ldrexd %0, %H0, [%1]"
1139 + : "=&r" (result)
1140 + : "r" (&v->counter), "Qo" (v->counter)
1141 + );
1142 +
1143 + return result;
1144 +}
1145 +
1146 static inline void atomic64_set(atomic64_t *v, u64 i)
1147 {
1148 u64 tmp;
1149 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1150 : "cc");
1151 }
1152
1153 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1154 +{
1155 + u64 tmp;
1156 +
1157 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1158 +"1: ldrexd %0, %H0, [%2]\n"
1159 +" strexd %0, %3, %H3, [%2]\n"
1160 +" teq %0, #0\n"
1161 +" bne 1b"
1162 + : "=&r" (tmp), "=Qo" (v->counter)
1163 + : "r" (&v->counter), "r" (i)
1164 + : "cc");
1165 +}
1166 +
1167 static inline void atomic64_add(u64 i, atomic64_t *v)
1168 {
1169 u64 result;
1170 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1171 __asm__ __volatile__("@ atomic64_add\n"
1172 "1: ldrexd %0, %H0, [%3]\n"
1173 " adds %0, %0, %4\n"
1174 +" adcs %H0, %H0, %H4\n"
1175 +
1176 +#ifdef CONFIG_PAX_REFCOUNT
1177 +" bvc 3f\n"
1178 +"2: bkpt 0xf103\n"
1179 +"3:\n"
1180 +#endif
1181 +
1182 +" strexd %1, %0, %H0, [%3]\n"
1183 +" teq %1, #0\n"
1184 +" bne 1b"
1185 +
1186 +#ifdef CONFIG_PAX_REFCOUNT
1187 +"\n4:\n"
1188 + _ASM_EXTABLE(2b, 4b)
1189 +#endif
1190 +
1191 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1192 + : "r" (&v->counter), "r" (i)
1193 + : "cc");
1194 +}
1195 +
1196 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1197 +{
1198 + u64 result;
1199 + unsigned long tmp;
1200 +
1201 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1202 +"1: ldrexd %0, %H0, [%3]\n"
1203 +" adds %0, %0, %4\n"
1204 " adc %H0, %H0, %H4\n"
1205 " strexd %1, %0, %H0, [%3]\n"
1206 " teq %1, #0\n"
1207 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1208
1209 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1210 {
1211 - u64 result;
1212 - unsigned long tmp;
1213 + u64 result, tmp;
1214
1215 smp_mb();
1216
1217 __asm__ __volatile__("@ atomic64_add_return\n"
1218 +"1: ldrexd %1, %H1, [%3]\n"
1219 +" adds %0, %1, %4\n"
1220 +" adcs %H0, %H1, %H4\n"
1221 +
1222 +#ifdef CONFIG_PAX_REFCOUNT
1223 +" bvc 3f\n"
1224 +" mov %0, %1\n"
1225 +" mov %H0, %H1\n"
1226 +"2: bkpt 0xf103\n"
1227 +"3:\n"
1228 +#endif
1229 +
1230 +" strexd %1, %0, %H0, [%3]\n"
1231 +" teq %1, #0\n"
1232 +" bne 1b"
1233 +
1234 +#ifdef CONFIG_PAX_REFCOUNT
1235 +"\n4:\n"
1236 + _ASM_EXTABLE(2b, 4b)
1237 +#endif
1238 +
1239 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1240 + : "r" (&v->counter), "r" (i)
1241 + : "cc");
1242 +
1243 + smp_mb();
1244 +
1245 + return result;
1246 +}
1247 +
1248 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1249 +{
1250 + u64 result;
1251 + unsigned long tmp;
1252 +
1253 + smp_mb();
1254 +
1255 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1256 "1: ldrexd %0, %H0, [%3]\n"
1257 " adds %0, %0, %4\n"
1258 " adc %H0, %H0, %H4\n"
1259 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1260 __asm__ __volatile__("@ atomic64_sub\n"
1261 "1: ldrexd %0, %H0, [%3]\n"
1262 " subs %0, %0, %4\n"
1263 +" sbcs %H0, %H0, %H4\n"
1264 +
1265 +#ifdef CONFIG_PAX_REFCOUNT
1266 +" bvc 3f\n"
1267 +"2: bkpt 0xf103\n"
1268 +"3:\n"
1269 +#endif
1270 +
1271 +" strexd %1, %0, %H0, [%3]\n"
1272 +" teq %1, #0\n"
1273 +" bne 1b"
1274 +
1275 +#ifdef CONFIG_PAX_REFCOUNT
1276 +"\n4:\n"
1277 + _ASM_EXTABLE(2b, 4b)
1278 +#endif
1279 +
1280 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1281 + : "r" (&v->counter), "r" (i)
1282 + : "cc");
1283 +}
1284 +
1285 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1286 +{
1287 + u64 result;
1288 + unsigned long tmp;
1289 +
1290 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1291 +"1: ldrexd %0, %H0, [%3]\n"
1292 +" subs %0, %0, %4\n"
1293 " sbc %H0, %H0, %H4\n"
1294 " strexd %1, %0, %H0, [%3]\n"
1295 " teq %1, #0\n"
1296 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1297
1298 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1299 {
1300 - u64 result;
1301 - unsigned long tmp;
1302 + u64 result, tmp;
1303
1304 smp_mb();
1305
1306 __asm__ __volatile__("@ atomic64_sub_return\n"
1307 -"1: ldrexd %0, %H0, [%3]\n"
1308 -" subs %0, %0, %4\n"
1309 -" sbc %H0, %H0, %H4\n"
1310 +"1: ldrexd %1, %H1, [%3]\n"
1311 +" subs %0, %1, %4\n"
1312 +" sbcs %H0, %H1, %H4\n"
1313 +
1314 +#ifdef CONFIG_PAX_REFCOUNT
1315 +" bvc 3f\n"
1316 +" mov %0, %1\n"
1317 +" mov %H0, %H1\n"
1318 +"2: bkpt 0xf103\n"
1319 +"3:\n"
1320 +#endif
1321 +
1322 " strexd %1, %0, %H0, [%3]\n"
1323 " teq %1, #0\n"
1324 " bne 1b"
1325 +
1326 +#ifdef CONFIG_PAX_REFCOUNT
1327 +"\n4:\n"
1328 + _ASM_EXTABLE(2b, 4b)
1329 +#endif
1330 +
1331 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1332 : "r" (&v->counter), "r" (i)
1333 : "cc");
1334 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1335 return oldval;
1336 }
1337
1338 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1339 +{
1340 + u64 oldval;
1341 + unsigned long res;
1342 +
1343 + smp_mb();
1344 +
1345 + do {
1346 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1347 + "ldrexd %1, %H1, [%3]\n"
1348 + "mov %0, #0\n"
1349 + "teq %1, %4\n"
1350 + "teqeq %H1, %H4\n"
1351 + "strexdeq %0, %5, %H5, [%3]"
1352 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1353 + : "r" (&ptr->counter), "r" (old), "r" (new)
1354 + : "cc");
1355 + } while (res);
1356 +
1357 + smp_mb();
1358 +
1359 + return oldval;
1360 +}
1361 +
1362 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1363 {
1364 u64 result;
1365 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1366
1367 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1368 {
1369 - u64 result;
1370 - unsigned long tmp;
1371 + u64 result, tmp;
1372
1373 smp_mb();
1374
1375 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1376 -"1: ldrexd %0, %H0, [%3]\n"
1377 -" subs %0, %0, #1\n"
1378 -" sbc %H0, %H0, #0\n"
1379 +"1: ldrexd %1, %H1, [%3]\n"
1380 +" subs %0, %1, #1\n"
1381 +" sbcs %H0, %H1, #0\n"
1382 +
1383 +#ifdef CONFIG_PAX_REFCOUNT
1384 +" bvc 3f\n"
1385 +" mov %0, %1\n"
1386 +" mov %H0, %H1\n"
1387 +"2: bkpt 0xf103\n"
1388 +"3:\n"
1389 +#endif
1390 +
1391 " teq %H0, #0\n"
1392 -" bmi 2f\n"
1393 +" bmi 4f\n"
1394 " strexd %1, %0, %H0, [%3]\n"
1395 " teq %1, #0\n"
1396 " bne 1b\n"
1397 -"2:"
1398 +"4:\n"
1399 +
1400 +#ifdef CONFIG_PAX_REFCOUNT
1401 + _ASM_EXTABLE(2b, 4b)
1402 +#endif
1403 +
1404 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1405 : "r" (&v->counter)
1406 : "cc");
1407 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1408 " teq %0, %5\n"
1409 " teqeq %H0, %H5\n"
1410 " moveq %1, #0\n"
1411 -" beq 2f\n"
1412 +" beq 4f\n"
1413 " adds %0, %0, %6\n"
1414 -" adc %H0, %H0, %H6\n"
1415 +" adcs %H0, %H0, %H6\n"
1416 +
1417 +#ifdef CONFIG_PAX_REFCOUNT
1418 +" bvc 3f\n"
1419 +"2: bkpt 0xf103\n"
1420 +"3:\n"
1421 +#endif
1422 +
1423 " strexd %2, %0, %H0, [%4]\n"
1424 " teq %2, #0\n"
1425 " bne 1b\n"
1426 -"2:"
1427 +"4:\n"
1428 +
1429 +#ifdef CONFIG_PAX_REFCOUNT
1430 + _ASM_EXTABLE(2b, 4b)
1431 +#endif
1432 +
1433 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1434 : "r" (&v->counter), "r" (u), "r" (a)
1435 : "cc");
1436 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1437
1438 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1439 #define atomic64_inc(v) atomic64_add(1LL, (v))
1440 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1441 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1442 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1443 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1444 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1445 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1446 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1447 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1448 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1449 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1450 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1451 index 75fe66b..ba3dee4 100644
1452 --- a/arch/arm/include/asm/cache.h
1453 +++ b/arch/arm/include/asm/cache.h
1454 @@ -4,8 +4,10 @@
1455 #ifndef __ASMARM_CACHE_H
1456 #define __ASMARM_CACHE_H
1457
1458 +#include <linux/const.h>
1459 +
1460 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1461 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1462 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1463
1464 /*
1465 * Memory returned by kmalloc() may be used for DMA, so we must make
1466 @@ -24,5 +26,6 @@
1467 #endif
1468
1469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1470 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1471
1472 #endif
1473 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1474 index e1489c5..d418304 100644
1475 --- a/arch/arm/include/asm/cacheflush.h
1476 +++ b/arch/arm/include/asm/cacheflush.h
1477 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1478 void (*dma_unmap_area)(const void *, size_t, int);
1479
1480 void (*dma_flush_range)(const void *, const void *);
1481 -};
1482 +} __no_const;
1483
1484 /*
1485 * Select the calling method
1486 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1487 index 6dcc164..b14d917 100644
1488 --- a/arch/arm/include/asm/checksum.h
1489 +++ b/arch/arm/include/asm/checksum.h
1490 @@ -37,7 +37,19 @@ __wsum
1491 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1492
1493 __wsum
1494 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1495 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1496 +
1497 +static inline __wsum
1498 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1499 +{
1500 + __wsum ret;
1501 + pax_open_userland();
1502 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1503 + pax_close_userland();
1504 + return ret;
1505 +}
1506 +
1507 +
1508
1509 /*
1510 * Fold a partial checksum without adding pseudo headers
1511 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1512 index 7eb18c1..e38b6d2 100644
1513 --- a/arch/arm/include/asm/cmpxchg.h
1514 +++ b/arch/arm/include/asm/cmpxchg.h
1515 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1516
1517 #define xchg(ptr,x) \
1518 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1519 +#define xchg_unchecked(ptr,x) \
1520 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1521
1522 #include <asm-generic/cmpxchg-local.h>
1523
1524 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1525 index 6ddbe44..b5e38b1 100644
1526 --- a/arch/arm/include/asm/domain.h
1527 +++ b/arch/arm/include/asm/domain.h
1528 @@ -48,18 +48,37 @@
1529 * Domain types
1530 */
1531 #define DOMAIN_NOACCESS 0
1532 -#define DOMAIN_CLIENT 1
1533 #ifdef CONFIG_CPU_USE_DOMAINS
1534 +#define DOMAIN_USERCLIENT 1
1535 +#define DOMAIN_KERNELCLIENT 1
1536 #define DOMAIN_MANAGER 3
1537 +#define DOMAIN_VECTORS DOMAIN_USER
1538 #else
1539 +
1540 +#ifdef CONFIG_PAX_KERNEXEC
1541 #define DOMAIN_MANAGER 1
1542 +#define DOMAIN_KERNEXEC 3
1543 +#else
1544 +#define DOMAIN_MANAGER 1
1545 +#endif
1546 +
1547 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1548 +#define DOMAIN_USERCLIENT 0
1549 +#define DOMAIN_UDEREF 1
1550 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1551 +#else
1552 +#define DOMAIN_USERCLIENT 1
1553 +#define DOMAIN_VECTORS DOMAIN_USER
1554 +#endif
1555 +#define DOMAIN_KERNELCLIENT 1
1556 +
1557 #endif
1558
1559 #define domain_val(dom,type) ((type) << (2*(dom)))
1560
1561 #ifndef __ASSEMBLY__
1562
1563 -#ifdef CONFIG_CPU_USE_DOMAINS
1564 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1565 static inline void set_domain(unsigned val)
1566 {
1567 asm volatile(
1568 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1569 isb();
1570 }
1571
1572 -#define modify_domain(dom,type) \
1573 - do { \
1574 - struct thread_info *thread = current_thread_info(); \
1575 - unsigned int domain = thread->cpu_domain; \
1576 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1577 - thread->cpu_domain = domain | domain_val(dom, type); \
1578 - set_domain(thread->cpu_domain); \
1579 - } while (0)
1580 -
1581 +extern void modify_domain(unsigned int dom, unsigned int type);
1582 #else
1583 static inline void set_domain(unsigned val) { }
1584 static inline void modify_domain(unsigned dom, unsigned type) { }
1585 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1586 index 38050b1..9d90e8b 100644
1587 --- a/arch/arm/include/asm/elf.h
1588 +++ b/arch/arm/include/asm/elf.h
1589 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1590 the loader. We need to make sure that it is out of the way of the program
1591 that it will "exec", and that there is sufficient room for the brk. */
1592
1593 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1594 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1595 +
1596 +#ifdef CONFIG_PAX_ASLR
1597 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1598 +
1599 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1600 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1601 +#endif
1602
1603 /* When the program starts, a1 contains a pointer to a function to be
1604 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1605 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1606 extern void elf_set_personality(const struct elf32_hdr *);
1607 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1608
1609 -struct mm_struct;
1610 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1611 -#define arch_randomize_brk arch_randomize_brk
1612 -
1613 #endif
1614 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1615 index de53547..52b9a28 100644
1616 --- a/arch/arm/include/asm/fncpy.h
1617 +++ b/arch/arm/include/asm/fncpy.h
1618 @@ -81,7 +81,9 @@
1619 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1620 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1621 \
1622 + pax_open_kernel(); \
1623 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1624 + pax_close_kernel(); \
1625 flush_icache_range((unsigned long)(dest_buf), \
1626 (unsigned long)(dest_buf) + (size)); \
1627 \
1628 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1629 index e42cf59..7b94b8f 100644
1630 --- a/arch/arm/include/asm/futex.h
1631 +++ b/arch/arm/include/asm/futex.h
1632 @@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1633 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1634 return -EFAULT;
1635
1636 + pax_open_userland();
1637 +
1638 smp_mb();
1639 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1640 "1: ldrex %1, [%4]\n"
1641 @@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1642 : "cc", "memory");
1643 smp_mb();
1644
1645 + pax_close_userland();
1646 +
1647 *uval = val;
1648 return ret;
1649 }
1650 @@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1651 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1652 return -EFAULT;
1653
1654 + pax_open_userland();
1655 +
1656 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1657 "1: " TUSER(ldr) " %1, [%4]\n"
1658 " teq %1, %2\n"
1659 @@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1660 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1661 : "cc", "memory");
1662
1663 + pax_close_userland();
1664 +
1665 *uval = val;
1666 return ret;
1667 }
1668 @@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1669 return -EFAULT;
1670
1671 pagefault_disable(); /* implies preempt_disable() */
1672 + pax_open_userland();
1673
1674 switch (op) {
1675 case FUTEX_OP_SET:
1676 @@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1677 ret = -ENOSYS;
1678 }
1679
1680 + pax_close_userland();
1681 pagefault_enable(); /* subsumes preempt_enable() */
1682
1683 if (!ret) {
1684 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1685 index 83eb2f7..ed77159 100644
1686 --- a/arch/arm/include/asm/kmap_types.h
1687 +++ b/arch/arm/include/asm/kmap_types.h
1688 @@ -4,6 +4,6 @@
1689 /*
1690 * This is the "bare minimum". AIO seems to require this.
1691 */
1692 -#define KM_TYPE_NR 16
1693 +#define KM_TYPE_NR 17
1694
1695 #endif
1696 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1697 index 9e614a1..3302cca 100644
1698 --- a/arch/arm/include/asm/mach/dma.h
1699 +++ b/arch/arm/include/asm/mach/dma.h
1700 @@ -22,7 +22,7 @@ struct dma_ops {
1701 int (*residue)(unsigned int, dma_t *); /* optional */
1702 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1703 const char *type;
1704 -};
1705 +} __do_const;
1706
1707 struct dma_struct {
1708 void *addr; /* single DMA address */
1709 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1710 index 2fe141f..192dc01 100644
1711 --- a/arch/arm/include/asm/mach/map.h
1712 +++ b/arch/arm/include/asm/mach/map.h
1713 @@ -27,13 +27,16 @@ struct map_desc {
1714 #define MT_MINICLEAN 6
1715 #define MT_LOW_VECTORS 7
1716 #define MT_HIGH_VECTORS 8
1717 -#define MT_MEMORY 9
1718 +#define MT_MEMORY_RWX 9
1719 #define MT_ROM 10
1720 -#define MT_MEMORY_NONCACHED 11
1721 +#define MT_MEMORY_NONCACHED_RX 11
1722 #define MT_MEMORY_DTCM 12
1723 #define MT_MEMORY_ITCM 13
1724 #define MT_MEMORY_SO 14
1725 #define MT_MEMORY_DMA_READY 15
1726 +#define MT_MEMORY_RW 16
1727 +#define MT_MEMORY_RX 17
1728 +#define MT_MEMORY_NONCACHED_RW 18
1729
1730 #ifdef CONFIG_MMU
1731 extern void iotable_init(struct map_desc *, int);
1732 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1733 index 12f71a1..04e063c 100644
1734 --- a/arch/arm/include/asm/outercache.h
1735 +++ b/arch/arm/include/asm/outercache.h
1736 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1737 #endif
1738 void (*set_debug)(unsigned long);
1739 void (*resume)(void);
1740 -};
1741 +} __no_const;
1742
1743 #ifdef CONFIG_OUTER_CACHE
1744
1745 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1746 index 812a494..71fc0b6 100644
1747 --- a/arch/arm/include/asm/page.h
1748 +++ b/arch/arm/include/asm/page.h
1749 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1750 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1751 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1752 unsigned long vaddr, struct vm_area_struct *vma);
1753 -};
1754 +} __no_const;
1755
1756 #ifdef MULTI_USER
1757 extern struct cpu_user_fns cpu_user;
1758 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1759 index 943504f..c37a730 100644
1760 --- a/arch/arm/include/asm/pgalloc.h
1761 +++ b/arch/arm/include/asm/pgalloc.h
1762 @@ -17,6 +17,7 @@
1763 #include <asm/processor.h>
1764 #include <asm/cacheflush.h>
1765 #include <asm/tlbflush.h>
1766 +#include <asm/system_info.h>
1767
1768 #define check_pgt_cache() do { } while (0)
1769
1770 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1771 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1772 }
1773
1774 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1775 +{
1776 + pud_populate(mm, pud, pmd);
1777 +}
1778 +
1779 #else /* !CONFIG_ARM_LPAE */
1780
1781 /*
1782 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1783 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1784 #define pmd_free(mm, pmd) do { } while (0)
1785 #define pud_populate(mm,pmd,pte) BUG()
1786 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1787
1788 #endif /* CONFIG_ARM_LPAE */
1789
1790 @@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1791 __free_page(pte);
1792 }
1793
1794 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1795 +{
1796 +#ifdef CONFIG_ARM_LPAE
1797 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1798 +#else
1799 + if (addr & SECTION_SIZE)
1800 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1801 + else
1802 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1803 +#endif
1804 + flush_pmd_entry(pmdp);
1805 +}
1806 +
1807 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1808 pmdval_t prot)
1809 {
1810 @@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1811 static inline void
1812 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1813 {
1814 - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1815 + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1816 }
1817 #define pmd_pgtable(pmd) pmd_page(pmd)
1818
1819 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1820 index 5cfba15..f415e1a 100644
1821 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1822 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1823 @@ -20,12 +20,15 @@
1824 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1825 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1826 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1827 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1828 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1829 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1830 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1831 +
1832 /*
1833 * - section
1834 */
1835 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1836 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1837 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1838 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1839 @@ -37,6 +40,7 @@
1840 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1841 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1842 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1843 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1844
1845 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1846 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1847 @@ -66,6 +70,7 @@
1848 * - extended small page/tiny page
1849 */
1850 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1851 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1852 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1853 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1854 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1855 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1856 index f97ee02..07f1be5 100644
1857 --- a/arch/arm/include/asm/pgtable-2level.h
1858 +++ b/arch/arm/include/asm/pgtable-2level.h
1859 @@ -125,6 +125,7 @@
1860 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1861 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1862 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1863 +#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1864
1865 /*
1866 * These are the memory types, defined to be compatible with
1867 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1868 index 18f5cef..25b8f43 100644
1869 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1870 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1871 @@ -41,6 +41,7 @@
1872 */
1873 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1874 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1875 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1876 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1877 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1878 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1879 @@ -71,6 +72,7 @@
1880 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1881 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1882 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1883 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1884 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1885
1886 /*
1887 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1888 index 86b8fe3..e25f975 100644
1889 --- a/arch/arm/include/asm/pgtable-3level.h
1890 +++ b/arch/arm/include/asm/pgtable-3level.h
1891 @@ -74,6 +74,7 @@
1892 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1893 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1894 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1895 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1896 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1897 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1898 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1899 @@ -82,6 +83,7 @@
1900 /*
1901 * To be used in assembly code with the upper page attributes.
1902 */
1903 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
1904 #define L_PTE_XN_HIGH (1 << (54 - 32))
1905 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1906
1907 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1908 index 9bcd262..fba731c 100644
1909 --- a/arch/arm/include/asm/pgtable.h
1910 +++ b/arch/arm/include/asm/pgtable.h
1911 @@ -30,6 +30,9 @@
1912 #include <asm/pgtable-2level.h>
1913 #endif
1914
1915 +#define ktla_ktva(addr) (addr)
1916 +#define ktva_ktla(addr) (addr)
1917 +
1918 /*
1919 * Just any arbitrary offset to the start of the vmalloc VM area: the
1920 * current 8MB value just means that there will be a 8MB "hole" after the
1921 @@ -45,6 +48,9 @@
1922 #define LIBRARY_TEXT_START 0x0c000000
1923
1924 #ifndef __ASSEMBLY__
1925 +extern pteval_t __supported_pte_mask;
1926 +extern pmdval_t __supported_pmd_mask;
1927 +
1928 extern void __pte_error(const char *file, int line, pte_t);
1929 extern void __pmd_error(const char *file, int line, pmd_t);
1930 extern void __pgd_error(const char *file, int line, pgd_t);
1931 @@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1932 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1933 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1934
1935 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
1936 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1937 +
1938 +#ifdef CONFIG_PAX_KERNEXEC
1939 +#include <asm/domain.h>
1940 +#include <linux/thread_info.h>
1941 +#include <linux/preempt.h>
1942 +#endif
1943 +
1944 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1945 +static inline int test_domain(int domain, int domaintype)
1946 +{
1947 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
1948 +}
1949 +#endif
1950 +
1951 +#ifdef CONFIG_PAX_KERNEXEC
1952 +static inline unsigned long pax_open_kernel(void) {
1953 +#ifdef CONFIG_ARM_LPAE
1954 + /* TODO */
1955 +#else
1956 + preempt_disable();
1957 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
1958 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
1959 +#endif
1960 + return 0;
1961 +}
1962 +
1963 +static inline unsigned long pax_close_kernel(void) {
1964 +#ifdef CONFIG_ARM_LPAE
1965 + /* TODO */
1966 +#else
1967 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
1968 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
1969 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
1970 + preempt_enable_no_resched();
1971 +#endif
1972 + return 0;
1973 +}
1974 +#else
1975 +static inline unsigned long pax_open_kernel(void) { return 0; }
1976 +static inline unsigned long pax_close_kernel(void) { return 0; }
1977 +#endif
1978 +
1979 /*
1980 * This is the lowest virtual address we can permit any user space
1981 * mapping to be mapped at. This is particularly important for
1982 @@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1983 /*
1984 * The pgprot_* and protection_map entries will be fixed up in runtime
1985 * to include the cachable and bufferable bits based on memory policy,
1986 - * as well as any architecture dependent bits like global/ASID and SMP
1987 - * shared mapping bits.
1988 + * as well as any architecture dependent bits like global/ASID, PXN,
1989 + * and SMP shared mapping bits.
1990 */
1991 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
1992
1993 @@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
1994 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1995 {
1996 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
1997 - L_PTE_NONE | L_PTE_VALID;
1998 + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
1999 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2000 return pte;
2001 }
2002 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2003 index f3628fb..a0672dd 100644
2004 --- a/arch/arm/include/asm/proc-fns.h
2005 +++ b/arch/arm/include/asm/proc-fns.h
2006 @@ -75,7 +75,7 @@ extern struct processor {
2007 unsigned int suspend_size;
2008 void (*do_suspend)(void *);
2009 void (*do_resume)(void *);
2010 -} processor;
2011 +} __do_const processor;
2012
2013 #ifndef MULTI_CPU
2014 extern void cpu_proc_init(void);
2015 diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2016 index 06e7d50..8a8e251 100644
2017 --- a/arch/arm/include/asm/processor.h
2018 +++ b/arch/arm/include/asm/processor.h
2019 @@ -65,9 +65,8 @@ struct thread_struct {
2020 regs->ARM_cpsr |= PSR_ENDSTATE; \
2021 regs->ARM_pc = pc & ~1; /* pc */ \
2022 regs->ARM_sp = sp; /* sp */ \
2023 - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2024 - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2025 - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2026 + /* r2 (envp), r1 (argv), r0 (argc) */ \
2027 + (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2028 nommu_start_thread(regs); \
2029 })
2030
2031 diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2032 index ce0dbe7..c085b6f 100644
2033 --- a/arch/arm/include/asm/psci.h
2034 +++ b/arch/arm/include/asm/psci.h
2035 @@ -29,7 +29,7 @@ struct psci_operations {
2036 int (*cpu_off)(struct psci_power_state state);
2037 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2038 int (*migrate)(unsigned long cpuid);
2039 -};
2040 +} __no_const;
2041
2042 extern struct psci_operations psci_ops;
2043
2044 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2045 index d3a22be..3a69ad5 100644
2046 --- a/arch/arm/include/asm/smp.h
2047 +++ b/arch/arm/include/asm/smp.h
2048 @@ -107,7 +107,7 @@ struct smp_operations {
2049 int (*cpu_disable)(unsigned int cpu);
2050 #endif
2051 #endif
2052 -};
2053 +} __no_const;
2054
2055 /*
2056 * set platform specific SMP operations
2057 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2058 index cddda1f..ff357f7 100644
2059 --- a/arch/arm/include/asm/thread_info.h
2060 +++ b/arch/arm/include/asm/thread_info.h
2061 @@ -77,9 +77,9 @@ struct thread_info {
2062 .flags = 0, \
2063 .preempt_count = INIT_PREEMPT_COUNT, \
2064 .addr_limit = KERNEL_DS, \
2065 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2066 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2067 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2068 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2069 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2070 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2071 .restart_block = { \
2072 .fn = do_no_restart_syscall, \
2073 }, \
2074 @@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2075 #define TIF_SYSCALL_AUDIT 9
2076 #define TIF_SYSCALL_TRACEPOINT 10
2077 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2078 +
2079 +/* within 8 bits of TIF_SYSCALL_TRACE
2080 + * to meet flexible second operand requirements
2081 + */
2082 +#define TIF_GRSEC_SETXID 12
2083 +
2084 #define TIF_USING_IWMMXT 17
2085 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2086 #define TIF_RESTORE_SIGMASK 20
2087 @@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2088 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2089 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2090 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2091 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2092
2093 /* Checks for any syscall work in entry-common.S */
2094 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2095 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2096 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2097
2098 /*
2099 * Change these and you break ASM code in entry-common.S
2100 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2101 index 7e1f760..752fcb7 100644
2102 --- a/arch/arm/include/asm/uaccess.h
2103 +++ b/arch/arm/include/asm/uaccess.h
2104 @@ -18,6 +18,7 @@
2105 #include <asm/domain.h>
2106 #include <asm/unified.h>
2107 #include <asm/compiler.h>
2108 +#include <asm/pgtable.h>
2109
2110 #define VERIFY_READ 0
2111 #define VERIFY_WRITE 1
2112 @@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2113 #define USER_DS TASK_SIZE
2114 #define get_fs() (current_thread_info()->addr_limit)
2115
2116 +static inline void pax_open_userland(void)
2117 +{
2118 +
2119 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2120 + if (get_fs() == USER_DS) {
2121 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2122 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2123 + }
2124 +#endif
2125 +
2126 +}
2127 +
2128 +static inline void pax_close_userland(void)
2129 +{
2130 +
2131 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2132 + if (get_fs() == USER_DS) {
2133 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2134 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2135 + }
2136 +#endif
2137 +
2138 +}
2139 +
2140 static inline void set_fs(mm_segment_t fs)
2141 {
2142 current_thread_info()->addr_limit = fs;
2143 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2144 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2145 }
2146
2147 #define segment_eq(a,b) ((a) == (b))
2148 @@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2149
2150 #define get_user(x,p) \
2151 ({ \
2152 + int __e; \
2153 might_fault(); \
2154 - __get_user_check(x,p); \
2155 + pax_open_userland(); \
2156 + __e = __get_user_check(x,p); \
2157 + pax_close_userland(); \
2158 + __e; \
2159 })
2160
2161 extern int __put_user_1(void *, unsigned int);
2162 @@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2163
2164 #define put_user(x,p) \
2165 ({ \
2166 + int __e; \
2167 might_fault(); \
2168 - __put_user_check(x,p); \
2169 + pax_open_userland(); \
2170 + __e = __put_user_check(x,p); \
2171 + pax_close_userland(); \
2172 + __e; \
2173 })
2174
2175 #else /* CONFIG_MMU */
2176 @@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2177 #define __get_user(x,ptr) \
2178 ({ \
2179 long __gu_err = 0; \
2180 + pax_open_userland(); \
2181 __get_user_err((x),(ptr),__gu_err); \
2182 + pax_close_userland(); \
2183 __gu_err; \
2184 })
2185
2186 #define __get_user_error(x,ptr,err) \
2187 ({ \
2188 + pax_open_userland(); \
2189 __get_user_err((x),(ptr),err); \
2190 + pax_close_userland(); \
2191 (void) 0; \
2192 })
2193
2194 @@ -312,13 +349,17 @@ do { \
2195 #define __put_user(x,ptr) \
2196 ({ \
2197 long __pu_err = 0; \
2198 + pax_open_userland(); \
2199 __put_user_err((x),(ptr),__pu_err); \
2200 + pax_close_userland(); \
2201 __pu_err; \
2202 })
2203
2204 #define __put_user_error(x,ptr,err) \
2205 ({ \
2206 + pax_open_userland(); \
2207 __put_user_err((x),(ptr),err); \
2208 + pax_close_userland(); \
2209 (void) 0; \
2210 })
2211
2212 @@ -418,11 +459,44 @@ do { \
2213
2214
2215 #ifdef CONFIG_MMU
2216 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2217 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2218 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2219 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2220 +
2221 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2222 +{
2223 + unsigned long ret;
2224 +
2225 + check_object_size(to, n, false);
2226 + pax_open_userland();
2227 + ret = ___copy_from_user(to, from, n);
2228 + pax_close_userland();
2229 + return ret;
2230 +}
2231 +
2232 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2233 +{
2234 + unsigned long ret;
2235 +
2236 + check_object_size(from, n, true);
2237 + pax_open_userland();
2238 + ret = ___copy_to_user(to, from, n);
2239 + pax_close_userland();
2240 + return ret;
2241 +}
2242 +
2243 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2244 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2245 +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2246 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2247 +
2248 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2249 +{
2250 + unsigned long ret;
2251 + pax_open_userland();
2252 + ret = ___clear_user(addr, n);
2253 + pax_close_userland();
2254 + return ret;
2255 +}
2256 +
2257 #else
2258 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2259 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2260 @@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2261
2262 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2263 {
2264 + if ((long)n < 0)
2265 + return n;
2266 +
2267 if (access_ok(VERIFY_READ, from, n))
2268 n = __copy_from_user(to, from, n);
2269 else /* security hole - plug it */
2270 @@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2271
2272 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2273 {
2274 + if ((long)n < 0)
2275 + return n;
2276 +
2277 if (access_ok(VERIFY_WRITE, to, n))
2278 n = __copy_to_user(to, from, n);
2279 return n;
2280 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2281 index 96ee092..37f1844 100644
2282 --- a/arch/arm/include/uapi/asm/ptrace.h
2283 +++ b/arch/arm/include/uapi/asm/ptrace.h
2284 @@ -73,7 +73,7 @@
2285 * ARMv7 groups of PSR bits
2286 */
2287 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2288 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2289 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2290 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2291 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2292
2293 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2294 index 60d3b73..d27ee09 100644
2295 --- a/arch/arm/kernel/armksyms.c
2296 +++ b/arch/arm/kernel/armksyms.c
2297 @@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2298 #ifdef CONFIG_MMU
2299 EXPORT_SYMBOL(copy_page);
2300
2301 -EXPORT_SYMBOL(__copy_from_user);
2302 -EXPORT_SYMBOL(__copy_to_user);
2303 -EXPORT_SYMBOL(__clear_user);
2304 +EXPORT_SYMBOL(___copy_from_user);
2305 +EXPORT_SYMBOL(___copy_to_user);
2306 +EXPORT_SYMBOL(___clear_user);
2307
2308 EXPORT_SYMBOL(__get_user_1);
2309 EXPORT_SYMBOL(__get_user_2);
2310 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2311 index 0f82098..3dbd3ee 100644
2312 --- a/arch/arm/kernel/entry-armv.S
2313 +++ b/arch/arm/kernel/entry-armv.S
2314 @@ -47,6 +47,87 @@
2315 9997:
2316 .endm
2317
2318 + .macro pax_enter_kernel
2319 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2320 + @ make aligned space for saved DACR
2321 + sub sp, sp, #8
2322 + @ save regs
2323 + stmdb sp!, {r1, r2}
2324 + @ read DACR from cpu_domain into r1
2325 + mov r2, sp
2326 + @ assume 8K pages, since we have to split the immediate in two
2327 + bic r2, r2, #(0x1fc0)
2328 + bic r2, r2, #(0x3f)
2329 + ldr r1, [r2, #TI_CPU_DOMAIN]
2330 + @ store old DACR on stack
2331 + str r1, [sp, #8]
2332 +#ifdef CONFIG_PAX_KERNEXEC
2333 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2334 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2335 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2336 +#endif
2337 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2338 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2339 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2340 +#endif
2341 + @ write r1 to current_thread_info()->cpu_domain
2342 + str r1, [r2, #TI_CPU_DOMAIN]
2343 + @ write r1 to DACR
2344 + mcr p15, 0, r1, c3, c0, 0
2345 + @ instruction sync
2346 + instr_sync
2347 + @ restore regs
2348 + ldmia sp!, {r1, r2}
2349 +#endif
2350 + .endm
2351 +
2352 + .macro pax_open_userland
2353 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2354 + @ save regs
2355 + stmdb sp!, {r0, r1}
2356 + @ read DACR from cpu_domain into r1
2357 + mov r0, sp
2358 + @ assume 8K pages, since we have to split the immediate in two
2359 + bic r0, r0, #(0x1fc0)
2360 + bic r0, r0, #(0x3f)
2361 + ldr r1, [r0, #TI_CPU_DOMAIN]
2362 + @ set current DOMAIN_USER to DOMAIN_CLIENT
2363 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2364 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2365 + @ write r1 to current_thread_info()->cpu_domain
2366 + str r1, [r0, #TI_CPU_DOMAIN]
2367 + @ write r1 to DACR
2368 + mcr p15, 0, r1, c3, c0, 0
2369 + @ instruction sync
2370 + instr_sync
2371 + @ restore regs
2372 + ldmia sp!, {r0, r1}
2373 +#endif
2374 + .endm
2375 +
2376 + .macro pax_close_userland
2377 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2378 + @ save regs
2379 + stmdb sp!, {r0, r1}
2380 + @ read DACR from cpu_domain into r1
2381 + mov r0, sp
2382 + @ assume 8K pages, since we have to split the immediate in two
2383 + bic r0, r0, #(0x1fc0)
2384 + bic r0, r0, #(0x3f)
2385 + ldr r1, [r0, #TI_CPU_DOMAIN]
2386 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2387 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2388 + @ write r1 to current_thread_info()->cpu_domain
2389 + str r1, [r0, #TI_CPU_DOMAIN]
2390 + @ write r1 to DACR
2391 + mcr p15, 0, r1, c3, c0, 0
2392 + @ instruction sync
2393 + instr_sync
2394 + @ restore regs
2395 + ldmia sp!, {r0, r1}
2396 +#endif
2397 + .endm
2398 +
2399 .macro pabt_helper
2400 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2401 #ifdef MULTI_PABORT
2402 @@ -89,11 +170,15 @@
2403 * Invalid mode handlers
2404 */
2405 .macro inv_entry, reason
2406 +
2407 + pax_enter_kernel
2408 +
2409 sub sp, sp, #S_FRAME_SIZE
2410 ARM( stmib sp, {r1 - lr} )
2411 THUMB( stmia sp, {r0 - r12} )
2412 THUMB( str sp, [sp, #S_SP] )
2413 THUMB( str lr, [sp, #S_LR] )
2414 +
2415 mov r1, #\reason
2416 .endm
2417
2418 @@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2419 .macro svc_entry, stack_hole=0
2420 UNWIND(.fnstart )
2421 UNWIND(.save {r0 - pc} )
2422 +
2423 + pax_enter_kernel
2424 +
2425 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2426 +
2427 #ifdef CONFIG_THUMB2_KERNEL
2428 SPFIX( str r0, [sp] ) @ temporarily saved
2429 SPFIX( mov r0, sp )
2430 @@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2431 ldmia r0, {r3 - r5}
2432 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2433 mov r6, #-1 @ "" "" "" ""
2434 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2435 + @ offset sp by 8 as done in pax_enter_kernel
2436 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2437 +#else
2438 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2439 +#endif
2440 SPFIX( addeq r2, r2, #4 )
2441 str r3, [sp, #-4]! @ save the "real" r0 copied
2442 @ from the exception stack
2443 @@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2444 .macro usr_entry
2445 UNWIND(.fnstart )
2446 UNWIND(.cantunwind ) @ don't unwind the user space
2447 +
2448 + pax_enter_kernel_user
2449 +
2450 sub sp, sp, #S_FRAME_SIZE
2451 ARM( stmib sp, {r1 - r12} )
2452 THUMB( stmia sp, {r0 - r12} )
2453 @@ -456,7 +553,9 @@ __und_usr:
2454 tst r3, #PSR_T_BIT @ Thumb mode?
2455 bne __und_usr_thumb
2456 sub r4, r2, #4 @ ARM instr at LR - 4
2457 + pax_open_userland
2458 1: ldrt r0, [r4]
2459 + pax_close_userland
2460 #ifdef CONFIG_CPU_ENDIAN_BE8
2461 rev r0, r0 @ little endian instruction
2462 #endif
2463 @@ -491,10 +590,14 @@ __und_usr_thumb:
2464 */
2465 .arch armv6t2
2466 #endif
2467 + pax_open_userland
2468 2: ldrht r5, [r4]
2469 + pax_close_userland
2470 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2471 blo __und_usr_fault_16 @ 16bit undefined instruction
2472 + pax_open_userland
2473 3: ldrht r0, [r2]
2474 + pax_close_userland
2475 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2476 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2477 orr r0, r0, r5, lsl #16
2478 @@ -733,7 +836,7 @@ ENTRY(__switch_to)
2479 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2480 THUMB( str sp, [ip], #4 )
2481 THUMB( str lr, [ip], #4 )
2482 -#ifdef CONFIG_CPU_USE_DOMAINS
2483 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2484 ldr r6, [r2, #TI_CPU_DOMAIN]
2485 #endif
2486 set_tls r3, r4, r5
2487 @@ -742,7 +845,7 @@ ENTRY(__switch_to)
2488 ldr r8, =__stack_chk_guard
2489 ldr r7, [r7, #TSK_STACK_CANARY]
2490 #endif
2491 -#ifdef CONFIG_CPU_USE_DOMAINS
2492 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2493 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2494 #endif
2495 mov r5, r0
2496 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2497 index fefd7f9..e6f250e 100644
2498 --- a/arch/arm/kernel/entry-common.S
2499 +++ b/arch/arm/kernel/entry-common.S
2500 @@ -10,18 +10,46 @@
2501
2502 #include <asm/unistd.h>
2503 #include <asm/ftrace.h>
2504 +#include <asm/domain.h>
2505 #include <asm/unwind.h>
2506
2507 +#include "entry-header.S"
2508 +
2509 #ifdef CONFIG_NEED_RET_TO_USER
2510 #include <mach/entry-macro.S>
2511 #else
2512 .macro arch_ret_to_user, tmp1, tmp2
2513 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2514 + @ save regs
2515 + stmdb sp!, {r1, r2}
2516 + @ read DACR from cpu_domain into r1
2517 + mov r2, sp
2518 + @ assume 8K pages, since we have to split the immediate in two
2519 + bic r2, r2, #(0x1fc0)
2520 + bic r2, r2, #(0x3f)
2521 + ldr r1, [r2, #TI_CPU_DOMAIN]
2522 +#ifdef CONFIG_PAX_KERNEXEC
2523 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2524 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2525 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2526 +#endif
2527 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2528 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2529 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2530 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2531 +#endif
2532 + @ write r1 to current_thread_info()->cpu_domain
2533 + str r1, [r2, #TI_CPU_DOMAIN]
2534 + @ write r1 to DACR
2535 + mcr p15, 0, r1, c3, c0, 0
2536 + @ instruction sync
2537 + instr_sync
2538 + @ restore regs
2539 + ldmia sp!, {r1, r2}
2540 +#endif
2541 .endm
2542 #endif
2543
2544 -#include "entry-header.S"
2545 -
2546 -
2547 .align 5
2548 /*
2549 * This is the fast syscall return path. We do as little as
2550 @@ -351,6 +379,7 @@ ENDPROC(ftrace_stub)
2551
2552 .align 5
2553 ENTRY(vector_swi)
2554 +
2555 sub sp, sp, #S_FRAME_SIZE
2556 stmia sp, {r0 - r12} @ Calling r0 - r12
2557 ARM( add r8, sp, #S_PC )
2558 @@ -400,6 +429,12 @@ ENTRY(vector_swi)
2559 ldr scno, [lr, #-4] @ get SWI instruction
2560 #endif
2561
2562 + /*
2563 + * do this here to avoid a performance hit of wrapping the code above
2564 + * that directly dereferences userland to parse the SWI instruction
2565 + */
2566 + pax_enter_kernel_user
2567 +
2568 #ifdef CONFIG_ALIGNMENT_TRAP
2569 ldr ip, __cr_alignment
2570 ldr ip, [ip]
2571 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2572 index 9a8531e..812e287 100644
2573 --- a/arch/arm/kernel/entry-header.S
2574 +++ b/arch/arm/kernel/entry-header.S
2575 @@ -73,9 +73,66 @@
2576 msr cpsr_c, \rtemp @ switch back to the SVC mode
2577 .endm
2578
2579 + .macro pax_enter_kernel_user
2580 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2581 + @ save regs
2582 + stmdb sp!, {r0, r1}
2583 + @ read DACR from cpu_domain into r1
2584 + mov r0, sp
2585 + @ assume 8K pages, since we have to split the immediate in two
2586 + bic r0, r0, #(0x1fc0)
2587 + bic r0, r0, #(0x3f)
2588 + ldr r1, [r0, #TI_CPU_DOMAIN]
2589 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2590 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2591 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2592 +#endif
2593 +#ifdef CONFIG_PAX_KERNEXEC
2594 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2595 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2596 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2597 +#endif
2598 + @ write r1 to current_thread_info()->cpu_domain
2599 + str r1, [r0, #TI_CPU_DOMAIN]
2600 + @ write r1 to DACR
2601 + mcr p15, 0, r1, c3, c0, 0
2602 + @ instruction sync
2603 + instr_sync
2604 + @ restore regs
2605 + ldmia sp!, {r0, r1}
2606 +#endif
2607 + .endm
2608 +
2609 + .macro pax_exit_kernel
2610 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2611 + @ save regs
2612 + stmdb sp!, {r0, r1}
2613 + @ read old DACR from stack into r1
2614 + ldr r1, [sp, #(8 + S_SP)]
2615 + sub r1, r1, #8
2616 + ldr r1, [r1]
2617 +
2618 + @ write r1 to current_thread_info()->cpu_domain
2619 + mov r0, sp
2620 + @ assume 8K pages, since we have to split the immediate in two
2621 + bic r0, r0, #(0x1fc0)
2622 + bic r0, r0, #(0x3f)
2623 + str r1, [r0, #TI_CPU_DOMAIN]
2624 + @ write r1 to DACR
2625 + mcr p15, 0, r1, c3, c0, 0
2626 + @ instruction sync
2627 + instr_sync
2628 + @ restore regs
2629 + ldmia sp!, {r0, r1}
2630 +#endif
2631 + .endm
2632 +
2633 #ifndef CONFIG_THUMB2_KERNEL
2634 .macro svc_exit, rpsr
2635 msr spsr_cxsf, \rpsr
2636 +
2637 + pax_exit_kernel
2638 +
2639 #if defined(CONFIG_CPU_V6)
2640 ldr r0, [sp]
2641 strex r1, r2, [sp] @ clear the exclusive monitor
2642 @@ -121,6 +178,9 @@
2643 .endm
2644 #else /* CONFIG_THUMB2_KERNEL */
2645 .macro svc_exit, rpsr
2646 +
2647 + pax_exit_kernel
2648 +
2649 ldr lr, [sp, #S_SP] @ top of the stack
2650 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2651 clrex @ clear the exclusive monitor
2652 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2653 index 2adda11..7fbe958 100644
2654 --- a/arch/arm/kernel/fiq.c
2655 +++ b/arch/arm/kernel/fiq.c
2656 @@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2657 #if defined(CONFIG_CPU_USE_DOMAINS)
2658 memcpy((void *)0xffff001c, start, length);
2659 #else
2660 + pax_open_kernel();
2661 memcpy(vectors_page + 0x1c, start, length);
2662 + pax_close_kernel();
2663 #endif
2664 flush_icache_range(0xffff001c, 0xffff001c + length);
2665 if (!vectors_high())
2666 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2667 index 8bac553..caee108 100644
2668 --- a/arch/arm/kernel/head.S
2669 +++ b/arch/arm/kernel/head.S
2670 @@ -52,7 +52,9 @@
2671 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2672
2673 .macro pgtbl, rd, phys
2674 - add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2675 + mov \rd, #TEXT_OFFSET
2676 + sub \rd, #PG_DIR_SIZE
2677 + add \rd, \rd, \phys
2678 .endm
2679
2680 /*
2681 @@ -434,7 +436,7 @@ __enable_mmu:
2682 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2683 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2684 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2685 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2686 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2687 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2688 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2689 #endif
2690 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2691 index 1fd749e..47adb08 100644
2692 --- a/arch/arm/kernel/hw_breakpoint.c
2693 +++ b/arch/arm/kernel/hw_breakpoint.c
2694 @@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2695 return NOTIFY_OK;
2696 }
2697
2698 -static struct notifier_block __cpuinitdata dbg_reset_nb = {
2699 +static struct notifier_block dbg_reset_nb = {
2700 .notifier_call = dbg_reset_notify,
2701 };
2702
2703 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2704 index 1e9be5d..03edbc2 100644
2705 --- a/arch/arm/kernel/module.c
2706 +++ b/arch/arm/kernel/module.c
2707 @@ -37,12 +37,37 @@
2708 #endif
2709
2710 #ifdef CONFIG_MMU
2711 -void *module_alloc(unsigned long size)
2712 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2713 {
2714 + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2715 + return NULL;
2716 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2717 - GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2718 + GFP_KERNEL, prot, -1,
2719 __builtin_return_address(0));
2720 }
2721 +
2722 +void *module_alloc(unsigned long size)
2723 +{
2724 +
2725 +#ifdef CONFIG_PAX_KERNEXEC
2726 + return __module_alloc(size, PAGE_KERNEL);
2727 +#else
2728 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2729 +#endif
2730 +
2731 +}
2732 +
2733 +#ifdef CONFIG_PAX_KERNEXEC
2734 +void module_free_exec(struct module *mod, void *module_region)
2735 +{
2736 + module_free(mod, module_region);
2737 +}
2738 +
2739 +void *module_alloc_exec(unsigned long size)
2740 +{
2741 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2742 +}
2743 +#endif
2744 #endif
2745
2746 int
2747 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2748 index 07314af..c46655c 100644
2749 --- a/arch/arm/kernel/patch.c
2750 +++ b/arch/arm/kernel/patch.c
2751 @@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2752 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2753 int size;
2754
2755 + pax_open_kernel();
2756 if (thumb2 && __opcode_is_thumb16(insn)) {
2757 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2758 size = sizeof(u16);
2759 @@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2760 *(u32 *)addr = insn;
2761 size = sizeof(u32);
2762 }
2763 + pax_close_kernel();
2764
2765 flush_icache_range((uintptr_t)(addr),
2766 (uintptr_t)(addr) + size);
2767 diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2768 index 1f2740e..b36e225 100644
2769 --- a/arch/arm/kernel/perf_event_cpu.c
2770 +++ b/arch/arm/kernel/perf_event_cpu.c
2771 @@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2772 return NOTIFY_OK;
2773 }
2774
2775 -static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2776 +static struct notifier_block cpu_pmu_hotplug_notifier = {
2777 .notifier_call = cpu_pmu_notify,
2778 };
2779
2780 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2781 index 047d3e4..7e96107 100644
2782 --- a/arch/arm/kernel/process.c
2783 +++ b/arch/arm/kernel/process.c
2784 @@ -28,7 +28,6 @@
2785 #include <linux/tick.h>
2786 #include <linux/utsname.h>
2787 #include <linux/uaccess.h>
2788 -#include <linux/random.h>
2789 #include <linux/hw_breakpoint.h>
2790 #include <linux/cpuidle.h>
2791 #include <linux/leds.h>
2792 @@ -251,9 +250,10 @@ void machine_power_off(void)
2793 machine_shutdown();
2794 if (pm_power_off)
2795 pm_power_off();
2796 + BUG();
2797 }
2798
2799 -void machine_restart(char *cmd)
2800 +__noreturn void machine_restart(char *cmd)
2801 {
2802 machine_shutdown();
2803
2804 @@ -278,8 +278,8 @@ void __show_regs(struct pt_regs *regs)
2805 init_utsname()->release,
2806 (int)strcspn(init_utsname()->version, " "),
2807 init_utsname()->version);
2808 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2809 - print_symbol("LR is at %s\n", regs->ARM_lr);
2810 + printk("PC is at %pA\n", instruction_pointer(regs));
2811 + printk("LR is at %pA\n", regs->ARM_lr);
2812 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2813 "sp : %08lx ip : %08lx fp : %08lx\n",
2814 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2815 @@ -447,12 +447,6 @@ unsigned long get_wchan(struct task_struct *p)
2816 return 0;
2817 }
2818
2819 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2820 -{
2821 - unsigned long range_end = mm->brk + 0x02000000;
2822 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2823 -}
2824 -
2825 #ifdef CONFIG_MMU
2826 /*
2827 * The vectors page is always readable from user space for the
2828 @@ -465,9 +459,8 @@ static int __init gate_vma_init(void)
2829 {
2830 gate_vma.vm_start = 0xffff0000;
2831 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2832 - gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2833 - gate_vma.vm_flags = VM_READ | VM_EXEC |
2834 - VM_MAYREAD | VM_MAYEXEC;
2835 + gate_vma.vm_flags = VM_NONE;
2836 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2837 return 0;
2838 }
2839 arch_initcall(gate_vma_init);
2840 diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2841 index 3653164..d83e55d 100644
2842 --- a/arch/arm/kernel/psci.c
2843 +++ b/arch/arm/kernel/psci.c
2844 @@ -24,7 +24,7 @@
2845 #include <asm/opcodes-virt.h>
2846 #include <asm/psci.h>
2847
2848 -struct psci_operations psci_ops;
2849 +struct psci_operations psci_ops __read_only;
2850
2851 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2852
2853 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2854 index 03deeff..741ce88 100644
2855 --- a/arch/arm/kernel/ptrace.c
2856 +++ b/arch/arm/kernel/ptrace.c
2857 @@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2858 return current_thread_info()->syscall;
2859 }
2860
2861 +#ifdef CONFIG_GRKERNSEC_SETXID
2862 +extern void gr_delayed_cred_worker(void);
2863 +#endif
2864 +
2865 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2866 {
2867 current_thread_info()->syscall = scno;
2868
2869 +#ifdef CONFIG_GRKERNSEC_SETXID
2870 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2871 + gr_delayed_cred_worker();
2872 +#endif
2873 +
2874 /* Do the secure computing check first; failures should be fast. */
2875 if (secure_computing(scno) == -1)
2876 return -1;
2877 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2878 index 234e339..81264a1 100644
2879 --- a/arch/arm/kernel/setup.c
2880 +++ b/arch/arm/kernel/setup.c
2881 @@ -96,21 +96,23 @@ EXPORT_SYMBOL(system_serial_high);
2882 unsigned int elf_hwcap __read_mostly;
2883 EXPORT_SYMBOL(elf_hwcap);
2884
2885 +pteval_t __supported_pte_mask __read_only;
2886 +pmdval_t __supported_pmd_mask __read_only;
2887
2888 #ifdef MULTI_CPU
2889 -struct processor processor __read_mostly;
2890 +struct processor processor;
2891 #endif
2892 #ifdef MULTI_TLB
2893 -struct cpu_tlb_fns cpu_tlb __read_mostly;
2894 +struct cpu_tlb_fns cpu_tlb __read_only;
2895 #endif
2896 #ifdef MULTI_USER
2897 -struct cpu_user_fns cpu_user __read_mostly;
2898 +struct cpu_user_fns cpu_user __read_only;
2899 #endif
2900 #ifdef MULTI_CACHE
2901 -struct cpu_cache_fns cpu_cache __read_mostly;
2902 +struct cpu_cache_fns cpu_cache __read_only;
2903 #endif
2904 #ifdef CONFIG_OUTER_CACHE
2905 -struct outer_cache_fns outer_cache __read_mostly;
2906 +struct outer_cache_fns outer_cache __read_only;
2907 EXPORT_SYMBOL(outer_cache);
2908 #endif
2909
2910 @@ -235,9 +237,13 @@ static int __get_cpu_architecture(void)
2911 asm("mrc p15, 0, %0, c0, c1, 4"
2912 : "=r" (mmfr0));
2913 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2914 - (mmfr0 & 0x000000f0) >= 0x00000030)
2915 + (mmfr0 & 0x000000f0) >= 0x00000030) {
2916 cpu_arch = CPU_ARCH_ARMv7;
2917 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2918 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2919 + __supported_pte_mask |= L_PTE_PXN;
2920 + __supported_pmd_mask |= PMD_PXNTABLE;
2921 + }
2922 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2923 (mmfr0 & 0x000000f0) == 0x00000020)
2924 cpu_arch = CPU_ARCH_ARMv6;
2925 else
2926 @@ -478,7 +484,7 @@ static void __init setup_processor(void)
2927 __cpu_architecture = __get_cpu_architecture();
2928
2929 #ifdef MULTI_CPU
2930 - processor = *list->proc;
2931 + memcpy((void *)&processor, list->proc, sizeof processor);
2932 #endif
2933 #ifdef MULTI_TLB
2934 cpu_tlb = *list->tlb;
2935 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
2936 index 296786b..a8d4dd5 100644
2937 --- a/arch/arm/kernel/signal.c
2938 +++ b/arch/arm/kernel/signal.c
2939 @@ -396,22 +396,14 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
2940 __put_user(sigreturn_codes[idx+1], rc+1))
2941 return 1;
2942
2943 - if (cpsr & MODE32_BIT) {
2944 - /*
2945 - * 32-bit code can use the new high-page
2946 - * signal return code support.
2947 - */
2948 - retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
2949 - } else {
2950 - /*
2951 - * Ensure that the instruction cache sees
2952 - * the return code written onto the stack.
2953 - */
2954 - flush_icache_range((unsigned long)rc,
2955 - (unsigned long)(rc + 2));
2956 + /*
2957 + * Ensure that the instruction cache sees
2958 + * the return code written onto the stack.
2959 + */
2960 + flush_icache_range((unsigned long)rc,
2961 + (unsigned long)(rc + 2));
2962
2963 - retcode = ((unsigned long)rc) + thumb;
2964 - }
2965 + retcode = ((unsigned long)rc) + thumb;
2966 }
2967
2968 regs->ARM_r0 = map_sig(ksig->sig);
2969 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2970 index 1f2cccc..f40c02e 100644
2971 --- a/arch/arm/kernel/smp.c
2972 +++ b/arch/arm/kernel/smp.c
2973 @@ -70,7 +70,7 @@ enum ipi_msg_type {
2974
2975 static DECLARE_COMPLETION(cpu_running);
2976
2977 -static struct smp_operations smp_ops;
2978 +static struct smp_operations smp_ops __read_only;
2979
2980 void __init smp_set_ops(struct smp_operations *ops)
2981 {
2982 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2983 index 1c08911..264f009 100644
2984 --- a/arch/arm/kernel/traps.c
2985 +++ b/arch/arm/kernel/traps.c
2986 @@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
2987 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
2988 {
2989 #ifdef CONFIG_KALLSYMS
2990 - printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
2991 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
2992 #else
2993 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
2994 #endif
2995 @@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2996 static int die_owner = -1;
2997 static unsigned int die_nest_count;
2998
2999 +extern void gr_handle_kernel_exploit(void);
3000 +
3001 static unsigned long oops_begin(void)
3002 {
3003 int cpu;
3004 @@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3005 panic("Fatal exception in interrupt");
3006 if (panic_on_oops)
3007 panic("Fatal exception");
3008 +
3009 + gr_handle_kernel_exploit();
3010 +
3011 if (signr)
3012 do_exit(signr);
3013 }
3014 @@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3015 * The user helper at 0xffff0fe0 must be used instead.
3016 * (see entry-armv.S for details)
3017 */
3018 + pax_open_kernel();
3019 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3020 + pax_close_kernel();
3021 }
3022 return 0;
3023
3024 @@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3025 */
3026 kuser_get_tls_init(vectors);
3027
3028 - /*
3029 - * Copy signal return handlers into the vector page, and
3030 - * set sigreturn to be a pointer to these.
3031 - */
3032 - memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3033 - sigreturn_codes, sizeof(sigreturn_codes));
3034 -
3035 flush_icache_range(vectors, vectors + PAGE_SIZE);
3036 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3037 +
3038 +#ifndef CONFIG_PAX_MEMORY_UDEREF
3039 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3040 +#endif
3041 +
3042 }
3043 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3044 index b571484..4b2fc9b 100644
3045 --- a/arch/arm/kernel/vmlinux.lds.S
3046 +++ b/arch/arm/kernel/vmlinux.lds.S
3047 @@ -8,7 +8,11 @@
3048 #include <asm/thread_info.h>
3049 #include <asm/memory.h>
3050 #include <asm/page.h>
3051 -
3052 +
3053 +#ifdef CONFIG_PAX_KERNEXEC
3054 +#include <asm/pgtable.h>
3055 +#endif
3056 +
3057 #define PROC_INFO \
3058 . = ALIGN(4); \
3059 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3060 @@ -94,6 +98,11 @@ SECTIONS
3061 _text = .;
3062 HEAD_TEXT
3063 }
3064 +
3065 +#ifdef CONFIG_PAX_KERNEXEC
3066 + . = ALIGN(1<<SECTION_SHIFT);
3067 +#endif
3068 +
3069 .text : { /* Real text segment */
3070 _stext = .; /* Text and read-only data */
3071 __exception_text_start = .;
3072 @@ -116,6 +125,8 @@ SECTIONS
3073 ARM_CPU_KEEP(PROC_INFO)
3074 }
3075
3076 + _etext = .; /* End of text section */
3077 +
3078 RO_DATA(PAGE_SIZE)
3079
3080 . = ALIGN(4);
3081 @@ -146,7 +157,9 @@ SECTIONS
3082
3083 NOTES
3084
3085 - _etext = .; /* End of text and rodata section */
3086 +#ifdef CONFIG_PAX_KERNEXEC
3087 + . = ALIGN(1<<SECTION_SHIFT);
3088 +#endif
3089
3090 #ifndef CONFIG_XIP_KERNEL
3091 . = ALIGN(PAGE_SIZE);
3092 @@ -207,6 +220,11 @@ SECTIONS
3093 . = PAGE_OFFSET + TEXT_OFFSET;
3094 #else
3095 __init_end = .;
3096 +
3097 +#ifdef CONFIG_PAX_KERNEXEC
3098 + . = ALIGN(1<<SECTION_SHIFT);
3099 +#endif
3100 +
3101 . = ALIGN(THREAD_SIZE);
3102 __data_loc = .;
3103 #endif
3104 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3105 index 14a0d98..7771a7d 100644
3106 --- a/arch/arm/lib/clear_user.S
3107 +++ b/arch/arm/lib/clear_user.S
3108 @@ -12,14 +12,14 @@
3109
3110 .text
3111
3112 -/* Prototype: int __clear_user(void *addr, size_t sz)
3113 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3114 * Purpose : clear some user memory
3115 * Params : addr - user memory address to clear
3116 * : sz - number of bytes to clear
3117 * Returns : number of bytes NOT cleared
3118 */
3119 ENTRY(__clear_user_std)
3120 -WEAK(__clear_user)
3121 +WEAK(___clear_user)
3122 stmfd sp!, {r1, lr}
3123 mov r2, #0
3124 cmp r1, #4
3125 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3126 USER( strnebt r2, [r0])
3127 mov r0, #0
3128 ldmfd sp!, {r1, pc}
3129 -ENDPROC(__clear_user)
3130 +ENDPROC(___clear_user)
3131 ENDPROC(__clear_user_std)
3132
3133 .pushsection .fixup,"ax"
3134 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3135 index 66a477a..bee61d3 100644
3136 --- a/arch/arm/lib/copy_from_user.S
3137 +++ b/arch/arm/lib/copy_from_user.S
3138 @@ -16,7 +16,7 @@
3139 /*
3140 * Prototype:
3141 *
3142 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3143 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3144 *
3145 * Purpose:
3146 *
3147 @@ -84,11 +84,11 @@
3148
3149 .text
3150
3151 -ENTRY(__copy_from_user)
3152 +ENTRY(___copy_from_user)
3153
3154 #include "copy_template.S"
3155
3156 -ENDPROC(__copy_from_user)
3157 +ENDPROC(___copy_from_user)
3158
3159 .pushsection .fixup,"ax"
3160 .align 0
3161 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3162 index 6ee2f67..d1cce76 100644
3163 --- a/arch/arm/lib/copy_page.S
3164 +++ b/arch/arm/lib/copy_page.S
3165 @@ -10,6 +10,7 @@
3166 * ASM optimised string functions
3167 */
3168 #include <linux/linkage.h>
3169 +#include <linux/const.h>
3170 #include <asm/assembler.h>
3171 #include <asm/asm-offsets.h>
3172 #include <asm/cache.h>
3173 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3174 index d066df6..df28194 100644
3175 --- a/arch/arm/lib/copy_to_user.S
3176 +++ b/arch/arm/lib/copy_to_user.S
3177 @@ -16,7 +16,7 @@
3178 /*
3179 * Prototype:
3180 *
3181 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3182 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3183 *
3184 * Purpose:
3185 *
3186 @@ -88,11 +88,11 @@
3187 .text
3188
3189 ENTRY(__copy_to_user_std)
3190 -WEAK(__copy_to_user)
3191 +WEAK(___copy_to_user)
3192
3193 #include "copy_template.S"
3194
3195 -ENDPROC(__copy_to_user)
3196 +ENDPROC(___copy_to_user)
3197 ENDPROC(__copy_to_user_std)
3198
3199 .pushsection .fixup,"ax"
3200 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3201 index 7d08b43..f7ca7ea 100644
3202 --- a/arch/arm/lib/csumpartialcopyuser.S
3203 +++ b/arch/arm/lib/csumpartialcopyuser.S
3204 @@ -57,8 +57,8 @@
3205 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3206 */
3207
3208 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3209 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3210 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3211 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3212
3213 #include "csumpartialcopygeneric.S"
3214
3215 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3216 index 64dbfa5..84a3fd9 100644
3217 --- a/arch/arm/lib/delay.c
3218 +++ b/arch/arm/lib/delay.c
3219 @@ -28,7 +28,7 @@
3220 /*
3221 * Default to the loop-based delay implementation.
3222 */
3223 -struct arm_delay_ops arm_delay_ops = {
3224 +struct arm_delay_ops arm_delay_ops __read_only = {
3225 .delay = __loop_delay,
3226 .const_udelay = __loop_const_udelay,
3227 .udelay = __loop_udelay,
3228 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3229 index 025f742..8432b08 100644
3230 --- a/arch/arm/lib/uaccess_with_memcpy.c
3231 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3232 @@ -104,7 +104,7 @@ out:
3233 }
3234
3235 unsigned long
3236 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3237 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3238 {
3239 /*
3240 * This test is stubbed out of the main function above to keep
3241 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3242 index 49792a0..f192052 100644
3243 --- a/arch/arm/mach-kirkwood/common.c
3244 +++ b/arch/arm/mach-kirkwood/common.c
3245 @@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3246 clk_gate_ops.disable(hw);
3247 }
3248
3249 -static struct clk_ops clk_gate_fn_ops;
3250 +static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3251 +{
3252 + return clk_gate_ops.is_enabled(hw);
3253 +}
3254 +
3255 +static struct clk_ops clk_gate_fn_ops = {
3256 + .enable = clk_gate_fn_enable,
3257 + .disable = clk_gate_fn_disable,
3258 + .is_enabled = clk_gate_fn_is_enabled,
3259 +};
3260
3261 static struct clk __init *clk_register_gate_fn(struct device *dev,
3262 const char *name,
3263 @@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3264 gate_fn->fn_en = fn_en;
3265 gate_fn->fn_dis = fn_dis;
3266
3267 - /* ops is the gate ops, but with our enable/disable functions */
3268 - if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3269 - clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3270 - clk_gate_fn_ops = clk_gate_ops;
3271 - clk_gate_fn_ops.enable = clk_gate_fn_enable;
3272 - clk_gate_fn_ops.disable = clk_gate_fn_disable;
3273 - }
3274 -
3275 clk = clk_register(dev, &gate_fn->gate.hw);
3276
3277 if (IS_ERR(clk))
3278 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3279 index f6eeb87..cc90868 100644
3280 --- a/arch/arm/mach-omap2/board-n8x0.c
3281 +++ b/arch/arm/mach-omap2/board-n8x0.c
3282 @@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3283 }
3284 #endif
3285
3286 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3287 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3288 .late_init = n8x0_menelaus_late_init,
3289 };
3290
3291 diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3292 index 410e1ba..1d2dd59 100644
3293 --- a/arch/arm/mach-omap2/gpmc.c
3294 +++ b/arch/arm/mach-omap2/gpmc.c
3295 @@ -145,7 +145,6 @@ struct omap3_gpmc_regs {
3296 };
3297
3298 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3299 -static struct irq_chip gpmc_irq_chip;
3300 static unsigned gpmc_irq_start;
3301
3302 static struct resource gpmc_mem_root;
3303 @@ -707,6 +706,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3304
3305 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3306
3307 +static struct irq_chip gpmc_irq_chip = {
3308 + .name = "gpmc",
3309 + .irq_startup = gpmc_irq_noop_ret,
3310 + .irq_enable = gpmc_irq_enable,
3311 + .irq_disable = gpmc_irq_disable,
3312 + .irq_shutdown = gpmc_irq_noop,
3313 + .irq_ack = gpmc_irq_noop,
3314 + .irq_mask = gpmc_irq_noop,
3315 + .irq_unmask = gpmc_irq_noop,
3316 +
3317 +};
3318 +
3319 static int gpmc_setup_irq(void)
3320 {
3321 int i;
3322 @@ -721,15 +732,6 @@ static int gpmc_setup_irq(void)
3323 return gpmc_irq_start;
3324 }
3325
3326 - gpmc_irq_chip.name = "gpmc";
3327 - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3328 - gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3329 - gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3330 - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3331 - gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3332 - gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3333 - gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3334 -
3335 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3336 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3337
3338 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3339 index f8bb3b9..831e7b8 100644
3340 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3341 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3342 @@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3343 return NOTIFY_OK;
3344 }
3345
3346 -static struct notifier_block __refdata irq_hotplug_notifier = {
3347 +static struct notifier_block irq_hotplug_notifier = {
3348 .notifier_call = irq_cpu_hotplug_notify,
3349 };
3350
3351 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3352 index 381be7a..89b9c7e 100644
3353 --- a/arch/arm/mach-omap2/omap_device.c
3354 +++ b/arch/arm/mach-omap2/omap_device.c
3355 @@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3356 struct platform_device __init *omap_device_build(const char *pdev_name,
3357 int pdev_id,
3358 struct omap_hwmod *oh,
3359 - void *pdata, int pdata_len)
3360 + const void *pdata, int pdata_len)
3361 {
3362 struct omap_hwmod *ohs[] = { oh };
3363
3364 @@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3365 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3366 int pdev_id,
3367 struct omap_hwmod **ohs,
3368 - int oh_cnt, void *pdata,
3369 + int oh_cnt, const void *pdata,
3370 int pdata_len)
3371 {
3372 int ret = -ENOMEM;
3373 diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3374 index 044c31d..2ee0861 100644
3375 --- a/arch/arm/mach-omap2/omap_device.h
3376 +++ b/arch/arm/mach-omap2/omap_device.h
3377 @@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3378 /* Core code interface */
3379
3380 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3381 - struct omap_hwmod *oh, void *pdata,
3382 + struct omap_hwmod *oh, const void *pdata,
3383 int pdata_len);
3384
3385 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3386 struct omap_hwmod **oh, int oh_cnt,
3387 - void *pdata, int pdata_len);
3388 + const void *pdata, int pdata_len);
3389
3390 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3391 struct omap_hwmod **ohs, int oh_cnt);
3392 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3393 index a202a47..c430564 100644
3394 --- a/arch/arm/mach-omap2/omap_hwmod.c
3395 +++ b/arch/arm/mach-omap2/omap_hwmod.c
3396 @@ -191,10 +191,10 @@ struct omap_hwmod_soc_ops {
3397 int (*init_clkdm)(struct omap_hwmod *oh);
3398 void (*update_context_lost)(struct omap_hwmod *oh);
3399 int (*get_context_lost)(struct omap_hwmod *oh);
3400 -};
3401 +} __no_const;
3402
3403 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3404 -static struct omap_hwmod_soc_ops soc_ops;
3405 +static struct omap_hwmod_soc_ops soc_ops __read_only;
3406
3407 /* omap_hwmod_list contains all registered struct omap_hwmods */
3408 static LIST_HEAD(omap_hwmod_list);
3409 diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3410 index d15c7bb..b2d1f0c 100644
3411 --- a/arch/arm/mach-omap2/wd_timer.c
3412 +++ b/arch/arm/mach-omap2/wd_timer.c
3413 @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3414 struct omap_hwmod *oh;
3415 char *oh_name = "wd_timer2";
3416 char *dev_name = "omap_wdt";
3417 - struct omap_wd_timer_platform_data pdata;
3418 + static struct omap_wd_timer_platform_data pdata = {
3419 + .read_reset_sources = prm_read_reset_sources
3420 + };
3421
3422 if (!cpu_class_is_omap2() || of_have_populated_dt())
3423 return 0;
3424 @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3425 return -EINVAL;
3426 }
3427
3428 - pdata.read_reset_sources = prm_read_reset_sources;
3429 -
3430 pdev = omap_device_build(dev_name, id, oh, &pdata,
3431 sizeof(struct omap_wd_timer_platform_data));
3432 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3433 diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3434 index bddce2b..3eb04e2 100644
3435 --- a/arch/arm/mach-ux500/include/mach/setup.h
3436 +++ b/arch/arm/mach-ux500/include/mach/setup.h
3437 @@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3438 .type = MT_DEVICE, \
3439 }
3440
3441 -#define __MEM_DEV_DESC(x, sz) { \
3442 - .virtual = IO_ADDRESS(x), \
3443 - .pfn = __phys_to_pfn(x), \
3444 - .length = sz, \
3445 - .type = MT_MEMORY, \
3446 -}
3447 -
3448 extern struct smp_operations ux500_smp_ops;
3449 extern void ux500_cpu_die(unsigned int cpu);
3450
3451 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3452 index 4045c49..4e26c79 100644
3453 --- a/arch/arm/mm/Kconfig
3454 +++ b/arch/arm/mm/Kconfig
3455 @@ -425,7 +425,7 @@ config CPU_32v5
3456
3457 config CPU_32v6
3458 bool
3459 - select CPU_USE_DOMAINS if CPU_V6 && MMU
3460 + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3461 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3462
3463 config CPU_32v6K
3464 @@ -574,6 +574,7 @@ config CPU_CP15_MPU
3465
3466 config CPU_USE_DOMAINS
3467 bool
3468 + depends on !ARM_LPAE && !PAX_KERNEXEC
3469 help
3470 This option enables or disables the use of domain switching
3471 via the set_fs() function.
3472 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3473 index db26e2e..ee44569 100644
3474 --- a/arch/arm/mm/alignment.c
3475 +++ b/arch/arm/mm/alignment.c
3476 @@ -211,10 +211,12 @@ union offset_union {
3477 #define __get16_unaligned_check(ins,val,addr) \
3478 do { \
3479 unsigned int err = 0, v, a = addr; \
3480 + pax_open_userland(); \
3481 __get8_unaligned_check(ins,v,a,err); \
3482 val = v << ((BE) ? 8 : 0); \
3483 __get8_unaligned_check(ins,v,a,err); \
3484 val |= v << ((BE) ? 0 : 8); \
3485 + pax_close_userland(); \
3486 if (err) \
3487 goto fault; \
3488 } while (0)
3489 @@ -228,6 +230,7 @@ union offset_union {
3490 #define __get32_unaligned_check(ins,val,addr) \
3491 do { \
3492 unsigned int err = 0, v, a = addr; \
3493 + pax_open_userland(); \
3494 __get8_unaligned_check(ins,v,a,err); \
3495 val = v << ((BE) ? 24 : 0); \
3496 __get8_unaligned_check(ins,v,a,err); \
3497 @@ -236,6 +239,7 @@ union offset_union {
3498 val |= v << ((BE) ? 8 : 16); \
3499 __get8_unaligned_check(ins,v,a,err); \
3500 val |= v << ((BE) ? 0 : 24); \
3501 + pax_close_userland(); \
3502 if (err) \
3503 goto fault; \
3504 } while (0)
3505 @@ -249,6 +253,7 @@ union offset_union {
3506 #define __put16_unaligned_check(ins,val,addr) \
3507 do { \
3508 unsigned int err = 0, v = val, a = addr; \
3509 + pax_open_userland(); \
3510 __asm__( FIRST_BYTE_16 \
3511 ARM( "1: "ins" %1, [%2], #1\n" ) \
3512 THUMB( "1: "ins" %1, [%2]\n" ) \
3513 @@ -268,6 +273,7 @@ union offset_union {
3514 " .popsection\n" \
3515 : "=r" (err), "=&r" (v), "=&r" (a) \
3516 : "0" (err), "1" (v), "2" (a)); \
3517 + pax_close_userland(); \
3518 if (err) \
3519 goto fault; \
3520 } while (0)
3521 @@ -281,6 +287,7 @@ union offset_union {
3522 #define __put32_unaligned_check(ins,val,addr) \
3523 do { \
3524 unsigned int err = 0, v = val, a = addr; \
3525 + pax_open_userland(); \
3526 __asm__( FIRST_BYTE_32 \
3527 ARM( "1: "ins" %1, [%2], #1\n" ) \
3528 THUMB( "1: "ins" %1, [%2]\n" ) \
3529 @@ -310,6 +317,7 @@ union offset_union {
3530 " .popsection\n" \
3531 : "=r" (err), "=&r" (v), "=&r" (a) \
3532 : "0" (err), "1" (v), "2" (a)); \
3533 + pax_close_userland(); \
3534 if (err) \
3535 goto fault; \
3536 } while (0)
3537 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3538 index 5dbf13f..1a60561 100644
3539 --- a/arch/arm/mm/fault.c
3540 +++ b/arch/arm/mm/fault.c
3541 @@ -25,6 +25,7 @@
3542 #include <asm/system_misc.h>
3543 #include <asm/system_info.h>
3544 #include <asm/tlbflush.h>
3545 +#include <asm/sections.h>
3546
3547 #include "fault.h"
3548
3549 @@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3550 if (fixup_exception(regs))
3551 return;
3552
3553 +#ifdef CONFIG_PAX_KERNEXEC
3554 + if ((fsr & FSR_WRITE) &&
3555 + (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3556 + (MODULES_VADDR <= addr && addr < MODULES_END)))
3557 + {
3558 + if (current->signal->curr_ip)
3559 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3560 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3561 + else
3562 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3563 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3564 + }
3565 +#endif
3566 +
3567 /*
3568 * No handler, we'll have to terminate things with extreme prejudice.
3569 */
3570 @@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3571 }
3572 #endif
3573
3574 +#ifdef CONFIG_PAX_PAGEEXEC
3575 + if (fsr & FSR_LNX_PF) {
3576 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3577 + do_group_exit(SIGKILL);
3578 + }
3579 +#endif
3580 +
3581 tsk->thread.address = addr;
3582 tsk->thread.error_code = fsr;
3583 tsk->thread.trap_no = 14;
3584 @@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3585 }
3586 #endif /* CONFIG_MMU */
3587
3588 +#ifdef CONFIG_PAX_PAGEEXEC
3589 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3590 +{
3591 + long i;
3592 +
3593 + printk(KERN_ERR "PAX: bytes at PC: ");
3594 + for (i = 0; i < 20; i++) {
3595 + unsigned char c;
3596 + if (get_user(c, (__force unsigned char __user *)pc+i))
3597 + printk(KERN_CONT "?? ");
3598 + else
3599 + printk(KERN_CONT "%02x ", c);
3600 + }
3601 + printk("\n");
3602 +
3603 + printk(KERN_ERR "PAX: bytes at SP-4: ");
3604 + for (i = -1; i < 20; i++) {
3605 + unsigned long c;
3606 + if (get_user(c, (__force unsigned long __user *)sp+i))
3607 + printk(KERN_CONT "???????? ");
3608 + else
3609 + printk(KERN_CONT "%08lx ", c);
3610 + }
3611 + printk("\n");
3612 +}
3613 +#endif
3614 +
3615 /*
3616 * First Level Translation Fault Handler
3617 *
3618 @@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3619 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3620 struct siginfo info;
3621
3622 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3623 + if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3624 + if (current->signal->curr_ip)
3625 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3626 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3627 + else
3628 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3629 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3630 + goto die;
3631 + }
3632 +#endif
3633 +
3634 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3635 return;
3636
3637 +die:
3638 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3639 inf->name, fsr, addr);
3640
3641 @@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3642 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3643 struct siginfo info;
3644
3645 + if (user_mode(regs)) {
3646 + if (addr == 0xffff0fe0UL) {
3647 + /*
3648 + * PaX: __kuser_get_tls emulation
3649 + */
3650 + regs->ARM_r0 = current_thread_info()->tp_value;
3651 + regs->ARM_pc = regs->ARM_lr;
3652 + return;
3653 + }
3654 + }
3655 +
3656 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3657 + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3658 + if (current->signal->curr_ip)
3659 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3660 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3661 + addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3662 + else
3663 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3664 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3665 + addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3666 + goto die;
3667 + }
3668 +#endif
3669 +
3670 +#ifdef CONFIG_PAX_REFCOUNT
3671 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3672 + unsigned int bkpt;
3673 +
3674 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3675 + current->thread.error_code = ifsr;
3676 + current->thread.trap_no = 0;
3677 + pax_report_refcount_overflow(regs);
3678 + fixup_exception(regs);
3679 + return;
3680 + }
3681 + }
3682 +#endif
3683 +
3684 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3685 return;
3686
3687 +die:
3688 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3689 inf->name, ifsr, addr);
3690
3691 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3692 index cf08bdf..772656c 100644
3693 --- a/arch/arm/mm/fault.h
3694 +++ b/arch/arm/mm/fault.h
3695 @@ -3,6 +3,7 @@
3696
3697 /*
3698 * Fault status register encodings. We steal bit 31 for our own purposes.
3699 + * Set when the FSR value is from an instruction fault.
3700 */
3701 #define FSR_LNX_PF (1 << 31)
3702 #define FSR_WRITE (1 << 11)
3703 @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3704 }
3705 #endif
3706
3707 +/* valid for LPAE and !LPAE */
3708 +static inline int is_xn_fault(unsigned int fsr)
3709 +{
3710 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
3711 +}
3712 +
3713 +static inline int is_domain_fault(unsigned int fsr)
3714 +{
3715 + return ((fsr_fs(fsr) & 0xD) == 0x9);
3716 +}
3717 +
3718 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3719 unsigned long search_exception_table(unsigned long addr);
3720
3721 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3722 index ad722f1..763fdd3 100644
3723 --- a/arch/arm/mm/init.c
3724 +++ b/arch/arm/mm/init.c
3725 @@ -30,6 +30,8 @@
3726 #include <asm/setup.h>
3727 #include <asm/tlb.h>
3728 #include <asm/fixmap.h>
3729 +#include <asm/system_info.h>
3730 +#include <asm/cp15.h>
3731
3732 #include <asm/mach/arch.h>
3733 #include <asm/mach/map.h>
3734 @@ -736,7 +738,46 @@ void free_initmem(void)
3735 {
3736 #ifdef CONFIG_HAVE_TCM
3737 extern char __tcm_start, __tcm_end;
3738 +#endif
3739
3740 +#ifdef CONFIG_PAX_KERNEXEC
3741 + unsigned long addr;
3742 + pgd_t *pgd;
3743 + pud_t *pud;
3744 + pmd_t *pmd;
3745 + int cpu_arch = cpu_architecture();
3746 + unsigned int cr = get_cr();
3747 +
3748 + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3749 + /* make pages tables, etc before .text NX */
3750 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3751 + pgd = pgd_offset_k(addr);
3752 + pud = pud_offset(pgd, addr);
3753 + pmd = pmd_offset(pud, addr);
3754 + __section_update(pmd, addr, PMD_SECT_XN);
3755 + }
3756 + /* make init NX */
3757 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3758 + pgd = pgd_offset_k(addr);
3759 + pud = pud_offset(pgd, addr);
3760 + pmd = pmd_offset(pud, addr);
3761 + __section_update(pmd, addr, PMD_SECT_XN);
3762 + }
3763 + /* make kernel code/rodata RX */
3764 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3765 + pgd = pgd_offset_k(addr);
3766 + pud = pud_offset(pgd, addr);
3767 + pmd = pmd_offset(pud, addr);
3768 +#ifdef CONFIG_ARM_LPAE
3769 + __section_update(pmd, addr, PMD_SECT_RDONLY);
3770 +#else
3771 + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3772 +#endif
3773 + }
3774 + }
3775 +#endif
3776 +
3777 +#ifdef CONFIG_HAVE_TCM
3778 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3779 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
3780 __phys_to_pfn(__pa(&__tcm_end)),
3781 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3782 index 04d9006..c547d85 100644
3783 --- a/arch/arm/mm/ioremap.c
3784 +++ b/arch/arm/mm/ioremap.c
3785 @@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3786 unsigned int mtype;
3787
3788 if (cached)
3789 - mtype = MT_MEMORY;
3790 + mtype = MT_MEMORY_RX;
3791 else
3792 - mtype = MT_MEMORY_NONCACHED;
3793 + mtype = MT_MEMORY_NONCACHED_RX;
3794
3795 return __arm_ioremap_caller(phys_addr, size, mtype,
3796 __builtin_return_address(0));
3797 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3798 index 10062ce..cd34fb9 100644
3799 --- a/arch/arm/mm/mmap.c
3800 +++ b/arch/arm/mm/mmap.c
3801 @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3802 struct vm_area_struct *vma;
3803 int do_align = 0;
3804 int aliasing = cache_is_vipt_aliasing();
3805 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3806 struct vm_unmapped_area_info info;
3807
3808 /*
3809 @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3810 if (len > TASK_SIZE)
3811 return -ENOMEM;
3812
3813 +#ifdef CONFIG_PAX_RANDMMAP
3814 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3815 +#endif
3816 +
3817 if (addr) {
3818 if (do_align)
3819 addr = COLOUR_ALIGN(addr, pgoff);
3820 @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3821 addr = PAGE_ALIGN(addr);
3822
3823 vma = find_vma(mm, addr);
3824 - if (TASK_SIZE - len >= addr &&
3825 - (!vma || addr + len <= vma->vm_start))
3826 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3827 return addr;
3828 }
3829
3830 @@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3831 info.high_limit = TASK_SIZE;
3832 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
3833 info.align_offset = pgoff << PAGE_SHIFT;
3834 + info.threadstack_offset = offset;
3835 return vm_unmapped_area(&info);
3836 }
3837
3838 @@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3839 unsigned long addr = addr0;
3840 int do_align = 0;
3841 int aliasing = cache_is_vipt_aliasing();
3842 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3843 struct vm_unmapped_area_info info;
3844
3845 /*
3846 @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3847 return addr;
3848 }
3849
3850 +#ifdef CONFIG_PAX_RANDMMAP
3851 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3852 +#endif
3853 +
3854 /* requesting a specific address */
3855 if (addr) {
3856 if (do_align)
3857 @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3858 else
3859 addr = PAGE_ALIGN(addr);
3860 vma = find_vma(mm, addr);
3861 - if (TASK_SIZE - len >= addr &&
3862 - (!vma || addr + len <= vma->vm_start))
3863 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3864 return addr;
3865 }
3866
3867 @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3868 info.high_limit = mm->mmap_base;
3869 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
3870 info.align_offset = pgoff << PAGE_SHIFT;
3871 + info.threadstack_offset = offset;
3872 addr = vm_unmapped_area(&info);
3873
3874 /*
3875 @@ -162,6 +172,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3876 VM_BUG_ON(addr != -ENOMEM);
3877 info.flags = 0;
3878 info.low_limit = mm->mmap_base;
3879 +
3880 +#ifdef CONFIG_PAX_RANDMMAP
3881 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3882 + info.low_limit += mm->delta_mmap;
3883 +#endif
3884 +
3885 info.high_limit = TASK_SIZE;
3886 addr = vm_unmapped_area(&info);
3887 }
3888 @@ -173,6 +189,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3889 {
3890 unsigned long random_factor = 0UL;
3891
3892 +#ifdef CONFIG_PAX_RANDMMAP
3893 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3894 +#endif
3895 +
3896 /* 8 bits of randomness in 20 address space bits */
3897 if ((current->flags & PF_RANDOMIZE) &&
3898 !(current->personality & ADDR_NO_RANDOMIZE))
3899 @@ -180,10 +200,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3900
3901 if (mmap_is_legacy()) {
3902 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3903 +
3904 +#ifdef CONFIG_PAX_RANDMMAP
3905 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3906 + mm->mmap_base += mm->delta_mmap;
3907 +#endif
3908 +
3909 mm->get_unmapped_area = arch_get_unmapped_area;
3910 mm->unmap_area = arch_unmap_area;
3911 } else {
3912 mm->mmap_base = mmap_base(random_factor);
3913 +
3914 +#ifdef CONFIG_PAX_RANDMMAP
3915 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3916 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3917 +#endif
3918 +
3919 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3920 mm->unmap_area = arch_unmap_area_topdown;
3921 }
3922 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3923 index a84ff76..f221c1d 100644
3924 --- a/arch/arm/mm/mmu.c
3925 +++ b/arch/arm/mm/mmu.c
3926 @@ -36,6 +36,22 @@
3927 #include "mm.h"
3928 #include "tcm.h"
3929
3930 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3931 +void modify_domain(unsigned int dom, unsigned int type)
3932 +{
3933 + struct thread_info *thread = current_thread_info();
3934 + unsigned int domain = thread->cpu_domain;
3935 + /*
3936 + * DOMAIN_MANAGER might be defined to some other value,
3937 + * use the arch-defined constant
3938 + */
3939 + domain &= ~domain_val(dom, 3);
3940 + thread->cpu_domain = domain | domain_val(dom, type);
3941 + set_domain(thread->cpu_domain);
3942 +}
3943 +EXPORT_SYMBOL(modify_domain);
3944 +#endif
3945 +
3946 /*
3947 * empty_zero_page is a special page that is used for
3948 * zero-initialized data and COW.
3949 @@ -211,10 +227,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
3950 }
3951 #endif
3952
3953 -#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
3954 +#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
3955 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
3956
3957 -static struct mem_type mem_types[] = {
3958 +#ifdef CONFIG_PAX_KERNEXEC
3959 +#define L_PTE_KERNEXEC L_PTE_RDONLY
3960 +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
3961 +#else
3962 +#define L_PTE_KERNEXEC L_PTE_DIRTY
3963 +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
3964 +#endif
3965 +
3966 +static struct mem_type mem_types[] __read_only = {
3967 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
3968 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
3969 L_PTE_SHARED,
3970 @@ -243,16 +267,16 @@ static struct mem_type mem_types[] = {
3971 [MT_UNCACHED] = {
3972 .prot_pte = PROT_PTE_DEVICE,
3973 .prot_l1 = PMD_TYPE_TABLE,
3974 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3975 + .prot_sect = PROT_SECT_DEVICE,
3976 .domain = DOMAIN_IO,
3977 },
3978 [MT_CACHECLEAN] = {
3979 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3980 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3981 .domain = DOMAIN_KERNEL,
3982 },
3983 #ifndef CONFIG_ARM_LPAE
3984 [MT_MINICLEAN] = {
3985 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
3986 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
3987 .domain = DOMAIN_KERNEL,
3988 },
3989 #endif
3990 @@ -260,36 +284,54 @@ static struct mem_type mem_types[] = {
3991 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3992 L_PTE_RDONLY,
3993 .prot_l1 = PMD_TYPE_TABLE,
3994 - .domain = DOMAIN_USER,
3995 + .domain = DOMAIN_VECTORS,
3996 },
3997 [MT_HIGH_VECTORS] = {
3998 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3999 - L_PTE_USER | L_PTE_RDONLY,
4000 + L_PTE_RDONLY,
4001 .prot_l1 = PMD_TYPE_TABLE,
4002 - .domain = DOMAIN_USER,
4003 + .domain = DOMAIN_VECTORS,
4004 },
4005 - [MT_MEMORY] = {
4006 + [MT_MEMORY_RWX] = {
4007 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4008 .prot_l1 = PMD_TYPE_TABLE,
4009 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4010 .domain = DOMAIN_KERNEL,
4011 },
4012 + [MT_MEMORY_RW] = {
4013 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4014 + .prot_l1 = PMD_TYPE_TABLE,
4015 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4016 + .domain = DOMAIN_KERNEL,
4017 + },
4018 + [MT_MEMORY_RX] = {
4019 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4020 + .prot_l1 = PMD_TYPE_TABLE,
4021 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4022 + .domain = DOMAIN_KERNEL,
4023 + },
4024 [MT_ROM] = {
4025 - .prot_sect = PMD_TYPE_SECT,
4026 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4027 .domain = DOMAIN_KERNEL,
4028 },
4029 - [MT_MEMORY_NONCACHED] = {
4030 + [MT_MEMORY_NONCACHED_RW] = {
4031 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4032 L_PTE_MT_BUFFERABLE,
4033 .prot_l1 = PMD_TYPE_TABLE,
4034 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4035 .domain = DOMAIN_KERNEL,
4036 },
4037 + [MT_MEMORY_NONCACHED_RX] = {
4038 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4039 + L_PTE_MT_BUFFERABLE,
4040 + .prot_l1 = PMD_TYPE_TABLE,
4041 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4042 + .domain = DOMAIN_KERNEL,
4043 + },
4044 [MT_MEMORY_DTCM] = {
4045 - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4046 - L_PTE_XN,
4047 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4048 .prot_l1 = PMD_TYPE_TABLE,
4049 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4050 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4051 .domain = DOMAIN_KERNEL,
4052 },
4053 [MT_MEMORY_ITCM] = {
4054 @@ -299,10 +341,10 @@ static struct mem_type mem_types[] = {
4055 },
4056 [MT_MEMORY_SO] = {
4057 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4058 - L_PTE_MT_UNCACHED | L_PTE_XN,
4059 + L_PTE_MT_UNCACHED,
4060 .prot_l1 = PMD_TYPE_TABLE,
4061 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4062 - PMD_SECT_UNCACHED | PMD_SECT_XN,
4063 + PMD_SECT_UNCACHED,
4064 .domain = DOMAIN_KERNEL,
4065 },
4066 [MT_MEMORY_DMA_READY] = {
4067 @@ -388,9 +430,35 @@ static void __init build_mem_type_table(void)
4068 * to prevent speculative instruction fetches.
4069 */
4070 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4071 + mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4072 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4073 + mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4074 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4075 + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4076 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4077 + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4078 +
4079 + /* Mark other regions on ARMv6+ as execute-never */
4080 +
4081 +#ifdef CONFIG_PAX_KERNEXEC
4082 + mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4083 + mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4084 + mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4085 + mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4086 +#ifndef CONFIG_ARM_LPAE
4087 + mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4088 + mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4089 +#endif
4090 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4091 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4092 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4092 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4094 + mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4095 + mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4096 +#endif
4097 +
4098 + mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4099 + mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4100 }
4101 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4102 /*
4103 @@ -451,6 +519,9 @@ static void __init build_mem_type_table(void)
4104 * from SVC mode and no access from userspace.
4105 */
4106 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4107 +#ifdef CONFIG_PAX_KERNEXEC
4108 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4109 +#endif
4110 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4111 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4112 #endif
4113 @@ -468,11 +539,17 @@ static void __init build_mem_type_table(void)
4114 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4115 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4116 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4117 - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4118 - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4119 + mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4120 + mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4121 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4122 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4123 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4124 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4125 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4126 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4127 - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4128 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4129 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4130 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4131 + mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4132 }
4133 }
4134
4135 @@ -483,15 +560,20 @@ static void __init build_mem_type_table(void)
4136 if (cpu_arch >= CPU_ARCH_ARMv6) {
4137 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4138 /* Non-cacheable Normal is XCB = 001 */
4139 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4140 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4141 + PMD_SECT_BUFFERED;
4142 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4143 PMD_SECT_BUFFERED;
4144 } else {
4145 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4146 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4147 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4148 + PMD_SECT_TEX(1);
4149 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4150 PMD_SECT_TEX(1);
4151 }
4152 } else {
4153 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4154 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4155 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4156 }
4157
4158 #ifdef CONFIG_ARM_LPAE
4159 @@ -507,6 +589,8 @@ static void __init build_mem_type_table(void)
4160 vecs_pgprot |= PTE_EXT_AF;
4161 #endif
4162
4163 + user_pgprot |= __supported_pte_mask;
4164 +
4165 for (i = 0; i < 16; i++) {
4166 pteval_t v = pgprot_val(protection_map[i]);
4167 protection_map[i] = __pgprot(v | user_pgprot);
4168 @@ -524,10 +608,15 @@ static void __init build_mem_type_table(void)
4169
4170 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4171 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4172 - mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4173 - mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4174 + mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4175 + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4176 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4177 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4178 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4179 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4180 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4181 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4182 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4183 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4184 mem_types[MT_ROM].prot_sect |= cp->pmd;
4185
4186 switch (cp->pmd) {
4187 @@ -1147,18 +1236,15 @@ void __init arm_mm_memblock_reserve(void)
4188 * called function. This means you can't use any function or debugging
4189 * method which may touch any device, otherwise the kernel _will_ crash.
4190 */
4191 +
4192 +static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4193 +
4194 static void __init devicemaps_init(struct machine_desc *mdesc)
4195 {
4196 struct map_desc map;
4197 unsigned long addr;
4198 - void *vectors;
4199
4200 - /*
4201 - * Allocate the vector page early.
4202 - */
4203 - vectors = early_alloc(PAGE_SIZE);
4204 -
4205 - early_trap_init(vectors);
4206 + early_trap_init(&vectors);
4207
4208 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4209 pmd_clear(pmd_off_k(addr));
4210 @@ -1198,7 +1284,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4211 * location (0xffff0000). If we aren't using high-vectors, also
4212 * create a mapping at the low-vectors virtual address.
4213 */
4214 - map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4215 + map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4216 map.virtual = 0xffff0000;
4217 map.length = PAGE_SIZE;
4218 map.type = MT_HIGH_VECTORS;
4219 @@ -1256,8 +1342,39 @@ static void __init map_lowmem(void)
4220 map.pfn = __phys_to_pfn(start);
4221 map.virtual = __phys_to_virt(start);
4222 map.length = end - start;
4223 - map.type = MT_MEMORY;
4224
4225 +#ifdef CONFIG_PAX_KERNEXEC
4226 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4227 + struct map_desc kernel;
4228 + struct map_desc initmap;
4229 +
4230 + /* when freeing initmem we will make this RW */
4231 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4232 + initmap.virtual = (unsigned long)__init_begin;
4233 + initmap.length = _sdata - __init_begin;
4234 + initmap.type = MT_MEMORY_RWX;
4235 + create_mapping(&initmap);
4236 +
4237 + /* when freeing initmem we will make this RX */
4238 + kernel.pfn = __phys_to_pfn(__pa(_stext));
4239 + kernel.virtual = (unsigned long)_stext;
4240 + kernel.length = __init_begin - _stext;
4241 + kernel.type = MT_MEMORY_RWX;
4242 + create_mapping(&kernel);
4243 +
4244 + if (map.virtual < (unsigned long)_stext) {
4245 + map.length = (unsigned long)_stext - map.virtual;
4246 + map.type = MT_MEMORY_RWX;
4247 + create_mapping(&map);
4248 + }
4249 +
4250 + map.pfn = __phys_to_pfn(__pa(_sdata));
4251 + map.virtual = (unsigned long)_sdata;
4252 + map.length = end - __pa(_sdata);
4253 + }
4254 +#endif
4255 +
4256 + map.type = MT_MEMORY_RW;
4257 create_mapping(&map);
4258 }
4259 }
4260 diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4261 index 78f520b..31f0cb6 100644
4262 --- a/arch/arm/mm/proc-v7-2level.S
4263 +++ b/arch/arm/mm/proc-v7-2level.S
4264 @@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4265 tst r1, #L_PTE_XN
4266 orrne r3, r3, #PTE_EXT_XN
4267
4268 + tst r1, #L_PTE_PXN
4269 + orrne r3, r3, #PTE_EXT_PXN
4270 +
4271 tst r1, #L_PTE_YOUNG
4272 tstne r1, #L_PTE_VALID
4273 #ifndef CONFIG_CPU_USE_DOMAINS
4274 diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4275 index a5bc92d..0bb4730 100644
4276 --- a/arch/arm/plat-omap/sram.c
4277 +++ b/arch/arm/plat-omap/sram.c
4278 @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4279 * Looks like we need to preserve some bootloader code at the
4280 * beginning of SRAM for jumping to flash for reboot to work...
4281 */
4282 + pax_open_kernel();
4283 memset_io(omap_sram_base + omap_sram_skip, 0,
4284 omap_sram_size - omap_sram_skip);
4285 + pax_close_kernel();
4286 }
4287 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4288 index 1141782..0959d64 100644
4289 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4290 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4291 @@ -48,7 +48,7 @@ struct samsung_dma_ops {
4292 int (*started)(unsigned ch);
4293 int (*flush)(unsigned ch);
4294 int (*stop)(unsigned ch);
4295 -};
4296 +} __no_const;
4297
4298 extern void *samsung_dmadev_get_ops(void);
4299 extern void *s3c_dma_get_ops(void);
4300 diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4301 index 0c3ba9f..95722b3 100644
4302 --- a/arch/arm64/kernel/debug-monitors.c
4303 +++ b/arch/arm64/kernel/debug-monitors.c
4304 @@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4305 return NOTIFY_OK;
4306 }
4307
4308 -static struct notifier_block __cpuinitdata os_lock_nb = {
4309 +static struct notifier_block os_lock_nb = {
4310 .notifier_call = os_lock_notify,
4311 };
4312
4313 diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4314 index 5ab825c..96aaec8 100644
4315 --- a/arch/arm64/kernel/hw_breakpoint.c
4316 +++ b/arch/arm64/kernel/hw_breakpoint.c
4317 @@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4318 return NOTIFY_OK;
4319 }
4320
4321 -static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4322 +static struct notifier_block hw_breakpoint_reset_nb = {
4323 .notifier_call = hw_breakpoint_reset_notify,
4324 };
4325
4326 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4327 index c3a58a1..78fbf54 100644
4328 --- a/arch/avr32/include/asm/cache.h
4329 +++ b/arch/avr32/include/asm/cache.h
4330 @@ -1,8 +1,10 @@
4331 #ifndef __ASM_AVR32_CACHE_H
4332 #define __ASM_AVR32_CACHE_H
4333
4334 +#include <linux/const.h>
4335 +
4336 #define L1_CACHE_SHIFT 5
4337 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4338 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4339
4340 /*
4341 * Memory returned by kmalloc() may be used for DMA, so we must make
4342 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4343 index d232888..87c8df1 100644
4344 --- a/arch/avr32/include/asm/elf.h
4345 +++ b/arch/avr32/include/asm/elf.h
4346 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4347 the loader. We need to make sure that it is out of the way of the program
4348 that it will "exec", and that there is sufficient room for the brk. */
4349
4350 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4351 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4352
4353 +#ifdef CONFIG_PAX_ASLR
4354 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4355 +
4356 +#define PAX_DELTA_MMAP_LEN 15
4357 +#define PAX_DELTA_STACK_LEN 15
4358 +#endif
4359
4360 /* This yields a mask that user programs can use to figure out what
4361 instruction set this CPU supports. This could be done in user space,
4362 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4363 index 479330b..53717a8 100644
4364 --- a/arch/avr32/include/asm/kmap_types.h
4365 +++ b/arch/avr32/include/asm/kmap_types.h
4366 @@ -2,9 +2,9 @@
4367 #define __ASM_AVR32_KMAP_TYPES_H
4368
4369 #ifdef CONFIG_DEBUG_HIGHMEM
4370 -# define KM_TYPE_NR 29
4371 +# define KM_TYPE_NR 30
4372 #else
4373 -# define KM_TYPE_NR 14
4374 +# define KM_TYPE_NR 15
4375 #endif
4376
4377 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4378 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4379 index b2f2d2d..d1c85cb 100644
4380 --- a/arch/avr32/mm/fault.c
4381 +++ b/arch/avr32/mm/fault.c
4382 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4383
4384 int exception_trace = 1;
4385
4386 +#ifdef CONFIG_PAX_PAGEEXEC
4387 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4388 +{
4389 + unsigned long i;
4390 +
4391 + printk(KERN_ERR "PAX: bytes at PC: ");
4392 + for (i = 0; i < 20; i++) {
4393 + unsigned char c;
4394 + if (get_user(c, (unsigned char *)pc+i))
4395 + printk(KERN_CONT "???????? ");
4396 + else
4397 + printk(KERN_CONT "%02x ", c);
4398 + }
4399 + printk("\n");
4400 +}
4401 +#endif
4402 +
4403 /*
4404 * This routine handles page faults. It determines the address and the
4405 * problem, and then passes it off to one of the appropriate routines.
4406 @@ -174,6 +191,16 @@ bad_area:
4407 up_read(&mm->mmap_sem);
4408
4409 if (user_mode(regs)) {
4410 +
4411 +#ifdef CONFIG_PAX_PAGEEXEC
4412 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4413 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4414 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4415 + do_group_exit(SIGKILL);
4416 + }
4417 + }
4418 +#endif
4419 +
4420 if (exception_trace && printk_ratelimit())
4421 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4422 "sp %08lx ecr %lu\n",
4423 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4424 index 568885a..f8008df 100644
4425 --- a/arch/blackfin/include/asm/cache.h
4426 +++ b/arch/blackfin/include/asm/cache.h
4427 @@ -7,6 +7,7 @@
4428 #ifndef __ARCH_BLACKFIN_CACHE_H
4429 #define __ARCH_BLACKFIN_CACHE_H
4430
4431 +#include <linux/const.h>
4432 #include <linux/linkage.h> /* for asmlinkage */
4433
4434 /*
4435 @@ -14,7 +15,7 @@
4436 * Blackfin loads 32 bytes for cache
4437 */
4438 #define L1_CACHE_SHIFT 5
4439 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4440 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4441 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4442
4443 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4444 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4445 index aea2718..3639a60 100644
4446 --- a/arch/cris/include/arch-v10/arch/cache.h
4447 +++ b/arch/cris/include/arch-v10/arch/cache.h
4448 @@ -1,8 +1,9 @@
4449 #ifndef _ASM_ARCH_CACHE_H
4450 #define _ASM_ARCH_CACHE_H
4451
4452 +#include <linux/const.h>
4453 /* Etrax 100LX have 32-byte cache-lines. */
4454 -#define L1_CACHE_BYTES 32
4455 #define L1_CACHE_SHIFT 5
4456 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4457
4458 #endif /* _ASM_ARCH_CACHE_H */
4459 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4460 index 7caf25d..ee65ac5 100644
4461 --- a/arch/cris/include/arch-v32/arch/cache.h
4462 +++ b/arch/cris/include/arch-v32/arch/cache.h
4463 @@ -1,11 +1,12 @@
4464 #ifndef _ASM_CRIS_ARCH_CACHE_H
4465 #define _ASM_CRIS_ARCH_CACHE_H
4466
4467 +#include <linux/const.h>
4468 #include <arch/hwregs/dma.h>
4469
4470 /* A cache-line is 32 bytes. */
4471 -#define L1_CACHE_BYTES 32
4472 #define L1_CACHE_SHIFT 5
4473 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4474
4475 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4476
4477 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4478 index b86329d..6709906 100644
4479 --- a/arch/frv/include/asm/atomic.h
4480 +++ b/arch/frv/include/asm/atomic.h
4481 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4482 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4483 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4484
4485 +#define atomic64_read_unchecked(v) atomic64_read(v)
4486 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4487 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4488 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4489 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4490 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4491 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4492 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4493 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4494 +
4495 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4496 {
4497 int c, old;
4498 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4499 index 2797163..c2a401d 100644
4500 --- a/arch/frv/include/asm/cache.h
4501 +++ b/arch/frv/include/asm/cache.h
4502 @@ -12,10 +12,11 @@
4503 #ifndef __ASM_CACHE_H
4504 #define __ASM_CACHE_H
4505
4506 +#include <linux/const.h>
4507
4508 /* bytes per L1 cache line */
4509 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4510 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4511 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4512
4513 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4514 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4515 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4516 index 43901f2..0d8b865 100644
4517 --- a/arch/frv/include/asm/kmap_types.h
4518 +++ b/arch/frv/include/asm/kmap_types.h
4519 @@ -2,6 +2,6 @@
4520 #ifndef _ASM_KMAP_TYPES_H
4521 #define _ASM_KMAP_TYPES_H
4522
4523 -#define KM_TYPE_NR 17
4524 +#define KM_TYPE_NR 18
4525
4526 #endif
4527 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4528 index 836f147..4cf23f5 100644
4529 --- a/arch/frv/mm/elf-fdpic.c
4530 +++ b/arch/frv/mm/elf-fdpic.c
4531 @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4532 {
4533 struct vm_area_struct *vma;
4534 struct vm_unmapped_area_info info;
4535 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4536
4537 if (len > TASK_SIZE)
4538 return -ENOMEM;
4539 @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4540 if (addr) {
4541 addr = PAGE_ALIGN(addr);
4542 vma = find_vma(current->mm, addr);
4543 - if (TASK_SIZE - len >= addr &&
4544 - (!vma || addr + len <= vma->vm_start))
4545 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4546 goto success;
4547 }
4548
4549 @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4550 info.high_limit = (current->mm->start_stack - 0x00200000);
4551 info.align_mask = 0;
4552 info.align_offset = 0;
4553 + info.threadstack_offset = offset;
4554 addr = vm_unmapped_area(&info);
4555 if (!(addr & ~PAGE_MASK))
4556 goto success;
4557 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4558 index f4ca594..adc72fd6 100644
4559 --- a/arch/hexagon/include/asm/cache.h
4560 +++ b/arch/hexagon/include/asm/cache.h
4561 @@ -21,9 +21,11 @@
4562 #ifndef __ASM_CACHE_H
4563 #define __ASM_CACHE_H
4564
4565 +#include <linux/const.h>
4566 +
4567 /* Bytes per L1 cache line */
4568 -#define L1_CACHE_SHIFT (5)
4569 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4570 +#define L1_CACHE_SHIFT 5
4571 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4572
4573 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4574 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4575 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4576 index 6e6fe18..a6ae668 100644
4577 --- a/arch/ia64/include/asm/atomic.h
4578 +++ b/arch/ia64/include/asm/atomic.h
4579 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4580 #define atomic64_inc(v) atomic64_add(1, (v))
4581 #define atomic64_dec(v) atomic64_sub(1, (v))
4582
4583 +#define atomic64_read_unchecked(v) atomic64_read(v)
4584 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4585 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4586 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4587 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4588 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4589 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4590 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4591 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4592 +
4593 /* Atomic operations are already serializing */
4594 #define smp_mb__before_atomic_dec() barrier()
4595 #define smp_mb__after_atomic_dec() barrier()
4596 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4597 index 988254a..e1ee885 100644
4598 --- a/arch/ia64/include/asm/cache.h
4599 +++ b/arch/ia64/include/asm/cache.h
4600 @@ -1,6 +1,7 @@
4601 #ifndef _ASM_IA64_CACHE_H
4602 #define _ASM_IA64_CACHE_H
4603
4604 +#include <linux/const.h>
4605
4606 /*
4607 * Copyright (C) 1998-2000 Hewlett-Packard Co
4608 @@ -9,7 +10,7 @@
4609
4610 /* Bytes per L1 (data) cache line. */
4611 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4612 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4613 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4614
4615 #ifdef CONFIG_SMP
4616 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4617 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4618 index 5a83c5c..4d7f553 100644
4619 --- a/arch/ia64/include/asm/elf.h
4620 +++ b/arch/ia64/include/asm/elf.h
4621 @@ -42,6 +42,13 @@
4622 */
4623 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4624
4625 +#ifdef CONFIG_PAX_ASLR
4626 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4627 +
4628 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4629 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4630 +#endif
4631 +
4632 #define PT_IA_64_UNWIND 0x70000001
4633
4634 /* IA-64 relocations: */
4635 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4636 index 96a8d92..617a1cf 100644
4637 --- a/arch/ia64/include/asm/pgalloc.h
4638 +++ b/arch/ia64/include/asm/pgalloc.h
4639 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4640 pgd_val(*pgd_entry) = __pa(pud);
4641 }
4642
4643 +static inline void
4644 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4645 +{
4646 + pgd_populate(mm, pgd_entry, pud);
4647 +}
4648 +
4649 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4650 {
4651 return quicklist_alloc(0, GFP_KERNEL, NULL);
4652 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4653 pud_val(*pud_entry) = __pa(pmd);
4654 }
4655
4656 +static inline void
4657 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4658 +{
4659 + pud_populate(mm, pud_entry, pmd);
4660 +}
4661 +
4662 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4663 {
4664 return quicklist_alloc(0, GFP_KERNEL, NULL);
4665 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4666 index 815810c..d60bd4c 100644
4667 --- a/arch/ia64/include/asm/pgtable.h
4668 +++ b/arch/ia64/include/asm/pgtable.h
4669 @@ -12,7 +12,7 @@
4670 * David Mosberger-Tang <davidm@hpl.hp.com>
4671 */
4672
4673 -
4674 +#include <linux/const.h>
4675 #include <asm/mman.h>
4676 #include <asm/page.h>
4677 #include <asm/processor.h>
4678 @@ -142,6 +142,17 @@
4679 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4680 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4681 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4682 +
4683 +#ifdef CONFIG_PAX_PAGEEXEC
4684 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4685 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4686 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4687 +#else
4688 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4689 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4690 +# define PAGE_COPY_NOEXEC PAGE_COPY
4691 +#endif
4692 +
4693 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4694 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4695 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4696 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4697 index 54ff557..70c88b7 100644
4698 --- a/arch/ia64/include/asm/spinlock.h
4699 +++ b/arch/ia64/include/asm/spinlock.h
4700 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4701 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4702
4703 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4704 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4705 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4706 }
4707
4708 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4709 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4710 index 449c8c0..18965fb 100644
4711 --- a/arch/ia64/include/asm/uaccess.h
4712 +++ b/arch/ia64/include/asm/uaccess.h
4713 @@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4714 static inline unsigned long
4715 __copy_to_user (void __user *to, const void *from, unsigned long count)
4716 {
4717 + if (count > INT_MAX)
4718 + return count;
4719 +
4720 + if (!__builtin_constant_p(count))
4721 + check_object_size(from, count, true);
4722 +
4723 return __copy_user(to, (__force void __user *) from, count);
4724 }
4725
4726 static inline unsigned long
4727 __copy_from_user (void *to, const void __user *from, unsigned long count)
4728 {
4729 + if (count > INT_MAX)
4730 + return count;
4731 +
4732 + if (!__builtin_constant_p(count))
4733 + check_object_size(to, count, false);
4734 +
4735 return __copy_user((__force void __user *) to, from, count);
4736 }
4737
4738 @@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4739 ({ \
4740 void __user *__cu_to = (to); \
4741 const void *__cu_from = (from); \
4742 - long __cu_len = (n); \
4743 + unsigned long __cu_len = (n); \
4744 \
4745 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
4746 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4747 + if (!__builtin_constant_p(n)) \
4748 + check_object_size(__cu_from, __cu_len, true); \
4749 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4750 + } \
4751 __cu_len; \
4752 })
4753
4754 @@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4755 ({ \
4756 void *__cu_to = (to); \
4757 const void __user *__cu_from = (from); \
4758 - long __cu_len = (n); \
4759 + unsigned long __cu_len = (n); \
4760 \
4761 __chk_user_ptr(__cu_from); \
4762 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
4763 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4764 + if (!__builtin_constant_p(n)) \
4765 + check_object_size(__cu_to, __cu_len, false); \
4766 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4767 + } \
4768 __cu_len; \
4769 })
4770
4771 diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4772 index 2d67317..07d8bfa 100644
4773 --- a/arch/ia64/kernel/err_inject.c
4774 +++ b/arch/ia64/kernel/err_inject.c
4775 @@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4776 return NOTIFY_OK;
4777 }
4778
4779 -static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4780 +static struct notifier_block err_inject_cpu_notifier =
4781 {
4782 .notifier_call = err_inject_cpu_callback,
4783 };
4784 diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4785 index d7396db..b33e873 100644
4786 --- a/arch/ia64/kernel/mca.c
4787 +++ b/arch/ia64/kernel/mca.c
4788 @@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4789 return NOTIFY_OK;
4790 }
4791
4792 -static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4793 +static struct notifier_block mca_cpu_notifier = {
4794 .notifier_call = mca_cpu_callback
4795 };
4796
4797 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4798 index 24603be..948052d 100644
4799 --- a/arch/ia64/kernel/module.c
4800 +++ b/arch/ia64/kernel/module.c
4801 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4802 void
4803 module_free (struct module *mod, void *module_region)
4804 {
4805 - if (mod && mod->arch.init_unw_table &&
4806 - module_region == mod->module_init) {
4807 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4808 unw_remove_unwind_table(mod->arch.init_unw_table);
4809 mod->arch.init_unw_table = NULL;
4810 }
4811 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4812 }
4813
4814 static inline int
4815 +in_init_rx (const struct module *mod, uint64_t addr)
4816 +{
4817 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4818 +}
4819 +
4820 +static inline int
4821 +in_init_rw (const struct module *mod, uint64_t addr)
4822 +{
4823 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4824 +}
4825 +
4826 +static inline int
4827 in_init (const struct module *mod, uint64_t addr)
4828 {
4829 - return addr - (uint64_t) mod->module_init < mod->init_size;
4830 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4831 +}
4832 +
4833 +static inline int
4834 +in_core_rx (const struct module *mod, uint64_t addr)
4835 +{
4836 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4837 +}
4838 +
4839 +static inline int
4840 +in_core_rw (const struct module *mod, uint64_t addr)
4841 +{
4842 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4843 }
4844
4845 static inline int
4846 in_core (const struct module *mod, uint64_t addr)
4847 {
4848 - return addr - (uint64_t) mod->module_core < mod->core_size;
4849 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4850 }
4851
4852 static inline int
4853 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4854 break;
4855
4856 case RV_BDREL:
4857 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4858 + if (in_init_rx(mod, val))
4859 + val -= (uint64_t) mod->module_init_rx;
4860 + else if (in_init_rw(mod, val))
4861 + val -= (uint64_t) mod->module_init_rw;
4862 + else if (in_core_rx(mod, val))
4863 + val -= (uint64_t) mod->module_core_rx;
4864 + else if (in_core_rw(mod, val))
4865 + val -= (uint64_t) mod->module_core_rw;
4866 break;
4867
4868 case RV_LTV:
4869 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4870 * addresses have been selected...
4871 */
4872 uint64_t gp;
4873 - if (mod->core_size > MAX_LTOFF)
4874 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4875 /*
4876 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4877 * at the end of the module.
4878 */
4879 - gp = mod->core_size - MAX_LTOFF / 2;
4880 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4881 else
4882 - gp = mod->core_size / 2;
4883 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4884 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4885 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4886 mod->arch.gp = gp;
4887 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4888 }
4889 diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4890 index 79521d5..43dddff 100644
4891 --- a/arch/ia64/kernel/palinfo.c
4892 +++ b/arch/ia64/kernel/palinfo.c
4893 @@ -1006,7 +1006,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4894 return NOTIFY_OK;
4895 }
4896
4897 -static struct notifier_block __refdata palinfo_cpu_notifier =
4898 +static struct notifier_block palinfo_cpu_notifier =
4899 {
4900 .notifier_call = palinfo_cpu_callback,
4901 .priority = 0,
4902 diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4903 index aa527d7..f237752 100644
4904 --- a/arch/ia64/kernel/salinfo.c
4905 +++ b/arch/ia64/kernel/salinfo.c
4906 @@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4907 return NOTIFY_OK;
4908 }
4909
4910 -static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4911 +static struct notifier_block salinfo_cpu_notifier =
4912 {
4913 .notifier_call = salinfo_cpu_callback,
4914 .priority = 0,
4915 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4916 index 41e33f8..65180b2 100644
4917 --- a/arch/ia64/kernel/sys_ia64.c
4918 +++ b/arch/ia64/kernel/sys_ia64.c
4919 @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4920 unsigned long align_mask = 0;
4921 struct mm_struct *mm = current->mm;
4922 struct vm_unmapped_area_info info;
4923 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4924
4925 if (len > RGN_MAP_LIMIT)
4926 return -ENOMEM;
4927 @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4928 if (REGION_NUMBER(addr) == RGN_HPAGE)
4929 addr = 0;
4930 #endif
4931 +
4932 +#ifdef CONFIG_PAX_RANDMMAP
4933 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4934 + addr = mm->free_area_cache;
4935 + else
4936 +#endif
4937 +
4938 if (!addr)
4939 addr = TASK_UNMAPPED_BASE;
4940
4941 @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4942 info.high_limit = TASK_SIZE;
4943 info.align_mask = align_mask;
4944 info.align_offset = 0;
4945 + info.threadstack_offset = offset;
4946 return vm_unmapped_area(&info);
4947 }
4948
4949 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
4950 index dc00b2c..cce53c2 100644
4951 --- a/arch/ia64/kernel/topology.c
4952 +++ b/arch/ia64/kernel/topology.c
4953 @@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
4954 return NOTIFY_OK;
4955 }
4956
4957 -static struct notifier_block __cpuinitdata cache_cpu_notifier =
4958 +static struct notifier_block cache_cpu_notifier =
4959 {
4960 .notifier_call = cache_cpu_callback
4961 };
4962 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
4963 index 0ccb28f..8992469 100644
4964 --- a/arch/ia64/kernel/vmlinux.lds.S
4965 +++ b/arch/ia64/kernel/vmlinux.lds.S
4966 @@ -198,7 +198,7 @@ SECTIONS {
4967 /* Per-cpu data: */
4968 . = ALIGN(PERCPU_PAGE_SIZE);
4969 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
4970 - __phys_per_cpu_start = __per_cpu_load;
4971 + __phys_per_cpu_start = per_cpu_load;
4972 /*
4973 * ensure percpu data fits
4974 * into percpu page size
4975 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
4976 index 6cf0341..d352594 100644
4977 --- a/arch/ia64/mm/fault.c
4978 +++ b/arch/ia64/mm/fault.c
4979 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
4980 return pte_present(pte);
4981 }
4982
4983 +#ifdef CONFIG_PAX_PAGEEXEC
4984 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4985 +{
4986 + unsigned long i;
4987 +
4988 + printk(KERN_ERR "PAX: bytes at PC: ");
4989 + for (i = 0; i < 8; i++) {
4990 + unsigned int c;
4991 + if (get_user(c, (unsigned int *)pc+i))
4992 + printk(KERN_CONT "???????? ");
4993 + else
4994 + printk(KERN_CONT "%08x ", c);
4995 + }
4996 + printk("\n");
4997 +}
4998 +#endif
4999 +
5000 # define VM_READ_BIT 0
5001 # define VM_WRITE_BIT 1
5002 # define VM_EXEC_BIT 2
5003 @@ -149,8 +166,21 @@ retry:
5004 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5005 goto bad_area;
5006
5007 - if ((vma->vm_flags & mask) != mask)
5008 + if ((vma->vm_flags & mask) != mask) {
5009 +
5010 +#ifdef CONFIG_PAX_PAGEEXEC
5011 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5012 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5013 + goto bad_area;
5014 +
5015 + up_read(&mm->mmap_sem);
5016 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5017 + do_group_exit(SIGKILL);
5018 + }
5019 +#endif
5020 +
5021 goto bad_area;
5022 + }
5023
5024 /*
5025 * If for any reason at all we couldn't handle the fault, make
5026 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5027 index 76069c1..c2aa816 100644
5028 --- a/arch/ia64/mm/hugetlbpage.c
5029 +++ b/arch/ia64/mm/hugetlbpage.c
5030 @@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5031 unsigned long pgoff, unsigned long flags)
5032 {
5033 struct vm_unmapped_area_info info;
5034 + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5035
5036 if (len > RGN_MAP_LIMIT)
5037 return -ENOMEM;
5038 @@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5039 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5040 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5041 info.align_offset = 0;
5042 + info.threadstack_offset = offset;
5043 return vm_unmapped_area(&info);
5044 }
5045
5046 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5047 index 20bc967..a26993e 100644
5048 --- a/arch/ia64/mm/init.c
5049 +++ b/arch/ia64/mm/init.c
5050 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5051 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5052 vma->vm_end = vma->vm_start + PAGE_SIZE;
5053 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5054 +
5055 +#ifdef CONFIG_PAX_PAGEEXEC
5056 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5057 + vma->vm_flags &= ~VM_EXEC;
5058 +
5059 +#ifdef CONFIG_PAX_MPROTECT
5060 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
5061 + vma->vm_flags &= ~VM_MAYEXEC;
5062 +#endif
5063 +
5064 + }
5065 +#endif
5066 +
5067 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5068 down_write(&current->mm->mmap_sem);
5069 if (insert_vm_struct(current->mm, vma)) {
5070 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5071 index 40b3ee9..8c2c112 100644
5072 --- a/arch/m32r/include/asm/cache.h
5073 +++ b/arch/m32r/include/asm/cache.h
5074 @@ -1,8 +1,10 @@
5075 #ifndef _ASM_M32R_CACHE_H
5076 #define _ASM_M32R_CACHE_H
5077
5078 +#include <linux/const.h>
5079 +
5080 /* L1 cache line size */
5081 #define L1_CACHE_SHIFT 4
5082 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5083 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5084
5085 #endif /* _ASM_M32R_CACHE_H */
5086 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5087 index 82abd15..d95ae5d 100644
5088 --- a/arch/m32r/lib/usercopy.c
5089 +++ b/arch/m32r/lib/usercopy.c
5090 @@ -14,6 +14,9 @@
5091 unsigned long
5092 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5093 {
5094 + if ((long)n < 0)
5095 + return n;
5096 +
5097 prefetch(from);
5098 if (access_ok(VERIFY_WRITE, to, n))
5099 __copy_user(to,from,n);
5100 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5101 unsigned long
5102 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5103 {
5104 + if ((long)n < 0)
5105 + return n;
5106 +
5107 prefetchw(to);
5108 if (access_ok(VERIFY_READ, from, n))
5109 __copy_user_zeroing(to,from,n);
5110 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5111 index 0395c51..5f26031 100644
5112 --- a/arch/m68k/include/asm/cache.h
5113 +++ b/arch/m68k/include/asm/cache.h
5114 @@ -4,9 +4,11 @@
5115 #ifndef __ARCH_M68K_CACHE_H
5116 #define __ARCH_M68K_CACHE_H
5117
5118 +#include <linux/const.h>
5119 +
5120 /* bytes per L1 cache line */
5121 #define L1_CACHE_SHIFT 4
5122 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5123 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5124
5125 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5126
5127 diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5128 index 3c52fa6..11b2ad8 100644
5129 --- a/arch/metag/mm/hugetlbpage.c
5130 +++ b/arch/metag/mm/hugetlbpage.c
5131 @@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5132 info.high_limit = TASK_SIZE;
5133 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5134 info.align_offset = 0;
5135 + info.threadstack_offset = 0;
5136 return vm_unmapped_area(&info);
5137 }
5138
5139 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5140 index 4efe96a..60e8699 100644
5141 --- a/arch/microblaze/include/asm/cache.h
5142 +++ b/arch/microblaze/include/asm/cache.h
5143 @@ -13,11 +13,12 @@
5144 #ifndef _ASM_MICROBLAZE_CACHE_H
5145 #define _ASM_MICROBLAZE_CACHE_H
5146
5147 +#include <linux/const.h>
5148 #include <asm/registers.h>
5149
5150 #define L1_CACHE_SHIFT 5
5151 /* word-granular cache in microblaze */
5152 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5153 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5154
5155 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5156
5157 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5158 index 08b6079..eb272cf 100644
5159 --- a/arch/mips/include/asm/atomic.h
5160 +++ b/arch/mips/include/asm/atomic.h
5161 @@ -21,6 +21,10 @@
5162 #include <asm/cmpxchg.h>
5163 #include <asm/war.h>
5164
5165 +#ifdef CONFIG_GENERIC_ATOMIC64
5166 +#include <asm-generic/atomic64.h>
5167 +#endif
5168 +
5169 #define ATOMIC_INIT(i) { (i) }
5170
5171 /*
5172 @@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5173 */
5174 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5175
5176 +#define atomic64_read_unchecked(v) atomic64_read(v)
5177 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5178 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5179 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5180 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5181 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5182 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5183 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5184 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5185 +
5186 #endif /* CONFIG_64BIT */
5187
5188 /*
5189 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5190 index b4db69f..8f3b093 100644
5191 --- a/arch/mips/include/asm/cache.h
5192 +++ b/arch/mips/include/asm/cache.h
5193 @@ -9,10 +9,11 @@
5194 #ifndef _ASM_CACHE_H
5195 #define _ASM_CACHE_H
5196
5197 +#include <linux/const.h>
5198 #include <kmalloc.h>
5199
5200 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5201 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5202 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5203
5204 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5205 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5206 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5207 index cf3ae24..238d22f 100644
5208 --- a/arch/mips/include/asm/elf.h
5209 +++ b/arch/mips/include/asm/elf.h
5210 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
5211 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5212 #endif
5213
5214 +#ifdef CONFIG_PAX_ASLR
5215 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5216 +
5217 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5218 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5219 +#endif
5220 +
5221 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5222 struct linux_binprm;
5223 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5224 int uses_interp);
5225
5226 -struct mm_struct;
5227 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5228 -#define arch_randomize_brk arch_randomize_brk
5229 -
5230 #endif /* _ASM_ELF_H */
5231 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5232 index c1f6afa..38cc6e9 100644
5233 --- a/arch/mips/include/asm/exec.h
5234 +++ b/arch/mips/include/asm/exec.h
5235 @@ -12,6 +12,6 @@
5236 #ifndef _ASM_EXEC_H
5237 #define _ASM_EXEC_H
5238
5239 -extern unsigned long arch_align_stack(unsigned long sp);
5240 +#define arch_align_stack(x) ((x) & ~0xfUL)
5241
5242 #endif /* _ASM_EXEC_H */
5243 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5244 index eab99e5..607c98e 100644
5245 --- a/arch/mips/include/asm/page.h
5246 +++ b/arch/mips/include/asm/page.h
5247 @@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5248 #ifdef CONFIG_CPU_MIPS32
5249 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5250 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5251 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5252 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5253 #else
5254 typedef struct { unsigned long long pte; } pte_t;
5255 #define pte_val(x) ((x).pte)
5256 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5257 index 881d18b..cea38bc 100644
5258 --- a/arch/mips/include/asm/pgalloc.h
5259 +++ b/arch/mips/include/asm/pgalloc.h
5260 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5261 {
5262 set_pud(pud, __pud((unsigned long)pmd));
5263 }
5264 +
5265 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5266 +{
5267 + pud_populate(mm, pud, pmd);
5268 +}
5269 #endif
5270
5271 /*
5272 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5273 index 178f792..8ebc510 100644
5274 --- a/arch/mips/include/asm/thread_info.h
5275 +++ b/arch/mips/include/asm/thread_info.h
5276 @@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5277 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5278 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5279 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5280 +/* li takes a 32bit immediate */
5281 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5282 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5283
5284 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5285 @@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5286 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5287 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5288 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5289 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5290 +
5291 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5292
5293 /* work to do in syscall_trace_leave() */
5294 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5295 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5296
5297 /* work to do on interrupt/exception return */
5298 #define _TIF_WORK_MASK \
5299 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5300 /* work to do on any return to u-space */
5301 -#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5302 +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5303
5304 #endif /* __KERNEL__ */
5305
5306 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5307 index e06f777..3244284 100644
5308 --- a/arch/mips/kernel/binfmt_elfn32.c
5309 +++ b/arch/mips/kernel/binfmt_elfn32.c
5310 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5311 #undef ELF_ET_DYN_BASE
5312 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5313
5314 +#ifdef CONFIG_PAX_ASLR
5315 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5316 +
5317 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5318 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5319 +#endif
5320 +
5321 #include <asm/processor.h>
5322 #include <linux/module.h>
5323 #include <linux/elfcore.h>
5324 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5325 index 556a435..b4fd2e3 100644
5326 --- a/arch/mips/kernel/binfmt_elfo32.c
5327 +++ b/arch/mips/kernel/binfmt_elfo32.c
5328 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5329 #undef ELF_ET_DYN_BASE
5330 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5331
5332 +#ifdef CONFIG_PAX_ASLR
5333 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5334 +
5335 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5336 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5337 +#endif
5338 +
5339 #include <asm/processor.h>
5340
5341 /*
5342 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5343 index 3be4405..a799827 100644
5344 --- a/arch/mips/kernel/process.c
5345 +++ b/arch/mips/kernel/process.c
5346 @@ -461,15 +461,3 @@ unsigned long get_wchan(struct task_struct *task)
5347 out:
5348 return pc;
5349 }
5350 -
5351 -/*
5352 - * Don't forget that the stack pointer must be aligned on a 8 bytes
5353 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5354 - */
5355 -unsigned long arch_align_stack(unsigned long sp)
5356 -{
5357 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5358 - sp -= get_random_int() & ~PAGE_MASK;
5359 -
5360 - return sp & ALMASK;
5361 -}
5362 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5363 index 9c6299c..2fb4c22 100644
5364 --- a/arch/mips/kernel/ptrace.c
5365 +++ b/arch/mips/kernel/ptrace.c
5366 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
5367 return arch;
5368 }
5369
5370 +#ifdef CONFIG_GRKERNSEC_SETXID
5371 +extern void gr_delayed_cred_worker(void);
5372 +#endif
5373 +
5374 /*
5375 * Notification of system call entry/exit
5376 * - triggered by current->work.syscall_trace
5377 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5378 /* do the secure computing check first */
5379 secure_computing_strict(regs->regs[2]);
5380
5381 +#ifdef CONFIG_GRKERNSEC_SETXID
5382 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5383 + gr_delayed_cred_worker();
5384 +#endif
5385 +
5386 if (!(current->ptrace & PT_PTRACED))
5387 goto out;
5388
5389 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5390 index 9ea2964..c4329c3 100644
5391 --- a/arch/mips/kernel/scall32-o32.S
5392 +++ b/arch/mips/kernel/scall32-o32.S
5393 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5394
5395 stack_done:
5396 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5397 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5398 + li t1, _TIF_SYSCALL_WORK
5399 and t0, t1
5400 bnez t0, syscall_trace_entry # -> yes
5401
5402 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5403 index 36cfd40..b1436e0 100644
5404 --- a/arch/mips/kernel/scall64-64.S
5405 +++ b/arch/mips/kernel/scall64-64.S
5406 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5407
5408 sd a3, PT_R26(sp) # save a3 for syscall restarting
5409
5410 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5411 + li t1, _TIF_SYSCALL_WORK
5412 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5413 and t0, t1, t0
5414 bnez t0, syscall_trace_entry
5415 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5416 index 693d60b..ae0ba75 100644
5417 --- a/arch/mips/kernel/scall64-n32.S
5418 +++ b/arch/mips/kernel/scall64-n32.S
5419 @@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5420
5421 sd a3, PT_R26(sp) # save a3 for syscall restarting
5422
5423 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5424 + li t1, _TIF_SYSCALL_WORK
5425 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5426 and t0, t1, t0
5427 bnez t0, n32_syscall_trace_entry
5428 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5429 index af8887f..611ccb6 100644
5430 --- a/arch/mips/kernel/scall64-o32.S
5431 +++ b/arch/mips/kernel/scall64-o32.S
5432 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5433 PTR 4b, bad_stack
5434 .previous
5435
5436 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5437 + li t1, _TIF_SYSCALL_WORK
5438 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5439 and t0, t1, t0
5440 bnez t0, trace_a_syscall
5441 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5442 index 0fead53..a2c0fb5 100644
5443 --- a/arch/mips/mm/fault.c
5444 +++ b/arch/mips/mm/fault.c
5445 @@ -27,6 +27,23 @@
5446 #include <asm/highmem.h> /* For VMALLOC_END */
5447 #include <linux/kdebug.h>
5448
5449 +#ifdef CONFIG_PAX_PAGEEXEC
5450 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5451 +{
5452 + unsigned long i;
5453 +
5454 + printk(KERN_ERR "PAX: bytes at PC: ");
5455 + for (i = 0; i < 5; i++) {
5456 + unsigned int c;
5457 + if (get_user(c, (unsigned int *)pc+i))
5458 + printk(KERN_CONT "???????? ");
5459 + else
5460 + printk(KERN_CONT "%08x ", c);
5461 + }
5462 + printk("\n");
5463 +}
5464 +#endif
5465 +
5466 /*
5467 * This routine handles page faults. It determines the address,
5468 * and the problem, and then passes it off to one of the appropriate
5469 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5470 index 7e5fe27..9656513 100644
5471 --- a/arch/mips/mm/mmap.c
5472 +++ b/arch/mips/mm/mmap.c
5473 @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5474 struct vm_area_struct *vma;
5475 unsigned long addr = addr0;
5476 int do_color_align;
5477 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5478 struct vm_unmapped_area_info info;
5479
5480 if (unlikely(len > TASK_SIZE))
5481 @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5482 do_color_align = 1;
5483
5484 /* requesting a specific address */
5485 +
5486 +#ifdef CONFIG_PAX_RANDMMAP
5487 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5488 +#endif
5489 +
5490 if (addr) {
5491 if (do_color_align)
5492 addr = COLOUR_ALIGN(addr, pgoff);
5493 @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5494 addr = PAGE_ALIGN(addr);
5495
5496 vma = find_vma(mm, addr);
5497 - if (TASK_SIZE - len >= addr &&
5498 - (!vma || addr + len <= vma->vm_start))
5499 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
5500 return addr;
5501 }
5502
5503 info.length = len;
5504 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
5505 info.align_offset = pgoff << PAGE_SHIFT;
5506 + info.threadstack_offset = offset;
5507
5508 if (dir == DOWN) {
5509 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
5510 @@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5511 {
5512 unsigned long random_factor = 0UL;
5513
5514 +#ifdef CONFIG_PAX_RANDMMAP
5515 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5516 +#endif
5517 +
5518 if (current->flags & PF_RANDOMIZE) {
5519 random_factor = get_random_int();
5520 random_factor = random_factor << PAGE_SHIFT;
5521 @@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5522
5523 if (mmap_is_legacy()) {
5524 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5525 +
5526 +#ifdef CONFIG_PAX_RANDMMAP
5527 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5528 + mm->mmap_base += mm->delta_mmap;
5529 +#endif
5530 +
5531 mm->get_unmapped_area = arch_get_unmapped_area;
5532 mm->unmap_area = arch_unmap_area;
5533 } else {
5534 mm->mmap_base = mmap_base(random_factor);
5535 +
5536 +#ifdef CONFIG_PAX_RANDMMAP
5537 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5538 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5539 +#endif
5540 +
5541 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5542 mm->unmap_area = arch_unmap_area_topdown;
5543 }
5544 }
5545
5546 -static inline unsigned long brk_rnd(void)
5547 -{
5548 - unsigned long rnd = get_random_int();
5549 -
5550 - rnd = rnd << PAGE_SHIFT;
5551 - /* 8MB for 32bit, 256MB for 64bit */
5552 - if (TASK_IS_32BIT_ADDR)
5553 - rnd = rnd & 0x7ffffful;
5554 - else
5555 - rnd = rnd & 0xffffffful;
5556 -
5557 - return rnd;
5558 -}
5559 -
5560 -unsigned long arch_randomize_brk(struct mm_struct *mm)
5561 -{
5562 - unsigned long base = mm->brk;
5563 - unsigned long ret;
5564 -
5565 - ret = PAGE_ALIGN(base + brk_rnd());
5566 -
5567 - if (ret < mm->brk)
5568 - return mm->brk;
5569 -
5570 - return ret;
5571 -}
5572 -
5573 int __virt_addr_valid(const volatile void *kaddr)
5574 {
5575 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5576 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5577 index 967d144..db12197 100644
5578 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5579 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5580 @@ -11,12 +11,14 @@
5581 #ifndef _ASM_PROC_CACHE_H
5582 #define _ASM_PROC_CACHE_H
5583
5584 +#include <linux/const.h>
5585 +
5586 /* L1 cache */
5587
5588 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5589 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5590 -#define L1_CACHE_BYTES 16 /* bytes per entry */
5591 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5592 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5593 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5594
5595 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5596 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5597 index bcb5df2..84fabd2 100644
5598 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5599 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5600 @@ -16,13 +16,15 @@
5601 #ifndef _ASM_PROC_CACHE_H
5602 #define _ASM_PROC_CACHE_H
5603
5604 +#include <linux/const.h>
5605 +
5606 /*
5607 * L1 cache
5608 */
5609 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5610 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5611 -#define L1_CACHE_BYTES 32 /* bytes per entry */
5612 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5613 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5614 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5615
5616 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5617 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5618 index 4ce7a01..449202a 100644
5619 --- a/arch/openrisc/include/asm/cache.h
5620 +++ b/arch/openrisc/include/asm/cache.h
5621 @@ -19,11 +19,13 @@
5622 #ifndef __ASM_OPENRISC_CACHE_H
5623 #define __ASM_OPENRISC_CACHE_H
5624
5625 +#include <linux/const.h>
5626 +
5627 /* FIXME: How can we replace these with values from the CPU...
5628 * they shouldn't be hard-coded!
5629 */
5630
5631 -#define L1_CACHE_BYTES 16
5632 #define L1_CACHE_SHIFT 4
5633 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5634
5635 #endif /* __ASM_OPENRISC_CACHE_H */
5636 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5637 index f38e198..4179e38 100644
5638 --- a/arch/parisc/include/asm/atomic.h
5639 +++ b/arch/parisc/include/asm/atomic.h
5640 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5641
5642 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5643
5644 +#define atomic64_read_unchecked(v) atomic64_read(v)
5645 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5646 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5647 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5648 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5649 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5650 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5651 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5652 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5653 +
5654 #endif /* !CONFIG_64BIT */
5655
5656
5657 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5658 index 47f11c7..3420df2 100644
5659 --- a/arch/parisc/include/asm/cache.h
5660 +++ b/arch/parisc/include/asm/cache.h
5661 @@ -5,6 +5,7 @@
5662 #ifndef __ARCH_PARISC_CACHE_H
5663 #define __ARCH_PARISC_CACHE_H
5664
5665 +#include <linux/const.h>
5666
5667 /*
5668 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5669 @@ -15,13 +16,13 @@
5670 * just ruin performance.
5671 */
5672 #ifdef CONFIG_PA20
5673 -#define L1_CACHE_BYTES 64
5674 #define L1_CACHE_SHIFT 6
5675 #else
5676 -#define L1_CACHE_BYTES 32
5677 #define L1_CACHE_SHIFT 5
5678 #endif
5679
5680 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5681 +
5682 #ifndef __ASSEMBLY__
5683
5684 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5685 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5686 index ad2b503..bdf1651 100644
5687 --- a/arch/parisc/include/asm/elf.h
5688 +++ b/arch/parisc/include/asm/elf.h
5689 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5690
5691 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5692
5693 +#ifdef CONFIG_PAX_ASLR
5694 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5695 +
5696 +#define PAX_DELTA_MMAP_LEN 16
5697 +#define PAX_DELTA_STACK_LEN 16
5698 +#endif
5699 +
5700 /* This yields a mask that user programs can use to figure out what
5701 instruction set this CPU supports. This could be done in user space,
5702 but it's not easy, and we've already done it here. */
5703 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5704 index fc987a1..6e068ef 100644
5705 --- a/arch/parisc/include/asm/pgalloc.h
5706 +++ b/arch/parisc/include/asm/pgalloc.h
5707 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5708 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5709 }
5710
5711 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5712 +{
5713 + pgd_populate(mm, pgd, pmd);
5714 +}
5715 +
5716 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5717 {
5718 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5719 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5720 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5721 #define pmd_free(mm, x) do { } while (0)
5722 #define pgd_populate(mm, pmd, pte) BUG()
5723 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
5724
5725 #endif
5726
5727 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5728 index 1e40d7f..a3eb445 100644
5729 --- a/arch/parisc/include/asm/pgtable.h
5730 +++ b/arch/parisc/include/asm/pgtable.h
5731 @@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5732 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5733 #define PAGE_COPY PAGE_EXECREAD
5734 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5735 +
5736 +#ifdef CONFIG_PAX_PAGEEXEC
5737 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5738 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5739 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5740 +#else
5741 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5742 +# define PAGE_COPY_NOEXEC PAGE_COPY
5743 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5744 +#endif
5745 +
5746 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5747 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5748 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5749 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5750 index e0a8235..ce2f1e1 100644
5751 --- a/arch/parisc/include/asm/uaccess.h
5752 +++ b/arch/parisc/include/asm/uaccess.h
5753 @@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5754 const void __user *from,
5755 unsigned long n)
5756 {
5757 - int sz = __compiletime_object_size(to);
5758 + size_t sz = __compiletime_object_size(to);
5759 int ret = -EFAULT;
5760
5761 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5762 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5763 ret = __copy_from_user(to, from, n);
5764 else
5765 copy_from_user_overflow();
5766 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5767 index 2a625fb..9908930 100644
5768 --- a/arch/parisc/kernel/module.c
5769 +++ b/arch/parisc/kernel/module.c
5770 @@ -98,16 +98,38 @@
5771
5772 /* three functions to determine where in the module core
5773 * or init pieces the location is */
5774 +static inline int in_init_rx(struct module *me, void *loc)
5775 +{
5776 + return (loc >= me->module_init_rx &&
5777 + loc < (me->module_init_rx + me->init_size_rx));
5778 +}
5779 +
5780 +static inline int in_init_rw(struct module *me, void *loc)
5781 +{
5782 + return (loc >= me->module_init_rw &&
5783 + loc < (me->module_init_rw + me->init_size_rw));
5784 +}
5785 +
5786 static inline int in_init(struct module *me, void *loc)
5787 {
5788 - return (loc >= me->module_init &&
5789 - loc <= (me->module_init + me->init_size));
5790 + return in_init_rx(me, loc) || in_init_rw(me, loc);
5791 +}
5792 +
5793 +static inline int in_core_rx(struct module *me, void *loc)
5794 +{
5795 + return (loc >= me->module_core_rx &&
5796 + loc < (me->module_core_rx + me->core_size_rx));
5797 +}
5798 +
5799 +static inline int in_core_rw(struct module *me, void *loc)
5800 +{
5801 + return (loc >= me->module_core_rw &&
5802 + loc < (me->module_core_rw + me->core_size_rw));
5803 }
5804
5805 static inline int in_core(struct module *me, void *loc)
5806 {
5807 - return (loc >= me->module_core &&
5808 - loc <= (me->module_core + me->core_size));
5809 + return in_core_rx(me, loc) || in_core_rw(me, loc);
5810 }
5811
5812 static inline int in_local(struct module *me, void *loc)
5813 @@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5814 }
5815
5816 /* align things a bit */
5817 - me->core_size = ALIGN(me->core_size, 16);
5818 - me->arch.got_offset = me->core_size;
5819 - me->core_size += gots * sizeof(struct got_entry);
5820 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
5821 + me->arch.got_offset = me->core_size_rw;
5822 + me->core_size_rw += gots * sizeof(struct got_entry);
5823
5824 - me->core_size = ALIGN(me->core_size, 16);
5825 - me->arch.fdesc_offset = me->core_size;
5826 - me->core_size += fdescs * sizeof(Elf_Fdesc);
5827 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
5828 + me->arch.fdesc_offset = me->core_size_rw;
5829 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5830
5831 me->arch.got_max = gots;
5832 me->arch.fdesc_max = fdescs;
5833 @@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5834
5835 BUG_ON(value == 0);
5836
5837 - got = me->module_core + me->arch.got_offset;
5838 + got = me->module_core_rw + me->arch.got_offset;
5839 for (i = 0; got[i].addr; i++)
5840 if (got[i].addr == value)
5841 goto out;
5842 @@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5843 #ifdef CONFIG_64BIT
5844 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5845 {
5846 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5847 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5848
5849 if (!value) {
5850 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5851 @@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5852
5853 /* Create new one */
5854 fdesc->addr = value;
5855 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5856 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5857 return (Elf_Addr)fdesc;
5858 }
5859 #endif /* CONFIG_64BIT */
5860 @@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5861
5862 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5863 end = table + sechdrs[me->arch.unwind_section].sh_size;
5864 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5865 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5866
5867 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5868 me->arch.unwind_section, table, end, gp);
5869 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5870 index 5dfd248..64914ac 100644
5871 --- a/arch/parisc/kernel/sys_parisc.c
5872 +++ b/arch/parisc/kernel/sys_parisc.c
5873 @@ -33,9 +33,11 @@
5874 #include <linux/utsname.h>
5875 #include <linux/personality.h>
5876
5877 -static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5878 +static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5879 + unsigned long flags)
5880 {
5881 struct vm_unmapped_area_info info;
5882 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5883
5884 info.flags = 0;
5885 info.length = len;
5886 @@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5887 info.high_limit = TASK_SIZE;
5888 info.align_mask = 0;
5889 info.align_offset = 0;
5890 + info.threadstack_offset = offset;
5891 return vm_unmapped_area(&info);
5892 }
5893
5894 @@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
5895 return (unsigned long) mapping >> 8;
5896 }
5897
5898 -static unsigned long get_shared_area(struct address_space *mapping,
5899 - unsigned long addr, unsigned long len, unsigned long pgoff)
5900 +static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5901 + unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5902 {
5903 struct vm_unmapped_area_info info;
5904 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5905
5906 info.flags = 0;
5907 info.length = len;
5908 @@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5909 info.high_limit = TASK_SIZE;
5910 info.align_mask = PAGE_MASK & (SHMLBA - 1);
5911 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
5912 + info.threadstack_offset = offset;
5913 return vm_unmapped_area(&info);
5914 }
5915
5916 @@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5917 return -EINVAL;
5918 return addr;
5919 }
5920 - if (!addr)
5921 + if (!addr) {
5922 addr = TASK_UNMAPPED_BASE;
5923
5924 +#ifdef CONFIG_PAX_RANDMMAP
5925 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
5926 + addr += current->mm->delta_mmap;
5927 +#endif
5928 +
5929 + }
5930 +
5931 if (filp) {
5932 - addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
5933 + addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
5934 } else if(flags & MAP_SHARED) {
5935 - addr = get_shared_area(NULL, addr, len, pgoff);
5936 + addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
5937 } else {
5938 - addr = get_unshared_area(addr, len);
5939 + addr = get_unshared_area(filp, addr, len, flags);
5940 }
5941 return addr;
5942 }
5943 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
5944 index aeb8f8f..27a6c2f 100644
5945 --- a/arch/parisc/kernel/traps.c
5946 +++ b/arch/parisc/kernel/traps.c
5947 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
5948
5949 down_read(&current->mm->mmap_sem);
5950 vma = find_vma(current->mm,regs->iaoq[0]);
5951 - if (vma && (regs->iaoq[0] >= vma->vm_start)
5952 - && (vma->vm_flags & VM_EXEC)) {
5953 -
5954 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
5955 fault_address = regs->iaoq[0];
5956 fault_space = regs->iasq[0];
5957
5958 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
5959 index f247a34..dc0f219 100644
5960 --- a/arch/parisc/mm/fault.c
5961 +++ b/arch/parisc/mm/fault.c
5962 @@ -15,6 +15,7 @@
5963 #include <linux/sched.h>
5964 #include <linux/interrupt.h>
5965 #include <linux/module.h>
5966 +#include <linux/unistd.h>
5967
5968 #include <asm/uaccess.h>
5969 #include <asm/traps.h>
5970 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
5971 static unsigned long
5972 parisc_acctyp(unsigned long code, unsigned int inst)
5973 {
5974 - if (code == 6 || code == 16)
5975 + if (code == 6 || code == 7 || code == 16)
5976 return VM_EXEC;
5977
5978 switch (inst & 0xf0000000) {
5979 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
5980 }
5981 #endif
5982
5983 +#ifdef CONFIG_PAX_PAGEEXEC
5984 +/*
5985 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
5986 + *
5987 + * returns 1 when task should be killed
5988 + * 2 when rt_sigreturn trampoline was detected
5989 + * 3 when unpatched PLT trampoline was detected
5990 + */
5991 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5992 +{
5993 +
5994 +#ifdef CONFIG_PAX_EMUPLT
5995 + int err;
5996 +
5997 + do { /* PaX: unpatched PLT emulation */
5998 + unsigned int bl, depwi;
5999 +
6000 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6001 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6002 +
6003 + if (err)
6004 + break;
6005 +
6006 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6007 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6008 +
6009 + err = get_user(ldw, (unsigned int *)addr);
6010 + err |= get_user(bv, (unsigned int *)(addr+4));
6011 + err |= get_user(ldw2, (unsigned int *)(addr+8));
6012 +
6013 + if (err)
6014 + break;
6015 +
6016 + if (ldw == 0x0E801096U &&
6017 + bv == 0xEAC0C000U &&
6018 + ldw2 == 0x0E881095U)
6019 + {
6020 + unsigned int resolver, map;
6021 +
6022 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6023 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6024 + if (err)
6025 + break;
6026 +
6027 + regs->gr[20] = instruction_pointer(regs)+8;
6028 + regs->gr[21] = map;
6029 + regs->gr[22] = resolver;
6030 + regs->iaoq[0] = resolver | 3UL;
6031 + regs->iaoq[1] = regs->iaoq[0] + 4;
6032 + return 3;
6033 + }
6034 + }
6035 + } while (0);
6036 +#endif
6037 +
6038 +#ifdef CONFIG_PAX_EMUTRAMP
6039 +
6040 +#ifndef CONFIG_PAX_EMUSIGRT
6041 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6042 + return 1;
6043 +#endif
6044 +
6045 + do { /* PaX: rt_sigreturn emulation */
6046 + unsigned int ldi1, ldi2, bel, nop;
6047 +
6048 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6049 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6050 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6051 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6052 +
6053 + if (err)
6054 + break;
6055 +
6056 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6057 + ldi2 == 0x3414015AU &&
6058 + bel == 0xE4008200U &&
6059 + nop == 0x08000240U)
6060 + {
6061 + regs->gr[25] = (ldi1 & 2) >> 1;
6062 + regs->gr[20] = __NR_rt_sigreturn;
6063 + regs->gr[31] = regs->iaoq[1] + 16;
6064 + regs->sr[0] = regs->iasq[1];
6065 + regs->iaoq[0] = 0x100UL;
6066 + regs->iaoq[1] = regs->iaoq[0] + 4;
6067 + regs->iasq[0] = regs->sr[2];
6068 + regs->iasq[1] = regs->sr[2];
6069 + return 2;
6070 + }
6071 + } while (0);
6072 +#endif
6073 +
6074 + return 1;
6075 +}
6076 +
6077 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6078 +{
6079 + unsigned long i;
6080 +
6081 + printk(KERN_ERR "PAX: bytes at PC: ");
6082 + for (i = 0; i < 5; i++) {
6083 + unsigned int c;
6084 + if (get_user(c, (unsigned int *)pc+i))
6085 + printk(KERN_CONT "???????? ");
6086 + else
6087 + printk(KERN_CONT "%08x ", c);
6088 + }
6089 + printk("\n");
6090 +}
6091 +#endif
6092 +
6093 int fixup_exception(struct pt_regs *regs)
6094 {
6095 const struct exception_table_entry *fix;
6096 @@ -194,8 +305,33 @@ good_area:
6097
6098 acc_type = parisc_acctyp(code,regs->iir);
6099
6100 - if ((vma->vm_flags & acc_type) != acc_type)
6101 + if ((vma->vm_flags & acc_type) != acc_type) {
6102 +
6103 +#ifdef CONFIG_PAX_PAGEEXEC
6104 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6105 + (address & ~3UL) == instruction_pointer(regs))
6106 + {
6107 + up_read(&mm->mmap_sem);
6108 + switch (pax_handle_fetch_fault(regs)) {
6109 +
6110 +#ifdef CONFIG_PAX_EMUPLT
6111 + case 3:
6112 + return;
6113 +#endif
6114 +
6115 +#ifdef CONFIG_PAX_EMUTRAMP
6116 + case 2:
6117 + return;
6118 +#endif
6119 +
6120 + }
6121 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6122 + do_group_exit(SIGKILL);
6123 + }
6124 +#endif
6125 +
6126 goto bad_area;
6127 + }
6128
6129 /*
6130 * If for any reason at all we couldn't handle the fault, make
6131 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6132 index e3b1d41..8e81edf 100644
6133 --- a/arch/powerpc/include/asm/atomic.h
6134 +++ b/arch/powerpc/include/asm/atomic.h
6135 @@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6136 return t1;
6137 }
6138
6139 +#define atomic64_read_unchecked(v) atomic64_read(v)
6140 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6141 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6142 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6143 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6144 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6145 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6146 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6147 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6148 +
6149 #endif /* __powerpc64__ */
6150
6151 #endif /* __KERNEL__ */
6152 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6153 index 9e495c9..b6878e5 100644
6154 --- a/arch/powerpc/include/asm/cache.h
6155 +++ b/arch/powerpc/include/asm/cache.h
6156 @@ -3,6 +3,7 @@
6157
6158 #ifdef __KERNEL__
6159
6160 +#include <linux/const.h>
6161
6162 /* bytes per L1 cache line */
6163 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6164 @@ -22,7 +23,7 @@
6165 #define L1_CACHE_SHIFT 7
6166 #endif
6167
6168 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6169 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6170
6171 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6172
6173 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6174 index ac9790f..6d30741 100644
6175 --- a/arch/powerpc/include/asm/elf.h
6176 +++ b/arch/powerpc/include/asm/elf.h
6177 @@ -28,8 +28,19 @@
6178 the loader. We need to make sure that it is out of the way of the program
6179 that it will "exec", and that there is sufficient room for the brk. */
6180
6181 -extern unsigned long randomize_et_dyn(unsigned long base);
6182 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6183 +#define ELF_ET_DYN_BASE (0x20000000)
6184 +
6185 +#ifdef CONFIG_PAX_ASLR
6186 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6187 +
6188 +#ifdef __powerpc64__
6189 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6190 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6191 +#else
6192 +#define PAX_DELTA_MMAP_LEN 15
6193 +#define PAX_DELTA_STACK_LEN 15
6194 +#endif
6195 +#endif
6196
6197 /*
6198 * Our registers are always unsigned longs, whether we're a 32 bit
6199 @@ -122,10 +133,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6200 (0x7ff >> (PAGE_SHIFT - 12)) : \
6201 (0x3ffff >> (PAGE_SHIFT - 12)))
6202
6203 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6204 -#define arch_randomize_brk arch_randomize_brk
6205 -
6206 -
6207 #ifdef CONFIG_SPU_BASE
6208 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6209 #define NT_SPU 1
6210 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6211 index 8196e9c..d83a9f3 100644
6212 --- a/arch/powerpc/include/asm/exec.h
6213 +++ b/arch/powerpc/include/asm/exec.h
6214 @@ -4,6 +4,6 @@
6215 #ifndef _ASM_POWERPC_EXEC_H
6216 #define _ASM_POWERPC_EXEC_H
6217
6218 -extern unsigned long arch_align_stack(unsigned long sp);
6219 +#define arch_align_stack(x) ((x) & ~0xfUL)
6220
6221 #endif /* _ASM_POWERPC_EXEC_H */
6222 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6223 index 5acabbd..7ea14fa 100644
6224 --- a/arch/powerpc/include/asm/kmap_types.h
6225 +++ b/arch/powerpc/include/asm/kmap_types.h
6226 @@ -10,7 +10,7 @@
6227 * 2 of the License, or (at your option) any later version.
6228 */
6229
6230 -#define KM_TYPE_NR 16
6231 +#define KM_TYPE_NR 17
6232
6233 #endif /* __KERNEL__ */
6234 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6235 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6236 index 8565c25..2865190 100644
6237 --- a/arch/powerpc/include/asm/mman.h
6238 +++ b/arch/powerpc/include/asm/mman.h
6239 @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6240 }
6241 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6242
6243 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6244 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6245 {
6246 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6247 }
6248 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6249 index f072e97..b436dee 100644
6250 --- a/arch/powerpc/include/asm/page.h
6251 +++ b/arch/powerpc/include/asm/page.h
6252 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6253 * and needs to be executable. This means the whole heap ends
6254 * up being executable.
6255 */
6256 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6257 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6258 +#define VM_DATA_DEFAULT_FLAGS32 \
6259 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6260 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6261
6262 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6263 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6264 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6265 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6266 #endif
6267
6268 +#define ktla_ktva(addr) (addr)
6269 +#define ktva_ktla(addr) (addr)
6270 +
6271 /*
6272 * Use the top bit of the higher-level page table entries to indicate whether
6273 * the entries we point to contain hugepages. This works because we know that
6274 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6275 index cd915d6..c10cee8 100644
6276 --- a/arch/powerpc/include/asm/page_64.h
6277 +++ b/arch/powerpc/include/asm/page_64.h
6278 @@ -154,15 +154,18 @@ do { \
6279 * stack by default, so in the absence of a PT_GNU_STACK program header
6280 * we turn execute permission off.
6281 */
6282 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6283 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6284 +#define VM_STACK_DEFAULT_FLAGS32 \
6285 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6286 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6287
6288 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6289 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6290
6291 +#ifndef CONFIG_PAX_PAGEEXEC
6292 #define VM_STACK_DEFAULT_FLAGS \
6293 (is_32bit_task() ? \
6294 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6295 +#endif
6296
6297 #include <asm-generic/getorder.h>
6298
6299 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6300 index 292725c..f87ae14 100644
6301 --- a/arch/powerpc/include/asm/pgalloc-64.h
6302 +++ b/arch/powerpc/include/asm/pgalloc-64.h
6303 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6304 #ifndef CONFIG_PPC_64K_PAGES
6305
6306 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6307 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6308
6309 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6310 {
6311 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6312 pud_set(pud, (unsigned long)pmd);
6313 }
6314
6315 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6316 +{
6317 + pud_populate(mm, pud, pmd);
6318 +}
6319 +
6320 #define pmd_populate(mm, pmd, pte_page) \
6321 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6322 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6323 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6324 #else /* CONFIG_PPC_64K_PAGES */
6325
6326 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6327 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6328
6329 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6330 pte_t *pte)
6331 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6332 index a9cbd3b..3b67efa 100644
6333 --- a/arch/powerpc/include/asm/pgtable.h
6334 +++ b/arch/powerpc/include/asm/pgtable.h
6335 @@ -2,6 +2,7 @@
6336 #define _ASM_POWERPC_PGTABLE_H
6337 #ifdef __KERNEL__
6338
6339 +#include <linux/const.h>
6340 #ifndef __ASSEMBLY__
6341 #include <asm/processor.h> /* For TASK_SIZE */
6342 #include <asm/mmu.h>
6343 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6344 index 4aad413..85d86bf 100644
6345 --- a/arch/powerpc/include/asm/pte-hash32.h
6346 +++ b/arch/powerpc/include/asm/pte-hash32.h
6347 @@ -21,6 +21,7 @@
6348 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6349 #define _PAGE_USER 0x004 /* usermode access allowed */
6350 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6351 +#define _PAGE_EXEC _PAGE_GUARDED
6352 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6353 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6354 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6355 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6356 index c9c67fc..e10c012 100644
6357 --- a/arch/powerpc/include/asm/reg.h
6358 +++ b/arch/powerpc/include/asm/reg.h
6359 @@ -245,6 +245,7 @@
6360 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6361 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6362 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6363 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6364 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6365 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6366 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6367 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6368 index 195ce2a..ab5c614 100644
6369 --- a/arch/powerpc/include/asm/smp.h
6370 +++ b/arch/powerpc/include/asm/smp.h
6371 @@ -50,7 +50,7 @@ struct smp_ops_t {
6372 int (*cpu_disable)(void);
6373 void (*cpu_die)(unsigned int nr);
6374 int (*cpu_bootable)(unsigned int nr);
6375 -};
6376 +} __no_const;
6377
6378 extern void smp_send_debugger_break(void);
6379 extern void start_secondary_resume(void);
6380 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6381 index 406b7b9..af63426 100644
6382 --- a/arch/powerpc/include/asm/thread_info.h
6383 +++ b/arch/powerpc/include/asm/thread_info.h
6384 @@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6385 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6386 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6387 #define TIF_SINGLESTEP 8 /* singlestepping active */
6388 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6389 #define TIF_SECCOMP 10 /* secure computing */
6390 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6391 #define TIF_NOERROR 12 /* Force successful syscall return */
6392 @@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6393 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6394 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6395 for stack store? */
6396 +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6397 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6398 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6399
6400 /* as above, but as bit values */
6401 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6402 @@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6403 #define _TIF_UPROBE (1<<TIF_UPROBE)
6404 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6405 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6406 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6407 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6408 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6409 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6410 + _TIF_GRSEC_SETXID)
6411
6412 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6413 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6414 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6415 index 4db4959..aba5c41 100644
6416 --- a/arch/powerpc/include/asm/uaccess.h
6417 +++ b/arch/powerpc/include/asm/uaccess.h
6418 @@ -318,52 +318,6 @@ do { \
6419 extern unsigned long __copy_tofrom_user(void __user *to,
6420 const void __user *from, unsigned long size);
6421
6422 -#ifndef __powerpc64__
6423 -
6424 -static inline unsigned long copy_from_user(void *to,
6425 - const void __user *from, unsigned long n)
6426 -{
6427 - unsigned long over;
6428 -
6429 - if (access_ok(VERIFY_READ, from, n))
6430 - return __copy_tofrom_user((__force void __user *)to, from, n);
6431 - if ((unsigned long)from < TASK_SIZE) {
6432 - over = (unsigned long)from + n - TASK_SIZE;
6433 - return __copy_tofrom_user((__force void __user *)to, from,
6434 - n - over) + over;
6435 - }
6436 - return n;
6437 -}
6438 -
6439 -static inline unsigned long copy_to_user(void __user *to,
6440 - const void *from, unsigned long n)
6441 -{
6442 - unsigned long over;
6443 -
6444 - if (access_ok(VERIFY_WRITE, to, n))
6445 - return __copy_tofrom_user(to, (__force void __user *)from, n);
6446 - if ((unsigned long)to < TASK_SIZE) {
6447 - over = (unsigned long)to + n - TASK_SIZE;
6448 - return __copy_tofrom_user(to, (__force void __user *)from,
6449 - n - over) + over;
6450 - }
6451 - return n;
6452 -}
6453 -
6454 -#else /* __powerpc64__ */
6455 -
6456 -#define __copy_in_user(to, from, size) \
6457 - __copy_tofrom_user((to), (from), (size))
6458 -
6459 -extern unsigned long copy_from_user(void *to, const void __user *from,
6460 - unsigned long n);
6461 -extern unsigned long copy_to_user(void __user *to, const void *from,
6462 - unsigned long n);
6463 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
6464 - unsigned long n);
6465 -
6466 -#endif /* __powerpc64__ */
6467 -
6468 static inline unsigned long __copy_from_user_inatomic(void *to,
6469 const void __user *from, unsigned long n)
6470 {
6471 @@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6472 if (ret == 0)
6473 return 0;
6474 }
6475 +
6476 + if (!__builtin_constant_p(n))
6477 + check_object_size(to, n, false);
6478 +
6479 return __copy_tofrom_user((__force void __user *)to, from, n);
6480 }
6481
6482 @@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6483 if (ret == 0)
6484 return 0;
6485 }
6486 +
6487 + if (!__builtin_constant_p(n))
6488 + check_object_size(from, n, true);
6489 +
6490 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6491 }
6492
6493 @@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6494 return __copy_to_user_inatomic(to, from, size);
6495 }
6496
6497 +#ifndef __powerpc64__
6498 +
6499 +static inline unsigned long __must_check copy_from_user(void *to,
6500 + const void __user *from, unsigned long n)
6501 +{
6502 + unsigned long over;
6503 +
6504 + if ((long)n < 0)
6505 + return n;
6506 +
6507 + if (access_ok(VERIFY_READ, from, n)) {
6508 + if (!__builtin_constant_p(n))
6509 + check_object_size(to, n, false);
6510 + return __copy_tofrom_user((__force void __user *)to, from, n);
6511 + }
6512 + if ((unsigned long)from < TASK_SIZE) {
6513 + over = (unsigned long)from + n - TASK_SIZE;
6514 + if (!__builtin_constant_p(n - over))
6515 + check_object_size(to, n - over, false);
6516 + return __copy_tofrom_user((__force void __user *)to, from,
6517 + n - over) + over;
6518 + }
6519 + return n;
6520 +}
6521 +
6522 +static inline unsigned long __must_check copy_to_user(void __user *to,
6523 + const void *from, unsigned long n)
6524 +{
6525 + unsigned long over;
6526 +
6527 + if ((long)n < 0)
6528 + return n;
6529 +
6530 + if (access_ok(VERIFY_WRITE, to, n)) {
6531 + if (!__builtin_constant_p(n))
6532 + check_object_size(from, n, true);
6533 + return __copy_tofrom_user(to, (__force void __user *)from, n);
6534 + }
6535 + if ((unsigned long)to < TASK_SIZE) {
6536 + over = (unsigned long)to + n - TASK_SIZE;
6537 + if (!__builtin_constant_p(n))
6538 + check_object_size(from, n - over, true);
6539 + return __copy_tofrom_user(to, (__force void __user *)from,
6540 + n - over) + over;
6541 + }
6542 + return n;
6543 +}
6544 +
6545 +#else /* __powerpc64__ */
6546 +
6547 +#define __copy_in_user(to, from, size) \
6548 + __copy_tofrom_user((to), (from), (size))
6549 +
6550 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6551 +{
6552 + if ((long)n < 0 || n > INT_MAX)
6553 + return n;
6554 +
6555 + if (!__builtin_constant_p(n))
6556 + check_object_size(to, n, false);
6557 +
6558 + if (likely(access_ok(VERIFY_READ, from, n)))
6559 + n = __copy_from_user(to, from, n);
6560 + else
6561 + memset(to, 0, n);
6562 + return n;
6563 +}
6564 +
6565 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6566 +{
6567 + if ((long)n < 0 || n > INT_MAX)
6568 + return n;
6569 +
6570 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
6571 + if (!__builtin_constant_p(n))
6572 + check_object_size(from, n, true);
6573 + n = __copy_to_user(to, from, n);
6574 + }
6575 + return n;
6576 +}
6577 +
6578 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
6579 + unsigned long n);
6580 +
6581 +#endif /* __powerpc64__ */
6582 +
6583 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6584
6585 static inline unsigned long clear_user(void __user *addr, unsigned long size)
6586 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6587 index ae54553..cf2184d 100644
6588 --- a/arch/powerpc/kernel/exceptions-64e.S
6589 +++ b/arch/powerpc/kernel/exceptions-64e.S
6590 @@ -716,6 +716,7 @@ storage_fault_common:
6591 std r14,_DAR(r1)
6592 std r15,_DSISR(r1)
6593 addi r3,r1,STACK_FRAME_OVERHEAD
6594 + bl .save_nvgprs
6595 mr r4,r14
6596 mr r5,r15
6597 ld r14,PACA_EXGEN+EX_R14(r13)
6598 @@ -724,8 +725,7 @@ storage_fault_common:
6599 cmpdi r3,0
6600 bne- 1f
6601 b .ret_from_except_lite
6602 -1: bl .save_nvgprs
6603 - mr r5,r3
6604 +1: mr r5,r3
6605 addi r3,r1,STACK_FRAME_OVERHEAD
6606 ld r4,_DAR(r1)
6607 bl .bad_page_fault
6608 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6609 index 3bbe7ed..14ec3eb 100644
6610 --- a/arch/powerpc/kernel/exceptions-64s.S
6611 +++ b/arch/powerpc/kernel/exceptions-64s.S
6612 @@ -1390,10 +1390,10 @@ handle_page_fault:
6613 11: ld r4,_DAR(r1)
6614 ld r5,_DSISR(r1)
6615 addi r3,r1,STACK_FRAME_OVERHEAD
6616 + bl .save_nvgprs
6617 bl .do_page_fault
6618 cmpdi r3,0
6619 beq+ 12f
6620 - bl .save_nvgprs
6621 mr r5,r3
6622 addi r3,r1,STACK_FRAME_OVERHEAD
6623 lwz r4,_DAR(r1)
6624 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6625 index 2e3200c..72095ce 100644
6626 --- a/arch/powerpc/kernel/module_32.c
6627 +++ b/arch/powerpc/kernel/module_32.c
6628 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6629 me->arch.core_plt_section = i;
6630 }
6631 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6632 - printk("Module doesn't contain .plt or .init.plt sections.\n");
6633 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6634 return -ENOEXEC;
6635 }
6636
6637 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6638
6639 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6640 /* Init, or core PLT? */
6641 - if (location >= mod->module_core
6642 - && location < mod->module_core + mod->core_size)
6643 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6644 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6645 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6646 - else
6647 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6648 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6649 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6650 + else {
6651 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6652 + return ~0UL;
6653 + }
6654
6655 /* Find this entry, or if that fails, the next avail. entry */
6656 while (entry->jump[0]) {
6657 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6658 index 16e77a8..4501b41 100644
6659 --- a/arch/powerpc/kernel/process.c
6660 +++ b/arch/powerpc/kernel/process.c
6661 @@ -870,8 +870,8 @@ void show_regs(struct pt_regs * regs)
6662 * Lookup NIP late so we have the best change of getting the
6663 * above info out without failing
6664 */
6665 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6666 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6667 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6668 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6669 #endif
6670 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
6671 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
6672 @@ -1330,10 +1330,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6673 newsp = stack[0];
6674 ip = stack[STACK_FRAME_LR_SAVE];
6675 if (!firstframe || ip != lr) {
6676 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6677 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6678 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6679 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6680 - printk(" (%pS)",
6681 + printk(" (%pA)",
6682 (void *)current->ret_stack[curr_frame].ret);
6683 curr_frame--;
6684 }
6685 @@ -1353,7 +1353,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6686 struct pt_regs *regs = (struct pt_regs *)
6687 (sp + STACK_FRAME_OVERHEAD);
6688 lr = regs->link;
6689 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
6690 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
6691 regs->trap, (void *)regs->nip, (void *)lr);
6692 firstframe = 1;
6693 }
6694 @@ -1395,58 +1395,3 @@ void __ppc64_runlatch_off(void)
6695 mtspr(SPRN_CTRLT, ctrl);
6696 }
6697 #endif /* CONFIG_PPC64 */
6698 -
6699 -unsigned long arch_align_stack(unsigned long sp)
6700 -{
6701 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6702 - sp -= get_random_int() & ~PAGE_MASK;
6703 - return sp & ~0xf;
6704 -}
6705 -
6706 -static inline unsigned long brk_rnd(void)
6707 -{
6708 - unsigned long rnd = 0;
6709 -
6710 - /* 8MB for 32bit, 1GB for 64bit */
6711 - if (is_32bit_task())
6712 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6713 - else
6714 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6715 -
6716 - return rnd << PAGE_SHIFT;
6717 -}
6718 -
6719 -unsigned long arch_randomize_brk(struct mm_struct *mm)
6720 -{
6721 - unsigned long base = mm->brk;
6722 - unsigned long ret;
6723 -
6724 -#ifdef CONFIG_PPC_STD_MMU_64
6725 - /*
6726 - * If we are using 1TB segments and we are allowed to randomise
6727 - * the heap, we can put it above 1TB so it is backed by a 1TB
6728 - * segment. Otherwise the heap will be in the bottom 1TB
6729 - * which always uses 256MB segments and this may result in a
6730 - * performance penalty.
6731 - */
6732 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6733 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6734 -#endif
6735 -
6736 - ret = PAGE_ALIGN(base + brk_rnd());
6737 -
6738 - if (ret < mm->brk)
6739 - return mm->brk;
6740 -
6741 - return ret;
6742 -}
6743 -
6744 -unsigned long randomize_et_dyn(unsigned long base)
6745 -{
6746 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6747 -
6748 - if (ret < base)
6749 - return base;
6750 -
6751 - return ret;
6752 -}
6753 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6754 index f9b30c6..d72e7a3 100644
6755 --- a/arch/powerpc/kernel/ptrace.c
6756 +++ b/arch/powerpc/kernel/ptrace.c
6757 @@ -1771,6 +1771,10 @@ long arch_ptrace(struct task_struct *child, long request,
6758 return ret;
6759 }
6760
6761 +#ifdef CONFIG_GRKERNSEC_SETXID
6762 +extern void gr_delayed_cred_worker(void);
6763 +#endif
6764 +
6765 /*
6766 * We must return the syscall number to actually look up in the table.
6767 * This can be -1L to skip running any syscall at all.
6768 @@ -1781,6 +1785,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6769
6770 secure_computing_strict(regs->gpr[0]);
6771
6772 +#ifdef CONFIG_GRKERNSEC_SETXID
6773 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6774 + gr_delayed_cred_worker();
6775 +#endif
6776 +
6777 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6778 tracehook_report_syscall_entry(regs))
6779 /*
6780 @@ -1815,6 +1824,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6781 {
6782 int step;
6783
6784 +#ifdef CONFIG_GRKERNSEC_SETXID
6785 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6786 + gr_delayed_cred_worker();
6787 +#endif
6788 +
6789 audit_syscall_exit(regs);
6790
6791 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6792 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6793 index 95068bf..9ba1814 100644
6794 --- a/arch/powerpc/kernel/signal_32.c
6795 +++ b/arch/powerpc/kernel/signal_32.c
6796 @@ -982,7 +982,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6797 /* Save user registers on the stack */
6798 frame = &rt_sf->uc.uc_mcontext;
6799 addr = frame;
6800 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6801 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6802 sigret = 0;
6803 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6804 } else {
6805 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6806 index c179428..58acdaa 100644
6807 --- a/arch/powerpc/kernel/signal_64.c
6808 +++ b/arch/powerpc/kernel/signal_64.c
6809 @@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6810 #endif
6811
6812 /* Set up to return from userspace. */
6813 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6814 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6815 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6816 } else {
6817 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6818 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6819 index 3ce1f86..c30e629 100644
6820 --- a/arch/powerpc/kernel/sysfs.c
6821 +++ b/arch/powerpc/kernel/sysfs.c
6822 @@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6823 return NOTIFY_OK;
6824 }
6825
6826 -static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6827 +static struct notifier_block sysfs_cpu_nb = {
6828 .notifier_call = sysfs_cpu_notify,
6829 };
6830
6831 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6832 index 83efa2f..6bb5839 100644
6833 --- a/arch/powerpc/kernel/traps.c
6834 +++ b/arch/powerpc/kernel/traps.c
6835 @@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6836 return flags;
6837 }
6838
6839 +extern void gr_handle_kernel_exploit(void);
6840 +
6841 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6842 int signr)
6843 {
6844 @@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6845 panic("Fatal exception in interrupt");
6846 if (panic_on_oops)
6847 panic("Fatal exception");
6848 +
6849 + gr_handle_kernel_exploit();
6850 +
6851 do_exit(signr);
6852 }
6853
6854 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6855 index 1b2076f..835e4be 100644
6856 --- a/arch/powerpc/kernel/vdso.c
6857 +++ b/arch/powerpc/kernel/vdso.c
6858 @@ -34,6 +34,7 @@
6859 #include <asm/firmware.h>
6860 #include <asm/vdso.h>
6861 #include <asm/vdso_datapage.h>
6862 +#include <asm/mman.h>
6863
6864 #include "setup.h"
6865
6866 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6867 vdso_base = VDSO32_MBASE;
6868 #endif
6869
6870 - current->mm->context.vdso_base = 0;
6871 + current->mm->context.vdso_base = ~0UL;
6872
6873 /* vDSO has a problem and was disabled, just don't "enable" it for the
6874 * process
6875 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6876 vdso_base = get_unmapped_area(NULL, vdso_base,
6877 (vdso_pages << PAGE_SHIFT) +
6878 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6879 - 0, 0);
6880 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
6881 if (IS_ERR_VALUE(vdso_base)) {
6882 rc = vdso_base;
6883 goto fail_mmapsem;
6884 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6885 index 5eea6f3..5d10396 100644
6886 --- a/arch/powerpc/lib/usercopy_64.c
6887 +++ b/arch/powerpc/lib/usercopy_64.c
6888 @@ -9,22 +9,6 @@
6889 #include <linux/module.h>
6890 #include <asm/uaccess.h>
6891
6892 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6893 -{
6894 - if (likely(access_ok(VERIFY_READ, from, n)))
6895 - n = __copy_from_user(to, from, n);
6896 - else
6897 - memset(to, 0, n);
6898 - return n;
6899 -}
6900 -
6901 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6902 -{
6903 - if (likely(access_ok(VERIFY_WRITE, to, n)))
6904 - n = __copy_to_user(to, from, n);
6905 - return n;
6906 -}
6907 -
6908 unsigned long copy_in_user(void __user *to, const void __user *from,
6909 unsigned long n)
6910 {
6911 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6912 return n;
6913 }
6914
6915 -EXPORT_SYMBOL(copy_from_user);
6916 -EXPORT_SYMBOL(copy_to_user);
6917 EXPORT_SYMBOL(copy_in_user);
6918
6919 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6920 index 229951f..cdeca42 100644
6921 --- a/arch/powerpc/mm/fault.c
6922 +++ b/arch/powerpc/mm/fault.c
6923 @@ -32,6 +32,10 @@
6924 #include <linux/perf_event.h>
6925 #include <linux/magic.h>
6926 #include <linux/ratelimit.h>
6927 +#include <linux/slab.h>
6928 +#include <linux/pagemap.h>
6929 +#include <linux/compiler.h>
6930 +#include <linux/unistd.h>
6931
6932 #include <asm/firmware.h>
6933 #include <asm/page.h>
6934 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
6935 }
6936 #endif
6937
6938 +#ifdef CONFIG_PAX_PAGEEXEC
6939 +/*
6940 + * PaX: decide what to do with offenders (regs->nip = fault address)
6941 + *
6942 + * returns 1 when task should be killed
6943 + */
6944 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6945 +{
6946 + return 1;
6947 +}
6948 +
6949 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6950 +{
6951 + unsigned long i;
6952 +
6953 + printk(KERN_ERR "PAX: bytes at PC: ");
6954 + for (i = 0; i < 5; i++) {
6955 + unsigned int c;
6956 + if (get_user(c, (unsigned int __user *)pc+i))
6957 + printk(KERN_CONT "???????? ");
6958 + else
6959 + printk(KERN_CONT "%08x ", c);
6960 + }
6961 + printk("\n");
6962 +}
6963 +#endif
6964 +
6965 /*
6966 * Check whether the instruction at regs->nip is a store using
6967 * an update addressing form which will update r1.
6968 @@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
6969 * indicate errors in DSISR but can validly be set in SRR1.
6970 */
6971 if (trap == 0x400)
6972 - error_code &= 0x48200000;
6973 + error_code &= 0x58200000;
6974 else
6975 is_write = error_code & DSISR_ISSTORE;
6976 #else
6977 @@ -364,7 +395,7 @@ good_area:
6978 * "undefined". Of those that can be set, this is the only
6979 * one which seems bad.
6980 */
6981 - if (error_code & 0x10000000)
6982 + if (error_code & DSISR_GUARDED)
6983 /* Guarded storage error. */
6984 goto bad_area;
6985 #endif /* CONFIG_8xx */
6986 @@ -379,7 +410,7 @@ good_area:
6987 * processors use the same I/D cache coherency mechanism
6988 * as embedded.
6989 */
6990 - if (error_code & DSISR_PROTFAULT)
6991 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
6992 goto bad_area;
6993 #endif /* CONFIG_PPC_STD_MMU */
6994
6995 @@ -462,6 +493,23 @@ bad_area:
6996 bad_area_nosemaphore:
6997 /* User mode accesses cause a SIGSEGV */
6998 if (user_mode(regs)) {
6999 +
7000 +#ifdef CONFIG_PAX_PAGEEXEC
7001 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7002 +#ifdef CONFIG_PPC_STD_MMU
7003 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7004 +#else
7005 + if (is_exec && regs->nip == address) {
7006 +#endif
7007 + switch (pax_handle_fetch_fault(regs)) {
7008 + }
7009 +
7010 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7011 + do_group_exit(SIGKILL);
7012 + }
7013 + }
7014 +#endif
7015 +
7016 _exception(SIGSEGV, regs, code, address);
7017 return 0;
7018 }
7019 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7020 index 67a42ed..cd463e0 100644
7021 --- a/arch/powerpc/mm/mmap_64.c
7022 +++ b/arch/powerpc/mm/mmap_64.c
7023 @@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7024 {
7025 unsigned long rnd = 0;
7026
7027 +#ifdef CONFIG_PAX_RANDMMAP
7028 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7029 +#endif
7030 +
7031 if (current->flags & PF_RANDOMIZE) {
7032 /* 8MB for 32bit, 1GB for 64bit */
7033 if (is_32bit_task())
7034 @@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7035 */
7036 if (mmap_is_legacy()) {
7037 mm->mmap_base = TASK_UNMAPPED_BASE;
7038 +
7039 +#ifdef CONFIG_PAX_RANDMMAP
7040 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7041 + mm->mmap_base += mm->delta_mmap;
7042 +#endif
7043 +
7044 mm->get_unmapped_area = arch_get_unmapped_area;
7045 mm->unmap_area = arch_unmap_area;
7046 } else {
7047 mm->mmap_base = mmap_base();
7048 +
7049 +#ifdef CONFIG_PAX_RANDMMAP
7050 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7051 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7052 +#endif
7053 +
7054 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7055 mm->unmap_area = arch_unmap_area_topdown;
7056 }
7057 diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7058 index e779642..e5bb889 100644
7059 --- a/arch/powerpc/mm/mmu_context_nohash.c
7060 +++ b/arch/powerpc/mm/mmu_context_nohash.c
7061 @@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7062 return NOTIFY_OK;
7063 }
7064
7065 -static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7066 +static struct notifier_block mmu_context_cpu_nb = {
7067 .notifier_call = mmu_context_cpu_notify,
7068 };
7069
7070 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7071 index 6a252c4..3024d81 100644
7072 --- a/arch/powerpc/mm/numa.c
7073 +++ b/arch/powerpc/mm/numa.c
7074 @@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7075 return ret;
7076 }
7077
7078 -static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7079 +static struct notifier_block ppc64_numa_nb = {
7080 .notifier_call = cpu_numa_callback,
7081 .priority = 1 /* Must run before sched domains notifier. */
7082 };
7083 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7084 index cf9dada..241529f 100644
7085 --- a/arch/powerpc/mm/slice.c
7086 +++ b/arch/powerpc/mm/slice.c
7087 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7088 if ((mm->task_size - len) < addr)
7089 return 0;
7090 vma = find_vma(mm, addr);
7091 - return (!vma || (addr + len) <= vma->vm_start);
7092 + return check_heap_stack_gap(vma, addr, len, 0);
7093 }
7094
7095 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7096 @@ -272,7 +272,7 @@ full_search:
7097 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7098 continue;
7099 }
7100 - if (!vma || addr + len <= vma->vm_start) {
7101 + if (check_heap_stack_gap(vma, addr, len, 0)) {
7102 /*
7103 * Remember the place where we stopped the search:
7104 */
7105 @@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7106 }
7107 }
7108
7109 - addr = mm->mmap_base;
7110 - while (addr > len) {
7111 + if (mm->mmap_base < len)
7112 + addr = -ENOMEM;
7113 + else
7114 + addr = mm->mmap_base - len;
7115 +
7116 + while (!IS_ERR_VALUE(addr)) {
7117 /* Go down by chunk size */
7118 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7119 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
7120
7121 /* Check for hit with different page size */
7122 mask = slice_range_to_mask(addr, len);
7123 @@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7124 * return with success:
7125 */
7126 vma = find_vma(mm, addr);
7127 - if (!vma || (addr + len) <= vma->vm_start) {
7128 + if (check_heap_stack_gap(vma, addr, len, 0)) {
7129 /* remember the address as a hint for next time */
7130 if (use_cache)
7131 mm->free_area_cache = addr;
7132 @@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7133 mm->cached_hole_size = vma->vm_start - addr;
7134
7135 /* try just below the current vma->vm_start */
7136 - addr = vma->vm_start;
7137 + addr = skip_heap_stack_gap(vma, len, 0);
7138 }
7139
7140 /*
7141 @@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7142 if (fixed && addr > (mm->task_size - len))
7143 return -EINVAL;
7144
7145 +#ifdef CONFIG_PAX_RANDMMAP
7146 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7147 + addr = 0;
7148 +#endif
7149 +
7150 /* If hint, make sure it matches our alignment restrictions */
7151 if (!fixed && addr) {
7152 addr = _ALIGN_UP(addr, 1ul << pshift);
7153 diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7154 index 68c57d3..1fdcfb2 100644
7155 --- a/arch/powerpc/platforms/cell/spufs/file.c
7156 +++ b/arch/powerpc/platforms/cell/spufs/file.c
7157 @@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7158 return VM_FAULT_NOPAGE;
7159 }
7160
7161 -static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7162 +static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7163 unsigned long address,
7164 - void *buf, int len, int write)
7165 + void *buf, size_t len, int write)
7166 {
7167 struct spu_context *ctx = vma->vm_file->private_data;
7168 unsigned long offset = address - vma->vm_start;
7169 diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7170 index bdb738a..49c9f95 100644
7171 --- a/arch/powerpc/platforms/powermac/smp.c
7172 +++ b/arch/powerpc/platforms/powermac/smp.c
7173 @@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7174 return NOTIFY_OK;
7175 }
7176
7177 -static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7178 +static struct notifier_block smp_core99_cpu_nb = {
7179 .notifier_call = smp_core99_cpu_notify,
7180 };
7181 #endif /* CONFIG_HOTPLUG_CPU */
7182 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7183 index c797832..ce575c8 100644
7184 --- a/arch/s390/include/asm/atomic.h
7185 +++ b/arch/s390/include/asm/atomic.h
7186 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7187 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7188 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7189
7190 +#define atomic64_read_unchecked(v) atomic64_read(v)
7191 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7192 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7193 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7194 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7195 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7196 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7197 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7198 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7199 +
7200 #define smp_mb__before_atomic_dec() smp_mb()
7201 #define smp_mb__after_atomic_dec() smp_mb()
7202 #define smp_mb__before_atomic_inc() smp_mb()
7203 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7204 index 4d7ccac..d03d0ad 100644
7205 --- a/arch/s390/include/asm/cache.h
7206 +++ b/arch/s390/include/asm/cache.h
7207 @@ -9,8 +9,10 @@
7208 #ifndef __ARCH_S390_CACHE_H
7209 #define __ARCH_S390_CACHE_H
7210
7211 -#define L1_CACHE_BYTES 256
7212 +#include <linux/const.h>
7213 +
7214 #define L1_CACHE_SHIFT 8
7215 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7216 #define NET_SKB_PAD 32
7217
7218 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7219 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7220 index 1bfdf24..9c9ab2e 100644
7221 --- a/arch/s390/include/asm/elf.h
7222 +++ b/arch/s390/include/asm/elf.h
7223 @@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7224 the loader. We need to make sure that it is out of the way of the program
7225 that it will "exec", and that there is sufficient room for the brk. */
7226
7227 -extern unsigned long randomize_et_dyn(unsigned long base);
7228 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7229 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7230 +
7231 +#ifdef CONFIG_PAX_ASLR
7232 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7233 +
7234 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7235 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7236 +#endif
7237
7238 /* This yields a mask that user programs can use to figure out what
7239 instruction set this CPU supports. */
7240 @@ -207,9 +213,6 @@ struct linux_binprm;
7241 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7242 int arch_setup_additional_pages(struct linux_binprm *, int);
7243
7244 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7245 -#define arch_randomize_brk arch_randomize_brk
7246 -
7247 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7248
7249 #endif
7250 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7251 index c4a93d6..4d2a9b4 100644
7252 --- a/arch/s390/include/asm/exec.h
7253 +++ b/arch/s390/include/asm/exec.h
7254 @@ -7,6 +7,6 @@
7255 #ifndef __ASM_EXEC_H
7256 #define __ASM_EXEC_H
7257
7258 -extern unsigned long arch_align_stack(unsigned long sp);
7259 +#define arch_align_stack(x) ((x) & ~0xfUL)
7260
7261 #endif /* __ASM_EXEC_H */
7262 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7263 index 9c33ed4..e40cbef 100644
7264 --- a/arch/s390/include/asm/uaccess.h
7265 +++ b/arch/s390/include/asm/uaccess.h
7266 @@ -252,6 +252,10 @@ static inline unsigned long __must_check
7267 copy_to_user(void __user *to, const void *from, unsigned long n)
7268 {
7269 might_fault();
7270 +
7271 + if ((long)n < 0)
7272 + return n;
7273 +
7274 return __copy_to_user(to, from, n);
7275 }
7276
7277 @@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7278 static inline unsigned long __must_check
7279 __copy_from_user(void *to, const void __user *from, unsigned long n)
7280 {
7281 + if ((long)n < 0)
7282 + return n;
7283 +
7284 if (__builtin_constant_p(n) && (n <= 256))
7285 return uaccess.copy_from_user_small(n, from, to);
7286 else
7287 @@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7288 static inline unsigned long __must_check
7289 copy_from_user(void *to, const void __user *from, unsigned long n)
7290 {
7291 - unsigned int sz = __compiletime_object_size(to);
7292 + size_t sz = __compiletime_object_size(to);
7293
7294 might_fault();
7295 - if (unlikely(sz != -1 && sz < n)) {
7296 +
7297 + if ((long)n < 0)
7298 + return n;
7299 +
7300 + if (unlikely(sz != (size_t)-1 && sz < n)) {
7301 copy_from_user_overflow();
7302 return n;
7303 }
7304 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7305 index 7845e15..59c4353 100644
7306 --- a/arch/s390/kernel/module.c
7307 +++ b/arch/s390/kernel/module.c
7308 @@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7309
7310 /* Increase core size by size of got & plt and set start
7311 offsets for got and plt. */
7312 - me->core_size = ALIGN(me->core_size, 4);
7313 - me->arch.got_offset = me->core_size;
7314 - me->core_size += me->arch.got_size;
7315 - me->arch.plt_offset = me->core_size;
7316 - me->core_size += me->arch.plt_size;
7317 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
7318 + me->arch.got_offset = me->core_size_rw;
7319 + me->core_size_rw += me->arch.got_size;
7320 + me->arch.plt_offset = me->core_size_rx;
7321 + me->core_size_rx += me->arch.plt_size;
7322 return 0;
7323 }
7324
7325 @@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7326 if (info->got_initialized == 0) {
7327 Elf_Addr *gotent;
7328
7329 - gotent = me->module_core + me->arch.got_offset +
7330 + gotent = me->module_core_rw + me->arch.got_offset +
7331 info->got_offset;
7332 *gotent = val;
7333 info->got_initialized = 1;
7334 @@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7335 rc = apply_rela_bits(loc, val, 0, 64, 0);
7336 else if (r_type == R_390_GOTENT ||
7337 r_type == R_390_GOTPLTENT) {
7338 - val += (Elf_Addr) me->module_core - loc;
7339 + val += (Elf_Addr) me->module_core_rw - loc;
7340 rc = apply_rela_bits(loc, val, 1, 32, 1);
7341 }
7342 break;
7343 @@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7344 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7345 if (info->plt_initialized == 0) {
7346 unsigned int *ip;
7347 - ip = me->module_core + me->arch.plt_offset +
7348 + ip = me->module_core_rx + me->arch.plt_offset +
7349 info->plt_offset;
7350 #ifndef CONFIG_64BIT
7351 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7352 @@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7353 val - loc + 0xffffUL < 0x1ffffeUL) ||
7354 (r_type == R_390_PLT32DBL &&
7355 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7356 - val = (Elf_Addr) me->module_core +
7357 + val = (Elf_Addr) me->module_core_rx +
7358 me->arch.plt_offset +
7359 info->plt_offset;
7360 val += rela->r_addend - loc;
7361 @@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7362 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7363 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7364 val = val + rela->r_addend -
7365 - ((Elf_Addr) me->module_core + me->arch.got_offset);
7366 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7367 if (r_type == R_390_GOTOFF16)
7368 rc = apply_rela_bits(loc, val, 0, 16, 0);
7369 else if (r_type == R_390_GOTOFF32)
7370 @@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7371 break;
7372 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7373 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7374 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
7375 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7376 rela->r_addend - loc;
7377 if (r_type == R_390_GOTPC)
7378 rc = apply_rela_bits(loc, val, 1, 32, 0);
7379 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7380 index 536d645..4a5bd9e 100644
7381 --- a/arch/s390/kernel/process.c
7382 +++ b/arch/s390/kernel/process.c
7383 @@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7384 }
7385 return 0;
7386 }
7387 -
7388 -unsigned long arch_align_stack(unsigned long sp)
7389 -{
7390 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7391 - sp -= get_random_int() & ~PAGE_MASK;
7392 - return sp & ~0xf;
7393 -}
7394 -
7395 -static inline unsigned long brk_rnd(void)
7396 -{
7397 - /* 8MB for 32bit, 1GB for 64bit */
7398 - if (is_32bit_task())
7399 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7400 - else
7401 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7402 -}
7403 -
7404 -unsigned long arch_randomize_brk(struct mm_struct *mm)
7405 -{
7406 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7407 -
7408 - if (ret < mm->brk)
7409 - return mm->brk;
7410 - return ret;
7411 -}
7412 -
7413 -unsigned long randomize_et_dyn(unsigned long base)
7414 -{
7415 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7416 -
7417 - if (!(current->flags & PF_RANDOMIZE))
7418 - return base;
7419 - if (ret < base)
7420 - return base;
7421 - return ret;
7422 -}
7423 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7424 index 06bafec..2bca531 100644
7425 --- a/arch/s390/mm/mmap.c
7426 +++ b/arch/s390/mm/mmap.c
7427 @@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7428 */
7429 if (mmap_is_legacy()) {
7430 mm->mmap_base = TASK_UNMAPPED_BASE;
7431 +
7432 +#ifdef CONFIG_PAX_RANDMMAP
7433 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7434 + mm->mmap_base += mm->delta_mmap;
7435 +#endif
7436 +
7437 mm->get_unmapped_area = arch_get_unmapped_area;
7438 mm->unmap_area = arch_unmap_area;
7439 } else {
7440 mm->mmap_base = mmap_base();
7441 +
7442 +#ifdef CONFIG_PAX_RANDMMAP
7443 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7444 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7445 +#endif
7446 +
7447 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7448 mm->unmap_area = arch_unmap_area_topdown;
7449 }
7450 @@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7451 */
7452 if (mmap_is_legacy()) {
7453 mm->mmap_base = TASK_UNMAPPED_BASE;
7454 +
7455 +#ifdef CONFIG_PAX_RANDMMAP
7456 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7457 + mm->mmap_base += mm->delta_mmap;
7458 +#endif
7459 +
7460 mm->get_unmapped_area = s390_get_unmapped_area;
7461 mm->unmap_area = arch_unmap_area;
7462 } else {
7463 mm->mmap_base = mmap_base();
7464 +
7465 +#ifdef CONFIG_PAX_RANDMMAP
7466 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7467 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7468 +#endif
7469 +
7470 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7471 mm->unmap_area = arch_unmap_area_topdown;
7472 }
7473 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7474 index ae3d59f..f65f075 100644
7475 --- a/arch/score/include/asm/cache.h
7476 +++ b/arch/score/include/asm/cache.h
7477 @@ -1,7 +1,9 @@
7478 #ifndef _ASM_SCORE_CACHE_H
7479 #define _ASM_SCORE_CACHE_H
7480
7481 +#include <linux/const.h>
7482 +
7483 #define L1_CACHE_SHIFT 4
7484 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7485 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7486
7487 #endif /* _ASM_SCORE_CACHE_H */
7488 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7489 index f9f3cd5..58ff438 100644
7490 --- a/arch/score/include/asm/exec.h
7491 +++ b/arch/score/include/asm/exec.h
7492 @@ -1,6 +1,6 @@
7493 #ifndef _ASM_SCORE_EXEC_H
7494 #define _ASM_SCORE_EXEC_H
7495
7496 -extern unsigned long arch_align_stack(unsigned long sp);
7497 +#define arch_align_stack(x) (x)
7498
7499 #endif /* _ASM_SCORE_EXEC_H */
7500 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7501 index 7956846..5f37677 100644
7502 --- a/arch/score/kernel/process.c
7503 +++ b/arch/score/kernel/process.c
7504 @@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7505
7506 return task_pt_regs(task)->cp0_epc;
7507 }
7508 -
7509 -unsigned long arch_align_stack(unsigned long sp)
7510 -{
7511 - return sp;
7512 -}
7513 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7514 index ef9e555..331bd29 100644
7515 --- a/arch/sh/include/asm/cache.h
7516 +++ b/arch/sh/include/asm/cache.h
7517 @@ -9,10 +9,11 @@
7518 #define __ASM_SH_CACHE_H
7519 #ifdef __KERNEL__
7520
7521 +#include <linux/const.h>
7522 #include <linux/init.h>
7523 #include <cpu/cache.h>
7524
7525 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7526 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7527
7528 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7529
7530 diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7531 index 03f2b55..b0270327 100644
7532 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7533 +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7534 @@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7535 return NOTIFY_OK;
7536 }
7537
7538 -static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7539 +static struct notifier_block shx3_cpu_notifier = {
7540 .notifier_call = shx3_cpu_callback,
7541 };
7542
7543 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7544 index 6777177..cb5e44f 100644
7545 --- a/arch/sh/mm/mmap.c
7546 +++ b/arch/sh/mm/mmap.c
7547 @@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7548 struct mm_struct *mm = current->mm;
7549 struct vm_area_struct *vma;
7550 int do_colour_align;
7551 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7552 struct vm_unmapped_area_info info;
7553
7554 if (flags & MAP_FIXED) {
7555 @@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7556 if (filp || (flags & MAP_SHARED))
7557 do_colour_align = 1;
7558
7559 +#ifdef CONFIG_PAX_RANDMMAP
7560 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7561 +#endif
7562 +
7563 if (addr) {
7564 if (do_colour_align)
7565 addr = COLOUR_ALIGN(addr, pgoff);
7566 @@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7567 addr = PAGE_ALIGN(addr);
7568
7569 vma = find_vma(mm, addr);
7570 - if (TASK_SIZE - len >= addr &&
7571 - (!vma || addr + len <= vma->vm_start))
7572 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7573 return addr;
7574 }
7575
7576 info.flags = 0;
7577 info.length = len;
7578 - info.low_limit = TASK_UNMAPPED_BASE;
7579 + info.low_limit = mm->mmap_base;
7580 info.high_limit = TASK_SIZE;
7581 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7582 info.align_offset = pgoff << PAGE_SHIFT;
7583 @@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7584 struct mm_struct *mm = current->mm;
7585 unsigned long addr = addr0;
7586 int do_colour_align;
7587 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7588 struct vm_unmapped_area_info info;
7589
7590 if (flags & MAP_FIXED) {
7591 @@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7592 if (filp || (flags & MAP_SHARED))
7593 do_colour_align = 1;
7594
7595 +#ifdef CONFIG_PAX_RANDMMAP
7596 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7597 +#endif
7598 +
7599 /* requesting a specific address */
7600 if (addr) {
7601 if (do_colour_align)
7602 @@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7603 addr = PAGE_ALIGN(addr);
7604
7605 vma = find_vma(mm, addr);
7606 - if (TASK_SIZE - len >= addr &&
7607 - (!vma || addr + len <= vma->vm_start))
7608 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7609 return addr;
7610 }
7611
7612 @@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7613 VM_BUG_ON(addr != -ENOMEM);
7614 info.flags = 0;
7615 info.low_limit = TASK_UNMAPPED_BASE;
7616 +
7617 +#ifdef CONFIG_PAX_RANDMMAP
7618 + if (mm->pax_flags & MF_PAX_RANDMMAP)
7619 + info.low_limit += mm->delta_mmap;
7620 +#endif
7621 +
7622 info.high_limit = TASK_SIZE;
7623 addr = vm_unmapped_area(&info);
7624 }
7625 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7626 index be56a24..443328f 100644
7627 --- a/arch/sparc/include/asm/atomic_64.h
7628 +++ b/arch/sparc/include/asm/atomic_64.h
7629 @@ -14,18 +14,40 @@
7630 #define ATOMIC64_INIT(i) { (i) }
7631
7632 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7633 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7634 +{
7635 + return v->counter;
7636 +}
7637 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7638 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7639 +{
7640 + return v->counter;
7641 +}
7642
7643 #define atomic_set(v, i) (((v)->counter) = i)
7644 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7645 +{
7646 + v->counter = i;
7647 +}
7648 #define atomic64_set(v, i) (((v)->counter) = i)
7649 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7650 +{
7651 + v->counter = i;
7652 +}
7653
7654 extern void atomic_add(int, atomic_t *);
7655 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7656 extern void atomic64_add(long, atomic64_t *);
7657 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7658 extern void atomic_sub(int, atomic_t *);
7659 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7660 extern void atomic64_sub(long, atomic64_t *);
7661 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7662
7663 extern int atomic_add_ret(int, atomic_t *);
7664 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7665 extern long atomic64_add_ret(long, atomic64_t *);
7666 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7667 extern int atomic_sub_ret(int, atomic_t *);
7668 extern long atomic64_sub_ret(long, atomic64_t *);
7669
7670 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7671 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7672
7673 #define atomic_inc_return(v) atomic_add_ret(1, v)
7674 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7675 +{
7676 + return atomic_add_ret_unchecked(1, v);
7677 +}
7678 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7679 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7680 +{
7681 + return atomic64_add_ret_unchecked(1, v);
7682 +}
7683
7684 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7685 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7686
7687 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7688 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7689 +{
7690 + return atomic_add_ret_unchecked(i, v);
7691 +}
7692 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7693 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7694 +{
7695 + return atomic64_add_ret_unchecked(i, v);
7696 +}
7697
7698 /*
7699 * atomic_inc_and_test - increment and test
7700 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7701 * other cases.
7702 */
7703 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7704 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7705 +{
7706 + return atomic_inc_return_unchecked(v) == 0;
7707 +}
7708 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7709
7710 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7711 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7712 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7713
7714 #define atomic_inc(v) atomic_add(1, v)
7715 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7716 +{
7717 + atomic_add_unchecked(1, v);
7718 +}
7719 #define atomic64_inc(v) atomic64_add(1, v)
7720 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7721 +{
7722 + atomic64_add_unchecked(1, v);
7723 +}
7724
7725 #define atomic_dec(v) atomic_sub(1, v)
7726 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7727 +{
7728 + atomic_sub_unchecked(1, v);
7729 +}
7730 #define atomic64_dec(v) atomic64_sub(1, v)
7731 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7732 +{
7733 + atomic64_sub_unchecked(1, v);
7734 +}
7735
7736 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7737 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7738
7739 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7740 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7741 +{
7742 + return cmpxchg(&v->counter, old, new);
7743 +}
7744 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7745 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7746 +{
7747 + return xchg(&v->counter, new);
7748 +}
7749
7750 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7751 {
7752 - int c, old;
7753 + int c, old, new;
7754 c = atomic_read(v);
7755 for (;;) {
7756 - if (unlikely(c == (u)))
7757 + if (unlikely(c == u))
7758 break;
7759 - old = atomic_cmpxchg((v), c, c + (a));
7760 +
7761 + asm volatile("addcc %2, %0, %0\n"
7762 +
7763 +#ifdef CONFIG_PAX_REFCOUNT
7764 + "tvs %%icc, 6\n"
7765 +#endif
7766 +
7767 + : "=r" (new)
7768 + : "0" (c), "ir" (a)
7769 + : "cc");
7770 +
7771 + old = atomic_cmpxchg(v, c, new);
7772 if (likely(old == c))
7773 break;
7774 c = old;
7775 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7776 #define atomic64_cmpxchg(v, o, n) \
7777 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7778 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7779 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7780 +{
7781 + return xchg(&v->counter, new);
7782 +}
7783
7784 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7785 {
7786 - long c, old;
7787 + long c, old, new;
7788 c = atomic64_read(v);
7789 for (;;) {
7790 - if (unlikely(c == (u)))
7791 + if (unlikely(c == u))
7792 break;
7793 - old = atomic64_cmpxchg((v), c, c + (a));
7794 +
7795 + asm volatile("addcc %2, %0, %0\n"
7796 +
7797 +#ifdef CONFIG_PAX_REFCOUNT
7798 + "tvs %%xcc, 6\n"
7799 +#endif
7800 +
7801 + : "=r" (new)
7802 + : "0" (c), "ir" (a)
7803 + : "cc");
7804 +
7805 + old = atomic64_cmpxchg(v, c, new);
7806 if (likely(old == c))
7807 break;
7808 c = old;
7809 }
7810 - return c != (u);
7811 + return c != u;
7812 }
7813
7814 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7815 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7816 index 5bb6991..5c2132e 100644
7817 --- a/arch/sparc/include/asm/cache.h
7818 +++ b/arch/sparc/include/asm/cache.h
7819 @@ -7,10 +7,12 @@
7820 #ifndef _SPARC_CACHE_H
7821 #define _SPARC_CACHE_H
7822
7823 +#include <linux/const.h>
7824 +
7825 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7826
7827 #define L1_CACHE_SHIFT 5
7828 -#define L1_CACHE_BYTES 32
7829 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7830
7831 #ifdef CONFIG_SPARC32
7832 #define SMP_CACHE_BYTES_SHIFT 5
7833 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7834 index a24e41f..47677ff 100644
7835 --- a/arch/sparc/include/asm/elf_32.h
7836 +++ b/arch/sparc/include/asm/elf_32.h
7837 @@ -114,6 +114,13 @@ typedef struct {
7838
7839 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7840
7841 +#ifdef CONFIG_PAX_ASLR
7842 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
7843 +
7844 +#define PAX_DELTA_MMAP_LEN 16
7845 +#define PAX_DELTA_STACK_LEN 16
7846 +#endif
7847 +
7848 /* This yields a mask that user programs can use to figure out what
7849 instruction set this cpu supports. This can NOT be done in userspace
7850 on Sparc. */
7851 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7852 index 370ca1e..d4f4a98 100644
7853 --- a/arch/sparc/include/asm/elf_64.h
7854 +++ b/arch/sparc/include/asm/elf_64.h
7855 @@ -189,6 +189,13 @@ typedef struct {
7856 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7857 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7858
7859 +#ifdef CONFIG_PAX_ASLR
7860 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7861 +
7862 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7863 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7864 +#endif
7865 +
7866 extern unsigned long sparc64_elf_hwcap;
7867 #define ELF_HWCAP sparc64_elf_hwcap
7868
7869 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7870 index 9b1c36d..209298b 100644
7871 --- a/arch/sparc/include/asm/pgalloc_32.h
7872 +++ b/arch/sparc/include/asm/pgalloc_32.h
7873 @@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7874 }
7875
7876 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7877 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7878
7879 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7880 unsigned long address)
7881 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7882 index bcfe063..b333142 100644
7883 --- a/arch/sparc/include/asm/pgalloc_64.h
7884 +++ b/arch/sparc/include/asm/pgalloc_64.h
7885 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7886 }
7887
7888 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7889 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7890
7891 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7892 {
7893 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7894 index 6fc1348..390c50a 100644
7895 --- a/arch/sparc/include/asm/pgtable_32.h
7896 +++ b/arch/sparc/include/asm/pgtable_32.h
7897 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7898 #define PAGE_SHARED SRMMU_PAGE_SHARED
7899 #define PAGE_COPY SRMMU_PAGE_COPY
7900 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7901 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7902 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7903 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7904 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7905
7906 /* Top-level page directory - dummy used by init-mm.
7907 @@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7908
7909 /* xwr */
7910 #define __P000 PAGE_NONE
7911 -#define __P001 PAGE_READONLY
7912 -#define __P010 PAGE_COPY
7913 -#define __P011 PAGE_COPY
7914 +#define __P001 PAGE_READONLY_NOEXEC
7915 +#define __P010 PAGE_COPY_NOEXEC
7916 +#define __P011 PAGE_COPY_NOEXEC
7917 #define __P100 PAGE_READONLY
7918 #define __P101 PAGE_READONLY
7919 #define __P110 PAGE_COPY
7920 #define __P111 PAGE_COPY
7921
7922 #define __S000 PAGE_NONE
7923 -#define __S001 PAGE_READONLY
7924 -#define __S010 PAGE_SHARED
7925 -#define __S011 PAGE_SHARED
7926 +#define __S001 PAGE_READONLY_NOEXEC
7927 +#define __S010 PAGE_SHARED_NOEXEC
7928 +#define __S011 PAGE_SHARED_NOEXEC
7929 #define __S100 PAGE_READONLY
7930 #define __S101 PAGE_READONLY
7931 #define __S110 PAGE_SHARED
7932 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7933 index 79da178..c2eede8 100644
7934 --- a/arch/sparc/include/asm/pgtsrmmu.h
7935 +++ b/arch/sparc/include/asm/pgtsrmmu.h
7936 @@ -115,6 +115,11 @@
7937 SRMMU_EXEC | SRMMU_REF)
7938 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7939 SRMMU_EXEC | SRMMU_REF)
7940 +
7941 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7942 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7943 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7944 +
7945 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7946 SRMMU_DIRTY | SRMMU_REF)
7947
7948 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7949 index 9689176..63c18ea 100644
7950 --- a/arch/sparc/include/asm/spinlock_64.h
7951 +++ b/arch/sparc/include/asm/spinlock_64.h
7952 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7953
7954 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7955
7956 -static void inline arch_read_lock(arch_rwlock_t *lock)
7957 +static inline void arch_read_lock(arch_rwlock_t *lock)
7958 {
7959 unsigned long tmp1, tmp2;
7960
7961 __asm__ __volatile__ (
7962 "1: ldsw [%2], %0\n"
7963 " brlz,pn %0, 2f\n"
7964 -"4: add %0, 1, %1\n"
7965 +"4: addcc %0, 1, %1\n"
7966 +
7967 +#ifdef CONFIG_PAX_REFCOUNT
7968 +" tvs %%icc, 6\n"
7969 +#endif
7970 +
7971 " cas [%2], %0, %1\n"
7972 " cmp %0, %1\n"
7973 " bne,pn %%icc, 1b\n"
7974 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
7975 " .previous"
7976 : "=&r" (tmp1), "=&r" (tmp2)
7977 : "r" (lock)
7978 - : "memory");
7979 + : "memory", "cc");
7980 }
7981
7982 -static int inline arch_read_trylock(arch_rwlock_t *lock)
7983 +static inline int arch_read_trylock(arch_rwlock_t *lock)
7984 {
7985 int tmp1, tmp2;
7986
7987 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7988 "1: ldsw [%2], %0\n"
7989 " brlz,a,pn %0, 2f\n"
7990 " mov 0, %0\n"
7991 -" add %0, 1, %1\n"
7992 +" addcc %0, 1, %1\n"
7993 +
7994 +#ifdef CONFIG_PAX_REFCOUNT
7995 +" tvs %%icc, 6\n"
7996 +#endif
7997 +
7998 " cas [%2], %0, %1\n"
7999 " cmp %0, %1\n"
8000 " bne,pn %%icc, 1b\n"
8001 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8002 return tmp1;
8003 }
8004
8005 -static void inline arch_read_unlock(arch_rwlock_t *lock)
8006 +static inline void arch_read_unlock(arch_rwlock_t *lock)
8007 {
8008 unsigned long tmp1, tmp2;
8009
8010 __asm__ __volatile__(
8011 "1: lduw [%2], %0\n"
8012 -" sub %0, 1, %1\n"
8013 +" subcc %0, 1, %1\n"
8014 +
8015 +#ifdef CONFIG_PAX_REFCOUNT
8016 +" tvs %%icc, 6\n"
8017 +#endif
8018 +
8019 " cas [%2], %0, %1\n"
8020 " cmp %0, %1\n"
8021 " bne,pn %%xcc, 1b\n"
8022 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8023 : "memory");
8024 }
8025
8026 -static void inline arch_write_lock(arch_rwlock_t *lock)
8027 +static inline void arch_write_lock(arch_rwlock_t *lock)
8028 {
8029 unsigned long mask, tmp1, tmp2;
8030
8031 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8032 : "memory");
8033 }
8034
8035 -static void inline arch_write_unlock(arch_rwlock_t *lock)
8036 +static inline void arch_write_unlock(arch_rwlock_t *lock)
8037 {
8038 __asm__ __volatile__(
8039 " stw %%g0, [%0]"
8040 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8041 : "memory");
8042 }
8043
8044 -static int inline arch_write_trylock(arch_rwlock_t *lock)
8045 +static inline int arch_write_trylock(arch_rwlock_t *lock)
8046 {
8047 unsigned long mask, tmp1, tmp2, result;
8048
8049 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8050 index 25849ae..924c54b 100644
8051 --- a/arch/sparc/include/asm/thread_info_32.h
8052 +++ b/arch/sparc/include/asm/thread_info_32.h
8053 @@ -49,6 +49,8 @@ struct thread_info {
8054 unsigned long w_saved;
8055
8056 struct restart_block restart_block;
8057 +
8058 + unsigned long lowest_stack;
8059 };
8060
8061 /*
8062 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8063 index 269bd92..e46a9b8 100644
8064 --- a/arch/sparc/include/asm/thread_info_64.h
8065 +++ b/arch/sparc/include/asm/thread_info_64.h
8066 @@ -63,6 +63,8 @@ struct thread_info {
8067 struct pt_regs *kern_una_regs;
8068 unsigned int kern_una_insn;
8069
8070 + unsigned long lowest_stack;
8071 +
8072 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8073 };
8074
8075 @@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8076 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8077 /* flag bit 6 is available */
8078 #define TIF_32BIT 7 /* 32-bit binary */
8079 -/* flag bit 8 is available */
8080 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8081 #define TIF_SECCOMP 9 /* secure computing */
8082 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8083 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8084 +
8085 /* NOTE: Thread flags >= 12 should be ones we have no interest
8086 * in using in assembly, else we can't use the mask as
8087 * an immediate value in instructions such as andcc.
8088 @@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8089 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8090 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8091 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8092 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8093
8094 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8095 _TIF_DO_NOTIFY_RESUME_MASK | \
8096 _TIF_NEED_RESCHED)
8097 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8098
8099 +#define _TIF_WORK_SYSCALL \
8100 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8101 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8102 +
8103 +
8104 /*
8105 * Thread-synchronous status.
8106 *
8107 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8108 index 0167d26..767bb0c 100644
8109 --- a/arch/sparc/include/asm/uaccess.h
8110 +++ b/arch/sparc/include/asm/uaccess.h
8111 @@ -1,5 +1,6 @@
8112 #ifndef ___ASM_SPARC_UACCESS_H
8113 #define ___ASM_SPARC_UACCESS_H
8114 +
8115 #if defined(__sparc__) && defined(__arch64__)
8116 #include <asm/uaccess_64.h>
8117 #else
8118 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8119 index 53a28dd..50c38c3 100644
8120 --- a/arch/sparc/include/asm/uaccess_32.h
8121 +++ b/arch/sparc/include/asm/uaccess_32.h
8122 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8123
8124 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8125 {
8126 - if (n && __access_ok((unsigned long) to, n))
8127 + if ((long)n < 0)
8128 + return n;
8129 +
8130 + if (n && __access_ok((unsigned long) to, n)) {
8131 + if (!__builtin_constant_p(n))
8132 + check_object_size(from, n, true);
8133 return __copy_user(to, (__force void __user *) from, n);
8134 - else
8135 + } else
8136 return n;
8137 }
8138
8139 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8140 {
8141 + if ((long)n < 0)
8142 + return n;
8143 +
8144 + if (!__builtin_constant_p(n))
8145 + check_object_size(from, n, true);
8146 +
8147 return __copy_user(to, (__force void __user *) from, n);
8148 }
8149
8150 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8151 {
8152 - if (n && __access_ok((unsigned long) from, n))
8153 + if ((long)n < 0)
8154 + return n;
8155 +
8156 + if (n && __access_ok((unsigned long) from, n)) {
8157 + if (!__builtin_constant_p(n))
8158 + check_object_size(to, n, false);
8159 return __copy_user((__force void __user *) to, from, n);
8160 - else
8161 + } else
8162 return n;
8163 }
8164
8165 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8166 {
8167 + if ((long)n < 0)
8168 + return n;
8169 +
8170 return __copy_user((__force void __user *) to, from, n);
8171 }
8172
8173 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8174 index e562d3c..191f176 100644
8175 --- a/arch/sparc/include/asm/uaccess_64.h
8176 +++ b/arch/sparc/include/asm/uaccess_64.h
8177 @@ -10,6 +10,7 @@
8178 #include <linux/compiler.h>
8179 #include <linux/string.h>
8180 #include <linux/thread_info.h>
8181 +#include <linux/kernel.h>
8182 #include <asm/asi.h>
8183 #include <asm/spitfire.h>
8184 #include <asm-generic/uaccess-unaligned.h>
8185 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8186 static inline unsigned long __must_check
8187 copy_from_user(void *to, const void __user *from, unsigned long size)
8188 {
8189 - unsigned long ret = ___copy_from_user(to, from, size);
8190 + unsigned long ret;
8191
8192 + if ((long)size < 0 || size > INT_MAX)
8193 + return size;
8194 +
8195 + if (!__builtin_constant_p(size))
8196 + check_object_size(to, size, false);
8197 +
8198 + ret = ___copy_from_user(to, from, size);
8199 if (unlikely(ret))
8200 ret = copy_from_user_fixup(to, from, size);
8201
8202 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8203 static inline unsigned long __must_check
8204 copy_to_user(void __user *to, const void *from, unsigned long size)
8205 {
8206 - unsigned long ret = ___copy_to_user(to, from, size);
8207 + unsigned long ret;
8208
8209 + if ((long)size < 0 || size > INT_MAX)
8210 + return size;
8211 +
8212 + if (!__builtin_constant_p(size))
8213 + check_object_size(from, size, true);
8214 +
8215 + ret = ___copy_to_user(to, from, size);
8216 if (unlikely(ret))
8217 ret = copy_to_user_fixup(to, from, size);
8218 return ret;
8219 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8220 index 6cf591b..b49e65a 100644
8221 --- a/arch/sparc/kernel/Makefile
8222 +++ b/arch/sparc/kernel/Makefile
8223 @@ -3,7 +3,7 @@
8224 #
8225
8226 asflags-y := -ansi
8227 -ccflags-y := -Werror
8228 +#ccflags-y := -Werror
8229
8230 extra-y := head_$(BITS).o
8231
8232 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8233 index 62eede1..9c5b904 100644
8234 --- a/arch/sparc/kernel/process_32.c
8235 +++ b/arch/sparc/kernel/process_32.c
8236 @@ -125,14 +125,14 @@ void show_regs(struct pt_regs *r)
8237
8238 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8239 r->psr, r->pc, r->npc, r->y, print_tainted());
8240 - printk("PC: <%pS>\n", (void *) r->pc);
8241 + printk("PC: <%pA>\n", (void *) r->pc);
8242 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8243 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8244 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8245 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8246 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8247 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8248 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8249 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8250
8251 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8252 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8253 @@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8254 rw = (struct reg_window32 *) fp;
8255 pc = rw->ins[7];
8256 printk("[%08lx : ", pc);
8257 - printk("%pS ] ", (void *) pc);
8258 + printk("%pA ] ", (void *) pc);
8259 fp = rw->ins[6];
8260 } while (++count < 16);
8261 printk("\n");
8262 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8263 index cdb80b2..5ca141d 100644
8264 --- a/arch/sparc/kernel/process_64.c
8265 +++ b/arch/sparc/kernel/process_64.c
8266 @@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8267 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8268 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8269 if (regs->tstate & TSTATE_PRIV)
8270 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8271 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8272 }
8273
8274 void show_regs(struct pt_regs *regs)
8275 {
8276 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8277 regs->tpc, regs->tnpc, regs->y, print_tainted());
8278 - printk("TPC: <%pS>\n", (void *) regs->tpc);
8279 + printk("TPC: <%pA>\n", (void *) regs->tpc);
8280 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8281 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8282 regs->u_regs[3]);
8283 @@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8284 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8285 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8286 regs->u_regs[15]);
8287 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8288 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8289 show_regwindow(regs);
8290 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8291 }
8292 @@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8293 ((tp && tp->task) ? tp->task->pid : -1));
8294
8295 if (gp->tstate & TSTATE_PRIV) {
8296 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8297 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8298 (void *) gp->tpc,
8299 (void *) gp->o7,
8300 (void *) gp->i7,
8301 diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8302 index 9f20566..67eb41b 100644
8303 --- a/arch/sparc/kernel/prom_common.c
8304 +++ b/arch/sparc/kernel/prom_common.c
8305 @@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8306
8307 unsigned int prom_early_allocated __initdata;
8308
8309 -static struct of_pdt_ops prom_sparc_ops __initdata = {
8310 +static struct of_pdt_ops prom_sparc_ops __initconst = {
8311 .nextprop = prom_common_nextprop,
8312 .getproplen = prom_getproplen,
8313 .getproperty = prom_getproperty,
8314 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8315 index 7ff45e4..a58f271 100644
8316 --- a/arch/sparc/kernel/ptrace_64.c
8317 +++ b/arch/sparc/kernel/ptrace_64.c
8318 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8319 return ret;
8320 }
8321
8322 +#ifdef CONFIG_GRKERNSEC_SETXID
8323 +extern void gr_delayed_cred_worker(void);
8324 +#endif
8325 +
8326 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8327 {
8328 int ret = 0;
8329 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8330 /* do the secure computing check first */
8331 secure_computing_strict(regs->u_regs[UREG_G1]);
8332
8333 +#ifdef CONFIG_GRKERNSEC_SETXID
8334 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8335 + gr_delayed_cred_worker();
8336 +#endif
8337 +
8338 if (test_thread_flag(TIF_SYSCALL_TRACE))
8339 ret = tracehook_report_syscall_entry(regs);
8340
8341 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8342
8343 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8344 {
8345 +#ifdef CONFIG_GRKERNSEC_SETXID
8346 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8347 + gr_delayed_cred_worker();
8348 +#endif
8349 +
8350 audit_syscall_exit(regs);
8351
8352 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8353 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8354 index 3a8d184..49498a8 100644
8355 --- a/arch/sparc/kernel/sys_sparc_32.c
8356 +++ b/arch/sparc/kernel/sys_sparc_32.c
8357 @@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8358 if (len > TASK_SIZE - PAGE_SIZE)
8359 return -ENOMEM;
8360 if (!addr)
8361 - addr = TASK_UNMAPPED_BASE;
8362 + addr = current->mm->mmap_base;
8363
8364 info.flags = 0;
8365 info.length = len;
8366 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8367 index 708bc29..6bfdfad 100644
8368 --- a/arch/sparc/kernel/sys_sparc_64.c
8369 +++ b/arch/sparc/kernel/sys_sparc_64.c
8370 @@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8371 struct vm_area_struct * vma;
8372 unsigned long task_size = TASK_SIZE;
8373 int do_color_align;
8374 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8375 struct vm_unmapped_area_info info;
8376
8377 if (flags & MAP_FIXED) {
8378 /* We do not accept a shared mapping if it would violate
8379 * cache aliasing constraints.
8380 */
8381 - if ((flags & MAP_SHARED) &&
8382 + if ((filp || (flags & MAP_SHARED)) &&
8383 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8384 return -EINVAL;
8385 return addr;
8386 @@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8387 if (filp || (flags & MAP_SHARED))
8388 do_color_align = 1;
8389
8390 +#ifdef CONFIG_PAX_RANDMMAP
8391 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8392 +#endif
8393 +
8394 if (addr) {
8395 if (do_color_align)
8396 addr = COLOR_ALIGN(addr, pgoff);
8397 @@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8398 addr = PAGE_ALIGN(addr);
8399
8400 vma = find_vma(mm, addr);
8401 - if (task_size - len >= addr &&
8402 - (!vma || addr + len <= vma->vm_start))
8403 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8404 return addr;
8405 }
8406
8407 info.flags = 0;
8408 info.length = len;
8409 - info.low_limit = TASK_UNMAPPED_BASE;
8410 + info.low_limit = mm->mmap_base;
8411 info.high_limit = min(task_size, VA_EXCLUDE_START);
8412 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8413 info.align_offset = pgoff << PAGE_SHIFT;
8414 + info.threadstack_offset = offset;
8415 addr = vm_unmapped_area(&info);
8416
8417 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8418 VM_BUG_ON(addr != -ENOMEM);
8419 info.low_limit = VA_EXCLUDE_END;
8420 +
8421 +#ifdef CONFIG_PAX_RANDMMAP
8422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8423 + info.low_limit += mm->delta_mmap;
8424 +#endif
8425 +
8426 info.high_limit = task_size;
8427 addr = vm_unmapped_area(&info);
8428 }
8429 @@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8430 unsigned long task_size = STACK_TOP32;
8431 unsigned long addr = addr0;
8432 int do_color_align;
8433 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8434 struct vm_unmapped_area_info info;
8435
8436 /* This should only ever run for 32-bit processes. */
8437 @@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8438 /* We do not accept a shared mapping if it would violate
8439 * cache aliasing constraints.
8440 */
8441 - if ((flags & MAP_SHARED) &&
8442 + if ((filp || (flags & MAP_SHARED)) &&
8443 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8444 return -EINVAL;
8445 return addr;
8446 @@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8447 if (filp || (flags & MAP_SHARED))
8448 do_color_align = 1;
8449
8450 +#ifdef CONFIG_PAX_RANDMMAP
8451 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8452 +#endif
8453 +
8454 /* requesting a specific address */
8455 if (addr) {
8456 if (do_color_align)
8457 @@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8458 addr = PAGE_ALIGN(addr);
8459
8460 vma = find_vma(mm, addr);
8461 - if (task_size - len >= addr &&
8462 - (!vma || addr + len <= vma->vm_start))
8463 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8464 return addr;
8465 }
8466
8467 @@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8468 info.high_limit = mm->mmap_base;
8469 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8470 info.align_offset = pgoff << PAGE_SHIFT;
8471 + info.threadstack_offset = offset;
8472 addr = vm_unmapped_area(&info);
8473
8474 /*
8475 @@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8476 VM_BUG_ON(addr != -ENOMEM);
8477 info.flags = 0;
8478 info.low_limit = TASK_UNMAPPED_BASE;
8479 +
8480 +#ifdef CONFIG_PAX_RANDMMAP
8481 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8482 + info.low_limit += mm->delta_mmap;
8483 +#endif
8484 +
8485 info.high_limit = STACK_TOP32;
8486 addr = vm_unmapped_area(&info);
8487 }
8488 @@ -264,6 +286,10 @@ static unsigned long mmap_rnd(void)
8489 {
8490 unsigned long rnd = 0UL;
8491
8492 +#ifdef CONFIG_PAX_RANDMMAP
8493 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8494 +#endif
8495 +
8496 if (current->flags & PF_RANDOMIZE) {
8497 unsigned long val = get_random_int();
8498 if (test_thread_flag(TIF_32BIT))
8499 @@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8500 gap == RLIM_INFINITY ||
8501 sysctl_legacy_va_layout) {
8502 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8503 +
8504 +#ifdef CONFIG_PAX_RANDMMAP
8505 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8506 + mm->mmap_base += mm->delta_mmap;
8507 +#endif
8508 +
8509 mm->get_unmapped_area = arch_get_unmapped_area;
8510 mm->unmap_area = arch_unmap_area;
8511 } else {
8512 @@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8513 gap = (task_size / 6 * 5);
8514
8515 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8516 +
8517 +#ifdef CONFIG_PAX_RANDMMAP
8518 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8519 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8520 +#endif
8521 +
8522 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8523 mm->unmap_area = arch_unmap_area_topdown;
8524 }
8525 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8526 index 22a1098..6255eb9 100644
8527 --- a/arch/sparc/kernel/syscalls.S
8528 +++ b/arch/sparc/kernel/syscalls.S
8529 @@ -52,7 +52,7 @@ sys32_rt_sigreturn:
8530 #endif
8531 .align 32
8532 1: ldx [%g6 + TI_FLAGS], %l5
8533 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8534 + andcc %l5, _TIF_WORK_SYSCALL, %g0
8535 be,pt %icc, rtrap
8536 nop
8537 call syscall_trace_leave
8538 @@ -184,7 +184,7 @@ linux_sparc_syscall32:
8539
8540 srl %i5, 0, %o5 ! IEU1
8541 srl %i2, 0, %o2 ! IEU0 Group
8542 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8543 + andcc %l0, _TIF_WORK_SYSCALL, %g0
8544 bne,pn %icc, linux_syscall_trace32 ! CTI
8545 mov %i0, %l5 ! IEU1
8546 call %l7 ! CTI Group brk forced
8547 @@ -207,7 +207,7 @@ linux_sparc_syscall:
8548
8549 mov %i3, %o3 ! IEU1
8550 mov %i4, %o4 ! IEU0 Group
8551 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8552 + andcc %l0, _TIF_WORK_SYSCALL, %g0
8553 bne,pn %icc, linux_syscall_trace ! CTI Group
8554 mov %i0, %l5 ! IEU0
8555 2: call %l7 ! CTI Group brk forced
8556 @@ -223,7 +223,7 @@ ret_sys_call:
8557
8558 cmp %o0, -ERESTART_RESTARTBLOCK
8559 bgeu,pn %xcc, 1f
8560 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8561 + andcc %l0, _TIF_WORK_SYSCALL, %g0
8562 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8563
8564 2:
8565 diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8566 index 654e8aa..45f431b 100644
8567 --- a/arch/sparc/kernel/sysfs.c
8568 +++ b/arch/sparc/kernel/sysfs.c
8569 @@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8570 return NOTIFY_OK;
8571 }
8572
8573 -static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8574 +static struct notifier_block sysfs_cpu_nb = {
8575 .notifier_call = sysfs_cpu_notify,
8576 };
8577
8578 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8579 index 6629829..036032d 100644
8580 --- a/arch/sparc/kernel/traps_32.c
8581 +++ b/arch/sparc/kernel/traps_32.c
8582 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8583 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8584 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8585
8586 +extern void gr_handle_kernel_exploit(void);
8587 +
8588 void die_if_kernel(char *str, struct pt_regs *regs)
8589 {
8590 static int die_counter;
8591 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8592 count++ < 30 &&
8593 (((unsigned long) rw) >= PAGE_OFFSET) &&
8594 !(((unsigned long) rw) & 0x7)) {
8595 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
8596 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
8597 (void *) rw->ins[7]);
8598 rw = (struct reg_window32 *)rw->ins[6];
8599 }
8600 }
8601 printk("Instruction DUMP:");
8602 instruction_dump ((unsigned long *) regs->pc);
8603 - if(regs->psr & PSR_PS)
8604 + if(regs->psr & PSR_PS) {
8605 + gr_handle_kernel_exploit();
8606 do_exit(SIGKILL);
8607 + }
8608 do_exit(SIGSEGV);
8609 }
8610
8611 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8612 index 8d38ca9..845b1d6 100644
8613 --- a/arch/sparc/kernel/traps_64.c
8614 +++ b/arch/sparc/kernel/traps_64.c
8615 @@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8616 i + 1,
8617 p->trapstack[i].tstate, p->trapstack[i].tpc,
8618 p->trapstack[i].tnpc, p->trapstack[i].tt);
8619 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8620 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8621 }
8622 }
8623
8624 @@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8625
8626 lvl -= 0x100;
8627 if (regs->tstate & TSTATE_PRIV) {
8628 +
8629 +#ifdef CONFIG_PAX_REFCOUNT
8630 + if (lvl == 6)
8631 + pax_report_refcount_overflow(regs);
8632 +#endif
8633 +
8634 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8635 die_if_kernel(buffer, regs);
8636 }
8637 @@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8638 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8639 {
8640 char buffer[32];
8641 -
8642 +
8643 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8644 0, lvl, SIGTRAP) == NOTIFY_STOP)
8645 return;
8646
8647 +#ifdef CONFIG_PAX_REFCOUNT
8648 + if (lvl == 6)
8649 + pax_report_refcount_overflow(regs);
8650 +#endif
8651 +
8652 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8653
8654 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8655 @@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8656 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8657 printk("%s" "ERROR(%d): ",
8658 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8659 - printk("TPC<%pS>\n", (void *) regs->tpc);
8660 + printk("TPC<%pA>\n", (void *) regs->tpc);
8661 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8662 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8663 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8664 @@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8665 smp_processor_id(),
8666 (type & 0x1) ? 'I' : 'D',
8667 regs->tpc);
8668 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8669 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8670 panic("Irrecoverable Cheetah+ parity error.");
8671 }
8672
8673 @@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8674 smp_processor_id(),
8675 (type & 0x1) ? 'I' : 'D',
8676 regs->tpc);
8677 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8678 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8679 }
8680
8681 struct sun4v_error_entry {
8682 @@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8683
8684 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8685 regs->tpc, tl);
8686 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8687 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8688 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8689 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8690 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8691 (void *) regs->u_regs[UREG_I7]);
8692 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8693 "pte[%lx] error[%lx]\n",
8694 @@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8695
8696 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8697 regs->tpc, tl);
8698 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8699 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8700 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8701 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8702 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8703 (void *) regs->u_regs[UREG_I7]);
8704 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8705 "pte[%lx] error[%lx]\n",
8706 @@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8707 fp = (unsigned long)sf->fp + STACK_BIAS;
8708 }
8709
8710 - printk(" [%016lx] %pS\n", pc, (void *) pc);
8711 + printk(" [%016lx] %pA\n", pc, (void *) pc);
8712 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8713 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8714 int index = tsk->curr_ret_stack;
8715 if (tsk->ret_stack && index >= graph) {
8716 pc = tsk->ret_stack[index - graph].ret;
8717 - printk(" [%016lx] %pS\n", pc, (void *) pc);
8718 + printk(" [%016lx] %pA\n", pc, (void *) pc);
8719 graph++;
8720 }
8721 }
8722 @@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8723 return (struct reg_window *) (fp + STACK_BIAS);
8724 }
8725
8726 +extern void gr_handle_kernel_exploit(void);
8727 +
8728 void die_if_kernel(char *str, struct pt_regs *regs)
8729 {
8730 static int die_counter;
8731 @@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8732 while (rw &&
8733 count++ < 30 &&
8734 kstack_valid(tp, (unsigned long) rw)) {
8735 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
8736 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
8737 (void *) rw->ins[7]);
8738
8739 rw = kernel_stack_up(rw);
8740 @@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8741 }
8742 user_instruction_dump ((unsigned int __user *) regs->tpc);
8743 }
8744 - if (regs->tstate & TSTATE_PRIV)
8745 + if (regs->tstate & TSTATE_PRIV) {
8746 + gr_handle_kernel_exploit();
8747 do_exit(SIGKILL);
8748 + }
8749 do_exit(SIGSEGV);
8750 }
8751 EXPORT_SYMBOL(die_if_kernel);
8752 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8753 index 8201c25e..072a2a7 100644
8754 --- a/arch/sparc/kernel/unaligned_64.c
8755 +++ b/arch/sparc/kernel/unaligned_64.c
8756 @@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8757 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8758
8759 if (__ratelimit(&ratelimit)) {
8760 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
8761 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
8762 regs->tpc, (void *) regs->tpc);
8763 }
8764 }
8765 diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
8766 index eb1624b..55100de 100644
8767 --- a/arch/sparc/kernel/us3_cpufreq.c
8768 +++ b/arch/sparc/kernel/us3_cpufreq.c
8769 @@ -18,14 +18,12 @@
8770 #include <asm/head.h>
8771 #include <asm/timer.h>
8772
8773 -static struct cpufreq_driver *cpufreq_us3_driver;
8774 -
8775 struct us3_freq_percpu_info {
8776 struct cpufreq_frequency_table table[4];
8777 };
8778
8779 /* Indexed by cpu number. */
8780 -static struct us3_freq_percpu_info *us3_freq_table;
8781 +static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
8782
8783 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
8784 * in the Safari config register.
8785 @@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
8786
8787 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
8788 {
8789 - if (cpufreq_us3_driver)
8790 - us3_set_cpu_divider_index(policy->cpu, 0);
8791 + us3_set_cpu_divider_index(policy->cpu, 0);
8792
8793 return 0;
8794 }
8795
8796 +static int __init us3_freq_init(void);
8797 +static void __exit us3_freq_exit(void);
8798 +
8799 +static struct cpufreq_driver cpufreq_us3_driver = {
8800 + .init = us3_freq_cpu_init,
8801 + .verify = us3_freq_verify,
8802 + .target = us3_freq_target,
8803 + .get = us3_freq_get,
8804 + .exit = us3_freq_cpu_exit,
8805 + .owner = THIS_MODULE,
8806 + .name = "UltraSPARC-III",
8807 +
8808 +};
8809 +
8810 static int __init us3_freq_init(void)
8811 {
8812 unsigned long manuf, impl, ver;
8813 @@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
8814 (impl == CHEETAH_IMPL ||
8815 impl == CHEETAH_PLUS_IMPL ||
8816 impl == JAGUAR_IMPL ||
8817 - impl == PANTHER_IMPL)) {
8818 - struct cpufreq_driver *driver;
8819 -
8820 - ret = -ENOMEM;
8821 - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
8822 - if (!driver)
8823 - goto err_out;
8824 -
8825 - us3_freq_table = kzalloc(
8826 - (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
8827 - GFP_KERNEL);
8828 - if (!us3_freq_table)
8829 - goto err_out;
8830 -
8831 - driver->init = us3_freq_cpu_init;
8832 - driver->verify = us3_freq_verify;
8833 - driver->target = us3_freq_target;
8834 - driver->get = us3_freq_get;
8835 - driver->exit = us3_freq_cpu_exit;
8836 - driver->owner = THIS_MODULE,
8837 - strcpy(driver->name, "UltraSPARC-III");
8838 -
8839 - cpufreq_us3_driver = driver;
8840 - ret = cpufreq_register_driver(driver);
8841 - if (ret)
8842 - goto err_out;
8843 -
8844 - return 0;
8845 -
8846 -err_out:
8847 - if (driver) {
8848 - kfree(driver);
8849 - cpufreq_us3_driver = NULL;
8850 - }
8851 - kfree(us3_freq_table);
8852 - us3_freq_table = NULL;
8853 - return ret;
8854 - }
8855 + impl == PANTHER_IMPL))
8856 + return cpufreq_register_driver(&cpufreq_us3_driver);
8857
8858 return -ENODEV;
8859 }
8860
8861 static void __exit us3_freq_exit(void)
8862 {
8863 - if (cpufreq_us3_driver) {
8864 - cpufreq_unregister_driver(cpufreq_us3_driver);
8865 - kfree(cpufreq_us3_driver);
8866 - cpufreq_us3_driver = NULL;
8867 - kfree(us3_freq_table);
8868 - us3_freq_table = NULL;
8869 - }
8870 + cpufreq_unregister_driver(&cpufreq_us3_driver);
8871 }
8872
8873 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
8874 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8875 index 8410065f2..4fd4ca22 100644
8876 --- a/arch/sparc/lib/Makefile
8877 +++ b/arch/sparc/lib/Makefile
8878 @@ -2,7 +2,7 @@
8879 #
8880
8881 asflags-y := -ansi -DST_DIV0=0x02
8882 -ccflags-y := -Werror
8883 +#ccflags-y := -Werror
8884
8885 lib-$(CONFIG_SPARC32) += ashrdi3.o
8886 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8887 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8888 index 85c233d..68500e0 100644
8889 --- a/arch/sparc/lib/atomic_64.S
8890 +++ b/arch/sparc/lib/atomic_64.S
8891 @@ -17,7 +17,12 @@
8892 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8893 BACKOFF_SETUP(%o2)
8894 1: lduw [%o1], %g1
8895 - add %g1, %o0, %g7
8896 + addcc %g1, %o0, %g7
8897 +
8898 +#ifdef CONFIG_PAX_REFCOUNT
8899 + tvs %icc, 6
8900 +#endif
8901 +
8902 cas [%o1], %g1, %g7
8903 cmp %g1, %g7
8904 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8905 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8906 2: BACKOFF_SPIN(%o2, %o3, 1b)
8907 ENDPROC(atomic_add)
8908
8909 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8910 + BACKOFF_SETUP(%o2)
8911 +1: lduw [%o1], %g1
8912 + add %g1, %o0, %g7
8913 + cas [%o1], %g1, %g7
8914 + cmp %g1, %g7
8915 + bne,pn %icc, 2f
8916 + nop
8917 + retl
8918 + nop
8919 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8920 +ENDPROC(atomic_add_unchecked)
8921 +
8922 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8923 BACKOFF_SETUP(%o2)
8924 1: lduw [%o1], %g1
8925 - sub %g1, %o0, %g7
8926 + subcc %g1, %o0, %g7
8927 +
8928 +#ifdef CONFIG_PAX_REFCOUNT
8929 + tvs %icc, 6
8930 +#endif
8931 +
8932 cas [%o1], %g1, %g7
8933 cmp %g1, %g7
8934 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8935 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8936 2: BACKOFF_SPIN(%o2, %o3, 1b)
8937 ENDPROC(atomic_sub)
8938
8939 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8940 + BACKOFF_SETUP(%o2)
8941 +1: lduw [%o1], %g1
8942 + sub %g1, %o0, %g7
8943 + cas [%o1], %g1, %g7
8944 + cmp %g1, %g7
8945 + bne,pn %icc, 2f
8946 + nop
8947 + retl
8948 + nop
8949 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8950 +ENDPROC(atomic_sub_unchecked)
8951 +
8952 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8953 BACKOFF_SETUP(%o2)
8954 1: lduw [%o1], %g1
8955 - add %g1, %o0, %g7
8956 + addcc %g1, %o0, %g7
8957 +
8958 +#ifdef CONFIG_PAX_REFCOUNT
8959 + tvs %icc, 6
8960 +#endif
8961 +
8962 cas [%o1], %g1, %g7
8963 cmp %g1, %g7
8964 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8965 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8966 2: BACKOFF_SPIN(%o2, %o3, 1b)
8967 ENDPROC(atomic_add_ret)
8968
8969 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8970 + BACKOFF_SETUP(%o2)
8971 +1: lduw [%o1], %g1
8972 + addcc %g1, %o0, %g7
8973 + cas [%o1], %g1, %g7
8974 + cmp %g1, %g7
8975 + bne,pn %icc, 2f
8976 + add %g7, %o0, %g7
8977 + sra %g7, 0, %o0
8978 + retl
8979 + nop
8980 +2: BACKOFF_SPIN(%o2, %o3, 1b)
8981 +ENDPROC(atomic_add_ret_unchecked)
8982 +
8983 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8984 BACKOFF_SETUP(%o2)
8985 1: lduw [%o1], %g1
8986 - sub %g1, %o0, %g7
8987 + subcc %g1, %o0, %g7
8988 +
8989 +#ifdef CONFIG_PAX_REFCOUNT
8990 + tvs %icc, 6
8991 +#endif
8992 +
8993 cas [%o1], %g1, %g7
8994 cmp %g1, %g7
8995 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8996 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8997 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8998 BACKOFF_SETUP(%o2)
8999 1: ldx [%o1], %g1
9000 - add %g1, %o0, %g7
9001 + addcc %g1, %o0, %g7
9002 +
9003 +#ifdef CONFIG_PAX_REFCOUNT
9004 + tvs %xcc, 6
9005 +#endif
9006 +
9007 casx [%o1], %g1, %g7
9008 cmp %g1, %g7
9009 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9010 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9011 2: BACKOFF_SPIN(%o2, %o3, 1b)
9012 ENDPROC(atomic64_add)
9013
9014 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9015 + BACKOFF_SETUP(%o2)
9016 +1: ldx [%o1], %g1
9017 + addcc %g1, %o0, %g7
9018 + casx [%o1], %g1, %g7
9019 + cmp %g1, %g7
9020 + bne,pn %xcc, 2f
9021 + nop
9022 + retl
9023 + nop
9024 +2: BACKOFF_SPIN(%o2, %o3, 1b)
9025 +ENDPROC(atomic64_add_unchecked)
9026 +
9027 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9028 BACKOFF_SETUP(%o2)
9029 1: ldx [%o1], %g1
9030 - sub %g1, %o0, %g7
9031 + subcc %g1, %o0, %g7
9032 +
9033 +#ifdef CONFIG_PAX_REFCOUNT
9034 + tvs %xcc, 6
9035 +#endif
9036 +
9037 casx [%o1], %g1, %g7
9038 cmp %g1, %g7
9039 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9040 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9041 2: BACKOFF_SPIN(%o2, %o3, 1b)
9042 ENDPROC(atomic64_sub)
9043
9044 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9045 + BACKOFF_SETUP(%o2)
9046 +1: ldx [%o1], %g1
9047 + subcc %g1, %o0, %g7
9048 + casx [%o1], %g1, %g7
9049 + cmp %g1, %g7
9050 + bne,pn %xcc, 2f
9051 + nop
9052 + retl
9053 + nop
9054 +2: BACKOFF_SPIN(%o2, %o3, 1b)
9055 +ENDPROC(atomic64_sub_unchecked)
9056 +
9057 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9058 BACKOFF_SETUP(%o2)
9059 1: ldx [%o1], %g1
9060 - add %g1, %o0, %g7
9061 + addcc %g1, %o0, %g7
9062 +
9063 +#ifdef CONFIG_PAX_REFCOUNT
9064 + tvs %xcc, 6
9065 +#endif
9066 +
9067 casx [%o1], %g1, %g7
9068 cmp %g1, %g7
9069 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9070 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9071 2: BACKOFF_SPIN(%o2, %o3, 1b)
9072 ENDPROC(atomic64_add_ret)
9073
9074 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9075 + BACKOFF_SETUP(%o2)
9076 +1: ldx [%o1], %g1
9077 + addcc %g1, %o0, %g7
9078 + casx [%o1], %g1, %g7
9079 + cmp %g1, %g7
9080 + bne,pn %xcc, 2f
9081 + add %g7, %o0, %g7
9082 + mov %g7, %o0
9083 + retl
9084 + nop
9085 +2: BACKOFF_SPIN(%o2, %o3, 1b)
9086 +ENDPROC(atomic64_add_ret_unchecked)
9087 +
9088 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9089 BACKOFF_SETUP(%o2)
9090 1: ldx [%o1], %g1
9091 - sub %g1, %o0, %g7
9092 + subcc %g1, %o0, %g7
9093 +
9094 +#ifdef CONFIG_PAX_REFCOUNT
9095 + tvs %xcc, 6
9096 +#endif
9097 +
9098 casx [%o1], %g1, %g7
9099 cmp %g1, %g7
9100 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9101 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9102 index 0c4e35e..745d3e4 100644
9103 --- a/arch/sparc/lib/ksyms.c
9104 +++ b/arch/sparc/lib/ksyms.c
9105 @@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9106
9107 /* Atomic counter implementation. */
9108 EXPORT_SYMBOL(atomic_add);
9109 +EXPORT_SYMBOL(atomic_add_unchecked);
9110 EXPORT_SYMBOL(atomic_add_ret);
9111 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
9112 EXPORT_SYMBOL(atomic_sub);
9113 +EXPORT_SYMBOL(atomic_sub_unchecked);
9114 EXPORT_SYMBOL(atomic_sub_ret);
9115 EXPORT_SYMBOL(atomic64_add);
9116 +EXPORT_SYMBOL(atomic64_add_unchecked);
9117 EXPORT_SYMBOL(atomic64_add_ret);
9118 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9119 EXPORT_SYMBOL(atomic64_sub);
9120 +EXPORT_SYMBOL(atomic64_sub_unchecked);
9121 EXPORT_SYMBOL(atomic64_sub_ret);
9122 EXPORT_SYMBOL(atomic64_dec_if_positive);
9123
9124 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9125 index 30c3ecc..736f015 100644
9126 --- a/arch/sparc/mm/Makefile
9127 +++ b/arch/sparc/mm/Makefile
9128 @@ -2,7 +2,7 @@
9129 #
9130
9131 asflags-y := -ansi
9132 -ccflags-y := -Werror
9133 +#ccflags-y := -Werror
9134
9135 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9136 obj-y += fault_$(BITS).o
9137 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9138 index e98bfda..ea8d221 100644
9139 --- a/arch/sparc/mm/fault_32.c
9140 +++ b/arch/sparc/mm/fault_32.c
9141 @@ -21,6 +21,9 @@
9142 #include <linux/perf_event.h>
9143 #include <linux/interrupt.h>
9144 #include <linux/kdebug.h>
9145 +#include <linux/slab.h>
9146 +#include <linux/pagemap.h>
9147 +#include <linux/compiler.h>
9148
9149 #include <asm/page.h>
9150 #include <asm/pgtable.h>
9151 @@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9152 return safe_compute_effective_address(regs, insn);
9153 }
9154
9155 +#ifdef CONFIG_PAX_PAGEEXEC
9156 +#ifdef CONFIG_PAX_DLRESOLVE
9157 +static void pax_emuplt_close(struct vm_area_struct *vma)
9158 +{
9159 + vma->vm_mm->call_dl_resolve = 0UL;
9160 +}
9161 +
9162 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9163 +{
9164 + unsigned int *kaddr;
9165 +
9166 + vmf->page = alloc_page(GFP_HIGHUSER);
9167 + if (!vmf->page)
9168 + return VM_FAULT_OOM;
9169 +
9170 + kaddr = kmap(vmf->page);
9171 + memset(kaddr, 0, PAGE_SIZE);
9172 + kaddr[0] = 0x9DE3BFA8U; /* save */
9173 + flush_dcache_page(vmf->page);
9174 + kunmap(vmf->page);
9175 + return VM_FAULT_MAJOR;
9176 +}
9177 +
9178 +static const struct vm_operations_struct pax_vm_ops = {
9179 + .close = pax_emuplt_close,
9180 + .fault = pax_emuplt_fault
9181 +};
9182 +
9183 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9184 +{
9185 + int ret;
9186 +
9187 + INIT_LIST_HEAD(&vma->anon_vma_chain);
9188 + vma->vm_mm = current->mm;
9189 + vma->vm_start = addr;
9190 + vma->vm_end = addr + PAGE_SIZE;
9191 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9192 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9193 + vma->vm_ops = &pax_vm_ops;
9194 +
9195 + ret = insert_vm_struct(current->mm, vma);
9196 + if (ret)
9197 + return ret;
9198 +
9199 + ++current->mm->total_vm;
9200 + return 0;
9201 +}
9202 +#endif
9203 +
9204 +/*
9205 + * PaX: decide what to do with offenders (regs->pc = fault address)
9206 + *
9207 + * returns 1 when task should be killed
9208 + * 2 when patched PLT trampoline was detected
9209 + * 3 when unpatched PLT trampoline was detected
9210 + */
9211 +static int pax_handle_fetch_fault(struct pt_regs *regs)
9212 +{
9213 +
9214 +#ifdef CONFIG_PAX_EMUPLT
9215 + int err;
9216 +
9217 + do { /* PaX: patched PLT emulation #1 */
9218 + unsigned int sethi1, sethi2, jmpl;
9219 +
9220 + err = get_user(sethi1, (unsigned int *)regs->pc);
9221 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9222 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9223 +
9224 + if (err)
9225 + break;
9226 +
9227 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9228 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
9229 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
9230 + {
9231 + unsigned int addr;
9232 +
9233 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9234 + addr = regs->u_regs[UREG_G1];
9235 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9236 + regs->pc = addr;
9237 + regs->npc = addr+4;
9238 + return 2;
9239 + }
9240 + } while (0);
9241 +
9242 + do { /* PaX: patched PLT emulation #2 */
9243 + unsigned int ba;
9244 +
9245 + err = get_user(ba, (unsigned int *)regs->pc);
9246 +
9247 + if (err)
9248 + break;
9249 +
9250 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9251 + unsigned int addr;
9252 +
9253 + if ((ba & 0xFFC00000U) == 0x30800000U)
9254 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9255 + else
9256 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9257 + regs->pc = addr;
9258 + regs->npc = addr+4;
9259 + return 2;
9260 + }
9261 + } while (0);
9262 +
9263 + do { /* PaX: patched PLT emulation #3 */
9264 + unsigned int sethi, bajmpl, nop;
9265 +
9266 + err = get_user(sethi, (unsigned int *)regs->pc);
9267 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9268 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
9269 +
9270 + if (err)
9271 + break;
9272 +
9273 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9274 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9275 + nop == 0x01000000U)
9276 + {
9277 + unsigned int addr;
9278 +
9279 + addr = (sethi & 0x003FFFFFU) << 10;
9280 + regs->u_regs[UREG_G1] = addr;
9281 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9282 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9283 + else
9284 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9285 + regs->pc = addr;
9286 + regs->npc = addr+4;
9287 + return 2;
9288 + }
9289 + } while (0);
9290 +
9291 + do { /* PaX: unpatched PLT emulation step 1 */
9292 + unsigned int sethi, ba, nop;
9293 +
9294 + err = get_user(sethi, (unsigned int *)regs->pc);
9295 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
9296 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
9297 +
9298 + if (err)
9299 + break;
9300 +
9301 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9302 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9303 + nop == 0x01000000U)
9304 + {
9305 + unsigned int addr, save, call;
9306 +
9307 + if ((ba & 0xFFC00000U) == 0x30800000U)
9308 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9309 + else
9310 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9311 +
9312 + err = get_user(save, (unsigned int *)addr);
9313 + err |= get_user(call, (unsigned int *)(addr+4));
9314 + err |= get_user(nop, (unsigned int *)(addr+8));
9315 + if (err)
9316 + break;
9317 +
9318 +#ifdef CONFIG_PAX_DLRESOLVE
9319 + if (save == 0x9DE3BFA8U &&
9320 + (call & 0xC0000000U) == 0x40000000U &&
9321 + nop == 0x01000000U)
9322 + {
9323 + struct vm_area_struct *vma;
9324 + unsigned long call_dl_resolve;
9325 +
9326 + down_read(&current->mm->mmap_sem);
9327 + call_dl_resolve = current->mm->call_dl_resolve;
9328 + up_read(&current->mm->mmap_sem);
9329 + if (likely(call_dl_resolve))
9330 + goto emulate;
9331 +
9332 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9333 +
9334 + down_write(&current->mm->mmap_sem);
9335 + if (current->mm->call_dl_resolve) {
9336 + call_dl_resolve = current->mm->call_dl_resolve;
9337 + up_write(&current->mm->mmap_sem);
9338 + if (vma)
9339 + kmem_cache_free(vm_area_cachep, vma);
9340 + goto emulate;
9341 + }
9342 +
9343 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9344 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9345 + up_write(&current->mm->mmap_sem);
9346 + if (vma)
9347 + kmem_cache_free(vm_area_cachep, vma);
9348 + return 1;
9349 + }
9350 +
9351 + if (pax_insert_vma(vma, call_dl_resolve)) {
9352 + up_write(&current->mm->mmap_sem);
9353 + kmem_cache_free(vm_area_cachep, vma);
9354 + return 1;
9355 + }
9356 +
9357 + current->mm->call_dl_resolve = call_dl_resolve;
9358 + up_write(&current->mm->mmap_sem);
9359 +
9360 +emulate:
9361 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9362 + regs->pc = call_dl_resolve;
9363 + regs->npc = addr+4;
9364 + return 3;
9365 + }
9366 +#endif
9367 +
9368 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9369 + if ((save & 0xFFC00000U) == 0x05000000U &&
9370 + (call & 0xFFFFE000U) == 0x85C0A000U &&
9371 + nop == 0x01000000U)
9372 + {
9373 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9374 + regs->u_regs[UREG_G2] = addr + 4;
9375 + addr = (save & 0x003FFFFFU) << 10;
9376 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9377 + regs->pc = addr;
9378 + regs->npc = addr+4;
9379 + return 3;
9380 + }
9381 + }
9382 + } while (0);
9383 +
9384 + do { /* PaX: unpatched PLT emulation step 2 */
9385 + unsigned int save, call, nop;
9386 +
9387 + err = get_user(save, (unsigned int *)(regs->pc-4));
9388 + err |= get_user(call, (unsigned int *)regs->pc);
9389 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
9390 + if (err)
9391 + break;
9392 +
9393 + if (save == 0x9DE3BFA8U &&
9394 + (call & 0xC0000000U) == 0x40000000U &&
9395 + nop == 0x01000000U)
9396 + {
9397 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9398 +
9399 + regs->u_regs[UREG_RETPC] = regs->pc;
9400 + regs->pc = dl_resolve;
9401 + regs->npc = dl_resolve+4;
9402 + return 3;
9403 + }
9404 + } while (0);
9405 +#endif
9406 +
9407 + return 1;
9408 +}
9409 +
9410 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9411 +{
9412 + unsigned long i;
9413 +
9414 + printk(KERN_ERR "PAX: bytes at PC: ");
9415 + for (i = 0; i < 8; i++) {
9416 + unsigned int c;
9417 + if (get_user(c, (unsigned int *)pc+i))
9418 + printk(KERN_CONT "???????? ");
9419 + else
9420 + printk(KERN_CONT "%08x ", c);
9421 + }
9422 + printk("\n");
9423 +}
9424 +#endif
9425 +
9426 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9427 int text_fault)
9428 {
9429 @@ -230,6 +504,24 @@ good_area:
9430 if (!(vma->vm_flags & VM_WRITE))
9431 goto bad_area;
9432 } else {
9433 +
9434 +#ifdef CONFIG_PAX_PAGEEXEC
9435 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9436 + up_read(&mm->mmap_sem);
9437 + switch (pax_handle_fetch_fault(regs)) {
9438 +
9439 +#ifdef CONFIG_PAX_EMUPLT
9440 + case 2:
9441 + case 3:
9442 + return;
9443 +#endif
9444 +
9445 + }
9446 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9447 + do_group_exit(SIGKILL);
9448 + }
9449 +#endif
9450 +
9451 /* Allow reads even for write-only mappings */
9452 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9453 goto bad_area;
9454 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9455 index 5062ff3..e0b75f3 100644
9456 --- a/arch/sparc/mm/fault_64.c
9457 +++ b/arch/sparc/mm/fault_64.c
9458 @@ -21,6 +21,9 @@
9459 #include <linux/kprobes.h>
9460 #include <linux/kdebug.h>
9461 #include <linux/percpu.h>
9462 +#include <linux/slab.h>
9463 +#include <linux/pagemap.h>
9464 +#include <linux/compiler.h>
9465
9466 #include <asm/page.h>
9467 #include <asm/pgtable.h>
9468 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9469 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9470 regs->tpc);
9471 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9472 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9473 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9474 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9475 dump_stack();
9476 unhandled_fault(regs->tpc, current, regs);
9477 @@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9478 show_regs(regs);
9479 }
9480
9481 +#ifdef CONFIG_PAX_PAGEEXEC
9482 +#ifdef CONFIG_PAX_DLRESOLVE
9483 +static void pax_emuplt_close(struct vm_area_struct *vma)
9484 +{
9485 + vma->vm_mm->call_dl_resolve = 0UL;
9486 +}
9487 +
9488 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9489 +{
9490 + unsigned int *kaddr;
9491 +
9492 + vmf->page = alloc_page(GFP_HIGHUSER);
9493 + if (!vmf->page)
9494 + return VM_FAULT_OOM;
9495 +
9496 + kaddr = kmap(vmf->page);
9497 + memset(kaddr, 0, PAGE_SIZE);
9498 + kaddr[0] = 0x9DE3BFA8U; /* save */
9499 + flush_dcache_page(vmf->page);
9500 + kunmap(vmf->page);
9501 + return VM_FAULT_MAJOR;
9502 +}
9503 +
9504 +static const struct vm_operations_struct pax_vm_ops = {
9505 + .close = pax_emuplt_close,
9506 + .fault = pax_emuplt_fault
9507 +};
9508 +
9509 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9510 +{
9511 + int ret;
9512 +
9513 + INIT_LIST_HEAD(&vma->anon_vma_chain);
9514 + vma->vm_mm = current->mm;
9515 + vma->vm_start = addr;
9516 + vma->vm_end = addr + PAGE_SIZE;
9517 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9518 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9519 + vma->vm_ops = &pax_vm_ops;
9520 +
9521 + ret = insert_vm_struct(current->mm, vma);
9522 + if (ret)
9523 + return ret;
9524 +
9525 + ++current->mm->total_vm;
9526 + return 0;
9527 +}
9528 +#endif
9529 +
9530 +/*
9531 + * PaX: decide what to do with offenders (regs->tpc = fault address)
9532 + *
9533 + * returns 1 when task should be killed
9534 + * 2 when patched PLT trampoline was detected
9535 + * 3 when unpatched PLT trampoline was detected
9536 + */
9537 +static int pax_handle_fetch_fault(struct pt_regs *regs)
9538 +{
9539 +
9540 +#ifdef CONFIG_PAX_EMUPLT
9541 + int err;
9542 +
9543 + do { /* PaX: patched PLT emulation #1 */
9544 + unsigned int sethi1, sethi2, jmpl;
9545 +
9546 + err = get_user(sethi1, (unsigned int *)regs->tpc);
9547 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9548 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9549 +
9550 + if (err)
9551 + break;
9552 +
9553 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9554 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
9555 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
9556 + {
9557 + unsigned long addr;
9558 +
9559 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9560 + addr = regs->u_regs[UREG_G1];
9561 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9562 +
9563 + if (test_thread_flag(TIF_32BIT))
9564 + addr &= 0xFFFFFFFFUL;
9565 +
9566 + regs->tpc = addr;
9567 + regs->tnpc = addr+4;
9568 + return 2;
9569 + }
9570 + } while (0);
9571 +
9572 + do { /* PaX: patched PLT emulation #2 */
9573 + unsigned int ba;
9574 +
9575 + err = get_user(ba, (unsigned int *)regs->tpc);
9576 +
9577 + if (err)
9578 + break;
9579 +
9580 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9581 + unsigned long addr;
9582 +
9583 + if ((ba & 0xFFC00000U) == 0x30800000U)
9584 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9585 + else
9586 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9587 +
9588 + if (test_thread_flag(TIF_32BIT))
9589 + addr &= 0xFFFFFFFFUL;
9590 +
9591 + regs->tpc = addr;
9592 + regs->tnpc = addr+4;
9593 + return 2;
9594 + }
9595 + } while (0);
9596 +
9597 + do { /* PaX: patched PLT emulation #3 */
9598 + unsigned int sethi, bajmpl, nop;
9599 +
9600 + err = get_user(sethi, (unsigned int *)regs->tpc);
9601 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9602 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9603 +
9604 + if (err)
9605 + break;
9606 +
9607 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9608 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9609 + nop == 0x01000000U)
9610 + {
9611 + unsigned long addr;
9612 +
9613 + addr = (sethi & 0x003FFFFFU) << 10;
9614 + regs->u_regs[UREG_G1] = addr;
9615 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9616 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9617 + else
9618 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9619 +
9620 + if (test_thread_flag(TIF_32BIT))
9621 + addr &= 0xFFFFFFFFUL;
9622 +
9623 + regs->tpc = addr;
9624 + regs->tnpc = addr+4;
9625 + return 2;
9626 + }
9627 + } while (0);
9628 +
9629 + do { /* PaX: patched PLT emulation #4 */
9630 + unsigned int sethi, mov1, call, mov2;
9631 +
9632 + err = get_user(sethi, (unsigned int *)regs->tpc);
9633 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9634 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
9635 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9636 +
9637 + if (err)
9638 + break;
9639 +
9640 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9641 + mov1 == 0x8210000FU &&
9642 + (call & 0xC0000000U) == 0x40000000U &&
9643 + mov2 == 0x9E100001U)
9644 + {
9645 + unsigned long addr;
9646 +
9647 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9648 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9649 +
9650 + if (test_thread_flag(TIF_32BIT))
9651 + addr &= 0xFFFFFFFFUL;
9652 +
9653 + regs->tpc = addr;
9654 + regs->tnpc = addr+4;
9655 + return 2;
9656 + }
9657 + } while (0);
9658 +
9659 + do { /* PaX: patched PLT emulation #5 */
9660 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9661 +
9662 + err = get_user(sethi, (unsigned int *)regs->tpc);
9663 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9664 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9665 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9666 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9667 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9668 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9669 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9670 +
9671 + if (err)
9672 + break;
9673 +
9674 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9675 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
9676 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9677 + (or1 & 0xFFFFE000U) == 0x82106000U &&
9678 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
9679 + sllx == 0x83287020U &&
9680 + jmpl == 0x81C04005U &&
9681 + nop == 0x01000000U)
9682 + {
9683 + unsigned long addr;
9684 +
9685 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9686 + regs->u_regs[UREG_G1] <<= 32;
9687 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9688 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9689 + regs->tpc = addr;
9690 + regs->tnpc = addr+4;
9691 + return 2;
9692 + }
9693 + } while (0);
9694 +
9695 + do { /* PaX: patched PLT emulation #6 */
9696 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9697 +
9698 + err = get_user(sethi, (unsigned int *)regs->tpc);
9699 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9700 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9701 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9702 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
9703 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9704 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9705 +
9706 + if (err)
9707 + break;
9708 +
9709 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9710 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
9711 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9712 + sllx == 0x83287020U &&
9713 + (or & 0xFFFFE000U) == 0x8A116000U &&
9714 + jmpl == 0x81C04005U &&
9715 + nop == 0x01000000U)
9716 + {
9717 + unsigned long addr;
9718 +
9719 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9720 + regs->u_regs[UREG_G1] <<= 32;
9721 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9722 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9723 + regs->tpc = addr;
9724 + regs->tnpc = addr+4;
9725 + return 2;
9726 + }
9727 + } while (0);
9728 +
9729 + do { /* PaX: unpatched PLT emulation step 1 */
9730 + unsigned int sethi, ba, nop;
9731 +
9732 + err = get_user(sethi, (unsigned int *)regs->tpc);
9733 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9734 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9735 +
9736 + if (err)
9737 + break;
9738 +
9739 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9740 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9741 + nop == 0x01000000U)
9742 + {
9743 + unsigned long addr;
9744 + unsigned int save, call;
9745 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9746 +
9747 + if ((ba & 0xFFC00000U) == 0x30800000U)
9748 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9749 + else
9750 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9751 +
9752 + if (test_thread_flag(TIF_32BIT))
9753 + addr &= 0xFFFFFFFFUL;
9754 +
9755 + err = get_user(save, (unsigned int *)addr);
9756 + err |= get_user(call, (unsigned int *)(addr+4));
9757 + err |= get_user(nop, (unsigned int *)(addr+8));
9758 + if (err)
9759 + break;
9760 +
9761 +#ifdef CONFIG_PAX_DLRESOLVE
9762 + if (save == 0x9DE3BFA8U &&
9763 + (call & 0xC0000000U) == 0x40000000U &&
9764 + nop == 0x01000000U)
9765 + {
9766 + struct vm_area_struct *vma;
9767 + unsigned long call_dl_resolve;
9768 +
9769 + down_read(&current->mm->mmap_sem);
9770 + call_dl_resolve = current->mm->call_dl_resolve;
9771 + up_read(&current->mm->mmap_sem);
9772 + if (likely(call_dl_resolve))
9773 + goto emulate;
9774 +
9775 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9776 +
9777 + down_write(&current->mm->mmap_sem);
9778 + if (current->mm->call_dl_resolve) {
9779 + call_dl_resolve = current->mm->call_dl_resolve;
9780 + up_write(&current->mm->mmap_sem);
9781 + if (vma)
9782 + kmem_cache_free(vm_area_cachep, vma);
9783 + goto emulate;
9784 + }
9785 +
9786 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9787 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9788 + up_write(&current->mm->mmap_sem);
9789 + if (vma)
9790 + kmem_cache_free(vm_area_cachep, vma);
9791 + return 1;
9792 + }
9793 +
9794 + if (pax_insert_vma(vma, call_dl_resolve)) {
9795 + up_write(&current->mm->mmap_sem);
9796 + kmem_cache_free(vm_area_cachep, vma);
9797 + return 1;
9798 + }
9799 +
9800 + current->mm->call_dl_resolve = call_dl_resolve;
9801 + up_write(&current->mm->mmap_sem);
9802 +
9803 +emulate:
9804 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9805 + regs->tpc = call_dl_resolve;
9806 + regs->tnpc = addr+4;
9807 + return 3;
9808 + }
9809 +#endif
9810 +
9811 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9812 + if ((save & 0xFFC00000U) == 0x05000000U &&
9813 + (call & 0xFFFFE000U) == 0x85C0A000U &&
9814 + nop == 0x01000000U)
9815 + {
9816 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9817 + regs->u_regs[UREG_G2] = addr + 4;
9818 + addr = (save & 0x003FFFFFU) << 10;
9819 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9820 +
9821 + if (test_thread_flag(TIF_32BIT))
9822 + addr &= 0xFFFFFFFFUL;
9823 +
9824 + regs->tpc = addr;
9825 + regs->tnpc = addr+4;
9826 + return 3;
9827 + }
9828 +
9829 + /* PaX: 64-bit PLT stub */
9830 + err = get_user(sethi1, (unsigned int *)addr);
9831 + err |= get_user(sethi2, (unsigned int *)(addr+4));
9832 + err |= get_user(or1, (unsigned int *)(addr+8));
9833 + err |= get_user(or2, (unsigned int *)(addr+12));
9834 + err |= get_user(sllx, (unsigned int *)(addr+16));
9835 + err |= get_user(add, (unsigned int *)(addr+20));
9836 + err |= get_user(jmpl, (unsigned int *)(addr+24));
9837 + err |= get_user(nop, (unsigned int *)(addr+28));
9838 + if (err)
9839 + break;
9840 +
9841 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9842 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9843 + (or1 & 0xFFFFE000U) == 0x88112000U &&
9844 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
9845 + sllx == 0x89293020U &&
9846 + add == 0x8A010005U &&
9847 + jmpl == 0x89C14000U &&
9848 + nop == 0x01000000U)
9849 + {
9850 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9851 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9852 + regs->u_regs[UREG_G4] <<= 32;
9853 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9854 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9855 + regs->u_regs[UREG_G4] = addr + 24;
9856 + addr = regs->u_regs[UREG_G5];
9857 + regs->tpc = addr;
9858 + regs->tnpc = addr+4;
9859 + return 3;
9860 + }
9861 + }
9862 + } while (0);
9863 +
9864 +#ifdef CONFIG_PAX_DLRESOLVE
9865 + do { /* PaX: unpatched PLT emulation step 2 */
9866 + unsigned int save, call, nop;
9867 +
9868 + err = get_user(save, (unsigned int *)(regs->tpc-4));
9869 + err |= get_user(call, (unsigned int *)regs->tpc);
9870 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9871 + if (err)
9872 + break;
9873 +
9874 + if (save == 0x9DE3BFA8U &&
9875 + (call & 0xC0000000U) == 0x40000000U &&
9876 + nop == 0x01000000U)
9877 + {
9878 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9879 +
9880 + if (test_thread_flag(TIF_32BIT))
9881 + dl_resolve &= 0xFFFFFFFFUL;
9882 +
9883 + regs->u_regs[UREG_RETPC] = regs->tpc;
9884 + regs->tpc = dl_resolve;
9885 + regs->tnpc = dl_resolve+4;
9886 + return 3;
9887 + }
9888 + } while (0);
9889 +#endif
9890 +
9891 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9892 + unsigned int sethi, ba, nop;
9893 +
9894 + err = get_user(sethi, (unsigned int *)regs->tpc);
9895 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9896 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9897 +
9898 + if (err)
9899 + break;
9900 +
9901 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
9902 + (ba & 0xFFF00000U) == 0x30600000U &&
9903 + nop == 0x01000000U)
9904 + {
9905 + unsigned long addr;
9906 +
9907 + addr = (sethi & 0x003FFFFFU) << 10;
9908 + regs->u_regs[UREG_G1] = addr;
9909 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9910 +
9911 + if (test_thread_flag(TIF_32BIT))
9912 + addr &= 0xFFFFFFFFUL;
9913 +
9914 + regs->tpc = addr;
9915 + regs->tnpc = addr+4;
9916 + return 2;
9917 + }
9918 + } while (0);
9919 +
9920 +#endif
9921 +
9922 + return 1;
9923 +}
9924 +
9925 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9926 +{
9927 + unsigned long i;
9928 +
9929 + printk(KERN_ERR "PAX: bytes at PC: ");
9930 + for (i = 0; i < 8; i++) {
9931 + unsigned int c;
9932 + if (get_user(c, (unsigned int *)pc+i))
9933 + printk(KERN_CONT "???????? ");
9934 + else
9935 + printk(KERN_CONT "%08x ", c);
9936 + }
9937 + printk("\n");
9938 +}
9939 +#endif
9940 +
9941 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9942 {
9943 struct mm_struct *mm = current->mm;
9944 @@ -341,6 +804,29 @@ retry:
9945 if (!vma)
9946 goto bad_area;
9947
9948 +#ifdef CONFIG_PAX_PAGEEXEC
9949 + /* PaX: detect ITLB misses on non-exec pages */
9950 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9951 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9952 + {
9953 + if (address != regs->tpc)
9954 + goto good_area;
9955 +
9956 + up_read(&mm->mmap_sem);
9957 + switch (pax_handle_fetch_fault(regs)) {
9958 +
9959 +#ifdef CONFIG_PAX_EMUPLT
9960 + case 2:
9961 + case 3:
9962 + return;
9963 +#endif
9964 +
9965 + }
9966 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9967 + do_group_exit(SIGKILL);
9968 + }
9969 +#endif
9970 +
9971 /* Pure DTLB misses do not tell us whether the fault causing
9972 * load/store/atomic was a write or not, it only says that there
9973 * was no match. So in such a case we (carefully) read the
9974 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9975 index d2b5944..bd813f2 100644
9976 --- a/arch/sparc/mm/hugetlbpage.c
9977 +++ b/arch/sparc/mm/hugetlbpage.c
9978 @@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9979
9980 info.flags = 0;
9981 info.length = len;
9982 - info.low_limit = TASK_UNMAPPED_BASE;
9983 + info.low_limit = mm->mmap_base;
9984 info.high_limit = min(task_size, VA_EXCLUDE_START);
9985 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9986 info.align_offset = 0;
9987 @@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9988 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9989 VM_BUG_ON(addr != -ENOMEM);
9990 info.low_limit = VA_EXCLUDE_END;
9991 +
9992 +#ifdef CONFIG_PAX_RANDMMAP
9993 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9994 + info.low_limit += mm->delta_mmap;
9995 +#endif
9996 +
9997 info.high_limit = task_size;
9998 addr = vm_unmapped_area(&info);
9999 }
10000 @@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10001 VM_BUG_ON(addr != -ENOMEM);
10002 info.flags = 0;
10003 info.low_limit = TASK_UNMAPPED_BASE;
10004 +
10005 +#ifdef CONFIG_PAX_RANDMMAP
10006 + if (mm->pax_flags & MF_PAX_RANDMMAP)
10007 + info.low_limit += mm->delta_mmap;
10008 +#endif
10009 +
10010 info.high_limit = STACK_TOP32;
10011 addr = vm_unmapped_area(&info);
10012 }
10013 @@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10014 struct mm_struct *mm = current->mm;
10015 struct vm_area_struct *vma;
10016 unsigned long task_size = TASK_SIZE;
10017 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10018
10019 if (test_thread_flag(TIF_32BIT))
10020 task_size = STACK_TOP32;
10021 @@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10022 return addr;
10023 }
10024
10025 +#ifdef CONFIG_PAX_RANDMMAP
10026 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10027 +#endif
10028 +
10029 if (addr) {
10030 addr = ALIGN(addr, HPAGE_SIZE);
10031 vma = find_vma(mm, addr);
10032 - if (task_size - len >= addr &&
10033 - (!vma || addr + len <= vma->vm_start))
10034 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10035 return addr;
10036 }
10037 if (mm->get_unmapped_area == arch_get_unmapped_area)
10038 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10039 index f4500c6..889656c 100644
10040 --- a/arch/tile/include/asm/atomic_64.h
10041 +++ b/arch/tile/include/asm/atomic_64.h
10042 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10043
10044 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10045
10046 +#define atomic64_read_unchecked(v) atomic64_read(v)
10047 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10048 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10049 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10050 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10051 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
10052 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10053 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
10054 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10055 +
10056 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10057 #define smp_mb__before_atomic_dec() smp_mb()
10058 #define smp_mb__after_atomic_dec() smp_mb()
10059 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10060 index a9a5299..0fce79e 100644
10061 --- a/arch/tile/include/asm/cache.h
10062 +++ b/arch/tile/include/asm/cache.h
10063 @@ -15,11 +15,12 @@
10064 #ifndef _ASM_TILE_CACHE_H
10065 #define _ASM_TILE_CACHE_H
10066
10067 +#include <linux/const.h>
10068 #include <arch/chip.h>
10069
10070 /* bytes per L1 data cache line */
10071 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10072 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10073 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10074
10075 /* bytes per L2 cache line */
10076 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
10077 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10078 index 9ab078a..d6635c2 100644
10079 --- a/arch/tile/include/asm/uaccess.h
10080 +++ b/arch/tile/include/asm/uaccess.h
10081 @@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10082 const void __user *from,
10083 unsigned long n)
10084 {
10085 - int sz = __compiletime_object_size(to);
10086 + size_t sz = __compiletime_object_size(to);
10087
10088 - if (likely(sz == -1 || sz >= n))
10089 + if (likely(sz == (size_t)-1 || sz >= n))
10090 n = _copy_from_user(to, from, n);
10091 else
10092 copy_from_user_overflow();
10093 diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
10094 index 650ccff..45fe2d6 100644
10095 --- a/arch/tile/mm/hugetlbpage.c
10096 +++ b/arch/tile/mm/hugetlbpage.c
10097 @@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
10098 info.high_limit = TASK_SIZE;
10099 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10100 info.align_offset = 0;
10101 + info.threadstack_offset = 0;
10102 return vm_unmapped_area(&info);
10103 }
10104
10105 @@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
10106 info.high_limit = current->mm->mmap_base;
10107 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10108 info.align_offset = 0;
10109 + info.threadstack_offset = 0;
10110 addr = vm_unmapped_area(&info);
10111
10112 /*
10113 diff --git a/arch/um/Makefile b/arch/um/Makefile
10114 index 133f7de..1d6f2f1 100644
10115 --- a/arch/um/Makefile
10116 +++ b/arch/um/Makefile
10117 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10118 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10119 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10120
10121 +ifdef CONSTIFY_PLUGIN
10122 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10123 +endif
10124 +
10125 #This will adjust *FLAGS accordingly to the platform.
10126 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10127
10128 diff --git a/arch/um/defconfig b/arch/um/defconfig
10129 index 08107a7..ab22afe 100644
10130 --- a/arch/um/defconfig
10131 +++ b/arch/um/defconfig
10132 @@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
10133 CONFIG_X86_L1_CACHE_SHIFT=5
10134 CONFIG_X86_XADD=y
10135 CONFIG_X86_PPRO_FENCE=y
10136 -CONFIG_X86_WP_WORKS_OK=y
10137 CONFIG_X86_INVLPG=y
10138 CONFIG_X86_BSWAP=y
10139 CONFIG_X86_POPAD_OK=y
10140 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10141 index 19e1bdd..3665b77 100644
10142 --- a/arch/um/include/asm/cache.h
10143 +++ b/arch/um/include/asm/cache.h
10144 @@ -1,6 +1,7 @@
10145 #ifndef __UM_CACHE_H
10146 #define __UM_CACHE_H
10147
10148 +#include <linux/const.h>
10149
10150 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10151 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10152 @@ -12,6 +13,6 @@
10153 # define L1_CACHE_SHIFT 5
10154 #endif
10155
10156 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10157 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10158
10159 #endif
10160 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10161 index 2e0a6b1..a64d0f5 100644
10162 --- a/arch/um/include/asm/kmap_types.h
10163 +++ b/arch/um/include/asm/kmap_types.h
10164 @@ -8,6 +8,6 @@
10165
10166 /* No more #include "asm/arch/kmap_types.h" ! */
10167
10168 -#define KM_TYPE_NR 14
10169 +#define KM_TYPE_NR 15
10170
10171 #endif
10172 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10173 index 5ff53d9..5850cdf 100644
10174 --- a/arch/um/include/asm/page.h
10175 +++ b/arch/um/include/asm/page.h
10176 @@ -14,6 +14,9 @@
10177 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10178 #define PAGE_MASK (~(PAGE_SIZE-1))
10179
10180 +#define ktla_ktva(addr) (addr)
10181 +#define ktva_ktla(addr) (addr)
10182 +
10183 #ifndef __ASSEMBLY__
10184
10185 struct page;
10186 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10187 index 0032f92..cd151e0 100644
10188 --- a/arch/um/include/asm/pgtable-3level.h
10189 +++ b/arch/um/include/asm/pgtable-3level.h
10190 @@ -58,6 +58,7 @@
10191 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10192 #define pud_populate(mm, pud, pmd) \
10193 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10194 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10195
10196 #ifdef CONFIG_64BIT
10197 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10198 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10199 index b462b13..e7a19aa 100644
10200 --- a/arch/um/kernel/process.c
10201 +++ b/arch/um/kernel/process.c
10202 @@ -386,22 +386,6 @@ int singlestepping(void * t)
10203 return 2;
10204 }
10205
10206 -/*
10207 - * Only x86 and x86_64 have an arch_align_stack().
10208 - * All other arches have "#define arch_align_stack(x) (x)"
10209 - * in their asm/system.h
10210 - * As this is included in UML from asm-um/system-generic.h,
10211 - * we can use it to behave as the subarch does.
10212 - */
10213 -#ifndef arch_align_stack
10214 -unsigned long arch_align_stack(unsigned long sp)
10215 -{
10216 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10217 - sp -= get_random_int() % 8192;
10218 - return sp & ~0xf;
10219 -}
10220 -#endif
10221 -
10222 unsigned long get_wchan(struct task_struct *p)
10223 {
10224 unsigned long stack_page, sp, ip;
10225 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10226 index ad8f795..2c7eec6 100644
10227 --- a/arch/unicore32/include/asm/cache.h
10228 +++ b/arch/unicore32/include/asm/cache.h
10229 @@ -12,8 +12,10 @@
10230 #ifndef __UNICORE_CACHE_H__
10231 #define __UNICORE_CACHE_H__
10232
10233 -#define L1_CACHE_SHIFT (5)
10234 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10235 +#include <linux/const.h>
10236 +
10237 +#define L1_CACHE_SHIFT 5
10238 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10239
10240 /*
10241 * Memory returned by kmalloc() may be used for DMA, so we must make
10242 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10243 index 15b5cef..173babc 100644
10244 --- a/arch/x86/Kconfig
10245 +++ b/arch/x86/Kconfig
10246 @@ -244,7 +244,7 @@ config X86_HT
10247
10248 config X86_32_LAZY_GS
10249 def_bool y
10250 - depends on X86_32 && !CC_STACKPROTECTOR
10251 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10252
10253 config ARCH_HWEIGHT_CFLAGS
10254 string
10255 @@ -1077,6 +1077,7 @@ config MICROCODE_EARLY
10256
10257 config X86_MSR
10258 tristate "/dev/cpu/*/msr - Model-specific register support"
10259 + depends on !GRKERNSEC_KMEM
10260 ---help---
10261 This device gives privileged processes access to the x86
10262 Model-Specific Registers (MSRs). It is a character device with
10263 @@ -1100,7 +1101,7 @@ choice
10264
10265 config NOHIGHMEM
10266 bool "off"
10267 - depends on !X86_NUMAQ
10268 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10269 ---help---
10270 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10271 However, the address space of 32-bit x86 processors is only 4
10272 @@ -1137,7 +1138,7 @@ config NOHIGHMEM
10273
10274 config HIGHMEM4G
10275 bool "4GB"
10276 - depends on !X86_NUMAQ
10277 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10278 ---help---
10279 Select this if you have a 32-bit processor and between 1 and 4
10280 gigabytes of physical RAM.
10281 @@ -1190,7 +1191,7 @@ config PAGE_OFFSET
10282 hex
10283 default 0xB0000000 if VMSPLIT_3G_OPT
10284 default 0x80000000 if VMSPLIT_2G
10285 - default 0x78000000 if VMSPLIT_2G_OPT
10286 + default 0x70000000 if VMSPLIT_2G_OPT
10287 default 0x40000000 if VMSPLIT_1G
10288 default 0xC0000000
10289 depends on X86_32
10290 @@ -1588,6 +1589,7 @@ config SECCOMP
10291
10292 config CC_STACKPROTECTOR
10293 bool "Enable -fstack-protector buffer overflow detection"
10294 + depends on X86_64 || !PAX_MEMORY_UDEREF
10295 ---help---
10296 This option turns on the -fstack-protector GCC feature. This
10297 feature puts, at the beginning of functions, a canary value on
10298 @@ -1707,6 +1709,8 @@ config X86_NEED_RELOCS
10299 config PHYSICAL_ALIGN
10300 hex "Alignment value to which kernel should be aligned" if X86_32
10301 default "0x1000000"
10302 + range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
10303 + range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
10304 range 0x2000 0x1000000
10305 ---help---
10306 This value puts the alignment restrictions on physical address
10307 @@ -1782,9 +1786,10 @@ config DEBUG_HOTPLUG_CPU0
10308 If unsure, say N.
10309
10310 config COMPAT_VDSO
10311 - def_bool y
10312 + def_bool n
10313 prompt "Compat VDSO support"
10314 depends on X86_32 || IA32_EMULATION
10315 + depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10316 ---help---
10317 Map the 32-bit VDSO to the predictable old-style address too.
10318
10319 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10320 index c026cca..14657ae 100644
10321 --- a/arch/x86/Kconfig.cpu
10322 +++ b/arch/x86/Kconfig.cpu
10323 @@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10324
10325 config X86_F00F_BUG
10326 def_bool y
10327 - depends on M586MMX || M586TSC || M586 || M486
10328 + depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10329
10330 config X86_INVD_BUG
10331 def_bool y
10332 @@ -327,7 +327,7 @@ config X86_INVD_BUG
10333
10334 config X86_ALIGNMENT_16
10335 def_bool y
10336 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10337 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10338
10339 config X86_INTEL_USERCOPY
10340 def_bool y
10341 @@ -373,7 +373,7 @@ config X86_CMPXCHG64
10342 # generates cmov.
10343 config X86_CMOV
10344 def_bool y
10345 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10346 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10347
10348 config X86_MINIMUM_CPU_FAMILY
10349 int
10350 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10351 index b322f12..652d0d9 100644
10352 --- a/arch/x86/Kconfig.debug
10353 +++ b/arch/x86/Kconfig.debug
10354 @@ -84,7 +84,7 @@ config X86_PTDUMP
10355 config DEBUG_RODATA
10356 bool "Write protect kernel read-only data structures"
10357 default y
10358 - depends on DEBUG_KERNEL
10359 + depends on DEBUG_KERNEL && BROKEN
10360 ---help---
10361 Mark the kernel read-only data as write-protected in the pagetables,
10362 in order to catch accidental (and incorrect) writes to such const
10363 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10364
10365 config DEBUG_SET_MODULE_RONX
10366 bool "Set loadable kernel module data as NX and text as RO"
10367 - depends on MODULES
10368 + depends on MODULES && BROKEN
10369 ---help---
10370 This option helps catch unintended modifications to loadable
10371 kernel module's text and read-only data. It also prevents execution
10372 @@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10373
10374 config DEBUG_STRICT_USER_COPY_CHECKS
10375 bool "Strict copy size checks"
10376 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10377 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10378 ---help---
10379 Enabling this option turns a certain set of sanity checks for user
10380 copy operations into compile time failures.
10381 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10382 index 5c47726..8c4fa67 100644
10383 --- a/arch/x86/Makefile
10384 +++ b/arch/x86/Makefile
10385 @@ -54,6 +54,7 @@ else
10386 UTS_MACHINE := x86_64
10387 CHECKFLAGS += -D__x86_64__ -m64
10388
10389 + biarch := $(call cc-option,-m64)
10390 KBUILD_AFLAGS += -m64
10391 KBUILD_CFLAGS += -m64
10392
10393 @@ -234,3 +235,12 @@ define archhelp
10394 echo ' FDARGS="..." arguments for the booted kernel'
10395 echo ' FDINITRD=file initrd for the booted kernel'
10396 endef
10397 +
10398 +define OLD_LD
10399 +
10400 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10401 +*** Please upgrade your binutils to 2.18 or newer
10402 +endef
10403 +
10404 +archprepare:
10405 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10406 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10407 index 379814b..add62ce 100644
10408 --- a/arch/x86/boot/Makefile
10409 +++ b/arch/x86/boot/Makefile
10410 @@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10411 $(call cc-option, -fno-stack-protector) \
10412 $(call cc-option, -mpreferred-stack-boundary=2)
10413 KBUILD_CFLAGS += $(call cc-option, -m32)
10414 +ifdef CONSTIFY_PLUGIN
10415 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10416 +endif
10417 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10418 GCOV_PROFILE := n
10419
10420 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10421 index 878e4b9..20537ab 100644
10422 --- a/arch/x86/boot/bitops.h
10423 +++ b/arch/x86/boot/bitops.h
10424 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10425 u8 v;
10426 const u32 *p = (const u32 *)addr;
10427
10428 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10429 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10430 return v;
10431 }
10432
10433 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10434
10435 static inline void set_bit(int nr, void *addr)
10436 {
10437 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10438 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10439 }
10440
10441 #endif /* BOOT_BITOPS_H */
10442 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10443 index 5b75319..331a4ca 100644
10444 --- a/arch/x86/boot/boot.h
10445 +++ b/arch/x86/boot/boot.h
10446 @@ -85,7 +85,7 @@ static inline void io_delay(void)
10447 static inline u16 ds(void)
10448 {
10449 u16 seg;
10450 - asm("movw %%ds,%0" : "=rm" (seg));
10451 + asm volatile("movw %%ds,%0" : "=rm" (seg));
10452 return seg;
10453 }
10454
10455 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10456 static inline int memcmp(const void *s1, const void *s2, size_t len)
10457 {
10458 u8 diff;
10459 - asm("repe; cmpsb; setnz %0"
10460 + asm volatile("repe; cmpsb; setnz %0"
10461 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10462 return diff;
10463 }
10464 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10465 index 5ef205c..342191d 100644
10466 --- a/arch/x86/boot/compressed/Makefile
10467 +++ b/arch/x86/boot/compressed/Makefile
10468 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10469 KBUILD_CFLAGS += $(cflags-y)
10470 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10471 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10472 +ifdef CONSTIFY_PLUGIN
10473 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10474 +endif
10475
10476 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10477 GCOV_PROFILE := n
10478 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10479 index 35ee62f..b6609b6 100644
10480 --- a/arch/x86/boot/compressed/eboot.c
10481 +++ b/arch/x86/boot/compressed/eboot.c
10482 @@ -150,7 +150,6 @@ again:
10483 *addr = max_addr;
10484 }
10485
10486 -free_pool:
10487 efi_call_phys1(sys_table->boottime->free_pool, map);
10488
10489 fail:
10490 @@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10491 if (i == map_size / desc_size)
10492 status = EFI_NOT_FOUND;
10493
10494 -free_pool:
10495 efi_call_phys1(sys_table->boottime->free_pool, map);
10496 fail:
10497 return status;
10498 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10499 index 1e3184f..0d11e2e 100644
10500 --- a/arch/x86/boot/compressed/head_32.S
10501 +++ b/arch/x86/boot/compressed/head_32.S
10502 @@ -118,7 +118,7 @@ preferred_addr:
10503 notl %eax
10504 andl %eax, %ebx
10505 #else
10506 - movl $LOAD_PHYSICAL_ADDR, %ebx
10507 + movl $____LOAD_PHYSICAL_ADDR, %ebx
10508 #endif
10509
10510 /* Target address to relocate to for decompression */
10511 @@ -204,7 +204,7 @@ relocated:
10512 * and where it was actually loaded.
10513 */
10514 movl %ebp, %ebx
10515 - subl $LOAD_PHYSICAL_ADDR, %ebx
10516 + subl $____LOAD_PHYSICAL_ADDR, %ebx
10517 jz 2f /* Nothing to be done if loaded at compiled addr. */
10518 /*
10519 * Process relocations.
10520 @@ -212,8 +212,7 @@ relocated:
10521
10522 1: subl $4, %edi
10523 movl (%edi), %ecx
10524 - testl %ecx, %ecx
10525 - jz 2f
10526 + jecxz 2f
10527 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10528 jmp 1b
10529 2:
10530 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10531 index c1d383d..57ab51c 100644
10532 --- a/arch/x86/boot/compressed/head_64.S
10533 +++ b/arch/x86/boot/compressed/head_64.S
10534 @@ -97,7 +97,7 @@ ENTRY(startup_32)
10535 notl %eax
10536 andl %eax, %ebx
10537 #else
10538 - movl $LOAD_PHYSICAL_ADDR, %ebx
10539 + movl $____LOAD_PHYSICAL_ADDR, %ebx
10540 #endif
10541
10542 /* Target address to relocate to for decompression */
10543 @@ -272,7 +272,7 @@ preferred_addr:
10544 notq %rax
10545 andq %rax, %rbp
10546 #else
10547 - movq $LOAD_PHYSICAL_ADDR, %rbp
10548 + movq $____LOAD_PHYSICAL_ADDR, %rbp
10549 #endif
10550
10551 /* Target address to relocate to for decompression */
10552 @@ -363,8 +363,8 @@ gdt:
10553 .long gdt
10554 .word 0
10555 .quad 0x0000000000000000 /* NULL descriptor */
10556 - .quad 0x00af9a000000ffff /* __KERNEL_CS */
10557 - .quad 0x00cf92000000ffff /* __KERNEL_DS */
10558 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
10559 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
10560 .quad 0x0080890000000000 /* TS descriptor */
10561 .quad 0x0000000000000000 /* TS continued */
10562 gdt_end:
10563 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10564 index 7cb56c6..d382d84 100644
10565 --- a/arch/x86/boot/compressed/misc.c
10566 +++ b/arch/x86/boot/compressed/misc.c
10567 @@ -303,7 +303,7 @@ static void parse_elf(void *output)
10568 case PT_LOAD:
10569 #ifdef CONFIG_RELOCATABLE
10570 dest = output;
10571 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10572 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10573 #else
10574 dest = (void *)(phdr->p_paddr);
10575 #endif
10576 @@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10577 error("Destination address too large");
10578 #endif
10579 #ifndef CONFIG_RELOCATABLE
10580 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10581 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10582 error("Wrong destination address");
10583 #endif
10584
10585 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10586 index 4d3ff03..e4972ff 100644
10587 --- a/arch/x86/boot/cpucheck.c
10588 +++ b/arch/x86/boot/cpucheck.c
10589 @@ -74,7 +74,7 @@ static int has_fpu(void)
10590 u16 fcw = -1, fsw = -1;
10591 u32 cr0;
10592
10593 - asm("movl %%cr0,%0" : "=r" (cr0));
10594 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
10595 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10596 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10597 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10598 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10599 {
10600 u32 f0, f1;
10601
10602 - asm("pushfl ; "
10603 + asm volatile("pushfl ; "
10604 "pushfl ; "
10605 "popl %0 ; "
10606 "movl %0,%1 ; "
10607 @@ -115,7 +115,7 @@ static void get_flags(void)
10608 set_bit(X86_FEATURE_FPU, cpu.flags);
10609
10610 if (has_eflag(X86_EFLAGS_ID)) {
10611 - asm("cpuid"
10612 + asm volatile("cpuid"
10613 : "=a" (max_intel_level),
10614 "=b" (cpu_vendor[0]),
10615 "=d" (cpu_vendor[1]),
10616 @@ -124,7 +124,7 @@ static void get_flags(void)
10617
10618 if (max_intel_level >= 0x00000001 &&
10619 max_intel_level <= 0x0000ffff) {
10620 - asm("cpuid"
10621 + asm volatile("cpuid"
10622 : "=a" (tfms),
10623 "=c" (cpu.flags[4]),
10624 "=d" (cpu.flags[0])
10625 @@ -136,7 +136,7 @@ static void get_flags(void)
10626 cpu.model += ((tfms >> 16) & 0xf) << 4;
10627 }
10628
10629 - asm("cpuid"
10630 + asm volatile("cpuid"
10631 : "=a" (max_amd_level)
10632 : "a" (0x80000000)
10633 : "ebx", "ecx", "edx");
10634 @@ -144,7 +144,7 @@ static void get_flags(void)
10635 if (max_amd_level >= 0x80000001 &&
10636 max_amd_level <= 0x8000ffff) {
10637 u32 eax = 0x80000001;
10638 - asm("cpuid"
10639 + asm volatile("cpuid"
10640 : "+a" (eax),
10641 "=c" (cpu.flags[6]),
10642 "=d" (cpu.flags[1])
10643 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10644 u32 ecx = MSR_K7_HWCR;
10645 u32 eax, edx;
10646
10647 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10648 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10649 eax &= ~(1 << 15);
10650 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10651 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10652
10653 get_flags(); /* Make sure it really did something */
10654 err = check_flags();
10655 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10656 u32 ecx = MSR_VIA_FCR;
10657 u32 eax, edx;
10658
10659 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10660 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10661 eax |= (1<<1)|(1<<7);
10662 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10663 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10664
10665 set_bit(X86_FEATURE_CX8, cpu.flags);
10666 err = check_flags();
10667 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10668 u32 eax, edx;
10669 u32 level = 1;
10670
10671 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10672 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10673 - asm("cpuid"
10674 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10675 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10676 + asm volatile("cpuid"
10677 : "+a" (level), "=d" (cpu.flags[0])
10678 : : "ecx", "ebx");
10679 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10680 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10681
10682 err = check_flags();
10683 }
10684 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10685 index 9ec06a1..2c25e79 100644
10686 --- a/arch/x86/boot/header.S
10687 +++ b/arch/x86/boot/header.S
10688 @@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10689 # single linked list of
10690 # struct setup_data
10691
10692 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10693 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10694
10695 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10696 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10697 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10698 +#else
10699 #define VO_INIT_SIZE (VO__end - VO__text)
10700 +#endif
10701 #if ZO_INIT_SIZE > VO_INIT_SIZE
10702 #define INIT_SIZE ZO_INIT_SIZE
10703 #else
10704 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10705 index db75d07..8e6d0af 100644
10706 --- a/arch/x86/boot/memory.c
10707 +++ b/arch/x86/boot/memory.c
10708 @@ -19,7 +19,7 @@
10709
10710 static int detect_memory_e820(void)
10711 {
10712 - int count = 0;
10713 + unsigned int count = 0;
10714 struct biosregs ireg, oreg;
10715 struct e820entry *desc = boot_params.e820_map;
10716 static struct e820entry buf; /* static so it is zeroed */
10717 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10718 index 11e8c6e..fdbb1ed 100644
10719 --- a/arch/x86/boot/video-vesa.c
10720 +++ b/arch/x86/boot/video-vesa.c
10721 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10722
10723 boot_params.screen_info.vesapm_seg = oreg.es;
10724 boot_params.screen_info.vesapm_off = oreg.di;
10725 + boot_params.screen_info.vesapm_size = oreg.cx;
10726 }
10727
10728 /*
10729 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10730 index 43eda28..5ab5fdb 100644
10731 --- a/arch/x86/boot/video.c
10732 +++ b/arch/x86/boot/video.c
10733 @@ -96,7 +96,7 @@ static void store_mode_params(void)
10734 static unsigned int get_entry(void)
10735 {
10736 char entry_buf[4];
10737 - int i, len = 0;
10738 + unsigned int i, len = 0;
10739 int key;
10740 unsigned int v;
10741
10742 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10743 index 9105655..5e37f27 100644
10744 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
10745 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10746 @@ -8,6 +8,8 @@
10747 * including this sentence is retained in full.
10748 */
10749
10750 +#include <asm/alternative-asm.h>
10751 +
10752 .extern crypto_ft_tab
10753 .extern crypto_it_tab
10754 .extern crypto_fl_tab
10755 @@ -70,6 +72,8 @@
10756 je B192; \
10757 leaq 32(r9),r9;
10758
10759 +#define ret pax_force_retaddr 0, 1; ret
10760 +
10761 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10762 movq r1,r2; \
10763 movq r3,r4; \
10764 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10765 index 04b7977..402f223 100644
10766 --- a/arch/x86/crypto/aesni-intel_asm.S
10767 +++ b/arch/x86/crypto/aesni-intel_asm.S
10768 @@ -31,6 +31,7 @@
10769
10770 #include <linux/linkage.h>
10771 #include <asm/inst.h>
10772 +#include <asm/alternative-asm.h>
10773
10774 #ifdef __x86_64__
10775 .data
10776 @@ -1435,6 +1436,7 @@ _return_T_done_decrypt:
10777 pop %r14
10778 pop %r13
10779 pop %r12
10780 + pax_force_retaddr 0, 1
10781 ret
10782 ENDPROC(aesni_gcm_dec)
10783
10784 @@ -1699,6 +1701,7 @@ _return_T_done_encrypt:
10785 pop %r14
10786 pop %r13
10787 pop %r12
10788 + pax_force_retaddr 0, 1
10789 ret
10790 ENDPROC(aesni_gcm_enc)
10791
10792 @@ -1716,6 +1719,7 @@ _key_expansion_256a:
10793 pxor %xmm1, %xmm0
10794 movaps %xmm0, (TKEYP)
10795 add $0x10, TKEYP
10796 + pax_force_retaddr_bts
10797 ret
10798 ENDPROC(_key_expansion_128)
10799 ENDPROC(_key_expansion_256a)
10800 @@ -1742,6 +1746,7 @@ _key_expansion_192a:
10801 shufps $0b01001110, %xmm2, %xmm1
10802 movaps %xmm1, 0x10(TKEYP)
10803 add $0x20, TKEYP
10804 + pax_force_retaddr_bts
10805 ret
10806 ENDPROC(_key_expansion_192a)
10807
10808 @@ -1762,6 +1767,7 @@ _key_expansion_192b:
10809
10810 movaps %xmm0, (TKEYP)
10811 add $0x10, TKEYP
10812 + pax_force_retaddr_bts
10813 ret
10814 ENDPROC(_key_expansion_192b)
10815
10816 @@ -1775,6 +1781,7 @@ _key_expansion_256b:
10817 pxor %xmm1, %xmm2
10818 movaps %xmm2, (TKEYP)
10819 add $0x10, TKEYP
10820 + pax_force_retaddr_bts
10821 ret
10822 ENDPROC(_key_expansion_256b)
10823
10824 @@ -1888,6 +1895,7 @@ ENTRY(aesni_set_key)
10825 #ifndef __x86_64__
10826 popl KEYP
10827 #endif
10828 + pax_force_retaddr 0, 1
10829 ret
10830 ENDPROC(aesni_set_key)
10831
10832 @@ -1910,6 +1918,7 @@ ENTRY(aesni_enc)
10833 popl KLEN
10834 popl KEYP
10835 #endif
10836 + pax_force_retaddr 0, 1
10837 ret
10838 ENDPROC(aesni_enc)
10839
10840 @@ -1968,6 +1977,7 @@ _aesni_enc1:
10841 AESENC KEY STATE
10842 movaps 0x70(TKEYP), KEY
10843 AESENCLAST KEY STATE
10844 + pax_force_retaddr_bts
10845 ret
10846 ENDPROC(_aesni_enc1)
10847
10848 @@ -2077,6 +2087,7 @@ _aesni_enc4:
10849 AESENCLAST KEY STATE2
10850 AESENCLAST KEY STATE3
10851 AESENCLAST KEY STATE4
10852 + pax_force_retaddr_bts
10853 ret
10854 ENDPROC(_aesni_enc4)
10855
10856 @@ -2100,6 +2111,7 @@ ENTRY(aesni_dec)
10857 popl KLEN
10858 popl KEYP
10859 #endif
10860 + pax_force_retaddr 0, 1
10861 ret
10862 ENDPROC(aesni_dec)
10863
10864 @@ -2158,6 +2170,7 @@ _aesni_dec1:
10865 AESDEC KEY STATE
10866 movaps 0x70(TKEYP), KEY
10867 AESDECLAST KEY STATE
10868 + pax_force_retaddr_bts
10869 ret
10870 ENDPROC(_aesni_dec1)
10871
10872 @@ -2267,6 +2280,7 @@ _aesni_dec4:
10873 AESDECLAST KEY STATE2
10874 AESDECLAST KEY STATE3
10875 AESDECLAST KEY STATE4
10876 + pax_force_retaddr_bts
10877 ret
10878 ENDPROC(_aesni_dec4)
10879
10880 @@ -2325,6 +2339,7 @@ ENTRY(aesni_ecb_enc)
10881 popl KEYP
10882 popl LEN
10883 #endif
10884 + pax_force_retaddr 0, 1
10885 ret
10886 ENDPROC(aesni_ecb_enc)
10887
10888 @@ -2384,6 +2399,7 @@ ENTRY(aesni_ecb_dec)
10889 popl KEYP
10890 popl LEN
10891 #endif
10892 + pax_force_retaddr 0, 1
10893 ret
10894 ENDPROC(aesni_ecb_dec)
10895
10896 @@ -2426,6 +2442,7 @@ ENTRY(aesni_cbc_enc)
10897 popl LEN
10898 popl IVP
10899 #endif
10900 + pax_force_retaddr 0, 1
10901 ret
10902 ENDPROC(aesni_cbc_enc)
10903
10904 @@ -2517,6 +2534,7 @@ ENTRY(aesni_cbc_dec)
10905 popl LEN
10906 popl IVP
10907 #endif
10908 + pax_force_retaddr 0, 1
10909 ret
10910 ENDPROC(aesni_cbc_dec)
10911
10912 @@ -2544,6 +2562,7 @@ _aesni_inc_init:
10913 mov $1, TCTR_LOW
10914 MOVQ_R64_XMM TCTR_LOW INC
10915 MOVQ_R64_XMM CTR TCTR_LOW
10916 + pax_force_retaddr_bts
10917 ret
10918 ENDPROC(_aesni_inc_init)
10919
10920 @@ -2573,6 +2592,7 @@ _aesni_inc:
10921 .Linc_low:
10922 movaps CTR, IV
10923 PSHUFB_XMM BSWAP_MASK IV
10924 + pax_force_retaddr_bts
10925 ret
10926 ENDPROC(_aesni_inc)
10927
10928 @@ -2634,6 +2654,7 @@ ENTRY(aesni_ctr_enc)
10929 .Lctr_enc_ret:
10930 movups IV, (IVP)
10931 .Lctr_enc_just_ret:
10932 + pax_force_retaddr 0, 1
10933 ret
10934 ENDPROC(aesni_ctr_enc)
10935 #endif
10936 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10937 index 246c670..4d1ed00 100644
10938 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10939 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10940 @@ -21,6 +21,7 @@
10941 */
10942
10943 #include <linux/linkage.h>
10944 +#include <asm/alternative-asm.h>
10945
10946 .file "blowfish-x86_64-asm.S"
10947 .text
10948 @@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
10949 jnz .L__enc_xor;
10950
10951 write_block();
10952 + pax_force_retaddr 0, 1
10953 ret;
10954 .L__enc_xor:
10955 xor_block();
10956 + pax_force_retaddr 0, 1
10957 ret;
10958 ENDPROC(__blowfish_enc_blk)
10959
10960 @@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
10961
10962 movq %r11, %rbp;
10963
10964 + pax_force_retaddr 0, 1
10965 ret;
10966 ENDPROC(blowfish_dec_blk)
10967
10968 @@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
10969
10970 popq %rbx;
10971 popq %rbp;
10972 + pax_force_retaddr 0, 1
10973 ret;
10974
10975 .L__enc_xor4:
10976 @@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
10977
10978 popq %rbx;
10979 popq %rbp;
10980 + pax_force_retaddr 0, 1
10981 ret;
10982 ENDPROC(__blowfish_enc_blk_4way)
10983
10984 @@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
10985 popq %rbx;
10986 popq %rbp;
10987
10988 + pax_force_retaddr 0, 1
10989 ret;
10990 ENDPROC(blowfish_dec_blk_4way)
10991 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
10992 index 310319c..ce174a4 100644
10993 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
10994 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
10995 @@ -21,6 +21,7 @@
10996 */
10997
10998 #include <linux/linkage.h>
10999 +#include <asm/alternative-asm.h>
11000
11001 .file "camellia-x86_64-asm_64.S"
11002 .text
11003 @@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
11004 enc_outunpack(mov, RT1);
11005
11006 movq RRBP, %rbp;
11007 + pax_force_retaddr 0, 1
11008 ret;
11009
11010 .L__enc_xor:
11011 enc_outunpack(xor, RT1);
11012
11013 movq RRBP, %rbp;
11014 + pax_force_retaddr 0, 1
11015 ret;
11016 ENDPROC(__camellia_enc_blk)
11017
11018 @@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
11019 dec_outunpack();
11020
11021 movq RRBP, %rbp;
11022 + pax_force_retaddr 0, 1
11023 ret;
11024 ENDPROC(camellia_dec_blk)
11025
11026 @@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
11027
11028 movq RRBP, %rbp;
11029 popq %rbx;
11030 + pax_force_retaddr 0, 1
11031 ret;
11032
11033 .L__enc2_xor:
11034 @@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
11035
11036 movq RRBP, %rbp;
11037 popq %rbx;
11038 + pax_force_retaddr 0, 1
11039 ret;
11040 ENDPROC(__camellia_enc_blk_2way)
11041
11042 @@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
11043
11044 movq RRBP, %rbp;
11045 movq RXOR, %rbx;
11046 + pax_force_retaddr 0, 1
11047 ret;
11048 ENDPROC(camellia_dec_blk_2way)
11049 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11050 index c35fd5d..c1ee236 100644
11051 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11052 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11053 @@ -24,6 +24,7 @@
11054 */
11055
11056 #include <linux/linkage.h>
11057 +#include <asm/alternative-asm.h>
11058
11059 .file "cast5-avx-x86_64-asm_64.S"
11060
11061 @@ -281,6 +282,7 @@ __cast5_enc_blk16:
11062 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11063 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11064
11065 + pax_force_retaddr 0, 1
11066 ret;
11067 ENDPROC(__cast5_enc_blk16)
11068
11069 @@ -352,6 +354,7 @@ __cast5_dec_blk16:
11070 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11071 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11072
11073 + pax_force_retaddr 0, 1
11074 ret;
11075
11076 .L__skip_dec:
11077 @@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
11078 vmovdqu RR4, (6*4*4)(%r11);
11079 vmovdqu RL4, (7*4*4)(%r11);
11080
11081 + pax_force_retaddr
11082 ret;
11083 ENDPROC(cast5_ecb_enc_16way)
11084
11085 @@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
11086 vmovdqu RR4, (6*4*4)(%r11);
11087 vmovdqu RL4, (7*4*4)(%r11);
11088
11089 + pax_force_retaddr
11090 ret;
11091 ENDPROC(cast5_ecb_dec_16way)
11092
11093 @@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
11094
11095 popq %r12;
11096
11097 + pax_force_retaddr
11098 ret;
11099 ENDPROC(cast5_cbc_dec_16way)
11100
11101 @@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
11102
11103 popq %r12;
11104
11105 + pax_force_retaddr
11106 ret;
11107 ENDPROC(cast5_ctr_16way)
11108 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11109 index f93b610..c09bf40 100644
11110 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11111 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11112 @@ -24,6 +24,7 @@
11113 */
11114
11115 #include <linux/linkage.h>
11116 +#include <asm/alternative-asm.h>
11117 #include "glue_helper-asm-avx.S"
11118
11119 .file "cast6-avx-x86_64-asm_64.S"
11120 @@ -293,6 +294,7 @@ __cast6_enc_blk8:
11121 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11122 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11123
11124 + pax_force_retaddr 0, 1
11125 ret;
11126 ENDPROC(__cast6_enc_blk8)
11127
11128 @@ -338,6 +340,7 @@ __cast6_dec_blk8:
11129 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11130 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11131
11132 + pax_force_retaddr 0, 1
11133 ret;
11134 ENDPROC(__cast6_dec_blk8)
11135
11136 @@ -356,6 +359,7 @@ ENTRY(cast6_ecb_enc_8way)
11137
11138 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11139
11140 + pax_force_retaddr
11141 ret;
11142 ENDPROC(cast6_ecb_enc_8way)
11143
11144 @@ -374,6 +378,7 @@ ENTRY(cast6_ecb_dec_8way)
11145
11146 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11147
11148 + pax_force_retaddr
11149 ret;
11150 ENDPROC(cast6_ecb_dec_8way)
11151
11152 @@ -397,6 +402,7 @@ ENTRY(cast6_cbc_dec_8way)
11153
11154 popq %r12;
11155
11156 + pax_force_retaddr
11157 ret;
11158 ENDPROC(cast6_cbc_dec_8way)
11159
11160 @@ -422,5 +428,6 @@ ENTRY(cast6_ctr_8way)
11161
11162 popq %r12;
11163
11164 + pax_force_retaddr
11165 ret;
11166 ENDPROC(cast6_ctr_8way)
11167 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11168 index 9279e0b..9270820 100644
11169 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11170 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11171 @@ -1,4 +1,5 @@
11172 #include <linux/linkage.h>
11173 +#include <asm/alternative-asm.h>
11174
11175 # enter salsa20_encrypt_bytes
11176 ENTRY(salsa20_encrypt_bytes)
11177 @@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
11178 add %r11,%rsp
11179 mov %rdi,%rax
11180 mov %rsi,%rdx
11181 + pax_force_retaddr 0, 1
11182 ret
11183 # bytesatleast65:
11184 ._bytesatleast65:
11185 @@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
11186 add %r11,%rsp
11187 mov %rdi,%rax
11188 mov %rsi,%rdx
11189 + pax_force_retaddr
11190 ret
11191 ENDPROC(salsa20_keysetup)
11192
11193 @@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
11194 add %r11,%rsp
11195 mov %rdi,%rax
11196 mov %rsi,%rdx
11197 + pax_force_retaddr
11198 ret
11199 ENDPROC(salsa20_ivsetup)
11200 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11201 index 43c9386..a0e2d60 100644
11202 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11203 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11204 @@ -25,6 +25,7 @@
11205 */
11206
11207 #include <linux/linkage.h>
11208 +#include <asm/alternative-asm.h>
11209 #include "glue_helper-asm-avx.S"
11210
11211 .file "serpent-avx-x86_64-asm_64.S"
11212 @@ -617,6 +618,7 @@ __serpent_enc_blk8_avx:
11213 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11214 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11215
11216 + pax_force_retaddr
11217 ret;
11218 ENDPROC(__serpent_enc_blk8_avx)
11219
11220 @@ -671,6 +673,7 @@ __serpent_dec_blk8_avx:
11221 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11222 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11223
11224 + pax_force_retaddr
11225 ret;
11226 ENDPROC(__serpent_dec_blk8_avx)
11227
11228 @@ -687,6 +690,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
11229
11230 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11231
11232 + pax_force_retaddr
11233 ret;
11234 ENDPROC(serpent_ecb_enc_8way_avx)
11235
11236 @@ -703,6 +707,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
11237
11238 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11239
11240 + pax_force_retaddr
11241 ret;
11242 ENDPROC(serpent_ecb_dec_8way_avx)
11243
11244 @@ -719,6 +724,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
11245
11246 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11247
11248 + pax_force_retaddr
11249 ret;
11250 ENDPROC(serpent_cbc_dec_8way_avx)
11251
11252 @@ -737,5 +743,6 @@ ENTRY(serpent_ctr_8way_avx)
11253
11254 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11255
11256 + pax_force_retaddr
11257 ret;
11258 ENDPROC(serpent_ctr_8way_avx)
11259 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11260 index acc066c..1559cc4 100644
11261 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11262 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11263 @@ -25,6 +25,7 @@
11264 */
11265
11266 #include <linux/linkage.h>
11267 +#include <asm/alternative-asm.h>
11268
11269 .file "serpent-sse2-x86_64-asm_64.S"
11270 .text
11271 @@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
11272 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11273 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11274
11275 + pax_force_retaddr
11276 ret;
11277
11278 .L__enc_xor8:
11279 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11280 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11281
11282 + pax_force_retaddr
11283 ret;
11284 ENDPROC(__serpent_enc_blk_8way)
11285
11286 @@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
11287 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11288 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11289
11290 + pax_force_retaddr
11291 ret;
11292 ENDPROC(serpent_dec_blk_8way)
11293 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11294 index a410950..3356d42 100644
11295 --- a/arch/x86/crypto/sha1_ssse3_asm.S
11296 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
11297 @@ -29,6 +29,7 @@
11298 */
11299
11300 #include <linux/linkage.h>
11301 +#include <asm/alternative-asm.h>
11302
11303 #define CTX %rdi // arg1
11304 #define BUF %rsi // arg2
11305 @@ -104,6 +105,7 @@
11306 pop %r12
11307 pop %rbp
11308 pop %rbx
11309 + pax_force_retaddr 0, 1
11310 ret
11311
11312 ENDPROC(\name)
11313 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11314 index 8d3e113..898b161 100644
11315 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11316 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11317 @@ -24,6 +24,7 @@
11318 */
11319
11320 #include <linux/linkage.h>
11321 +#include <asm/alternative-asm.h>
11322 #include "glue_helper-asm-avx.S"
11323
11324 .file "twofish-avx-x86_64-asm_64.S"
11325 @@ -282,6 +283,7 @@ __twofish_enc_blk8:
11326 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11327 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11328
11329 + pax_force_retaddr 0, 1
11330 ret;
11331 ENDPROC(__twofish_enc_blk8)
11332
11333 @@ -322,6 +324,7 @@ __twofish_dec_blk8:
11334 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11335 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11336
11337 + pax_force_retaddr 0, 1
11338 ret;
11339 ENDPROC(__twofish_dec_blk8)
11340
11341 @@ -340,6 +343,7 @@ ENTRY(twofish_ecb_enc_8way)
11342
11343 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11344
11345 + pax_force_retaddr 0, 1
11346 ret;
11347 ENDPROC(twofish_ecb_enc_8way)
11348
11349 @@ -358,6 +362,7 @@ ENTRY(twofish_ecb_dec_8way)
11350
11351 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11352
11353 + pax_force_retaddr 0, 1
11354 ret;
11355 ENDPROC(twofish_ecb_dec_8way)
11356
11357 @@ -381,6 +386,7 @@ ENTRY(twofish_cbc_dec_8way)
11358
11359 popq %r12;
11360
11361 + pax_force_retaddr 0, 1
11362 ret;
11363 ENDPROC(twofish_cbc_dec_8way)
11364
11365 @@ -406,5 +412,6 @@ ENTRY(twofish_ctr_8way)
11366
11367 popq %r12;
11368
11369 + pax_force_retaddr 0, 1
11370 ret;
11371 ENDPROC(twofish_ctr_8way)
11372 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11373 index 1c3b7ce..b365c5e 100644
11374 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11375 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11376 @@ -21,6 +21,7 @@
11377 */
11378
11379 #include <linux/linkage.h>
11380 +#include <asm/alternative-asm.h>
11381
11382 .file "twofish-x86_64-asm-3way.S"
11383 .text
11384 @@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
11385 popq %r13;
11386 popq %r14;
11387 popq %r15;
11388 + pax_force_retaddr 0, 1
11389 ret;
11390
11391 .L__enc_xor3:
11392 @@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
11393 popq %r13;
11394 popq %r14;
11395 popq %r15;
11396 + pax_force_retaddr 0, 1
11397 ret;
11398 ENDPROC(__twofish_enc_blk_3way)
11399
11400 @@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
11401 popq %r13;
11402 popq %r14;
11403 popq %r15;
11404 + pax_force_retaddr 0, 1
11405 ret;
11406 ENDPROC(twofish_dec_blk_3way)
11407 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11408 index a039d21..29e7615 100644
11409 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11410 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11411 @@ -22,6 +22,7 @@
11412
11413 #include <linux/linkage.h>
11414 #include <asm/asm-offsets.h>
11415 +#include <asm/alternative-asm.h>
11416
11417 #define a_offset 0
11418 #define b_offset 4
11419 @@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
11420
11421 popq R1
11422 movq $1,%rax
11423 + pax_force_retaddr 0, 1
11424 ret
11425 ENDPROC(twofish_enc_blk)
11426
11427 @@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
11428
11429 popq R1
11430 movq $1,%rax
11431 + pax_force_retaddr 0, 1
11432 ret
11433 ENDPROC(twofish_dec_blk)
11434 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11435 index 03abf9b..a42ba29 100644
11436 --- a/arch/x86/ia32/ia32_aout.c
11437 +++ b/arch/x86/ia32/ia32_aout.c
11438 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11439 unsigned long dump_start, dump_size;
11440 struct user32 dump;
11441
11442 + memset(&dump, 0, sizeof(dump));
11443 +
11444 fs = get_fs();
11445 set_fs(KERNEL_DS);
11446 has_dumped = 1;
11447 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11448 index cf1a471..3bc4cf8 100644
11449 --- a/arch/x86/ia32/ia32_signal.c
11450 +++ b/arch/x86/ia32/ia32_signal.c
11451 @@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
11452 sp -= frame_size;
11453 /* Align the stack pointer according to the i386 ABI,
11454 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11455 - sp = ((sp + 4) & -16ul) - 4;
11456 + sp = ((sp - 12) & -16ul) - 4;
11457 return (void __user *) sp;
11458 }
11459
11460 @@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
11461 * These are actually not used anymore, but left because some
11462 * gdb versions depend on them as a marker.
11463 */
11464 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11465 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11466 } put_user_catch(err);
11467
11468 if (err)
11469 @@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11470 0xb8,
11471 __NR_ia32_rt_sigreturn,
11472 0x80cd,
11473 - 0,
11474 + 0
11475 };
11476
11477 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
11478 @@ -463,16 +463,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11479
11480 if (ksig->ka.sa.sa_flags & SA_RESTORER)
11481 restorer = ksig->ka.sa.sa_restorer;
11482 + else if (current->mm->context.vdso)
11483 + /* Return stub is in 32bit vsyscall page */
11484 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11485 else
11486 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11487 - rt_sigreturn);
11488 + restorer = &frame->retcode;
11489 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11490
11491 /*
11492 * Not actually used anymore, but left because some gdb
11493 * versions need it.
11494 */
11495 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11496 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11497 } put_user_catch(err);
11498
11499 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
11500 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11501 index 474dc1b..be7bff5 100644
11502 --- a/arch/x86/ia32/ia32entry.S
11503 +++ b/arch/x86/ia32/ia32entry.S
11504 @@ -15,8 +15,10 @@
11505 #include <asm/irqflags.h>
11506 #include <asm/asm.h>
11507 #include <asm/smap.h>
11508 +#include <asm/pgtable.h>
11509 #include <linux/linkage.h>
11510 #include <linux/err.h>
11511 +#include <asm/alternative-asm.h>
11512
11513 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11514 #include <linux/elf-em.h>
11515 @@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11516 ENDPROC(native_irq_enable_sysexit)
11517 #endif
11518
11519 + .macro pax_enter_kernel_user
11520 + pax_set_fptr_mask
11521 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11522 + call pax_enter_kernel_user
11523 +#endif
11524 + .endm
11525 +
11526 + .macro pax_exit_kernel_user
11527 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11528 + call pax_exit_kernel_user
11529 +#endif
11530 +#ifdef CONFIG_PAX_RANDKSTACK
11531 + pushq %rax
11532 + pushq %r11
11533 + call pax_randomize_kstack
11534 + popq %r11
11535 + popq %rax
11536 +#endif
11537 + .endm
11538 +
11539 +.macro pax_erase_kstack
11540 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11541 + call pax_erase_kstack
11542 +#endif
11543 +.endm
11544 +
11545 /*
11546 * 32bit SYSENTER instruction entry.
11547 *
11548 @@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11549 CFI_REGISTER rsp,rbp
11550 SWAPGS_UNSAFE_STACK
11551 movq PER_CPU_VAR(kernel_stack), %rsp
11552 - addq $(KERNEL_STACK_OFFSET),%rsp
11553 - /*
11554 - * No need to follow this irqs on/off section: the syscall
11555 - * disabled irqs, here we enable it straight after entry:
11556 - */
11557 - ENABLE_INTERRUPTS(CLBR_NONE)
11558 movl %ebp,%ebp /* zero extension */
11559 pushq_cfi $__USER32_DS
11560 /*CFI_REL_OFFSET ss,0*/
11561 @@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11562 CFI_REL_OFFSET rsp,0
11563 pushfq_cfi
11564 /*CFI_REL_OFFSET rflags,0*/
11565 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11566 - CFI_REGISTER rip,r10
11567 + orl $X86_EFLAGS_IF,(%rsp)
11568 + GET_THREAD_INFO(%r11)
11569 + movl TI_sysenter_return(%r11), %r11d
11570 + CFI_REGISTER rip,r11
11571 pushq_cfi $__USER32_CS
11572 /*CFI_REL_OFFSET cs,0*/
11573 movl %eax, %eax
11574 - pushq_cfi %r10
11575 + pushq_cfi %r11
11576 CFI_REL_OFFSET rip,0
11577 pushq_cfi %rax
11578 cld
11579 SAVE_ARGS 0,1,0
11580 + pax_enter_kernel_user
11581 +
11582 +#ifdef CONFIG_PAX_RANDKSTACK
11583 + pax_erase_kstack
11584 +#endif
11585 +
11586 + /*
11587 + * No need to follow this irqs on/off section: the syscall
11588 + * disabled irqs, here we enable it straight after entry:
11589 + */
11590 + ENABLE_INTERRUPTS(CLBR_NONE)
11591 /* no need to do an access_ok check here because rbp has been
11592 32bit zero extended */
11593 +
11594 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11595 + mov pax_user_shadow_base,%r11
11596 + add %r11,%rbp
11597 +#endif
11598 +
11599 ASM_STAC
11600 1: movl (%rbp),%ebp
11601 _ASM_EXTABLE(1b,ia32_badarg)
11602 ASM_CLAC
11603 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11604 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11605 + GET_THREAD_INFO(%r11)
11606 + orl $TS_COMPAT,TI_status(%r11)
11607 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11608 CFI_REMEMBER_STATE
11609 jnz sysenter_tracesys
11610 cmpq $(IA32_NR_syscalls-1),%rax
11611 @@ -162,12 +204,15 @@ sysenter_do_call:
11612 sysenter_dispatch:
11613 call *ia32_sys_call_table(,%rax,8)
11614 movq %rax,RAX-ARGOFFSET(%rsp)
11615 + GET_THREAD_INFO(%r11)
11616 DISABLE_INTERRUPTS(CLBR_NONE)
11617 TRACE_IRQS_OFF
11618 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11619 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11620 jnz sysexit_audit
11621 sysexit_from_sys_call:
11622 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11623 + pax_exit_kernel_user
11624 + pax_erase_kstack
11625 + andl $~TS_COMPAT,TI_status(%r11)
11626 /* clear IF, that popfq doesn't enable interrupts early */
11627 andl $~0x200,EFLAGS-R11(%rsp)
11628 movl RIP-R11(%rsp),%edx /* User %eip */
11629 @@ -193,6 +238,9 @@ sysexit_from_sys_call:
11630 movl %eax,%esi /* 2nd arg: syscall number */
11631 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11632 call __audit_syscall_entry
11633 +
11634 + pax_erase_kstack
11635 +
11636 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11637 cmpq $(IA32_NR_syscalls-1),%rax
11638 ja ia32_badsys
11639 @@ -204,7 +252,7 @@ sysexit_from_sys_call:
11640 .endm
11641
11642 .macro auditsys_exit exit
11643 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11644 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11645 jnz ia32_ret_from_sys_call
11646 TRACE_IRQS_ON
11647 ENABLE_INTERRUPTS(CLBR_NONE)
11648 @@ -215,11 +263,12 @@ sysexit_from_sys_call:
11649 1: setbe %al /* 1 if error, 0 if not */
11650 movzbl %al,%edi /* zero-extend that into %edi */
11651 call __audit_syscall_exit
11652 + GET_THREAD_INFO(%r11)
11653 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11654 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11655 DISABLE_INTERRUPTS(CLBR_NONE)
11656 TRACE_IRQS_OFF
11657 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11658 + testl %edi,TI_flags(%r11)
11659 jz \exit
11660 CLEAR_RREGS -ARGOFFSET
11661 jmp int_with_check
11662 @@ -237,7 +286,7 @@ sysexit_audit:
11663
11664 sysenter_tracesys:
11665 #ifdef CONFIG_AUDITSYSCALL
11666 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11667 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11668 jz sysenter_auditsys
11669 #endif
11670 SAVE_REST
11671 @@ -249,6 +298,9 @@ sysenter_tracesys:
11672 RESTORE_REST
11673 cmpq $(IA32_NR_syscalls-1),%rax
11674 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11675 +
11676 + pax_erase_kstack
11677 +
11678 jmp sysenter_do_call
11679 CFI_ENDPROC
11680 ENDPROC(ia32_sysenter_target)
11681 @@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11682 ENTRY(ia32_cstar_target)
11683 CFI_STARTPROC32 simple
11684 CFI_SIGNAL_FRAME
11685 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11686 + CFI_DEF_CFA rsp,0
11687 CFI_REGISTER rip,rcx
11688 /*CFI_REGISTER rflags,r11*/
11689 SWAPGS_UNSAFE_STACK
11690 movl %esp,%r8d
11691 CFI_REGISTER rsp,r8
11692 movq PER_CPU_VAR(kernel_stack),%rsp
11693 + SAVE_ARGS 8*6,0,0
11694 + pax_enter_kernel_user
11695 +
11696 +#ifdef CONFIG_PAX_RANDKSTACK
11697 + pax_erase_kstack
11698 +#endif
11699 +
11700 /*
11701 * No need to follow this irqs on/off section: the syscall
11702 * disabled irqs and here we enable it straight after entry:
11703 */
11704 ENABLE_INTERRUPTS(CLBR_NONE)
11705 - SAVE_ARGS 8,0,0
11706 movl %eax,%eax /* zero extension */
11707 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11708 movq %rcx,RIP-ARGOFFSET(%rsp)
11709 @@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11710 /* no need to do an access_ok check here because r8 has been
11711 32bit zero extended */
11712 /* hardware stack frame is complete now */
11713 +
11714 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11715 + mov pax_user_shadow_base,%r11
11716 + add %r11,%r8
11717 +#endif
11718 +
11719 ASM_STAC
11720 1: movl (%r8),%r9d
11721 _ASM_EXTABLE(1b,ia32_badarg)
11722 ASM_CLAC
11723 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11724 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11725 + GET_THREAD_INFO(%r11)
11726 + orl $TS_COMPAT,TI_status(%r11)
11727 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11728 CFI_REMEMBER_STATE
11729 jnz cstar_tracesys
11730 cmpq $IA32_NR_syscalls-1,%rax
11731 @@ -319,12 +384,15 @@ cstar_do_call:
11732 cstar_dispatch:
11733 call *ia32_sys_call_table(,%rax,8)
11734 movq %rax,RAX-ARGOFFSET(%rsp)
11735 + GET_THREAD_INFO(%r11)
11736 DISABLE_INTERRUPTS(CLBR_NONE)
11737 TRACE_IRQS_OFF
11738 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11739 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11740 jnz sysretl_audit
11741 sysretl_from_sys_call:
11742 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11743 + pax_exit_kernel_user
11744 + pax_erase_kstack
11745 + andl $~TS_COMPAT,TI_status(%r11)
11746 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11747 movl RIP-ARGOFFSET(%rsp),%ecx
11748 CFI_REGISTER rip,rcx
11749 @@ -352,7 +420,7 @@ sysretl_audit:
11750
11751 cstar_tracesys:
11752 #ifdef CONFIG_AUDITSYSCALL
11753 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11754 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11755 jz cstar_auditsys
11756 #endif
11757 xchgl %r9d,%ebp
11758 @@ -366,6 +434,9 @@ cstar_tracesys:
11759 xchgl %ebp,%r9d
11760 cmpq $(IA32_NR_syscalls-1),%rax
11761 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11762 +
11763 + pax_erase_kstack
11764 +
11765 jmp cstar_do_call
11766 END(ia32_cstar_target)
11767
11768 @@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11769 CFI_REL_OFFSET rip,RIP-RIP
11770 PARAVIRT_ADJUST_EXCEPTION_FRAME
11771 SWAPGS
11772 - /*
11773 - * No need to follow this irqs on/off section: the syscall
11774 - * disabled irqs and here we enable it straight after entry:
11775 - */
11776 - ENABLE_INTERRUPTS(CLBR_NONE)
11777 movl %eax,%eax
11778 pushq_cfi %rax
11779 cld
11780 /* note the registers are not zero extended to the sf.
11781 this could be a problem. */
11782 SAVE_ARGS 0,1,0
11783 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11784 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11785 + pax_enter_kernel_user
11786 +
11787 +#ifdef CONFIG_PAX_RANDKSTACK
11788 + pax_erase_kstack
11789 +#endif
11790 +
11791 + /*
11792 + * No need to follow this irqs on/off section: the syscall
11793 + * disabled irqs and here we enable it straight after entry:
11794 + */
11795 + ENABLE_INTERRUPTS(CLBR_NONE)
11796 + GET_THREAD_INFO(%r11)
11797 + orl $TS_COMPAT,TI_status(%r11)
11798 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11799 jnz ia32_tracesys
11800 cmpq $(IA32_NR_syscalls-1),%rax
11801 ja ia32_badsys
11802 @@ -442,6 +520,9 @@ ia32_tracesys:
11803 RESTORE_REST
11804 cmpq $(IA32_NR_syscalls-1),%rax
11805 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11806 +
11807 + pax_erase_kstack
11808 +
11809 jmp ia32_do_call
11810 END(ia32_syscall)
11811
11812 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11813 index ad7a20c..1ffa3c1 100644
11814 --- a/arch/x86/ia32/sys_ia32.c
11815 +++ b/arch/x86/ia32/sys_ia32.c
11816 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11817 */
11818 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11819 {
11820 - typeof(ubuf->st_uid) uid = 0;
11821 - typeof(ubuf->st_gid) gid = 0;
11822 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
11823 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
11824 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11825 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11826 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11827 @@ -205,7 +205,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
11828 return -EFAULT;
11829
11830 set_fs(KERNEL_DS);
11831 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
11832 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
11833 count);
11834 set_fs(old_fs);
11835
11836 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11837 index 372231c..a5aa1a1 100644
11838 --- a/arch/x86/include/asm/alternative-asm.h
11839 +++ b/arch/x86/include/asm/alternative-asm.h
11840 @@ -18,6 +18,45 @@
11841 .endm
11842 #endif
11843
11844 +#ifdef KERNEXEC_PLUGIN
11845 + .macro pax_force_retaddr_bts rip=0
11846 + btsq $63,\rip(%rsp)
11847 + .endm
11848 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11849 + .macro pax_force_retaddr rip=0, reload=0
11850 + btsq $63,\rip(%rsp)
11851 + .endm
11852 + .macro pax_force_fptr ptr
11853 + btsq $63,\ptr
11854 + .endm
11855 + .macro pax_set_fptr_mask
11856 + .endm
11857 +#endif
11858 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11859 + .macro pax_force_retaddr rip=0, reload=0
11860 + .if \reload
11861 + pax_set_fptr_mask
11862 + .endif
11863 + orq %r10,\rip(%rsp)
11864 + .endm
11865 + .macro pax_force_fptr ptr
11866 + orq %r10,\ptr
11867 + .endm
11868 + .macro pax_set_fptr_mask
11869 + movabs $0x8000000000000000,%r10
11870 + .endm
11871 +#endif
11872 +#else
11873 + .macro pax_force_retaddr rip=0, reload=0
11874 + .endm
11875 + .macro pax_force_fptr ptr
11876 + .endm
11877 + .macro pax_force_retaddr_bts rip=0
11878 + .endm
11879 + .macro pax_set_fptr_mask
11880 + .endm
11881 +#endif
11882 +
11883 .macro altinstruction_entry orig alt feature orig_len alt_len
11884 .long \orig - .
11885 .long \alt - .
11886 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11887 index 58ed6d9..f1cbe58 100644
11888 --- a/arch/x86/include/asm/alternative.h
11889 +++ b/arch/x86/include/asm/alternative.h
11890 @@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11891 ".pushsection .discard,\"aw\",@progbits\n" \
11892 DISCARD_ENTRY(1) \
11893 ".popsection\n" \
11894 - ".pushsection .altinstr_replacement, \"ax\"\n" \
11895 + ".pushsection .altinstr_replacement, \"a\"\n" \
11896 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11897 ".popsection"
11898
11899 @@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11900 DISCARD_ENTRY(1) \
11901 DISCARD_ENTRY(2) \
11902 ".popsection\n" \
11903 - ".pushsection .altinstr_replacement, \"ax\"\n" \
11904 + ".pushsection .altinstr_replacement, \"a\"\n" \
11905 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11906 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11907 ".popsection"
11908 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11909 index 3388034..050f0b9 100644
11910 --- a/arch/x86/include/asm/apic.h
11911 +++ b/arch/x86/include/asm/apic.h
11912 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11913
11914 #ifdef CONFIG_X86_LOCAL_APIC
11915
11916 -extern unsigned int apic_verbosity;
11917 +extern int apic_verbosity;
11918 extern int local_apic_timer_c2_ok;
11919
11920 extern int disable_apic;
11921 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11922 index 20370c6..a2eb9b0 100644
11923 --- a/arch/x86/include/asm/apm.h
11924 +++ b/arch/x86/include/asm/apm.h
11925 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11926 __asm__ __volatile__(APM_DO_ZERO_SEGS
11927 "pushl %%edi\n\t"
11928 "pushl %%ebp\n\t"
11929 - "lcall *%%cs:apm_bios_entry\n\t"
11930 + "lcall *%%ss:apm_bios_entry\n\t"
11931 "setc %%al\n\t"
11932 "popl %%ebp\n\t"
11933 "popl %%edi\n\t"
11934 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11935 __asm__ __volatile__(APM_DO_ZERO_SEGS
11936 "pushl %%edi\n\t"
11937 "pushl %%ebp\n\t"
11938 - "lcall *%%cs:apm_bios_entry\n\t"
11939 + "lcall *%%ss:apm_bios_entry\n\t"
11940 "setc %%bl\n\t"
11941 "popl %%ebp\n\t"
11942 "popl %%edi\n\t"
11943 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11944 index 722aa3b..3a0bb27 100644
11945 --- a/arch/x86/include/asm/atomic.h
11946 +++ b/arch/x86/include/asm/atomic.h
11947 @@ -22,7 +22,18 @@
11948 */
11949 static inline int atomic_read(const atomic_t *v)
11950 {
11951 - return (*(volatile int *)&(v)->counter);
11952 + return (*(volatile const int *)&(v)->counter);
11953 +}
11954 +
11955 +/**
11956 + * atomic_read_unchecked - read atomic variable
11957 + * @v: pointer of type atomic_unchecked_t
11958 + *
11959 + * Atomically reads the value of @v.
11960 + */
11961 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
11962 +{
11963 + return (*(volatile const int *)&(v)->counter);
11964 }
11965
11966 /**
11967 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
11968 }
11969
11970 /**
11971 + * atomic_set_unchecked - set atomic variable
11972 + * @v: pointer of type atomic_unchecked_t
11973 + * @i: required value
11974 + *
11975 + * Atomically sets the value of @v to @i.
11976 + */
11977 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
11978 +{
11979 + v->counter = i;
11980 +}
11981 +
11982 +/**
11983 * atomic_add - add integer to atomic variable
11984 * @i: integer value to add
11985 * @v: pointer of type atomic_t
11986 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
11987 */
11988 static inline void atomic_add(int i, atomic_t *v)
11989 {
11990 - asm volatile(LOCK_PREFIX "addl %1,%0"
11991 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
11992 +
11993 +#ifdef CONFIG_PAX_REFCOUNT
11994 + "jno 0f\n"
11995 + LOCK_PREFIX "subl %1,%0\n"
11996 + "int $4\n0:\n"
11997 + _ASM_EXTABLE(0b, 0b)
11998 +#endif
11999 +
12000 + : "+m" (v->counter)
12001 + : "ir" (i));
12002 +}
12003 +
12004 +/**
12005 + * atomic_add_unchecked - add integer to atomic variable
12006 + * @i: integer value to add
12007 + * @v: pointer of type atomic_unchecked_t
12008 + *
12009 + * Atomically adds @i to @v.
12010 + */
12011 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12012 +{
12013 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
12014 : "+m" (v->counter)
12015 : "ir" (i));
12016 }
12017 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12018 */
12019 static inline void atomic_sub(int i, atomic_t *v)
12020 {
12021 - asm volatile(LOCK_PREFIX "subl %1,%0"
12022 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
12023 +
12024 +#ifdef CONFIG_PAX_REFCOUNT
12025 + "jno 0f\n"
12026 + LOCK_PREFIX "addl %1,%0\n"
12027 + "int $4\n0:\n"
12028 + _ASM_EXTABLE(0b, 0b)
12029 +#endif
12030 +
12031 + : "+m" (v->counter)
12032 + : "ir" (i));
12033 +}
12034 +
12035 +/**
12036 + * atomic_sub_unchecked - subtract integer from atomic variable
12037 + * @i: integer value to subtract
12038 + * @v: pointer of type atomic_unchecked_t
12039 + *
12040 + * Atomically subtracts @i from @v.
12041 + */
12042 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12043 +{
12044 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
12045 : "+m" (v->counter)
12046 : "ir" (i));
12047 }
12048 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12049 {
12050 unsigned char c;
12051
12052 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12053 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
12054 +
12055 +#ifdef CONFIG_PAX_REFCOUNT
12056 + "jno 0f\n"
12057 + LOCK_PREFIX "addl %2,%0\n"
12058 + "int $4\n0:\n"
12059 + _ASM_EXTABLE(0b, 0b)
12060 +#endif
12061 +
12062 + "sete %1\n"
12063 : "+m" (v->counter), "=qm" (c)
12064 : "ir" (i) : "memory");
12065 return c;
12066 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12067 */
12068 static inline void atomic_inc(atomic_t *v)
12069 {
12070 - asm volatile(LOCK_PREFIX "incl %0"
12071 + asm volatile(LOCK_PREFIX "incl %0\n"
12072 +
12073 +#ifdef CONFIG_PAX_REFCOUNT
12074 + "jno 0f\n"
12075 + LOCK_PREFIX "decl %0\n"
12076 + "int $4\n0:\n"
12077 + _ASM_EXTABLE(0b, 0b)
12078 +#endif
12079 +
12080 + : "+m" (v->counter));
12081 +}
12082 +
12083 +/**
12084 + * atomic_inc_unchecked - increment atomic variable
12085 + * @v: pointer of type atomic_unchecked_t
12086 + *
12087 + * Atomically increments @v by 1.
12088 + */
12089 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12090 +{
12091 + asm volatile(LOCK_PREFIX "incl %0\n"
12092 : "+m" (v->counter));
12093 }
12094
12095 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12096 */
12097 static inline void atomic_dec(atomic_t *v)
12098 {
12099 - asm volatile(LOCK_PREFIX "decl %0"
12100 + asm volatile(LOCK_PREFIX "decl %0\n"
12101 +
12102 +#ifdef CONFIG_PAX_REFCOUNT
12103 + "jno 0f\n"
12104 + LOCK_PREFIX "incl %0\n"
12105 + "int $4\n0:\n"
12106 + _ASM_EXTABLE(0b, 0b)
12107 +#endif
12108 +
12109 + : "+m" (v->counter));
12110 +}
12111 +
12112 +/**
12113 + * atomic_dec_unchecked - decrement atomic variable
12114 + * @v: pointer of type atomic_unchecked_t
12115 + *
12116 + * Atomically decrements @v by 1.
12117 + */
12118 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12119 +{
12120 + asm volatile(LOCK_PREFIX "decl %0\n"
12121 : "+m" (v->counter));
12122 }
12123
12124 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12125 {
12126 unsigned char c;
12127
12128 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
12129 + asm volatile(LOCK_PREFIX "decl %0\n"
12130 +
12131 +#ifdef CONFIG_PAX_REFCOUNT
12132 + "jno 0f\n"
12133 + LOCK_PREFIX "incl %0\n"
12134 + "int $4\n0:\n"
12135 + _ASM_EXTABLE(0b, 0b)
12136 +#endif
12137 +
12138 + "sete %1\n"
12139 : "+m" (v->counter), "=qm" (c)
12140 : : "memory");
12141 return c != 0;
12142 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12143 {
12144 unsigned char c;
12145
12146 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
12147 + asm volatile(LOCK_PREFIX "incl %0\n"
12148 +
12149 +#ifdef CONFIG_PAX_REFCOUNT
12150 + "jno 0f\n"
12151 + LOCK_PREFIX "decl %0\n"
12152 + "int $4\n0:\n"
12153 + _ASM_EXTABLE(0b, 0b)
12154 +#endif
12155 +
12156 + "sete %1\n"
12157 + : "+m" (v->counter), "=qm" (c)
12158 + : : "memory");
12159 + return c != 0;
12160 +}
12161 +
12162 +/**
12163 + * atomic_inc_and_test_unchecked - increment and test
12164 + * @v: pointer of type atomic_unchecked_t
12165 + *
12166 + * Atomically increments @v by 1
12167 + * and returns true if the result is zero, or false for all
12168 + * other cases.
12169 + */
12170 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12171 +{
12172 + unsigned char c;
12173 +
12174 + asm volatile(LOCK_PREFIX "incl %0\n"
12175 + "sete %1\n"
12176 : "+m" (v->counter), "=qm" (c)
12177 : : "memory");
12178 return c != 0;
12179 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12180 {
12181 unsigned char c;
12182
12183 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12184 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
12185 +
12186 +#ifdef CONFIG_PAX_REFCOUNT
12187 + "jno 0f\n"
12188 + LOCK_PREFIX "subl %2,%0\n"
12189 + "int $4\n0:\n"
12190 + _ASM_EXTABLE(0b, 0b)
12191 +#endif
12192 +
12193 + "sets %1\n"
12194 : "+m" (v->counter), "=qm" (c)
12195 : "ir" (i) : "memory");
12196 return c;
12197 @@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12198 */
12199 static inline int atomic_add_return(int i, atomic_t *v)
12200 {
12201 + return i + xadd_check_overflow(&v->counter, i);
12202 +}
12203 +
12204 +/**
12205 + * atomic_add_return_unchecked - add integer and return
12206 + * @i: integer value to add
12207 + * @v: pointer of type atomic_unchecked_t
12208 + *
12209 + * Atomically adds @i to @v and returns @i + @v
12210 + */
12211 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12212 +{
12213 return i + xadd(&v->counter, i);
12214 }
12215
12216 @@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12217 }
12218
12219 #define atomic_inc_return(v) (atomic_add_return(1, v))
12220 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12221 +{
12222 + return atomic_add_return_unchecked(1, v);
12223 +}
12224 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12225
12226 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12227 @@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12228 return cmpxchg(&v->counter, old, new);
12229 }
12230
12231 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12232 +{
12233 + return cmpxchg(&v->counter, old, new);
12234 +}
12235 +
12236 static inline int atomic_xchg(atomic_t *v, int new)
12237 {
12238 return xchg(&v->counter, new);
12239 }
12240
12241 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12242 +{
12243 + return xchg(&v->counter, new);
12244 +}
12245 +
12246 /**
12247 * __atomic_add_unless - add unless the number is already a given value
12248 * @v: pointer of type atomic_t
12249 @@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12250 */
12251 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12252 {
12253 - int c, old;
12254 + int c, old, new;
12255 c = atomic_read(v);
12256 for (;;) {
12257 - if (unlikely(c == (u)))
12258 + if (unlikely(c == u))
12259 break;
12260 - old = atomic_cmpxchg((v), c, c + (a));
12261 +
12262 + asm volatile("addl %2,%0\n"
12263 +
12264 +#ifdef CONFIG_PAX_REFCOUNT
12265 + "jno 0f\n"
12266 + "subl %2,%0\n"
12267 + "int $4\n0:\n"
12268 + _ASM_EXTABLE(0b, 0b)
12269 +#endif
12270 +
12271 + : "=r" (new)
12272 + : "0" (c), "ir" (a));
12273 +
12274 + old = atomic_cmpxchg(v, c, new);
12275 if (likely(old == c))
12276 break;
12277 c = old;
12278 @@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12279 }
12280
12281 /**
12282 + * atomic_inc_not_zero_hint - increment if not null
12283 + * @v: pointer of type atomic_t
12284 + * @hint: probable value of the atomic before the increment
12285 + *
12286 + * This version of atomic_inc_not_zero() gives a hint of probable
12287 + * value of the atomic. This helps processor to not read the memory
12288 + * before doing the atomic read/modify/write cycle, lowering
12289 + * number of bus transactions on some arches.
12290 + *
12291 + * Returns: 0 if increment was not done, 1 otherwise.
12292 + */
12293 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12294 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12295 +{
12296 + int val, c = hint, new;
12297 +
12298 + /* sanity test, should be removed by compiler if hint is a constant */
12299 + if (!hint)
12300 + return __atomic_add_unless(v, 1, 0);
12301 +
12302 + do {
12303 + asm volatile("incl %0\n"
12304 +
12305 +#ifdef CONFIG_PAX_REFCOUNT
12306 + "jno 0f\n"
12307 + "decl %0\n"
12308 + "int $4\n0:\n"
12309 + _ASM_EXTABLE(0b, 0b)
12310 +#endif
12311 +
12312 + : "=r" (new)
12313 + : "0" (c));
12314 +
12315 + val = atomic_cmpxchg(v, c, new);
12316 + if (val == c)
12317 + return 1;
12318 + c = val;
12319 + } while (c);
12320 +
12321 + return 0;
12322 +}
12323 +
12324 +/**
12325 * atomic_inc_short - increment of a short integer
12326 * @v: pointer to type int
12327 *
12328 @@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12329 #endif
12330
12331 /* These are x86-specific, used by some header files */
12332 -#define atomic_clear_mask(mask, addr) \
12333 - asm volatile(LOCK_PREFIX "andl %0,%1" \
12334 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
12335 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12336 +{
12337 + asm volatile(LOCK_PREFIX "andl %1,%0"
12338 + : "+m" (v->counter)
12339 + : "r" (~(mask))
12340 + : "memory");
12341 +}
12342
12343 -#define atomic_set_mask(mask, addr) \
12344 - asm volatile(LOCK_PREFIX "orl %0,%1" \
12345 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12346 - : "memory")
12347 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12348 +{
12349 + asm volatile(LOCK_PREFIX "andl %1,%0"
12350 + : "+m" (v->counter)
12351 + : "r" (~(mask))
12352 + : "memory");
12353 +}
12354 +
12355 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12356 +{
12357 + asm volatile(LOCK_PREFIX "orl %1,%0"
12358 + : "+m" (v->counter)
12359 + : "r" (mask)
12360 + : "memory");
12361 +}
12362 +
12363 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12364 +{
12365 + asm volatile(LOCK_PREFIX "orl %1,%0"
12366 + : "+m" (v->counter)
12367 + : "r" (mask)
12368 + : "memory");
12369 +}
12370
12371 /* Atomic operations are already serializing on x86 */
12372 #define smp_mb__before_atomic_dec() barrier()
12373 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12374 index b154de7..aadebd8 100644
12375 --- a/arch/x86/include/asm/atomic64_32.h
12376 +++ b/arch/x86/include/asm/atomic64_32.h
12377 @@ -12,6 +12,14 @@ typedef struct {
12378 u64 __aligned(8) counter;
12379 } atomic64_t;
12380
12381 +#ifdef CONFIG_PAX_REFCOUNT
12382 +typedef struct {
12383 + u64 __aligned(8) counter;
12384 +} atomic64_unchecked_t;
12385 +#else
12386 +typedef atomic64_t atomic64_unchecked_t;
12387 +#endif
12388 +
12389 #define ATOMIC64_INIT(val) { (val) }
12390
12391 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12392 @@ -37,21 +45,31 @@ typedef struct {
12393 ATOMIC64_DECL_ONE(sym##_386)
12394
12395 ATOMIC64_DECL_ONE(add_386);
12396 +ATOMIC64_DECL_ONE(add_unchecked_386);
12397 ATOMIC64_DECL_ONE(sub_386);
12398 +ATOMIC64_DECL_ONE(sub_unchecked_386);
12399 ATOMIC64_DECL_ONE(inc_386);
12400 +ATOMIC64_DECL_ONE(inc_unchecked_386);
12401 ATOMIC64_DECL_ONE(dec_386);
12402 +ATOMIC64_DECL_ONE(dec_unchecked_386);
12403 #endif
12404
12405 #define alternative_atomic64(f, out, in...) \
12406 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12407
12408 ATOMIC64_DECL(read);
12409 +ATOMIC64_DECL(read_unchecked);
12410 ATOMIC64_DECL(set);
12411 +ATOMIC64_DECL(set_unchecked);
12412 ATOMIC64_DECL(xchg);
12413 ATOMIC64_DECL(add_return);
12414 +ATOMIC64_DECL(add_return_unchecked);
12415 ATOMIC64_DECL(sub_return);
12416 +ATOMIC64_DECL(sub_return_unchecked);
12417 ATOMIC64_DECL(inc_return);
12418 +ATOMIC64_DECL(inc_return_unchecked);
12419 ATOMIC64_DECL(dec_return);
12420 +ATOMIC64_DECL(dec_return_unchecked);
12421 ATOMIC64_DECL(dec_if_positive);
12422 ATOMIC64_DECL(inc_not_zero);
12423 ATOMIC64_DECL(add_unless);
12424 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12425 }
12426
12427 /**
12428 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12429 + * @p: pointer to type atomic64_unchecked_t
12430 + * @o: expected value
12431 + * @n: new value
12432 + *
12433 + * Atomically sets @v to @n if it was equal to @o and returns
12434 + * the old value.
12435 + */
12436 +
12437 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12438 +{
12439 + return cmpxchg64(&v->counter, o, n);
12440 +}
12441 +
12442 +/**
12443 * atomic64_xchg - xchg atomic64 variable
12444 * @v: pointer to type atomic64_t
12445 * @n: value to assign
12446 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12447 }
12448
12449 /**
12450 + * atomic64_set_unchecked - set atomic64 variable
12451 + * @v: pointer to type atomic64_unchecked_t
12452 + * @n: value to assign
12453 + *
12454 + * Atomically sets the value of @v to @n.
12455 + */
12456 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12457 +{
12458 + unsigned high = (unsigned)(i >> 32);
12459 + unsigned low = (unsigned)i;
12460 + alternative_atomic64(set, /* no output */,
12461 + "S" (v), "b" (low), "c" (high)
12462 + : "eax", "edx", "memory");
12463 +}
12464 +
12465 +/**
12466 * atomic64_read - read atomic64 variable
12467 * @v: pointer to type atomic64_t
12468 *
12469 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12470 }
12471
12472 /**
12473 + * atomic64_read_unchecked - read atomic64 variable
12474 + * @v: pointer to type atomic64_unchecked_t
12475 + *
12476 + * Atomically reads the value of @v and returns it.
12477 + */
12478 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12479 +{
12480 + long long r;
12481 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12482 + return r;
12483 + }
12484 +
12485 +/**
12486 * atomic64_add_return - add and return
12487 * @i: integer value to add
12488 * @v: pointer to type atomic64_t
12489 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12490 return i;
12491 }
12492
12493 +/**
12494 + * atomic64_add_return_unchecked - add and return
12495 + * @i: integer value to add
12496 + * @v: pointer to type atomic64_unchecked_t
12497 + *
12498 + * Atomically adds @i to @v and returns @i + *@v
12499 + */
12500 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12501 +{
12502 + alternative_atomic64(add_return_unchecked,
12503 + ASM_OUTPUT2("+A" (i), "+c" (v)),
12504 + ASM_NO_INPUT_CLOBBER("memory"));
12505 + return i;
12506 +}
12507 +
12508 /*
12509 * Other variants with different arithmetic operators:
12510 */
12511 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12512 return a;
12513 }
12514
12515 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12516 +{
12517 + long long a;
12518 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
12519 + "S" (v) : "memory", "ecx");
12520 + return a;
12521 +}
12522 +
12523 static inline long long atomic64_dec_return(atomic64_t *v)
12524 {
12525 long long a;
12526 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12527 }
12528
12529 /**
12530 + * atomic64_add_unchecked - add integer to atomic64 variable
12531 + * @i: integer value to add
12532 + * @v: pointer to type atomic64_unchecked_t
12533 + *
12534 + * Atomically adds @i to @v.
12535 + */
12536 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12537 +{
12538 + __alternative_atomic64(add_unchecked, add_return_unchecked,
12539 + ASM_OUTPUT2("+A" (i), "+c" (v)),
12540 + ASM_NO_INPUT_CLOBBER("memory"));
12541 + return i;
12542 +}
12543 +
12544 +/**
12545 * atomic64_sub - subtract the atomic64 variable
12546 * @i: integer value to subtract
12547 * @v: pointer to type atomic64_t
12548 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12549 index 0e1cbfc..5623683 100644
12550 --- a/arch/x86/include/asm/atomic64_64.h
12551 +++ b/arch/x86/include/asm/atomic64_64.h
12552 @@ -18,7 +18,19 @@
12553 */
12554 static inline long atomic64_read(const atomic64_t *v)
12555 {
12556 - return (*(volatile long *)&(v)->counter);
12557 + return (*(volatile const long *)&(v)->counter);
12558 +}
12559 +
12560 +/**
12561 + * atomic64_read_unchecked - read atomic64 variable
12562 + * @v: pointer of type atomic64_unchecked_t
12563 + *
12564 + * Atomically reads the value of @v.
12565 + * Doesn't imply a read memory barrier.
12566 + */
12567 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12568 +{
12569 + return (*(volatile const long *)&(v)->counter);
12570 }
12571
12572 /**
12573 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12574 }
12575
12576 /**
12577 + * atomic64_set_unchecked - set atomic64 variable
12578 + * @v: pointer to type atomic64_unchecked_t
12579 + * @i: required value
12580 + *
12581 + * Atomically sets the value of @v to @i.
12582 + */
12583 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12584 +{
12585 + v->counter = i;
12586 +}
12587 +
12588 +/**
12589 * atomic64_add - add integer to atomic64 variable
12590 * @i: integer value to add
12591 * @v: pointer to type atomic64_t
12592 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12593 */
12594 static inline void atomic64_add(long i, atomic64_t *v)
12595 {
12596 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
12597 +
12598 +#ifdef CONFIG_PAX_REFCOUNT
12599 + "jno 0f\n"
12600 + LOCK_PREFIX "subq %1,%0\n"
12601 + "int $4\n0:\n"
12602 + _ASM_EXTABLE(0b, 0b)
12603 +#endif
12604 +
12605 + : "=m" (v->counter)
12606 + : "er" (i), "m" (v->counter));
12607 +}
12608 +
12609 +/**
12610 + * atomic64_add_unchecked - add integer to atomic64 variable
12611 + * @i: integer value to add
12612 + * @v: pointer to type atomic64_unchecked_t
12613 + *
12614 + * Atomically adds @i to @v.
12615 + */
12616 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12617 +{
12618 asm volatile(LOCK_PREFIX "addq %1,%0"
12619 : "=m" (v->counter)
12620 : "er" (i), "m" (v->counter));
12621 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12622 */
12623 static inline void atomic64_sub(long i, atomic64_t *v)
12624 {
12625 - asm volatile(LOCK_PREFIX "subq %1,%0"
12626 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
12627 +
12628 +#ifdef CONFIG_PAX_REFCOUNT
12629 + "jno 0f\n"
12630 + LOCK_PREFIX "addq %1,%0\n"
12631 + "int $4\n0:\n"
12632 + _ASM_EXTABLE(0b, 0b)
12633 +#endif
12634 +
12635 + : "=m" (v->counter)
12636 + : "er" (i), "m" (v->counter));
12637 +}
12638 +
12639 +/**
12640 + * atomic64_sub_unchecked - subtract the atomic64 variable
12641 + * @i: integer value to subtract
12642 + * @v: pointer to type atomic64_unchecked_t
12643 + *
12644 + * Atomically subtracts @i from @v.
12645 + */
12646 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12647 +{
12648 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
12649 : "=m" (v->counter)
12650 : "er" (i), "m" (v->counter));
12651 }
12652 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12653 {
12654 unsigned char c;
12655
12656 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12657 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
12658 +
12659 +#ifdef CONFIG_PAX_REFCOUNT
12660 + "jno 0f\n"
12661 + LOCK_PREFIX "addq %2,%0\n"
12662 + "int $4\n0:\n"
12663 + _ASM_EXTABLE(0b, 0b)
12664 +#endif
12665 +
12666 + "sete %1\n"
12667 : "=m" (v->counter), "=qm" (c)
12668 : "er" (i), "m" (v->counter) : "memory");
12669 return c;
12670 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12671 */
12672 static inline void atomic64_inc(atomic64_t *v)
12673 {
12674 + asm volatile(LOCK_PREFIX "incq %0\n"
12675 +
12676 +#ifdef CONFIG_PAX_REFCOUNT
12677 + "jno 0f\n"
12678 + LOCK_PREFIX "decq %0\n"
12679 + "int $4\n0:\n"
12680 + _ASM_EXTABLE(0b, 0b)
12681 +#endif
12682 +
12683 + : "=m" (v->counter)
12684 + : "m" (v->counter));
12685 +}
12686 +
12687 +/**
12688 + * atomic64_inc_unchecked - increment atomic64 variable
12689 + * @v: pointer to type atomic64_unchecked_t
12690 + *
12691 + * Atomically increments @v by 1.
12692 + */
12693 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12694 +{
12695 asm volatile(LOCK_PREFIX "incq %0"
12696 : "=m" (v->counter)
12697 : "m" (v->counter));
12698 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12699 */
12700 static inline void atomic64_dec(atomic64_t *v)
12701 {
12702 - asm volatile(LOCK_PREFIX "decq %0"
12703 + asm volatile(LOCK_PREFIX "decq %0\n"
12704 +
12705 +#ifdef CONFIG_PAX_REFCOUNT
12706 + "jno 0f\n"
12707 + LOCK_PREFIX "incq %0\n"
12708 + "int $4\n0:\n"
12709 + _ASM_EXTABLE(0b, 0b)
12710 +#endif
12711 +
12712 + : "=m" (v->counter)
12713 + : "m" (v->counter));
12714 +}
12715 +
12716 +/**
12717 + * atomic64_dec_unchecked - decrement atomic64 variable
12718 + * @v: pointer to type atomic64_t
12719 + *
12720 + * Atomically decrements @v by 1.
12721 + */
12722 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12723 +{
12724 + asm volatile(LOCK_PREFIX "decq %0\n"
12725 : "=m" (v->counter)
12726 : "m" (v->counter));
12727 }
12728 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12729 {
12730 unsigned char c;
12731
12732 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
12733 + asm volatile(LOCK_PREFIX "decq %0\n"
12734 +
12735 +#ifdef CONFIG_PAX_REFCOUNT
12736 + "jno 0f\n"
12737 + LOCK_PREFIX "incq %0\n"
12738 + "int $4\n0:\n"
12739 + _ASM_EXTABLE(0b, 0b)
12740 +#endif
12741 +
12742 + "sete %1\n"
12743 : "=m" (v->counter), "=qm" (c)
12744 : "m" (v->counter) : "memory");
12745 return c != 0;
12746 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12747 {
12748 unsigned char c;
12749
12750 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
12751 + asm volatile(LOCK_PREFIX "incq %0\n"
12752 +
12753 +#ifdef CONFIG_PAX_REFCOUNT
12754 + "jno 0f\n"
12755 + LOCK_PREFIX "decq %0\n"
12756 + "int $4\n0:\n"
12757 + _ASM_EXTABLE(0b, 0b)
12758 +#endif
12759 +
12760 + "sete %1\n"
12761 : "=m" (v->counter), "=qm" (c)
12762 : "m" (v->counter) : "memory");
12763 return c != 0;
12764 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12765 {
12766 unsigned char c;
12767
12768 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12769 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
12770 +
12771 +#ifdef CONFIG_PAX_REFCOUNT
12772 + "jno 0f\n"
12773 + LOCK_PREFIX "subq %2,%0\n"
12774 + "int $4\n0:\n"
12775 + _ASM_EXTABLE(0b, 0b)
12776 +#endif
12777 +
12778 + "sets %1\n"
12779 : "=m" (v->counter), "=qm" (c)
12780 : "er" (i), "m" (v->counter) : "memory");
12781 return c;
12782 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12783 */
12784 static inline long atomic64_add_return(long i, atomic64_t *v)
12785 {
12786 + return i + xadd_check_overflow(&v->counter, i);
12787 +}
12788 +
12789 +/**
12790 + * atomic64_add_return_unchecked - add and return
12791 + * @i: integer value to add
12792 + * @v: pointer to type atomic64_unchecked_t
12793 + *
12794 + * Atomically adds @i to @v and returns @i + @v
12795 + */
12796 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12797 +{
12798 return i + xadd(&v->counter, i);
12799 }
12800
12801 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12802 }
12803
12804 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12805 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12806 +{
12807 + return atomic64_add_return_unchecked(1, v);
12808 +}
12809 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12810
12811 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12812 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12813 return cmpxchg(&v->counter, old, new);
12814 }
12815
12816 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12817 +{
12818 + return cmpxchg(&v->counter, old, new);
12819 +}
12820 +
12821 static inline long atomic64_xchg(atomic64_t *v, long new)
12822 {
12823 return xchg(&v->counter, new);
12824 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12825 */
12826 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12827 {
12828 - long c, old;
12829 + long c, old, new;
12830 c = atomic64_read(v);
12831 for (;;) {
12832 - if (unlikely(c == (u)))
12833 + if (unlikely(c == u))
12834 break;
12835 - old = atomic64_cmpxchg((v), c, c + (a));
12836 +
12837 + asm volatile("add %2,%0\n"
12838 +
12839 +#ifdef CONFIG_PAX_REFCOUNT
12840 + "jno 0f\n"
12841 + "sub %2,%0\n"
12842 + "int $4\n0:\n"
12843 + _ASM_EXTABLE(0b, 0b)
12844 +#endif
12845 +
12846 + : "=r" (new)
12847 + : "0" (c), "ir" (a));
12848 +
12849 + old = atomic64_cmpxchg(v, c, new);
12850 if (likely(old == c))
12851 break;
12852 c = old;
12853 }
12854 - return c != (u);
12855 + return c != u;
12856 }
12857
12858 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12859 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12860 index 6dfd019..28e188d 100644
12861 --- a/arch/x86/include/asm/bitops.h
12862 +++ b/arch/x86/include/asm/bitops.h
12863 @@ -40,7 +40,7 @@
12864 * a mask operation on a byte.
12865 */
12866 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12867 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12868 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12869 #define CONST_MASK(nr) (1 << ((nr) & 7))
12870
12871 /**
12872 @@ -486,7 +486,7 @@ static inline int fls(int x)
12873 * at position 64.
12874 */
12875 #ifdef CONFIG_X86_64
12876 -static __always_inline int fls64(__u64 x)
12877 +static __always_inline long fls64(__u64 x)
12878 {
12879 int bitpos = -1;
12880 /*
12881 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12882 index 4fa687a..60f2d39 100644
12883 --- a/arch/x86/include/asm/boot.h
12884 +++ b/arch/x86/include/asm/boot.h
12885 @@ -6,10 +6,15 @@
12886 #include <uapi/asm/boot.h>
12887
12888 /* Physical address where kernel should be loaded. */
12889 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12890 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12891 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12892 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12893
12894 +#ifndef __ASSEMBLY__
12895 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
12896 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12897 +#endif
12898 +
12899 /* Minimum kernel alignment, as a power of two */
12900 #ifdef CONFIG_X86_64
12901 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12902 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12903 index 48f99f1..d78ebf9 100644
12904 --- a/arch/x86/include/asm/cache.h
12905 +++ b/arch/x86/include/asm/cache.h
12906 @@ -5,12 +5,13 @@
12907
12908 /* L1 cache line size */
12909 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12910 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12911 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12912
12913 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12914 +#define __read_only __attribute__((__section__(".data..read_only")))
12915
12916 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12917 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12918 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12919
12920 #ifdef CONFIG_X86_VSMP
12921 #ifdef CONFIG_SMP
12922 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12923 index 9863ee3..4a1f8e1 100644
12924 --- a/arch/x86/include/asm/cacheflush.h
12925 +++ b/arch/x86/include/asm/cacheflush.h
12926 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12927 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12928
12929 if (pg_flags == _PGMT_DEFAULT)
12930 - return -1;
12931 + return ~0UL;
12932 else if (pg_flags == _PGMT_WC)
12933 return _PAGE_CACHE_WC;
12934 else if (pg_flags == _PGMT_UC_MINUS)
12935 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12936 index 46fc474..b02b0f9 100644
12937 --- a/arch/x86/include/asm/checksum_32.h
12938 +++ b/arch/x86/include/asm/checksum_32.h
12939 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12940 int len, __wsum sum,
12941 int *src_err_ptr, int *dst_err_ptr);
12942
12943 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12944 + int len, __wsum sum,
12945 + int *src_err_ptr, int *dst_err_ptr);
12946 +
12947 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
12948 + int len, __wsum sum,
12949 + int *src_err_ptr, int *dst_err_ptr);
12950 +
12951 /*
12952 * Note: when you get a NULL pointer exception here this means someone
12953 * passed in an incorrect kernel address to one of these functions.
12954 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
12955 int *err_ptr)
12956 {
12957 might_sleep();
12958 - return csum_partial_copy_generic((__force void *)src, dst,
12959 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
12960 len, sum, err_ptr, NULL);
12961 }
12962
12963 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
12964 {
12965 might_sleep();
12966 if (access_ok(VERIFY_WRITE, dst, len))
12967 - return csum_partial_copy_generic(src, (__force void *)dst,
12968 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
12969 len, sum, NULL, err_ptr);
12970
12971 if (len)
12972 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
12973 index 8d871ea..c1a0dc9 100644
12974 --- a/arch/x86/include/asm/cmpxchg.h
12975 +++ b/arch/x86/include/asm/cmpxchg.h
12976 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
12977 __compiletime_error("Bad argument size for cmpxchg");
12978 extern void __xadd_wrong_size(void)
12979 __compiletime_error("Bad argument size for xadd");
12980 +extern void __xadd_check_overflow_wrong_size(void)
12981 + __compiletime_error("Bad argument size for xadd_check_overflow");
12982 extern void __add_wrong_size(void)
12983 __compiletime_error("Bad argument size for add");
12984 +extern void __add_check_overflow_wrong_size(void)
12985 + __compiletime_error("Bad argument size for add_check_overflow");
12986
12987 /*
12988 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
12989 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
12990 __ret; \
12991 })
12992
12993 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
12994 + ({ \
12995 + __typeof__ (*(ptr)) __ret = (arg); \
12996 + switch (sizeof(*(ptr))) { \
12997 + case __X86_CASE_L: \
12998 + asm volatile (lock #op "l %0, %1\n" \
12999 + "jno 0f\n" \
13000 + "mov %0,%1\n" \
13001 + "int $4\n0:\n" \
13002 + _ASM_EXTABLE(0b, 0b) \
13003 + : "+r" (__ret), "+m" (*(ptr)) \
13004 + : : "memory", "cc"); \
13005 + break; \
13006 + case __X86_CASE_Q: \
13007 + asm volatile (lock #op "q %q0, %1\n" \
13008 + "jno 0f\n" \
13009 + "mov %0,%1\n" \
13010 + "int $4\n0:\n" \
13011 + _ASM_EXTABLE(0b, 0b) \
13012 + : "+r" (__ret), "+m" (*(ptr)) \
13013 + : : "memory", "cc"); \
13014 + break; \
13015 + default: \
13016 + __ ## op ## _check_overflow_wrong_size(); \
13017 + } \
13018 + __ret; \
13019 + })
13020 +
13021 /*
13022 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13023 * Since this is generally used to protect other memory information, we
13024 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13025 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13026 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13027
13028 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13029 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13030 +
13031 #define __add(ptr, inc, lock) \
13032 ({ \
13033 __typeof__ (*(ptr)) __ret = (inc); \
13034 diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13035 index 59c6c40..5e0b22c 100644
13036 --- a/arch/x86/include/asm/compat.h
13037 +++ b/arch/x86/include/asm/compat.h
13038 @@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13039 typedef u32 compat_uint_t;
13040 typedef u32 compat_ulong_t;
13041 typedef u64 __attribute__((aligned(4))) compat_u64;
13042 -typedef u32 compat_uptr_t;
13043 +typedef u32 __user compat_uptr_t;
13044
13045 struct compat_timespec {
13046 compat_time_t tv_sec;
13047 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13048 index 93fe929..90858b7 100644
13049 --- a/arch/x86/include/asm/cpufeature.h
13050 +++ b/arch/x86/include/asm/cpufeature.h
13051 @@ -207,7 +207,7 @@
13052 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13053 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13054 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13055 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13056 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13057 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13058 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13059 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13060 @@ -377,7 +377,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13061 ".section .discard,\"aw\",@progbits\n"
13062 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13063 ".previous\n"
13064 - ".section .altinstr_replacement,\"ax\"\n"
13065 + ".section .altinstr_replacement,\"a\"\n"
13066 "3: movb $1,%0\n"
13067 "4:\n"
13068 ".previous\n"
13069 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13070 index 8bf1c06..b6ae785 100644
13071 --- a/arch/x86/include/asm/desc.h
13072 +++ b/arch/x86/include/asm/desc.h
13073 @@ -4,6 +4,7 @@
13074 #include <asm/desc_defs.h>
13075 #include <asm/ldt.h>
13076 #include <asm/mmu.h>
13077 +#include <asm/pgtable.h>
13078
13079 #include <linux/smp.h>
13080 #include <linux/percpu.h>
13081 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13082
13083 desc->type = (info->read_exec_only ^ 1) << 1;
13084 desc->type |= info->contents << 2;
13085 + desc->type |= info->seg_not_present ^ 1;
13086
13087 desc->s = 1;
13088 desc->dpl = 0x3;
13089 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13090 }
13091
13092 extern struct desc_ptr idt_descr;
13093 -extern gate_desc idt_table[];
13094 extern struct desc_ptr nmi_idt_descr;
13095 -extern gate_desc nmi_idt_table[];
13096 -
13097 -struct gdt_page {
13098 - struct desc_struct gdt[GDT_ENTRIES];
13099 -} __attribute__((aligned(PAGE_SIZE)));
13100 -
13101 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13102 +extern gate_desc idt_table[256];
13103 +extern gate_desc nmi_idt_table[256];
13104
13105 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13106 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13107 {
13108 - return per_cpu(gdt_page, cpu).gdt;
13109 + return cpu_gdt_table[cpu];
13110 }
13111
13112 #ifdef CONFIG_X86_64
13113 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13114 unsigned long base, unsigned dpl, unsigned flags,
13115 unsigned short seg)
13116 {
13117 - gate->a = (seg << 16) | (base & 0xffff);
13118 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13119 + gate->gate.offset_low = base;
13120 + gate->gate.seg = seg;
13121 + gate->gate.reserved = 0;
13122 + gate->gate.type = type;
13123 + gate->gate.s = 0;
13124 + gate->gate.dpl = dpl;
13125 + gate->gate.p = 1;
13126 + gate->gate.offset_high = base >> 16;
13127 }
13128
13129 #endif
13130 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13131
13132 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13133 {
13134 + pax_open_kernel();
13135 memcpy(&idt[entry], gate, sizeof(*gate));
13136 + pax_close_kernel();
13137 }
13138
13139 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13140 {
13141 + pax_open_kernel();
13142 memcpy(&ldt[entry], desc, 8);
13143 + pax_close_kernel();
13144 }
13145
13146 static inline void
13147 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13148 default: size = sizeof(*gdt); break;
13149 }
13150
13151 + pax_open_kernel();
13152 memcpy(&gdt[entry], desc, size);
13153 + pax_close_kernel();
13154 }
13155
13156 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13157 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13158
13159 static inline void native_load_tr_desc(void)
13160 {
13161 + pax_open_kernel();
13162 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13163 + pax_close_kernel();
13164 }
13165
13166 static inline void native_load_gdt(const struct desc_ptr *dtr)
13167 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13168 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13169 unsigned int i;
13170
13171 + pax_open_kernel();
13172 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13173 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13174 + pax_close_kernel();
13175 }
13176
13177 #define _LDT_empty(info) \
13178 @@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13179 preempt_enable();
13180 }
13181
13182 -static inline unsigned long get_desc_base(const struct desc_struct *desc)
13183 +static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13184 {
13185 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13186 }
13187 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13188 }
13189
13190 #ifdef CONFIG_X86_64
13191 -static inline void set_nmi_gate(int gate, void *addr)
13192 +static inline void set_nmi_gate(int gate, const void *addr)
13193 {
13194 gate_desc s;
13195
13196 @@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13197 }
13198 #endif
13199
13200 -static inline void _set_gate(int gate, unsigned type, void *addr,
13201 +static inline void _set_gate(int gate, unsigned type, const void *addr,
13202 unsigned dpl, unsigned ist, unsigned seg)
13203 {
13204 gate_desc s;
13205 @@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13206 * Pentium F0 0F bugfix can have resulted in the mapped
13207 * IDT being write-protected.
13208 */
13209 -static inline void set_intr_gate(unsigned int n, void *addr)
13210 +static inline void set_intr_gate(unsigned int n, const void *addr)
13211 {
13212 BUG_ON((unsigned)n > 0xFF);
13213 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13214 @@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13215 /*
13216 * This routine sets up an interrupt gate at directory privilege level 3.
13217 */
13218 -static inline void set_system_intr_gate(unsigned int n, void *addr)
13219 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
13220 {
13221 BUG_ON((unsigned)n > 0xFF);
13222 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13223 }
13224
13225 -static inline void set_system_trap_gate(unsigned int n, void *addr)
13226 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
13227 {
13228 BUG_ON((unsigned)n > 0xFF);
13229 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13230 }
13231
13232 -static inline void set_trap_gate(unsigned int n, void *addr)
13233 +static inline void set_trap_gate(unsigned int n, const void *addr)
13234 {
13235 BUG_ON((unsigned)n > 0xFF);
13236 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13237 @@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13238 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13239 {
13240 BUG_ON((unsigned)n > 0xFF);
13241 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13242 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13243 }
13244
13245 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13246 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13247 {
13248 BUG_ON((unsigned)n > 0xFF);
13249 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13250 }
13251
13252 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13253 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13254 {
13255 BUG_ON((unsigned)n > 0xFF);
13256 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13257 }
13258
13259 +#ifdef CONFIG_X86_32
13260 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13261 +{
13262 + struct desc_struct d;
13263 +
13264 + if (likely(limit))
13265 + limit = (limit - 1UL) >> PAGE_SHIFT;
13266 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
13267 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13268 +}
13269 +#endif
13270 +
13271 #endif /* _ASM_X86_DESC_H */
13272 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13273 index 278441f..b95a174 100644
13274 --- a/arch/x86/include/asm/desc_defs.h
13275 +++ b/arch/x86/include/asm/desc_defs.h
13276 @@ -31,6 +31,12 @@ struct desc_struct {
13277 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13278 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13279 };
13280 + struct {
13281 + u16 offset_low;
13282 + u16 seg;
13283 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13284 + unsigned offset_high: 16;
13285 + } gate;
13286 };
13287 } __attribute__((packed));
13288
13289 diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13290 index ced283a..ffe04cc 100644
13291 --- a/arch/x86/include/asm/div64.h
13292 +++ b/arch/x86/include/asm/div64.h
13293 @@ -39,7 +39,7 @@
13294 __mod; \
13295 })
13296
13297 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13298 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13299 {
13300 union {
13301 u64 v64;
13302 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13303 index 9c999c1..3860cb8 100644
13304 --- a/arch/x86/include/asm/elf.h
13305 +++ b/arch/x86/include/asm/elf.h
13306 @@ -243,7 +243,25 @@ extern int force_personality32;
13307 the loader. We need to make sure that it is out of the way of the program
13308 that it will "exec", and that there is sufficient room for the brk. */
13309
13310 +#ifdef CONFIG_PAX_SEGMEXEC
13311 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13312 +#else
13313 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13314 +#endif
13315 +
13316 +#ifdef CONFIG_PAX_ASLR
13317 +#ifdef CONFIG_X86_32
13318 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13319 +
13320 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13321 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13322 +#else
13323 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
13324 +
13325 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13326 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13327 +#endif
13328 +#endif
13329
13330 /* This yields a mask that user programs can use to figure out what
13331 instruction set this CPU supports. This could be done in user space,
13332 @@ -296,16 +314,12 @@ do { \
13333
13334 #define ARCH_DLINFO \
13335 do { \
13336 - if (vdso_enabled) \
13337 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13338 - (unsigned long)current->mm->context.vdso); \
13339 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13340 } while (0)
13341
13342 #define ARCH_DLINFO_X32 \
13343 do { \
13344 - if (vdso_enabled) \
13345 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13346 - (unsigned long)current->mm->context.vdso); \
13347 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13348 } while (0)
13349
13350 #define AT_SYSINFO 32
13351 @@ -320,7 +334,7 @@ else \
13352
13353 #endif /* !CONFIG_X86_32 */
13354
13355 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13356 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13357
13358 #define VDSO_ENTRY \
13359 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13360 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13361 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13362 #define compat_arch_setup_additional_pages syscall32_setup_pages
13363
13364 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13365 -#define arch_randomize_brk arch_randomize_brk
13366 -
13367 /*
13368 * True on X86_32 or when emulating IA32 on X86_64
13369 */
13370 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13371 index 75ce3f4..882e801 100644
13372 --- a/arch/x86/include/asm/emergency-restart.h
13373 +++ b/arch/x86/include/asm/emergency-restart.h
13374 @@ -13,6 +13,6 @@ enum reboot_type {
13375
13376 extern enum reboot_type reboot_type;
13377
13378 -extern void machine_emergency_restart(void);
13379 +extern void machine_emergency_restart(void) __noreturn;
13380
13381 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13382 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13383 index e25cc33..425d099 100644
13384 --- a/arch/x86/include/asm/fpu-internal.h
13385 +++ b/arch/x86/include/asm/fpu-internal.h
13386 @@ -127,7 +127,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13387 ({ \
13388 int err; \
13389 asm volatile(ASM_STAC "\n" \
13390 - "1:" #insn "\n\t" \
13391 + "1:" \
13392 + __copyuser_seg \
13393 + #insn "\n\t" \
13394 "2: " ASM_CLAC "\n" \
13395 ".section .fixup,\"ax\"\n" \
13396 "3: movl $-1,%[err]\n" \
13397 @@ -300,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13398 "emms\n\t" /* clear stack tags */
13399 "fildl %P[addr]", /* set F?P to defined value */
13400 X86_FEATURE_FXSAVE_LEAK,
13401 - [addr] "m" (tsk->thread.fpu.has_fpu));
13402 + [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13403
13404 return fpu_restore_checking(&tsk->thread.fpu);
13405 }
13406 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13407 index be27ba1..8f13ff9 100644
13408 --- a/arch/x86/include/asm/futex.h
13409 +++ b/arch/x86/include/asm/futex.h
13410 @@ -12,6 +12,7 @@
13411 #include <asm/smap.h>
13412
13413 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13414 + typecheck(u32 __user *, uaddr); \
13415 asm volatile("\t" ASM_STAC "\n" \
13416 "1:\t" insn "\n" \
13417 "2:\t" ASM_CLAC "\n" \
13418 @@ -20,15 +21,16 @@
13419 "\tjmp\t2b\n" \
13420 "\t.previous\n" \
13421 _ASM_EXTABLE(1b, 3b) \
13422 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13423 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13424 : "i" (-EFAULT), "0" (oparg), "1" (0))
13425
13426 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13427 + typecheck(u32 __user *, uaddr); \
13428 asm volatile("\t" ASM_STAC "\n" \
13429 "1:\tmovl %2, %0\n" \
13430 "\tmovl\t%0, %3\n" \
13431 "\t" insn "\n" \
13432 - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13433 + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13434 "\tjnz\t1b\n" \
13435 "3:\t" ASM_CLAC "\n" \
13436 "\t.section .fixup,\"ax\"\n" \
13437 @@ -38,7 +40,7 @@
13438 _ASM_EXTABLE(1b, 4b) \
13439 _ASM_EXTABLE(2b, 4b) \
13440 : "=&a" (oldval), "=&r" (ret), \
13441 - "+m" (*uaddr), "=&r" (tem) \
13442 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13443 : "r" (oparg), "i" (-EFAULT), "1" (0))
13444
13445 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13446 @@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13447
13448 switch (op) {
13449 case FUTEX_OP_SET:
13450 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13451 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13452 break;
13453 case FUTEX_OP_ADD:
13454 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13455 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13456 uaddr, oparg);
13457 break;
13458 case FUTEX_OP_OR:
13459 @@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13460 return -EFAULT;
13461
13462 asm volatile("\t" ASM_STAC "\n"
13463 - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13464 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13465 "2:\t" ASM_CLAC "\n"
13466 "\t.section .fixup, \"ax\"\n"
13467 "3:\tmov %3, %0\n"
13468 "\tjmp 2b\n"
13469 "\t.previous\n"
13470 _ASM_EXTABLE(1b, 3b)
13471 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13472 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13473 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13474 : "memory"
13475 );
13476 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13477 index 10a78c3..cc77143 100644
13478 --- a/arch/x86/include/asm/hw_irq.h
13479 +++ b/arch/x86/include/asm/hw_irq.h
13480 @@ -147,8 +147,8 @@ extern void setup_ioapic_dest(void);
13481 extern void enable_IO_APIC(void);
13482
13483 /* Statistics */
13484 -extern atomic_t irq_err_count;
13485 -extern atomic_t irq_mis_count;
13486 +extern atomic_unchecked_t irq_err_count;
13487 +extern atomic_unchecked_t irq_mis_count;
13488
13489 /* EISA */
13490 extern void eisa_set_level_irq(unsigned int irq);
13491 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13492 index a203659..9889f1c 100644
13493 --- a/arch/x86/include/asm/i8259.h
13494 +++ b/arch/x86/include/asm/i8259.h
13495 @@ -62,7 +62,7 @@ struct legacy_pic {
13496 void (*init)(int auto_eoi);
13497 int (*irq_pending)(unsigned int irq);
13498 void (*make_irq)(unsigned int irq);
13499 -};
13500 +} __do_const;
13501
13502 extern struct legacy_pic *legacy_pic;
13503 extern struct legacy_pic null_legacy_pic;
13504 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13505 index d8e8eef..1765f78 100644
13506 --- a/arch/x86/include/asm/io.h
13507 +++ b/arch/x86/include/asm/io.h
13508 @@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13509 "m" (*(volatile type __force *)addr) barrier); }
13510
13511 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13512 -build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13513 -build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13514 +build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13515 +build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13516
13517 build_mmio_read(__readb, "b", unsigned char, "=q", )
13518 -build_mmio_read(__readw, "w", unsigned short, "=r", )
13519 -build_mmio_read(__readl, "l", unsigned int, "=r", )
13520 +build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13521 +build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13522
13523 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13524 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13525 @@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13526 return ioremap_nocache(offset, size);
13527 }
13528
13529 -extern void iounmap(volatile void __iomem *addr);
13530 +extern void iounmap(const volatile void __iomem *addr);
13531
13532 extern void set_iounmap_nonlazy(void);
13533
13534 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13535
13536 #include <linux/vmalloc.h>
13537
13538 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13539 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13540 +{
13541 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13542 +}
13543 +
13544 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13545 +{
13546 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13547 +}
13548 +
13549 /*
13550 * Convert a virtual cached pointer to an uncached pointer
13551 */
13552 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13553 index bba3cf8..06bc8da 100644
13554 --- a/arch/x86/include/asm/irqflags.h
13555 +++ b/arch/x86/include/asm/irqflags.h
13556 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13557 sti; \
13558 sysexit
13559
13560 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
13561 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13562 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
13563 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13564 +
13565 #else
13566 #define INTERRUPT_RETURN iret
13567 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13568 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13569 index 5a6d287..f815789 100644
13570 --- a/arch/x86/include/asm/kprobes.h
13571 +++ b/arch/x86/include/asm/kprobes.h
13572 @@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13573 #define RELATIVEJUMP_SIZE 5
13574 #define RELATIVECALL_OPCODE 0xe8
13575 #define RELATIVE_ADDR_SIZE 4
13576 -#define MAX_STACK_SIZE 64
13577 -#define MIN_STACK_SIZE(ADDR) \
13578 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13579 - THREAD_SIZE - (unsigned long)(ADDR))) \
13580 - ? (MAX_STACK_SIZE) \
13581 - : (((unsigned long)current_thread_info()) + \
13582 - THREAD_SIZE - (unsigned long)(ADDR)))
13583 +#define MAX_STACK_SIZE 64UL
13584 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13585
13586 #define flush_insn_slot(p) do { } while (0)
13587
13588 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13589 index 2d89e39..baee879 100644
13590 --- a/arch/x86/include/asm/local.h
13591 +++ b/arch/x86/include/asm/local.h
13592 @@ -10,33 +10,97 @@ typedef struct {
13593 atomic_long_t a;
13594 } local_t;
13595
13596 +typedef struct {
13597 + atomic_long_unchecked_t a;
13598 +} local_unchecked_t;
13599 +
13600 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13601
13602 #define local_read(l) atomic_long_read(&(l)->a)
13603 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13604 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13605 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13606
13607 static inline void local_inc(local_t *l)
13608 {
13609 - asm volatile(_ASM_INC "%0"
13610 + asm volatile(_ASM_INC "%0\n"
13611 +
13612 +#ifdef CONFIG_PAX_REFCOUNT
13613 + "jno 0f\n"
13614 + _ASM_DEC "%0\n"
13615 + "int $4\n0:\n"
13616 + _ASM_EXTABLE(0b, 0b)
13617 +#endif
13618 +
13619 + : "+m" (l->a.counter));
13620 +}
13621 +
13622 +static inline void local_inc_unchecked(local_unchecked_t *l)
13623 +{
13624 + asm volatile(_ASM_INC "%0\n"
13625 : "+m" (l->a.counter));
13626 }
13627
13628 static inline void local_dec(local_t *l)
13629 {
13630 - asm volatile(_ASM_DEC "%0"
13631 + asm volatile(_ASM_DEC "%0\n"
13632 +
13633 +#ifdef CONFIG_PAX_REFCOUNT
13634 + "jno 0f\n"
13635 + _ASM_INC "%0\n"
13636 + "int $4\n0:\n"
13637 + _ASM_EXTABLE(0b, 0b)
13638 +#endif
13639 +
13640 + : "+m" (l->a.counter));
13641 +}
13642 +
13643 +static inline void local_dec_unchecked(local_unchecked_t *l)
13644 +{
13645 + asm volatile(_ASM_DEC "%0\n"
13646 : "+m" (l->a.counter));
13647 }
13648
13649 static inline void local_add(long i, local_t *l)
13650 {
13651 - asm volatile(_ASM_ADD "%1,%0"
13652 + asm volatile(_ASM_ADD "%1,%0\n"
13653 +
13654 +#ifdef CONFIG_PAX_REFCOUNT
13655 + "jno 0f\n"
13656 + _ASM_SUB "%1,%0\n"
13657 + "int $4\n0:\n"
13658 + _ASM_EXTABLE(0b, 0b)
13659 +#endif
13660 +
13661 + : "+m" (l->a.counter)
13662 + : "ir" (i));
13663 +}
13664 +
13665 +static inline void local_add_unchecked(long i, local_unchecked_t *l)
13666 +{
13667 + asm volatile(_ASM_ADD "%1,%0\n"
13668 : "+m" (l->a.counter)
13669 : "ir" (i));
13670 }
13671
13672 static inline void local_sub(long i, local_t *l)
13673 {
13674 - asm volatile(_ASM_SUB "%1,%0"
13675 + asm volatile(_ASM_SUB "%1,%0\n"
13676 +
13677 +#ifdef CONFIG_PAX_REFCOUNT
13678 + "jno 0f\n"
13679 + _ASM_ADD "%1,%0\n"
13680 + "int $4\n0:\n"
13681 + _ASM_EXTABLE(0b, 0b)
13682 +#endif
13683 +
13684 + : "+m" (l->a.counter)
13685 + : "ir" (i));
13686 +}
13687 +
13688 +static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13689 +{
13690 + asm volatile(_ASM_SUB "%1,%0\n"
13691 : "+m" (l->a.counter)
13692 : "ir" (i));
13693 }
13694 @@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13695 {
13696 unsigned char c;
13697
13698 - asm volatile(_ASM_SUB "%2,%0; sete %1"
13699 + asm volatile(_ASM_SUB "%2,%0\n"
13700 +
13701 +#ifdef CONFIG_PAX_REFCOUNT
13702 + "jno 0f\n"
13703 + _ASM_ADD "%2,%0\n"
13704 + "int $4\n0:\n"
13705 + _ASM_EXTABLE(0b, 0b)
13706 +#endif
13707 +
13708 + "sete %1\n"
13709 : "+m" (l->a.counter), "=qm" (c)
13710 : "ir" (i) : "memory");
13711 return c;
13712 @@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13713 {
13714 unsigned char c;
13715
13716 - asm volatile(_ASM_DEC "%0; sete %1"
13717 + asm volatile(_ASM_DEC "%0\n"
13718 +
13719 +#ifdef CONFIG_PAX_REFCOUNT
13720 + "jno 0f\n"
13721 + _ASM_INC "%0\n"
13722 + "int $4\n0:\n"
13723 + _ASM_EXTABLE(0b, 0b)
13724 +#endif
13725 +
13726 + "sete %1\n"
13727 : "+m" (l->a.counter), "=qm" (c)
13728 : : "memory");
13729 return c != 0;
13730 @@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13731 {
13732 unsigned char c;
13733
13734 - asm volatile(_ASM_INC "%0; sete %1"
13735 + asm volatile(_ASM_INC "%0\n"
13736 +
13737 +#ifdef CONFIG_PAX_REFCOUNT
13738 + "jno 0f\n"
13739 + _ASM_DEC "%0\n"
13740 + "int $4\n0:\n"
13741 + _ASM_EXTABLE(0b, 0b)
13742 +#endif
13743 +
13744 + "sete %1\n"
13745 : "+m" (l->a.counter), "=qm" (c)
13746 : : "memory");
13747 return c != 0;
13748 @@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13749 {
13750 unsigned char c;
13751
13752 - asm volatile(_ASM_ADD "%2,%0; sets %1"
13753 + asm volatile(_ASM_ADD "%2,%0\n"
13754 +
13755 +#ifdef CONFIG_PAX_REFCOUNT
13756 + "jno 0f\n"
13757 + _ASM_SUB "%2,%0\n"
13758 + "int $4\n0:\n"
13759 + _ASM_EXTABLE(0b, 0b)
13760 +#endif
13761 +
13762 + "sets %1\n"
13763 : "+m" (l->a.counter), "=qm" (c)
13764 : "ir" (i) : "memory");
13765 return c;
13766 @@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13767 static inline long local_add_return(long i, local_t *l)
13768 {
13769 long __i = i;
13770 + asm volatile(_ASM_XADD "%0, %1\n"
13771 +
13772 +#ifdef CONFIG_PAX_REFCOUNT
13773 + "jno 0f\n"
13774 + _ASM_MOV "%0,%1\n"
13775 + "int $4\n0:\n"
13776 + _ASM_EXTABLE(0b, 0b)
13777 +#endif
13778 +
13779 + : "+r" (i), "+m" (l->a.counter)
13780 + : : "memory");
13781 + return i + __i;
13782 +}
13783 +
13784 +/**
13785 + * local_add_return_unchecked - add and return
13786 + * @i: integer value to add
13787 + * @l: pointer to type local_unchecked_t
13788 + *
13789 + * Atomically adds @i to @l and returns @i + @l
13790 + */
13791 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13792 +{
13793 + long __i = i;
13794 asm volatile(_ASM_XADD "%0, %1;"
13795 : "+r" (i), "+m" (l->a.counter)
13796 : : "memory");
13797 @@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13798
13799 #define local_cmpxchg(l, o, n) \
13800 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13801 +#define local_cmpxchg_unchecked(l, o, n) \
13802 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
13803 /* Always has a lock prefix */
13804 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13805
13806 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13807 new file mode 100644
13808 index 0000000..2bfd3ba
13809 --- /dev/null
13810 +++ b/arch/x86/include/asm/mman.h
13811 @@ -0,0 +1,15 @@
13812 +#ifndef _X86_MMAN_H
13813 +#define _X86_MMAN_H
13814 +
13815 +#include <uapi/asm/mman.h>
13816 +
13817 +#ifdef __KERNEL__
13818 +#ifndef __ASSEMBLY__
13819 +#ifdef CONFIG_X86_32
13820 +#define arch_mmap_check i386_mmap_check
13821 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13822 +#endif
13823 +#endif
13824 +#endif
13825 +
13826 +#endif /* X86_MMAN_H */
13827 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13828 index 5f55e69..e20bfb1 100644
13829 --- a/arch/x86/include/asm/mmu.h
13830 +++ b/arch/x86/include/asm/mmu.h
13831 @@ -9,7 +9,7 @@
13832 * we put the segment information here.
13833 */
13834 typedef struct {
13835 - void *ldt;
13836 + struct desc_struct *ldt;
13837 int size;
13838
13839 #ifdef CONFIG_X86_64
13840 @@ -18,7 +18,19 @@ typedef struct {
13841 #endif
13842
13843 struct mutex lock;
13844 - void *vdso;
13845 + unsigned long vdso;
13846 +
13847 +#ifdef CONFIG_X86_32
13848 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13849 + unsigned long user_cs_base;
13850 + unsigned long user_cs_limit;
13851 +
13852 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13853 + cpumask_t cpu_user_cs_mask;
13854 +#endif
13855 +
13856 +#endif
13857 +#endif
13858 } mm_context_t;
13859
13860 #ifdef CONFIG_SMP
13861 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13862 index cdbf367..adb37ac 100644
13863 --- a/arch/x86/include/asm/mmu_context.h
13864 +++ b/arch/x86/include/asm/mmu_context.h
13865 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13866
13867 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13868 {
13869 +
13870 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13871 + unsigned int i;
13872 + pgd_t *pgd;
13873 +
13874 + pax_open_kernel();
13875 + pgd = get_cpu_pgd(smp_processor_id());
13876 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13877 + set_pgd_batched(pgd+i, native_make_pgd(0));
13878 + pax_close_kernel();
13879 +#endif
13880 +
13881 #ifdef CONFIG_SMP
13882 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13883 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13884 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13885 struct task_struct *tsk)
13886 {
13887 unsigned cpu = smp_processor_id();
13888 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13889 + int tlbstate = TLBSTATE_OK;
13890 +#endif
13891
13892 if (likely(prev != next)) {
13893 #ifdef CONFIG_SMP
13894 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13895 + tlbstate = this_cpu_read(cpu_tlbstate.state);
13896 +#endif
13897 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13898 this_cpu_write(cpu_tlbstate.active_mm, next);
13899 #endif
13900 cpumask_set_cpu(cpu, mm_cpumask(next));
13901
13902 /* Re-load page tables */
13903 +#ifdef CONFIG_PAX_PER_CPU_PGD
13904 + pax_open_kernel();
13905 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13906 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13907 + pax_close_kernel();
13908 + load_cr3(get_cpu_pgd(cpu));
13909 +#else
13910 load_cr3(next->pgd);
13911 +#endif
13912
13913 /* stop flush ipis for the previous mm */
13914 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13915 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13916 */
13917 if (unlikely(prev->context.ldt != next->context.ldt))
13918 load_LDT_nolock(&next->context);
13919 - }
13920 +
13921 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13922 + if (!(__supported_pte_mask & _PAGE_NX)) {
13923 + smp_mb__before_clear_bit();
13924 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13925 + smp_mb__after_clear_bit();
13926 + cpu_set(cpu, next->context.cpu_user_cs_mask);
13927 + }
13928 +#endif
13929 +
13930 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13931 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13932 + prev->context.user_cs_limit != next->context.user_cs_limit))
13933 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13934 #ifdef CONFIG_SMP
13935 + else if (unlikely(tlbstate != TLBSTATE_OK))
13936 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13937 +#endif
13938 +#endif
13939 +
13940 + }
13941 else {
13942 +
13943 +#ifdef CONFIG_PAX_PER_CPU_PGD
13944 + pax_open_kernel();
13945 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13946 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13947 + pax_close_kernel();
13948 + load_cr3(get_cpu_pgd(cpu));
13949 +#endif
13950 +
13951 +#ifdef CONFIG_SMP
13952 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13953 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
13954
13955 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13956 * tlb flush IPI delivery. We must reload CR3
13957 * to make sure to use no freed page tables.
13958 */
13959 +
13960 +#ifndef CONFIG_PAX_PER_CPU_PGD
13961 load_cr3(next->pgd);
13962 +#endif
13963 +
13964 load_LDT_nolock(&next->context);
13965 +
13966 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
13967 + if (!(__supported_pte_mask & _PAGE_NX))
13968 + cpu_set(cpu, next->context.cpu_user_cs_mask);
13969 +#endif
13970 +
13971 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13972 +#ifdef CONFIG_PAX_PAGEEXEC
13973 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
13974 +#endif
13975 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13976 +#endif
13977 +
13978 }
13979 +#endif
13980 }
13981 -#endif
13982 }
13983
13984 #define activate_mm(prev, next) \
13985 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
13986 index e3b7819..b257c64 100644
13987 --- a/arch/x86/include/asm/module.h
13988 +++ b/arch/x86/include/asm/module.h
13989 @@ -5,6 +5,7 @@
13990
13991 #ifdef CONFIG_X86_64
13992 /* X86_64 does not define MODULE_PROC_FAMILY */
13993 +#define MODULE_PROC_FAMILY ""
13994 #elif defined CONFIG_M486
13995 #define MODULE_PROC_FAMILY "486 "
13996 #elif defined CONFIG_M586
13997 @@ -57,8 +58,20 @@
13998 #error unknown processor family
13999 #endif
14000
14001 -#ifdef CONFIG_X86_32
14002 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14003 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14004 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14005 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14006 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14007 +#else
14008 +#define MODULE_PAX_KERNEXEC ""
14009 #endif
14010
14011 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14012 +#define MODULE_PAX_UDEREF "UDEREF "
14013 +#else
14014 +#define MODULE_PAX_UDEREF ""
14015 +#endif
14016 +
14017 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14018 +
14019 #endif /* _ASM_X86_MODULE_H */
14020 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14021 index c0fa356..07a498a 100644
14022 --- a/arch/x86/include/asm/nmi.h
14023 +++ b/arch/x86/include/asm/nmi.h
14024 @@ -42,11 +42,11 @@ struct nmiaction {
14025 nmi_handler_t handler;
14026 unsigned long flags;
14027 const char *name;
14028 -};
14029 +} __do_const;
14030
14031 #define register_nmi_handler(t, fn, fg, n, init...) \
14032 ({ \
14033 - static struct nmiaction init fn##_na = { \
14034 + static const struct nmiaction init fn##_na = { \
14035 .handler = (fn), \
14036 .name = (n), \
14037 .flags = (fg), \
14038 @@ -54,7 +54,7 @@ struct nmiaction {
14039 __register_nmi_handler((t), &fn##_na); \
14040 })
14041
14042 -int __register_nmi_handler(unsigned int, struct nmiaction *);
14043 +int __register_nmi_handler(unsigned int, const struct nmiaction *);
14044
14045 void unregister_nmi_handler(unsigned int, const char *);
14046
14047 diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
14048 index 0f1ddee..e56bec9 100644
14049 --- a/arch/x86/include/asm/page_64.h
14050 +++ b/arch/x86/include/asm/page_64.h
14051 @@ -7,7 +7,7 @@
14052
14053 /* duplicated to the one in bootmem.h */
14054 extern unsigned long max_pfn;
14055 -extern unsigned long phys_base;
14056 +extern const unsigned long phys_base;
14057
14058 static inline unsigned long __phys_addr_nodebug(unsigned long x)
14059 {
14060 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14061 index 7361e47..16dc226 100644
14062 --- a/arch/x86/include/asm/paravirt.h
14063 +++ b/arch/x86/include/asm/paravirt.h
14064 @@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
14065 return (pmd_t) { ret };
14066 }
14067
14068 -static inline pmdval_t pmd_val(pmd_t pmd)
14069 +static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14070 {
14071 pmdval_t ret;
14072
14073 @@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14074 val);
14075 }
14076
14077 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14078 +{
14079 + pgdval_t val = native_pgd_val(pgd);
14080 +
14081 + if (sizeof(pgdval_t) > sizeof(long))
14082 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14083 + val, (u64)val >> 32);
14084 + else
14085 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14086 + val);
14087 +}
14088 +
14089 static inline void pgd_clear(pgd_t *pgdp)
14090 {
14091 set_pgd(pgdp, __pgd(0));
14092 @@ -714,6 +726,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14093 pv_mmu_ops.set_fixmap(idx, phys, flags);
14094 }
14095
14096 +#ifdef CONFIG_PAX_KERNEXEC
14097 +static inline unsigned long pax_open_kernel(void)
14098 +{
14099 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14100 +}
14101 +
14102 +static inline unsigned long pax_close_kernel(void)
14103 +{
14104 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14105 +}
14106 +#else
14107 +static inline unsigned long pax_open_kernel(void) { return 0; }
14108 +static inline unsigned long pax_close_kernel(void) { return 0; }
14109 +#endif
14110 +
14111 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14112
14113 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14114 @@ -930,7 +957,7 @@ extern void default_banner(void);
14115
14116 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14117 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14118 -#define PARA_INDIRECT(addr) *%cs:addr
14119 +#define PARA_INDIRECT(addr) *%ss:addr
14120 #endif
14121
14122 #define INTERRUPT_RETURN \
14123 @@ -1005,6 +1032,21 @@ extern void default_banner(void);
14124 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14125 CLBR_NONE, \
14126 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14127 +
14128 +#define GET_CR0_INTO_RDI \
14129 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14130 + mov %rax,%rdi
14131 +
14132 +#define SET_RDI_INTO_CR0 \
14133 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14134 +
14135 +#define GET_CR3_INTO_RDI \
14136 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14137 + mov %rax,%rdi
14138 +
14139 +#define SET_RDI_INTO_CR3 \
14140 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14141 +
14142 #endif /* CONFIG_X86_32 */
14143
14144 #endif /* __ASSEMBLY__ */
14145 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14146 index b3b0ec1..b1cd3eb 100644
14147 --- a/arch/x86/include/asm/paravirt_types.h
14148 +++ b/arch/x86/include/asm/paravirt_types.h
14149 @@ -84,7 +84,7 @@ struct pv_init_ops {
14150 */
14151 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14152 unsigned long addr, unsigned len);
14153 -};
14154 +} __no_const;
14155
14156
14157 struct pv_lazy_ops {
14158 @@ -98,7 +98,7 @@ struct pv_time_ops {
14159 unsigned long long (*sched_clock)(void);
14160 unsigned long long (*steal_clock)(int cpu);
14161 unsigned long (*get_tsc_khz)(void);
14162 -};
14163 +} __no_const;
14164
14165 struct pv_cpu_ops {
14166 /* hooks for various privileged instructions */
14167 @@ -192,7 +192,7 @@ struct pv_cpu_ops {
14168
14169 void (*start_context_switch)(struct task_struct *prev);
14170 void (*end_context_switch)(struct task_struct *next);
14171 -};
14172 +} __no_const;
14173
14174 struct pv_irq_ops {
14175 /*
14176 @@ -223,7 +223,7 @@ struct pv_apic_ops {
14177 unsigned long start_eip,
14178 unsigned long start_esp);
14179 #endif
14180 -};
14181 +} __no_const;
14182
14183 struct pv_mmu_ops {
14184 unsigned long (*read_cr2)(void);
14185 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
14186 struct paravirt_callee_save make_pud;
14187
14188 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14189 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14190 #endif /* PAGETABLE_LEVELS == 4 */
14191 #endif /* PAGETABLE_LEVELS >= 3 */
14192
14193 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
14194 an mfn. We can tell which is which from the index. */
14195 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14196 phys_addr_t phys, pgprot_t flags);
14197 +
14198 +#ifdef CONFIG_PAX_KERNEXEC
14199 + unsigned long (*pax_open_kernel)(void);
14200 + unsigned long (*pax_close_kernel)(void);
14201 +#endif
14202 +
14203 };
14204
14205 struct arch_spinlock;
14206 @@ -334,7 +341,7 @@ struct pv_lock_ops {
14207 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14208 int (*spin_trylock)(struct arch_spinlock *lock);
14209 void (*spin_unlock)(struct arch_spinlock *lock);
14210 -};
14211 +} __no_const;
14212
14213 /* This contains all the paravirt structures: we get a convenient
14214 * number for each function using the offset which we use to indicate
14215 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14216 index b4389a4..7024269 100644
14217 --- a/arch/x86/include/asm/pgalloc.h
14218 +++ b/arch/x86/include/asm/pgalloc.h
14219 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14220 pmd_t *pmd, pte_t *pte)
14221 {
14222 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14223 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14224 +}
14225 +
14226 +static inline void pmd_populate_user(struct mm_struct *mm,
14227 + pmd_t *pmd, pte_t *pte)
14228 +{
14229 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14230 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14231 }
14232
14233 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14234
14235 #ifdef CONFIG_X86_PAE
14236 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14237 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14238 +{
14239 + pud_populate(mm, pudp, pmd);
14240 +}
14241 #else /* !CONFIG_X86_PAE */
14242 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14243 {
14244 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14245 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14246 }
14247 +
14248 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14249 +{
14250 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14251 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14252 +}
14253 #endif /* CONFIG_X86_PAE */
14254
14255 #if PAGETABLE_LEVELS > 3
14256 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14257 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14258 }
14259
14260 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14261 +{
14262 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14263 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14264 +}
14265 +
14266 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14267 {
14268 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
14269 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14270 index f2b489c..4f7e2e5 100644
14271 --- a/arch/x86/include/asm/pgtable-2level.h
14272 +++ b/arch/x86/include/asm/pgtable-2level.h
14273 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14274
14275 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14276 {
14277 + pax_open_kernel();
14278 *pmdp = pmd;
14279 + pax_close_kernel();
14280 }
14281
14282 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14283 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14284 index 4cc9f2b..5fd9226 100644
14285 --- a/arch/x86/include/asm/pgtable-3level.h
14286 +++ b/arch/x86/include/asm/pgtable-3level.h
14287 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14288
14289 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14290 {
14291 + pax_open_kernel();
14292 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14293 + pax_close_kernel();
14294 }
14295
14296 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14297 {
14298 + pax_open_kernel();
14299 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14300 + pax_close_kernel();
14301 }
14302
14303 /*
14304 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14305 index 1e67223..dd6e7ea 100644
14306 --- a/arch/x86/include/asm/pgtable.h
14307 +++ b/arch/x86/include/asm/pgtable.h
14308 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14309
14310 #ifndef __PAGETABLE_PUD_FOLDED
14311 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14312 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14313 #define pgd_clear(pgd) native_pgd_clear(pgd)
14314 #endif
14315
14316 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14317
14318 #define arch_end_context_switch(prev) do {} while(0)
14319
14320 +#define pax_open_kernel() native_pax_open_kernel()
14321 +#define pax_close_kernel() native_pax_close_kernel()
14322 #endif /* CONFIG_PARAVIRT */
14323
14324 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
14325 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14326 +
14327 +#ifdef CONFIG_PAX_KERNEXEC
14328 +static inline unsigned long native_pax_open_kernel(void)
14329 +{
14330 + unsigned long cr0;
14331 +
14332 + preempt_disable();
14333 + barrier();
14334 + cr0 = read_cr0() ^ X86_CR0_WP;
14335 + BUG_ON(cr0 & X86_CR0_WP);
14336 + write_cr0(cr0);
14337 + return cr0 ^ X86_CR0_WP;
14338 +}
14339 +
14340 +static inline unsigned long native_pax_close_kernel(void)
14341 +{
14342 + unsigned long cr0;
14343 +
14344 + cr0 = read_cr0() ^ X86_CR0_WP;
14345 + BUG_ON(!(cr0 & X86_CR0_WP));
14346 + write_cr0(cr0);
14347 + barrier();
14348 + preempt_enable_no_resched();
14349 + return cr0 ^ X86_CR0_WP;
14350 +}
14351 +#else
14352 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
14353 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
14354 +#endif
14355 +
14356 /*
14357 * The following only work if pte_present() is true.
14358 * Undefined behaviour if not..
14359 */
14360 +static inline int pte_user(pte_t pte)
14361 +{
14362 + return pte_val(pte) & _PAGE_USER;
14363 +}
14364 +
14365 static inline int pte_dirty(pte_t pte)
14366 {
14367 return pte_flags(pte) & _PAGE_DIRTY;
14368 @@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
14369 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
14370 }
14371
14372 +static inline unsigned long pgd_pfn(pgd_t pgd)
14373 +{
14374 + return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
14375 +}
14376 +
14377 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
14378
14379 static inline int pmd_large(pmd_t pte)
14380 @@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14381 return pte_clear_flags(pte, _PAGE_RW);
14382 }
14383
14384 +static inline pte_t pte_mkread(pte_t pte)
14385 +{
14386 + return __pte(pte_val(pte) | _PAGE_USER);
14387 +}
14388 +
14389 static inline pte_t pte_mkexec(pte_t pte)
14390 {
14391 - return pte_clear_flags(pte, _PAGE_NX);
14392 +#ifdef CONFIG_X86_PAE
14393 + if (__supported_pte_mask & _PAGE_NX)
14394 + return pte_clear_flags(pte, _PAGE_NX);
14395 + else
14396 +#endif
14397 + return pte_set_flags(pte, _PAGE_USER);
14398 +}
14399 +
14400 +static inline pte_t pte_exprotect(pte_t pte)
14401 +{
14402 +#ifdef CONFIG_X86_PAE
14403 + if (__supported_pte_mask & _PAGE_NX)
14404 + return pte_set_flags(pte, _PAGE_NX);
14405 + else
14406 +#endif
14407 + return pte_clear_flags(pte, _PAGE_USER);
14408 }
14409
14410 static inline pte_t pte_mkdirty(pte_t pte)
14411 @@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14412 #endif
14413
14414 #ifndef __ASSEMBLY__
14415 +
14416 +#ifdef CONFIG_PAX_PER_CPU_PGD
14417 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14418 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14419 +{
14420 + return cpu_pgd[cpu];
14421 +}
14422 +#endif
14423 +
14424 #include <linux/mm_types.h>
14425 #include <linux/log2.h>
14426
14427 @@ -529,7 +603,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
14428 * Currently stuck as a macro due to indirect forward reference to
14429 * linux/mmzone.h's __section_mem_map_addr() definition:
14430 */
14431 -#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
14432 +#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
14433
14434 /* Find an entry in the second-level page table.. */
14435 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
14436 @@ -569,7 +643,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
14437 * Currently stuck as a macro due to indirect forward reference to
14438 * linux/mmzone.h's __section_mem_map_addr() definition:
14439 */
14440 -#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
14441 +#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
14442
14443 /* to find an entry in a page-table-directory. */
14444 static inline unsigned long pud_index(unsigned long address)
14445 @@ -584,7 +658,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14446
14447 static inline int pgd_bad(pgd_t pgd)
14448 {
14449 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14450 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14451 }
14452
14453 static inline int pgd_none(pgd_t pgd)
14454 @@ -607,7 +681,12 @@ static inline int pgd_none(pgd_t pgd)
14455 * pgd_offset() returns a (pgd_t *)
14456 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14457 */
14458 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14459 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14460 +
14461 +#ifdef CONFIG_PAX_PER_CPU_PGD
14462 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14463 +#endif
14464 +
14465 /*
14466 * a shortcut which implies the use of the kernel's pgd, instead
14467 * of a process's
14468 @@ -618,6 +697,22 @@ static inline int pgd_none(pgd_t pgd)
14469 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14470 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14471
14472 +#ifdef CONFIG_X86_32
14473 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14474 +#else
14475 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14476 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14477 +
14478 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14479 +#ifdef __ASSEMBLY__
14480 +#define pax_user_shadow_base pax_user_shadow_base(%rip)
14481 +#else
14482 +extern unsigned long pax_user_shadow_base;
14483 +#endif
14484 +#endif
14485 +
14486 +#endif
14487 +
14488 #ifndef __ASSEMBLY__
14489
14490 extern int direct_gbpages;
14491 @@ -784,11 +879,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14492 * dst and src can be on the same page, but the range must not overlap,
14493 * and must not cross a page boundary.
14494 */
14495 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14496 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14497 {
14498 - memcpy(dst, src, count * sizeof(pgd_t));
14499 + pax_open_kernel();
14500 + while (count--)
14501 + *dst++ = *src++;
14502 + pax_close_kernel();
14503 }
14504
14505 +#ifdef CONFIG_PAX_PER_CPU_PGD
14506 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14507 +#endif
14508 +
14509 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14510 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14511 +#else
14512 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14513 +#endif
14514 +
14515 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
14516 static inline int page_level_shift(enum pg_level level)
14517 {
14518 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14519 index 9ee3221..b979c6b 100644
14520 --- a/arch/x86/include/asm/pgtable_32.h
14521 +++ b/arch/x86/include/asm/pgtable_32.h
14522 @@ -25,9 +25,6 @@
14523 struct mm_struct;
14524 struct vm_area_struct;
14525
14526 -extern pgd_t swapper_pg_dir[1024];
14527 -extern pgd_t initial_page_table[1024];
14528 -
14529 static inline void pgtable_cache_init(void) { }
14530 static inline void check_pgt_cache(void) { }
14531 void paging_init(void);
14532 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14533 # include <asm/pgtable-2level.h>
14534 #endif
14535
14536 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14537 +extern pgd_t initial_page_table[PTRS_PER_PGD];
14538 +#ifdef CONFIG_X86_PAE
14539 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14540 +#endif
14541 +
14542 #if defined(CONFIG_HIGHPTE)
14543 #define pte_offset_map(dir, address) \
14544 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14545 @@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14546 /* Clear a kernel PTE and flush it from the TLB */
14547 #define kpte_clear_flush(ptep, vaddr) \
14548 do { \
14549 + pax_open_kernel(); \
14550 pte_clear(&init_mm, (vaddr), (ptep)); \
14551 + pax_close_kernel(); \
14552 __flush_tlb_one((vaddr)); \
14553 } while (0)
14554
14555 #endif /* !__ASSEMBLY__ */
14556
14557 +#define HAVE_ARCH_UNMAPPED_AREA
14558 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14559 +
14560 /*
14561 * kern_addr_valid() is (1) for FLATMEM and (0) for
14562 * SPARSEMEM and DISCONTIGMEM
14563 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14564 index ed5903b..c7fe163 100644
14565 --- a/arch/x86/include/asm/pgtable_32_types.h
14566 +++ b/arch/x86/include/asm/pgtable_32_types.h
14567 @@ -8,7 +8,7 @@
14568 */
14569 #ifdef CONFIG_X86_PAE
14570 # include <asm/pgtable-3level_types.h>
14571 -# define PMD_SIZE (1UL << PMD_SHIFT)
14572 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14573 # define PMD_MASK (~(PMD_SIZE - 1))
14574 #else
14575 # include <asm/pgtable-2level_types.h>
14576 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14577 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14578 #endif
14579
14580 +#ifdef CONFIG_PAX_KERNEXEC
14581 +#ifndef __ASSEMBLY__
14582 +extern unsigned char MODULES_EXEC_VADDR[];
14583 +extern unsigned char MODULES_EXEC_END[];
14584 +#endif
14585 +#include <asm/boot.h>
14586 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14587 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14588 +#else
14589 +#define ktla_ktva(addr) (addr)
14590 +#define ktva_ktla(addr) (addr)
14591 +#endif
14592 +
14593 #define MODULES_VADDR VMALLOC_START
14594 #define MODULES_END VMALLOC_END
14595 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
14596 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14597 index e22c1db..23a625a 100644
14598 --- a/arch/x86/include/asm/pgtable_64.h
14599 +++ b/arch/x86/include/asm/pgtable_64.h
14600 @@ -16,10 +16,14 @@
14601
14602 extern pud_t level3_kernel_pgt[512];
14603 extern pud_t level3_ident_pgt[512];
14604 +extern pud_t level3_vmalloc_start_pgt[512];
14605 +extern pud_t level3_vmalloc_end_pgt[512];
14606 +extern pud_t level3_vmemmap_pgt[512];
14607 +extern pud_t level2_vmemmap_pgt[512];
14608 extern pmd_t level2_kernel_pgt[512];
14609 extern pmd_t level2_fixmap_pgt[512];
14610 -extern pmd_t level2_ident_pgt[512];
14611 -extern pgd_t init_level4_pgt[];
14612 +extern pmd_t level2_ident_pgt[512*2];
14613 +extern pgd_t init_level4_pgt[512];
14614
14615 #define swapper_pg_dir init_level4_pgt
14616
14617 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14618
14619 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14620 {
14621 + pax_open_kernel();
14622 *pmdp = pmd;
14623 + pax_close_kernel();
14624 }
14625
14626 static inline void native_pmd_clear(pmd_t *pmd)
14627 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14628
14629 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14630 {
14631 + pax_open_kernel();
14632 *pudp = pud;
14633 + pax_close_kernel();
14634 }
14635
14636 static inline void native_pud_clear(pud_t *pud)
14637 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14638
14639 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14640 {
14641 + pax_open_kernel();
14642 + *pgdp = pgd;
14643 + pax_close_kernel();
14644 +}
14645 +
14646 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14647 +{
14648 *pgdp = pgd;
14649 }
14650
14651 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14652 index 2d88344..4679fc3 100644
14653 --- a/arch/x86/include/asm/pgtable_64_types.h
14654 +++ b/arch/x86/include/asm/pgtable_64_types.h
14655 @@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
14656 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14657 #define MODULES_END _AC(0xffffffffff000000, UL)
14658 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14659 +#define MODULES_EXEC_VADDR MODULES_VADDR
14660 +#define MODULES_EXEC_END MODULES_END
14661 +
14662 +#define ktla_ktva(addr) (addr)
14663 +#define ktva_ktla(addr) (addr)
14664
14665 #define EARLY_DYNAMIC_PAGE_TABLES 64
14666
14667 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14668 index 567b5d0..bd91d64 100644
14669 --- a/arch/x86/include/asm/pgtable_types.h
14670 +++ b/arch/x86/include/asm/pgtable_types.h
14671 @@ -16,13 +16,12 @@
14672 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14673 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14674 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14675 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14676 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14677 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14678 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14679 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14680 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14681 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14682 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14683 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14684 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14685 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14686
14687 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14688 @@ -40,7 +39,6 @@
14689 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14690 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14691 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14692 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14693 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14694 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14695 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14696 @@ -57,8 +55,10 @@
14697
14698 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14699 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14700 -#else
14701 +#elif defined(CONFIG_KMEMCHECK)
14702 #define _PAGE_NX (_AT(pteval_t, 0))
14703 +#else
14704 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14705 #endif
14706
14707 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14708 @@ -116,6 +116,9 @@
14709 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14710 _PAGE_ACCESSED)
14711
14712 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
14713 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
14714 +
14715 #define __PAGE_KERNEL_EXEC \
14716 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14717 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14718 @@ -126,7 +129,7 @@
14719 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14720 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14721 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14722 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14723 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14724 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14725 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14726 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14727 @@ -188,8 +191,8 @@
14728 * bits are combined, this will alow user to access the high address mapped
14729 * VDSO in the presence of CONFIG_COMPAT_VDSO
14730 */
14731 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14732 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14733 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14734 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14735 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14736 #endif
14737
14738 @@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14739 {
14740 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14741 }
14742 +#endif
14743
14744 +#if PAGETABLE_LEVELS == 3
14745 +#include <asm-generic/pgtable-nopud.h>
14746 +#endif
14747 +
14748 +#if PAGETABLE_LEVELS == 2
14749 +#include <asm-generic/pgtable-nopmd.h>
14750 +#endif
14751 +
14752 +#ifndef __ASSEMBLY__
14753 #if PAGETABLE_LEVELS > 3
14754 typedef struct { pudval_t pud; } pud_t;
14755
14756 @@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14757 return pud.pud;
14758 }
14759 #else
14760 -#include <asm-generic/pgtable-nopud.h>
14761 -
14762 static inline pudval_t native_pud_val(pud_t pud)
14763 {
14764 return native_pgd_val(pud.pgd);
14765 @@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14766 return pmd.pmd;
14767 }
14768 #else
14769 -#include <asm-generic/pgtable-nopmd.h>
14770 -
14771 static inline pmdval_t native_pmd_val(pmd_t pmd)
14772 {
14773 return native_pgd_val(pmd.pud.pgd);
14774 @@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14775
14776 extern pteval_t __supported_pte_mask;
14777 extern void set_nx(void);
14778 -extern int nx_enabled;
14779
14780 #define pgprot_writecombine pgprot_writecombine
14781 extern pgprot_t pgprot_writecombine(pgprot_t prot);
14782 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14783 index 3270116..8d99d82 100644
14784 --- a/arch/x86/include/asm/processor.h
14785 +++ b/arch/x86/include/asm/processor.h
14786 @@ -285,7 +285,7 @@ struct tss_struct {
14787
14788 } ____cacheline_aligned;
14789
14790 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14791 +extern struct tss_struct init_tss[NR_CPUS];
14792
14793 /*
14794 * Save the original ist values for checking stack pointers during debugging
14795 @@ -826,11 +826,18 @@ static inline void spin_lock_prefetch(const void *x)
14796 */
14797 #define TASK_SIZE PAGE_OFFSET
14798 #define TASK_SIZE_MAX TASK_SIZE
14799 +
14800 +#ifdef CONFIG_PAX_SEGMEXEC
14801 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14802 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14803 +#else
14804 #define STACK_TOP TASK_SIZE
14805 -#define STACK_TOP_MAX STACK_TOP
14806 +#endif
14807 +
14808 +#define STACK_TOP_MAX TASK_SIZE
14809
14810 #define INIT_THREAD { \
14811 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
14812 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14813 .vm86_info = NULL, \
14814 .sysenter_cs = __KERNEL_CS, \
14815 .io_bitmap_ptr = NULL, \
14816 @@ -844,7 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
14817 */
14818 #define INIT_TSS { \
14819 .x86_tss = { \
14820 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
14821 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14822 .ss0 = __KERNEL_DS, \
14823 .ss1 = __KERNEL_CS, \
14824 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14825 @@ -855,11 +862,7 @@ static inline void spin_lock_prefetch(const void *x)
14826 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14827
14828 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14829 -#define KSTK_TOP(info) \
14830 -({ \
14831 - unsigned long *__ptr = (unsigned long *)(info); \
14832 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14833 -})
14834 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14835
14836 /*
14837 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14838 @@ -874,7 +877,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14839 #define task_pt_regs(task) \
14840 ({ \
14841 struct pt_regs *__regs__; \
14842 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14843 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14844 __regs__ - 1; \
14845 })
14846
14847 @@ -884,13 +887,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14848 /*
14849 * User space process size. 47bits minus one guard page.
14850 */
14851 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14852 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14853
14854 /* This decides where the kernel will search for a free chunk of vm
14855 * space during mmap's.
14856 */
14857 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14858 - 0xc0000000 : 0xFFFFe000)
14859 + 0xc0000000 : 0xFFFFf000)
14860
14861 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14862 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14863 @@ -901,11 +904,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14864 #define STACK_TOP_MAX TASK_SIZE_MAX
14865
14866 #define INIT_THREAD { \
14867 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14868 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14869 }
14870
14871 #define INIT_TSS { \
14872 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14873 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14874 }
14875
14876 /*
14877 @@ -933,6 +936,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14878 */
14879 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14880
14881 +#ifdef CONFIG_PAX_SEGMEXEC
14882 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14883 +#endif
14884 +
14885 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14886
14887 /* Get/set a process' ability to use the timestamp counter instruction */
14888 @@ -993,7 +1000,7 @@ extern bool cpu_has_amd_erratum(const int *);
14889 #define cpu_has_amd_erratum(x) (false)
14890 #endif /* CONFIG_CPU_SUP_AMD */
14891
14892 -extern unsigned long arch_align_stack(unsigned long sp);
14893 +#define arch_align_stack(x) ((x) & ~0xfUL)
14894 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14895
14896 void default_idle(void);
14897 @@ -1003,6 +1010,6 @@ bool xen_set_default_idle(void);
14898 #define xen_set_default_idle 0
14899 #endif
14900
14901 -void stop_this_cpu(void *dummy);
14902 +void stop_this_cpu(void *dummy) __noreturn;
14903
14904 #endif /* _ASM_X86_PROCESSOR_H */
14905 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14906 index 942a086..6c26446 100644
14907 --- a/arch/x86/include/asm/ptrace.h
14908 +++ b/arch/x86/include/asm/ptrace.h
14909 @@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14910 }
14911
14912 /*
14913 - * user_mode_vm(regs) determines whether a register set came from user mode.
14914 + * user_mode(regs) determines whether a register set came from user mode.
14915 * This is true if V8086 mode was enabled OR if the register set was from
14916 * protected mode with RPL-3 CS value. This tricky test checks that with
14917 * one comparison. Many places in the kernel can bypass this full check
14918 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14919 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14920 + * be used.
14921 */
14922 -static inline int user_mode(struct pt_regs *regs)
14923 +static inline int user_mode_novm(struct pt_regs *regs)
14924 {
14925 #ifdef CONFIG_X86_32
14926 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14927 #else
14928 - return !!(regs->cs & 3);
14929 + return !!(regs->cs & SEGMENT_RPL_MASK);
14930 #endif
14931 }
14932
14933 -static inline int user_mode_vm(struct pt_regs *regs)
14934 +static inline int user_mode(struct pt_regs *regs)
14935 {
14936 #ifdef CONFIG_X86_32
14937 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
14938 USER_RPL;
14939 #else
14940 - return user_mode(regs);
14941 + return user_mode_novm(regs);
14942 #endif
14943 }
14944
14945 @@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
14946 #ifdef CONFIG_X86_64
14947 static inline bool user_64bit_mode(struct pt_regs *regs)
14948 {
14949 + unsigned long cs = regs->cs & 0xffff;
14950 #ifndef CONFIG_PARAVIRT
14951 /*
14952 * On non-paravirt systems, this is the only long mode CPL 3
14953 * selector. We do not allow long mode selectors in the LDT.
14954 */
14955 - return regs->cs == __USER_CS;
14956 + return cs == __USER_CS;
14957 #else
14958 /* Headers are too twisted for this to go in paravirt.h. */
14959 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
14960 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
14961 #endif
14962 }
14963
14964 @@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
14965 * Traps from the kernel do not save sp and ss.
14966 * Use the helper function to retrieve sp.
14967 */
14968 - if (offset == offsetof(struct pt_regs, sp) &&
14969 - regs->cs == __KERNEL_CS)
14970 - return kernel_stack_pointer(regs);
14971 + if (offset == offsetof(struct pt_regs, sp)) {
14972 + unsigned long cs = regs->cs & 0xffff;
14973 + if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
14974 + return kernel_stack_pointer(regs);
14975 + }
14976 #endif
14977 return *(unsigned long *)((unsigned long)regs + offset);
14978 }
14979 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
14980 index 9c6b890..5305f53 100644
14981 --- a/arch/x86/include/asm/realmode.h
14982 +++ b/arch/x86/include/asm/realmode.h
14983 @@ -22,16 +22,14 @@ struct real_mode_header {
14984 #endif
14985 /* APM/BIOS reboot */
14986 u32 machine_real_restart_asm;
14987 -#ifdef CONFIG_X86_64
14988 u32 machine_real_restart_seg;
14989 -#endif
14990 };
14991
14992 /* This must match data at trampoline_32/64.S */
14993 struct trampoline_header {
14994 #ifdef CONFIG_X86_32
14995 u32 start;
14996 - u16 gdt_pad;
14997 + u16 boot_cs;
14998 u16 gdt_limit;
14999 u32 gdt_base;
15000 #else
15001 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15002 index a82c4f1..ac45053 100644
15003 --- a/arch/x86/include/asm/reboot.h
15004 +++ b/arch/x86/include/asm/reboot.h
15005 @@ -6,13 +6,13 @@
15006 struct pt_regs;
15007
15008 struct machine_ops {
15009 - void (*restart)(char *cmd);
15010 - void (*halt)(void);
15011 - void (*power_off)(void);
15012 + void (* __noreturn restart)(char *cmd);
15013 + void (* __noreturn halt)(void);
15014 + void (* __noreturn power_off)(void);
15015 void (*shutdown)(void);
15016 void (*crash_shutdown)(struct pt_regs *);
15017 - void (*emergency_restart)(void);
15018 -};
15019 + void (* __noreturn emergency_restart)(void);
15020 +} __no_const;
15021
15022 extern struct machine_ops machine_ops;
15023
15024 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15025 index 2dbe4a7..ce1db00 100644
15026 --- a/arch/x86/include/asm/rwsem.h
15027 +++ b/arch/x86/include/asm/rwsem.h
15028 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15029 {
15030 asm volatile("# beginning down_read\n\t"
15031 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15032 +
15033 +#ifdef CONFIG_PAX_REFCOUNT
15034 + "jno 0f\n"
15035 + LOCK_PREFIX _ASM_DEC "(%1)\n"
15036 + "int $4\n0:\n"
15037 + _ASM_EXTABLE(0b, 0b)
15038 +#endif
15039 +
15040 /* adds 0x00000001 */
15041 " jns 1f\n"
15042 " call call_rwsem_down_read_failed\n"
15043 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15044 "1:\n\t"
15045 " mov %1,%2\n\t"
15046 " add %3,%2\n\t"
15047 +
15048 +#ifdef CONFIG_PAX_REFCOUNT
15049 + "jno 0f\n"
15050 + "sub %3,%2\n"
15051 + "int $4\n0:\n"
15052 + _ASM_EXTABLE(0b, 0b)
15053 +#endif
15054 +
15055 " jle 2f\n\t"
15056 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15057 " jnz 1b\n\t"
15058 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15059 long tmp;
15060 asm volatile("# beginning down_write\n\t"
15061 LOCK_PREFIX " xadd %1,(%2)\n\t"
15062 +
15063 +#ifdef CONFIG_PAX_REFCOUNT
15064 + "jno 0f\n"
15065 + "mov %1,(%2)\n"
15066 + "int $4\n0:\n"
15067 + _ASM_EXTABLE(0b, 0b)
15068 +#endif
15069 +
15070 /* adds 0xffff0001, returns the old value */
15071 " test %1,%1\n\t"
15072 /* was the count 0 before? */
15073 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15074 long tmp;
15075 asm volatile("# beginning __up_read\n\t"
15076 LOCK_PREFIX " xadd %1,(%2)\n\t"
15077 +
15078 +#ifdef CONFIG_PAX_REFCOUNT
15079 + "jno 0f\n"
15080 + "mov %1,(%2)\n"
15081 + "int $4\n0:\n"
15082 + _ASM_EXTABLE(0b, 0b)
15083 +#endif
15084 +
15085 /* subtracts 1, returns the old value */
15086 " jns 1f\n\t"
15087 " call call_rwsem_wake\n" /* expects old value in %edx */
15088 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15089 long tmp;
15090 asm volatile("# beginning __up_write\n\t"
15091 LOCK_PREFIX " xadd %1,(%2)\n\t"
15092 +
15093 +#ifdef CONFIG_PAX_REFCOUNT
15094 + "jno 0f\n"
15095 + "mov %1,(%2)\n"
15096 + "int $4\n0:\n"
15097 + _ASM_EXTABLE(0b, 0b)
15098 +#endif
15099 +
15100 /* subtracts 0xffff0001, returns the old value */
15101 " jns 1f\n\t"
15102 " call call_rwsem_wake\n" /* expects old value in %edx */
15103 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15104 {
15105 asm volatile("# beginning __downgrade_write\n\t"
15106 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15107 +
15108 +#ifdef CONFIG_PAX_REFCOUNT
15109 + "jno 0f\n"
15110 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15111 + "int $4\n0:\n"
15112 + _ASM_EXTABLE(0b, 0b)
15113 +#endif
15114 +
15115 /*
15116 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15117 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15118 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15119 */
15120 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15121 {
15122 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15123 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15124 +
15125 +#ifdef CONFIG_PAX_REFCOUNT
15126 + "jno 0f\n"
15127 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
15128 + "int $4\n0:\n"
15129 + _ASM_EXTABLE(0b, 0b)
15130 +#endif
15131 +
15132 : "+m" (sem->count)
15133 : "er" (delta));
15134 }
15135 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15136 */
15137 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15138 {
15139 - return delta + xadd(&sem->count, delta);
15140 + return delta + xadd_check_overflow(&sem->count, delta);
15141 }
15142
15143 #endif /* __KERNEL__ */
15144 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15145 index c48a950..c6d7468 100644
15146 --- a/arch/x86/include/asm/segment.h
15147 +++ b/arch/x86/include/asm/segment.h
15148 @@ -64,10 +64,15 @@
15149 * 26 - ESPFIX small SS
15150 * 27 - per-cpu [ offset to per-cpu data area ]
15151 * 28 - stack_canary-20 [ for stack protector ]
15152 - * 29 - unused
15153 - * 30 - unused
15154 + * 29 - PCI BIOS CS
15155 + * 30 - PCI BIOS DS
15156 * 31 - TSS for double fault handler
15157 */
15158 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15159 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15160 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15161 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15162 +
15163 #define GDT_ENTRY_TLS_MIN 6
15164 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15165
15166 @@ -79,6 +84,8 @@
15167
15168 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15169
15170 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15171 +
15172 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15173
15174 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15175 @@ -104,6 +111,12 @@
15176 #define __KERNEL_STACK_CANARY 0
15177 #endif
15178
15179 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15180 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15181 +
15182 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15183 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15184 +
15185 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15186
15187 /*
15188 @@ -141,7 +154,7 @@
15189 */
15190
15191 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15192 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15193 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15194
15195
15196 #else
15197 @@ -165,6 +178,8 @@
15198 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15199 #define __USER32_DS __USER_DS
15200
15201 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15202 +
15203 #define GDT_ENTRY_TSS 8 /* needs two entries */
15204 #define GDT_ENTRY_LDT 10 /* needs two entries */
15205 #define GDT_ENTRY_TLS_MIN 12
15206 @@ -185,6 +200,7 @@
15207 #endif
15208
15209 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15210 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15211 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15212 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15213 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15214 @@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15215 {
15216 unsigned long __limit;
15217 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15218 - return __limit + 1;
15219 + return __limit;
15220 }
15221
15222 #endif /* !__ASSEMBLY__ */
15223 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15224 index b073aae..39f9bdd 100644
15225 --- a/arch/x86/include/asm/smp.h
15226 +++ b/arch/x86/include/asm/smp.h
15227 @@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15228 /* cpus sharing the last level cache: */
15229 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15230 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15231 -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15232 +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15233
15234 static inline struct cpumask *cpu_sibling_mask(int cpu)
15235 {
15236 @@ -79,7 +79,7 @@ struct smp_ops {
15237
15238 void (*send_call_func_ipi)(const struct cpumask *mask);
15239 void (*send_call_func_single_ipi)(int cpu);
15240 -};
15241 +} __no_const;
15242
15243 /* Globals due to paravirt */
15244 extern void set_cpu_sibling_map(int cpu);
15245 @@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15246 extern int safe_smp_processor_id(void);
15247
15248 #elif defined(CONFIG_X86_64_SMP)
15249 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15250 -
15251 -#define stack_smp_processor_id() \
15252 -({ \
15253 - struct thread_info *ti; \
15254 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15255 - ti->cpu; \
15256 -})
15257 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15258 +#define stack_smp_processor_id() raw_smp_processor_id()
15259 #define safe_smp_processor_id() smp_processor_id()
15260
15261 #endif
15262 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15263 index 33692ea..350a534 100644
15264 --- a/arch/x86/include/asm/spinlock.h
15265 +++ b/arch/x86/include/asm/spinlock.h
15266 @@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15267 static inline void arch_read_lock(arch_rwlock_t *rw)
15268 {
15269 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15270 +
15271 +#ifdef CONFIG_PAX_REFCOUNT
15272 + "jno 0f\n"
15273 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15274 + "int $4\n0:\n"
15275 + _ASM_EXTABLE(0b, 0b)
15276 +#endif
15277 +
15278 "jns 1f\n"
15279 "call __read_lock_failed\n\t"
15280 "1:\n"
15281 @@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15282 static inline void arch_write_lock(arch_rwlock_t *rw)
15283 {
15284 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15285 +
15286 +#ifdef CONFIG_PAX_REFCOUNT
15287 + "jno 0f\n"
15288 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15289 + "int $4\n0:\n"
15290 + _ASM_EXTABLE(0b, 0b)
15291 +#endif
15292 +
15293 "jz 1f\n"
15294 "call __write_lock_failed\n\t"
15295 "1:\n"
15296 @@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15297
15298 static inline void arch_read_unlock(arch_rwlock_t *rw)
15299 {
15300 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15301 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15302 +
15303 +#ifdef CONFIG_PAX_REFCOUNT
15304 + "jno 0f\n"
15305 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15306 + "int $4\n0:\n"
15307 + _ASM_EXTABLE(0b, 0b)
15308 +#endif
15309 +
15310 :"+m" (rw->lock) : : "memory");
15311 }
15312
15313 static inline void arch_write_unlock(arch_rwlock_t *rw)
15314 {
15315 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15316 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15317 +
15318 +#ifdef CONFIG_PAX_REFCOUNT
15319 + "jno 0f\n"
15320 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15321 + "int $4\n0:\n"
15322 + _ASM_EXTABLE(0b, 0b)
15323 +#endif
15324 +
15325 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15326 }
15327
15328 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15329 index 6a99859..03cb807 100644
15330 --- a/arch/x86/include/asm/stackprotector.h
15331 +++ b/arch/x86/include/asm/stackprotector.h
15332 @@ -47,7 +47,7 @@
15333 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15334 */
15335 #define GDT_STACK_CANARY_INIT \
15336 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15337 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15338
15339 /*
15340 * Initialize the stackprotector canary value.
15341 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15342
15343 static inline void load_stack_canary_segment(void)
15344 {
15345 -#ifdef CONFIG_X86_32
15346 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15347 asm volatile ("mov %0, %%gs" : : "r" (0));
15348 #endif
15349 }
15350 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15351 index 70bbe39..4ae2bd4 100644
15352 --- a/arch/x86/include/asm/stacktrace.h
15353 +++ b/arch/x86/include/asm/stacktrace.h
15354 @@ -11,28 +11,20 @@
15355
15356 extern int kstack_depth_to_print;
15357
15358 -struct thread_info;
15359 +struct task_struct;
15360 struct stacktrace_ops;
15361
15362 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15363 - unsigned long *stack,
15364 - unsigned long bp,
15365 - const struct stacktrace_ops *ops,
15366 - void *data,
15367 - unsigned long *end,
15368 - int *graph);
15369 +typedef unsigned long walk_stack_t(struct task_struct *task,
15370 + void *stack_start,
15371 + unsigned long *stack,
15372 + unsigned long bp,
15373 + const struct stacktrace_ops *ops,
15374 + void *data,
15375 + unsigned long *end,
15376 + int *graph);
15377
15378 -extern unsigned long
15379 -print_context_stack(struct thread_info *tinfo,
15380 - unsigned long *stack, unsigned long bp,
15381 - const struct stacktrace_ops *ops, void *data,
15382 - unsigned long *end, int *graph);
15383 -
15384 -extern unsigned long
15385 -print_context_stack_bp(struct thread_info *tinfo,
15386 - unsigned long *stack, unsigned long bp,
15387 - const struct stacktrace_ops *ops, void *data,
15388 - unsigned long *end, int *graph);
15389 +extern walk_stack_t print_context_stack;
15390 +extern walk_stack_t print_context_stack_bp;
15391
15392 /* Generic stack tracer with callbacks */
15393
15394 @@ -40,7 +32,7 @@ struct stacktrace_ops {
15395 void (*address)(void *data, unsigned long address, int reliable);
15396 /* On negative return stop dumping */
15397 int (*stack)(void *data, char *name);
15398 - walk_stack_t walk_stack;
15399 + walk_stack_t *walk_stack;
15400 };
15401
15402 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
15403 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15404 index 4ec45b3..a4f0a8a 100644
15405 --- a/arch/x86/include/asm/switch_to.h
15406 +++ b/arch/x86/include/asm/switch_to.h
15407 @@ -108,7 +108,7 @@ do { \
15408 "call __switch_to\n\t" \
15409 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15410 __switch_canary \
15411 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
15412 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15413 "movq %%rax,%%rdi\n\t" \
15414 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15415 "jnz ret_from_fork\n\t" \
15416 @@ -119,7 +119,7 @@ do { \
15417 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15418 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15419 [_tif_fork] "i" (_TIF_FORK), \
15420 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
15421 + [thread_info] "m" (current_tinfo), \
15422 [current_task] "m" (current_task) \
15423 __switch_canary_iparam \
15424 : "memory", "cc" __EXTRA_CLOBBER)
15425 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15426 index 2cd056e..0224df8 100644
15427 --- a/arch/x86/include/asm/thread_info.h
15428 +++ b/arch/x86/include/asm/thread_info.h
15429 @@ -10,6 +10,7 @@
15430 #include <linux/compiler.h>
15431 #include <asm/page.h>
15432 #include <asm/types.h>
15433 +#include <asm/percpu.h>
15434
15435 /*
15436 * low level task data that entry.S needs immediate access to
15437 @@ -23,7 +24,6 @@ struct exec_domain;
15438 #include <linux/atomic.h>
15439
15440 struct thread_info {
15441 - struct task_struct *task; /* main task structure */
15442 struct exec_domain *exec_domain; /* execution domain */
15443 __u32 flags; /* low level flags */
15444 __u32 status; /* thread synchronous flags */
15445 @@ -33,19 +33,13 @@ struct thread_info {
15446 mm_segment_t addr_limit;
15447 struct restart_block restart_block;
15448 void __user *sysenter_return;
15449 -#ifdef CONFIG_X86_32
15450 - unsigned long previous_esp; /* ESP of the previous stack in
15451 - case of nested (IRQ) stacks
15452 - */
15453 - __u8 supervisor_stack[0];
15454 -#endif
15455 + unsigned long lowest_stack;
15456 unsigned int sig_on_uaccess_error:1;
15457 unsigned int uaccess_err:1; /* uaccess failed */
15458 };
15459
15460 -#define INIT_THREAD_INFO(tsk) \
15461 +#define INIT_THREAD_INFO \
15462 { \
15463 - .task = &tsk, \
15464 .exec_domain = &default_exec_domain, \
15465 .flags = 0, \
15466 .cpu = 0, \
15467 @@ -56,7 +50,7 @@ struct thread_info {
15468 }, \
15469 }
15470
15471 -#define init_thread_info (init_thread_union.thread_info)
15472 +#define init_thread_info (init_thread_union.stack)
15473 #define init_stack (init_thread_union.stack)
15474
15475 #else /* !__ASSEMBLY__ */
15476 @@ -97,6 +91,7 @@ struct thread_info {
15477 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15478 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15479 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15480 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15481
15482 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15483 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15484 @@ -121,17 +116,18 @@ struct thread_info {
15485 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15486 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15487 #define _TIF_X32 (1 << TIF_X32)
15488 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15489
15490 /* work to do in syscall_trace_enter() */
15491 #define _TIF_WORK_SYSCALL_ENTRY \
15492 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15493 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15494 - _TIF_NOHZ)
15495 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
15496
15497 /* work to do in syscall_trace_leave() */
15498 #define _TIF_WORK_SYSCALL_EXIT \
15499 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15500 - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15501 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15502
15503 /* work to do on interrupt/exception return */
15504 #define _TIF_WORK_MASK \
15505 @@ -142,7 +138,7 @@ struct thread_info {
15506 /* work to do on any return to user space */
15507 #define _TIF_ALLWORK_MASK \
15508 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15509 - _TIF_NOHZ)
15510 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
15511
15512 /* Only used for 64 bit */
15513 #define _TIF_DO_NOTIFY_MASK \
15514 @@ -158,45 +154,40 @@ struct thread_info {
15515
15516 #define PREEMPT_ACTIVE 0x10000000
15517
15518 -#ifdef CONFIG_X86_32
15519 -
15520 -#define STACK_WARN (THREAD_SIZE/8)
15521 -/*
15522 - * macros/functions for gaining access to the thread information structure
15523 - *
15524 - * preempt_count needs to be 1 initially, until the scheduler is functional.
15525 - */
15526 -#ifndef __ASSEMBLY__
15527 -
15528 -
15529 -/* how to get the current stack pointer from C */
15530 -register unsigned long current_stack_pointer asm("esp") __used;
15531 -
15532 -/* how to get the thread information struct from C */
15533 -static inline struct thread_info *current_thread_info(void)
15534 -{
15535 - return (struct thread_info *)
15536 - (current_stack_pointer & ~(THREAD_SIZE - 1));
15537 -}
15538 -
15539 -#else /* !__ASSEMBLY__ */
15540 -
15541 +#ifdef __ASSEMBLY__
15542 /* how to get the thread information struct from ASM */
15543 #define GET_THREAD_INFO(reg) \
15544 - movl $-THREAD_SIZE, reg; \
15545 - andl %esp, reg
15546 + mov PER_CPU_VAR(current_tinfo), reg
15547
15548 /* use this one if reg already contains %esp */
15549 -#define GET_THREAD_INFO_WITH_ESP(reg) \
15550 - andl $-THREAD_SIZE, reg
15551 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15552 +#else
15553 +/* how to get the thread information struct from C */
15554 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15555 +
15556 +static __always_inline struct thread_info *current_thread_info(void)
15557 +{
15558 + return this_cpu_read_stable(current_tinfo);
15559 +}
15560 +#endif
15561 +
15562 +#ifdef CONFIG_X86_32
15563 +
15564 +#define STACK_WARN (THREAD_SIZE/8)
15565 +/*
15566 + * macros/functions for gaining access to the thread information structure
15567 + *
15568 + * preempt_count needs to be 1 initially, until the scheduler is functional.
15569 + */
15570 +#ifndef __ASSEMBLY__
15571 +
15572 +/* how to get the current stack pointer from C */
15573 +register unsigned long current_stack_pointer asm("esp") __used;
15574
15575 #endif
15576
15577 #else /* X86_32 */
15578
15579 -#include <asm/percpu.h>
15580 -#define KERNEL_STACK_OFFSET (5*8)
15581 -
15582 /*
15583 * macros/functions for gaining access to the thread information structure
15584 * preempt_count needs to be 1 initially, until the scheduler is functional.
15585 @@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
15586 #ifndef __ASSEMBLY__
15587 DECLARE_PER_CPU(unsigned long, kernel_stack);
15588
15589 -static inline struct thread_info *current_thread_info(void)
15590 -{
15591 - struct thread_info *ti;
15592 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
15593 - KERNEL_STACK_OFFSET - THREAD_SIZE);
15594 - return ti;
15595 -}
15596 -
15597 -#else /* !__ASSEMBLY__ */
15598 -
15599 -/* how to get the thread information struct from ASM */
15600 -#define GET_THREAD_INFO(reg) \
15601 - movq PER_CPU_VAR(kernel_stack),reg ; \
15602 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15603 -
15604 -/*
15605 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15606 - * a certain register (to be used in assembler memory operands).
15607 - */
15608 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15609 -
15610 +/* how to get the current stack pointer from C */
15611 +register unsigned long current_stack_pointer asm("rsp") __used;
15612 #endif
15613
15614 #endif /* !X86_32 */
15615 @@ -285,5 +257,12 @@ static inline bool is_ia32_task(void)
15616 extern void arch_task_cache_init(void);
15617 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15618 extern void arch_release_task_struct(struct task_struct *tsk);
15619 +
15620 +#define __HAVE_THREAD_FUNCTIONS
15621 +#define task_thread_info(task) (&(task)->tinfo)
15622 +#define task_stack_page(task) ((task)->stack)
15623 +#define setup_thread_stack(p, org) do {} while (0)
15624 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15625 +
15626 #endif
15627 #endif /* _ASM_X86_THREAD_INFO_H */
15628 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15629 index 5ee2687..70d5895 100644
15630 --- a/arch/x86/include/asm/uaccess.h
15631 +++ b/arch/x86/include/asm/uaccess.h
15632 @@ -7,6 +7,7 @@
15633 #include <linux/compiler.h>
15634 #include <linux/thread_info.h>
15635 #include <linux/string.h>
15636 +#include <linux/sched.h>
15637 #include <asm/asm.h>
15638 #include <asm/page.h>
15639 #include <asm/smap.h>
15640 @@ -29,7 +30,12 @@
15641
15642 #define get_ds() (KERNEL_DS)
15643 #define get_fs() (current_thread_info()->addr_limit)
15644 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15645 +void __set_fs(mm_segment_t x);
15646 +void set_fs(mm_segment_t x);
15647 +#else
15648 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15649 +#endif
15650
15651 #define segment_eq(a, b) ((a).seg == (b).seg)
15652
15653 @@ -77,8 +83,33 @@
15654 * checks that the pointer is in the user space range - after calling
15655 * this function, memory access functions may still return -EFAULT.
15656 */
15657 -#define access_ok(type, addr, size) \
15658 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15659 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15660 +#define access_ok(type, addr, size) \
15661 +({ \
15662 + long __size = size; \
15663 + unsigned long __addr = (unsigned long)addr; \
15664 + unsigned long __addr_ao = __addr & PAGE_MASK; \
15665 + unsigned long __end_ao = __addr + __size - 1; \
15666 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15667 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15668 + while(__addr_ao <= __end_ao) { \
15669 + char __c_ao; \
15670 + __addr_ao += PAGE_SIZE; \
15671 + if (__size > PAGE_SIZE) \
15672 + cond_resched(); \
15673 + if (__get_user(__c_ao, (char __user *)__addr)) \
15674 + break; \
15675 + if (type != VERIFY_WRITE) { \
15676 + __addr = __addr_ao; \
15677 + continue; \
15678 + } \
15679 + if (__put_user(__c_ao, (char __user *)__addr)) \
15680 + break; \
15681 + __addr = __addr_ao; \
15682 + } \
15683 + } \
15684 + __ret_ao; \
15685 +})
15686
15687 /*
15688 * The exception table consists of pairs of addresses relative to the
15689 @@ -176,13 +207,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15690 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15691 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15692
15693 -
15694 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15695 +#define __copyuser_seg "gs;"
15696 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15697 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15698 +#else
15699 +#define __copyuser_seg
15700 +#define __COPYUSER_SET_ES
15701 +#define __COPYUSER_RESTORE_ES
15702 +#endif
15703
15704 #ifdef CONFIG_X86_32
15705 #define __put_user_asm_u64(x, addr, err, errret) \
15706 asm volatile(ASM_STAC "\n" \
15707 - "1: movl %%eax,0(%2)\n" \
15708 - "2: movl %%edx,4(%2)\n" \
15709 + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15710 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15711 "3: " ASM_CLAC "\n" \
15712 ".section .fixup,\"ax\"\n" \
15713 "4: movl %3,%0\n" \
15714 @@ -195,8 +234,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15715
15716 #define __put_user_asm_ex_u64(x, addr) \
15717 asm volatile(ASM_STAC "\n" \
15718 - "1: movl %%eax,0(%1)\n" \
15719 - "2: movl %%edx,4(%1)\n" \
15720 + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15721 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15722 "3: " ASM_CLAC "\n" \
15723 _ASM_EXTABLE_EX(1b, 2b) \
15724 _ASM_EXTABLE_EX(2b, 3b) \
15725 @@ -246,7 +285,7 @@ extern void __put_user_8(void);
15726 __typeof__(*(ptr)) __pu_val; \
15727 __chk_user_ptr(ptr); \
15728 might_fault(); \
15729 - __pu_val = x; \
15730 + __pu_val = (x); \
15731 switch (sizeof(*(ptr))) { \
15732 case 1: \
15733 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15734 @@ -345,7 +384,7 @@ do { \
15735
15736 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15737 asm volatile(ASM_STAC "\n" \
15738 - "1: mov"itype" %2,%"rtype"1\n" \
15739 + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15740 "2: " ASM_CLAC "\n" \
15741 ".section .fixup,\"ax\"\n" \
15742 "3: mov %3,%0\n" \
15743 @@ -353,7 +392,7 @@ do { \
15744 " jmp 2b\n" \
15745 ".previous\n" \
15746 _ASM_EXTABLE(1b, 3b) \
15747 - : "=r" (err), ltype(x) \
15748 + : "=r" (err), ltype (x) \
15749 : "m" (__m(addr)), "i" (errret), "0" (err))
15750
15751 #define __get_user_size_ex(x, ptr, size) \
15752 @@ -378,7 +417,7 @@ do { \
15753 } while (0)
15754
15755 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15756 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15757 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15758 "2:\n" \
15759 _ASM_EXTABLE_EX(1b, 2b) \
15760 : ltype(x) : "m" (__m(addr)))
15761 @@ -395,13 +434,24 @@ do { \
15762 int __gu_err; \
15763 unsigned long __gu_val; \
15764 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15765 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
15766 + (x) = (__typeof__(*(ptr)))__gu_val; \
15767 __gu_err; \
15768 })
15769
15770 /* FIXME: this hack is definitely wrong -AK */
15771 struct __large_struct { unsigned long buf[100]; };
15772 -#define __m(x) (*(struct __large_struct __user *)(x))
15773 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15774 +#define ____m(x) \
15775 +({ \
15776 + unsigned long ____x = (unsigned long)(x); \
15777 + if (____x < pax_user_shadow_base) \
15778 + ____x += pax_user_shadow_base; \
15779 + (typeof(x))____x; \
15780 +})
15781 +#else
15782 +#define ____m(x) (x)
15783 +#endif
15784 +#define __m(x) (*(struct __large_struct __user *)____m(x))
15785
15786 /*
15787 * Tell gcc we read from memory instead of writing: this is because
15788 @@ -410,7 +460,7 @@ struct __large_struct { unsigned long buf[100]; };
15789 */
15790 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15791 asm volatile(ASM_STAC "\n" \
15792 - "1: mov"itype" %"rtype"1,%2\n" \
15793 + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15794 "2: " ASM_CLAC "\n" \
15795 ".section .fixup,\"ax\"\n" \
15796 "3: mov %3,%0\n" \
15797 @@ -418,10 +468,10 @@ struct __large_struct { unsigned long buf[100]; };
15798 ".previous\n" \
15799 _ASM_EXTABLE(1b, 3b) \
15800 : "=r"(err) \
15801 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15802 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15803
15804 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15805 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15806 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15807 "2:\n" \
15808 _ASM_EXTABLE_EX(1b, 2b) \
15809 : : ltype(x), "m" (__m(addr)))
15810 @@ -460,8 +510,12 @@ struct __large_struct { unsigned long buf[100]; };
15811 * On error, the variable @x is set to zero.
15812 */
15813
15814 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15815 +#define __get_user(x, ptr) get_user((x), (ptr))
15816 +#else
15817 #define __get_user(x, ptr) \
15818 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15819 +#endif
15820
15821 /**
15822 * __put_user: - Write a simple value into user space, with less checking.
15823 @@ -483,8 +537,12 @@ struct __large_struct { unsigned long buf[100]; };
15824 * Returns zero on success, or -EFAULT on error.
15825 */
15826
15827 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15828 +#define __put_user(x, ptr) put_user((x), (ptr))
15829 +#else
15830 #define __put_user(x, ptr) \
15831 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15832 +#endif
15833
15834 #define __get_user_unaligned __get_user
15835 #define __put_user_unaligned __put_user
15836 @@ -502,7 +560,7 @@ struct __large_struct { unsigned long buf[100]; };
15837 #define get_user_ex(x, ptr) do { \
15838 unsigned long __gue_val; \
15839 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15840 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
15841 + (x) = (__typeof__(*(ptr)))__gue_val; \
15842 } while (0)
15843
15844 #define put_user_try uaccess_try
15845 @@ -519,8 +577,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15846 extern __must_check long strlen_user(const char __user *str);
15847 extern __must_check long strnlen_user(const char __user *str, long n);
15848
15849 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15850 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15851 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15852 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15853
15854 /*
15855 * movsl can be slow when source and dest are not both 8-byte aligned
15856 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15857 index 7f760a9..04b1c65 100644
15858 --- a/arch/x86/include/asm/uaccess_32.h
15859 +++ b/arch/x86/include/asm/uaccess_32.h
15860 @@ -11,15 +11,15 @@
15861 #include <asm/page.h>
15862
15863 unsigned long __must_check __copy_to_user_ll
15864 - (void __user *to, const void *from, unsigned long n);
15865 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15866 unsigned long __must_check __copy_from_user_ll
15867 - (void *to, const void __user *from, unsigned long n);
15868 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15869 unsigned long __must_check __copy_from_user_ll_nozero
15870 - (void *to, const void __user *from, unsigned long n);
15871 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15872 unsigned long __must_check __copy_from_user_ll_nocache
15873 - (void *to, const void __user *from, unsigned long n);
15874 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15875 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15876 - (void *to, const void __user *from, unsigned long n);
15877 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15878
15879 /**
15880 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15881 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15882 static __always_inline unsigned long __must_check
15883 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15884 {
15885 + if ((long)n < 0)
15886 + return n;
15887 +
15888 + check_object_size(from, n, true);
15889 +
15890 if (__builtin_constant_p(n)) {
15891 unsigned long ret;
15892
15893 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15894 __copy_to_user(void __user *to, const void *from, unsigned long n)
15895 {
15896 might_fault();
15897 +
15898 return __copy_to_user_inatomic(to, from, n);
15899 }
15900
15901 static __always_inline unsigned long
15902 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15903 {
15904 + if ((long)n < 0)
15905 + return n;
15906 +
15907 /* Avoid zeroing the tail if the copy fails..
15908 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15909 * but as the zeroing behaviour is only significant when n is not
15910 @@ -137,6 +146,12 @@ static __always_inline unsigned long
15911 __copy_from_user(void *to, const void __user *from, unsigned long n)
15912 {
15913 might_fault();
15914 +
15915 + if ((long)n < 0)
15916 + return n;
15917 +
15918 + check_object_size(to, n, false);
15919 +
15920 if (__builtin_constant_p(n)) {
15921 unsigned long ret;
15922
15923 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15924 const void __user *from, unsigned long n)
15925 {
15926 might_fault();
15927 +
15928 + if ((long)n < 0)
15929 + return n;
15930 +
15931 if (__builtin_constant_p(n)) {
15932 unsigned long ret;
15933
15934 @@ -181,15 +200,19 @@ static __always_inline unsigned long
15935 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
15936 unsigned long n)
15937 {
15938 - return __copy_from_user_ll_nocache_nozero(to, from, n);
15939 + if ((long)n < 0)
15940 + return n;
15941 +
15942 + return __copy_from_user_ll_nocache_nozero(to, from, n);
15943 }
15944
15945 -unsigned long __must_check copy_to_user(void __user *to,
15946 - const void *from, unsigned long n);
15947 -unsigned long __must_check _copy_from_user(void *to,
15948 - const void __user *from,
15949 - unsigned long n);
15950 -
15951 +extern void copy_to_user_overflow(void)
15952 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15953 + __compiletime_error("copy_to_user() buffer size is not provably correct")
15954 +#else
15955 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
15956 +#endif
15957 +;
15958
15959 extern void copy_from_user_overflow(void)
15960 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15961 @@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
15962 #endif
15963 ;
15964
15965 -static inline unsigned long __must_check copy_from_user(void *to,
15966 - const void __user *from,
15967 - unsigned long n)
15968 +/**
15969 + * copy_to_user: - Copy a block of data into user space.
15970 + * @to: Destination address, in user space.
15971 + * @from: Source address, in kernel space.
15972 + * @n: Number of bytes to copy.
15973 + *
15974 + * Context: User context only. This function may sleep.
15975 + *
15976 + * Copy data from kernel space to user space.
15977 + *
15978 + * Returns number of bytes that could not be copied.
15979 + * On success, this will be zero.
15980 + */
15981 +static inline unsigned long __must_check
15982 +copy_to_user(void __user *to, const void *from, unsigned long n)
15983 {
15984 - int sz = __compiletime_object_size(to);
15985 + size_t sz = __compiletime_object_size(from);
15986
15987 - if (likely(sz == -1 || sz >= n))
15988 - n = _copy_from_user(to, from, n);
15989 - else
15990 + if (unlikely(sz != (size_t)-1 && sz < n))
15991 + copy_to_user_overflow();
15992 + else if (access_ok(VERIFY_WRITE, to, n))
15993 + n = __copy_to_user(to, from, n);
15994 + return n;
15995 +}
15996 +
15997 +/**
15998 + * copy_from_user: - Copy a block of data from user space.
15999 + * @to: Destination address, in kernel space.
16000 + * @from: Source address, in user space.
16001 + * @n: Number of bytes to copy.
16002 + *
16003 + * Context: User context only. This function may sleep.
16004 + *
16005 + * Copy data from user space to kernel space.
16006 + *
16007 + * Returns number of bytes that could not be copied.
16008 + * On success, this will be zero.
16009 + *
16010 + * If some data could not be copied, this function will pad the copied
16011 + * data to the requested size using zero bytes.
16012 + */
16013 +static inline unsigned long __must_check
16014 +copy_from_user(void *to, const void __user *from, unsigned long n)
16015 +{
16016 + size_t sz = __compiletime_object_size(to);
16017 +
16018 + check_object_size(to, n, false);
16019 +
16020 + if (unlikely(sz != (size_t)-1 && sz < n))
16021 copy_from_user_overflow();
16022 -
16023 + else if (access_ok(VERIFY_READ, from, n))
16024 + n = __copy_from_user(to, from, n);
16025 + else if ((long)n > 0)
16026 + memset(to, 0, n);
16027 return n;
16028 }
16029
16030 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16031 index 142810c..1f2a0a7 100644
16032 --- a/arch/x86/include/asm/uaccess_64.h
16033 +++ b/arch/x86/include/asm/uaccess_64.h
16034 @@ -10,6 +10,9 @@
16035 #include <asm/alternative.h>
16036 #include <asm/cpufeature.h>
16037 #include <asm/page.h>
16038 +#include <asm/pgtable.h>
16039 +
16040 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
16041
16042 /*
16043 * Copy To/From Userspace
16044 @@ -17,13 +20,13 @@
16045
16046 /* Handles exceptions in both to and from, but doesn't do access_ok */
16047 __must_check unsigned long
16048 -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16049 +copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16050 __must_check unsigned long
16051 -copy_user_generic_string(void *to, const void *from, unsigned len);
16052 +copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16053 __must_check unsigned long
16054 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16055 +copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16056
16057 -static __always_inline __must_check unsigned long
16058 +static __always_inline __must_check __size_overflow(3) unsigned long
16059 copy_user_generic(void *to, const void *from, unsigned len)
16060 {
16061 unsigned ret;
16062 @@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16063 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16064 "=d" (len)),
16065 "1" (to), "2" (from), "3" (len)
16066 - : "memory", "rcx", "r8", "r9", "r10", "r11");
16067 + : "memory", "rcx", "r8", "r9", "r11");
16068 return ret;
16069 }
16070
16071 +static __always_inline __must_check unsigned long
16072 +__copy_to_user(void __user *to, const void *from, unsigned long len);
16073 +static __always_inline __must_check unsigned long
16074 +__copy_from_user(void *to, const void __user *from, unsigned long len);
16075 __must_check unsigned long
16076 -_copy_to_user(void __user *to, const void *from, unsigned len);
16077 -__must_check unsigned long
16078 -_copy_from_user(void *to, const void __user *from, unsigned len);
16079 -__must_check unsigned long
16080 -copy_in_user(void __user *to, const void __user *from, unsigned len);
16081 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
16082 +
16083 +extern void copy_to_user_overflow(void)
16084 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16085 + __compiletime_error("copy_to_user() buffer size is not provably correct")
16086 +#else
16087 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
16088 +#endif
16089 +;
16090 +
16091 +extern void copy_from_user_overflow(void)
16092 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16093 + __compiletime_error("copy_from_user() buffer size is not provably correct")
16094 +#else
16095 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
16096 +#endif
16097 +;
16098
16099 static inline unsigned long __must_check copy_from_user(void *to,
16100 const void __user *from,
16101 unsigned long n)
16102 {
16103 - int sz = __compiletime_object_size(to);
16104 -
16105 might_fault();
16106 - if (likely(sz == -1 || sz >= n))
16107 - n = _copy_from_user(to, from, n);
16108 -#ifdef CONFIG_DEBUG_VM
16109 - else
16110 - WARN(1, "Buffer overflow detected!\n");
16111 -#endif
16112 +
16113 + check_object_size(to, n, false);
16114 +
16115 + if (access_ok(VERIFY_READ, from, n))
16116 + n = __copy_from_user(to, from, n);
16117 + else if (n < INT_MAX)
16118 + memset(to, 0, n);
16119 return n;
16120 }
16121
16122 static __always_inline __must_check
16123 -int copy_to_user(void __user *dst, const void *src, unsigned size)
16124 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
16125 {
16126 might_fault();
16127
16128 - return _copy_to_user(dst, src, size);
16129 + if (access_ok(VERIFY_WRITE, dst, size))
16130 + size = __copy_to_user(dst, src, size);
16131 + return size;
16132 }
16133
16134 static __always_inline __must_check
16135 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
16136 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16137 {
16138 - int ret = 0;
16139 + size_t sz = __compiletime_object_size(dst);
16140 + unsigned ret = 0;
16141
16142 might_fault();
16143 +
16144 + if (size > INT_MAX)
16145 + return size;
16146 +
16147 + check_object_size(dst, size, false);
16148 +
16149 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16150 + if (!__access_ok(VERIFY_READ, src, size))
16151 + return size;
16152 +#endif
16153 +
16154 + if (unlikely(sz != (size_t)-1 && sz < size)) {
16155 + copy_from_user_overflow();
16156 + return size;
16157 + }
16158 +
16159 if (!__builtin_constant_p(size))
16160 - return copy_user_generic(dst, (__force void *)src, size);
16161 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16162 switch (size) {
16163 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16164 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16165 ret, "b", "b", "=q", 1);
16166 return ret;
16167 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16168 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16169 ret, "w", "w", "=r", 2);
16170 return ret;
16171 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16172 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16173 ret, "l", "k", "=r", 4);
16174 return ret;
16175 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16176 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16177 ret, "q", "", "=r", 8);
16178 return ret;
16179 case 10:
16180 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16181 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16182 ret, "q", "", "=r", 10);
16183 if (unlikely(ret))
16184 return ret;
16185 __get_user_asm(*(u16 *)(8 + (char *)dst),
16186 - (u16 __user *)(8 + (char __user *)src),
16187 + (const u16 __user *)(8 + (const char __user *)src),
16188 ret, "w", "w", "=r", 2);
16189 return ret;
16190 case 16:
16191 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16192 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16193 ret, "q", "", "=r", 16);
16194 if (unlikely(ret))
16195 return ret;
16196 __get_user_asm(*(u64 *)(8 + (char *)dst),
16197 - (u64 __user *)(8 + (char __user *)src),
16198 + (const u64 __user *)(8 + (const char __user *)src),
16199 ret, "q", "", "=r", 8);
16200 return ret;
16201 default:
16202 - return copy_user_generic(dst, (__force void *)src, size);
16203 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16204 }
16205 }
16206
16207 static __always_inline __must_check
16208 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
16209 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16210 {
16211 - int ret = 0;
16212 + size_t sz = __compiletime_object_size(src);
16213 + unsigned ret = 0;
16214
16215 might_fault();
16216 +
16217 + if (size > INT_MAX)
16218 + return size;
16219 +
16220 + check_object_size(src, size, true);
16221 +
16222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16223 + if (!__access_ok(VERIFY_WRITE, dst, size))
16224 + return size;
16225 +#endif
16226 +
16227 + if (unlikely(sz != (size_t)-1 && sz < size)) {
16228 + copy_to_user_overflow();
16229 + return size;
16230 + }
16231 +
16232 if (!__builtin_constant_p(size))
16233 - return copy_user_generic((__force void *)dst, src, size);
16234 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16235 switch (size) {
16236 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16237 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16238 ret, "b", "b", "iq", 1);
16239 return ret;
16240 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16241 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16242 ret, "w", "w", "ir", 2);
16243 return ret;
16244 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16245 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16246 ret, "l", "k", "ir", 4);
16247 return ret;
16248 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16249 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16250 ret, "q", "", "er", 8);
16251 return ret;
16252 case 10:
16253 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16254 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16255 ret, "q", "", "er", 10);
16256 if (unlikely(ret))
16257 return ret;
16258 asm("":::"memory");
16259 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16260 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16261 ret, "w", "w", "ir", 2);
16262 return ret;
16263 case 16:
16264 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16265 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16266 ret, "q", "", "er", 16);
16267 if (unlikely(ret))
16268 return ret;
16269 asm("":::"memory");
16270 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16271 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16272 ret, "q", "", "er", 8);
16273 return ret;
16274 default:
16275 - return copy_user_generic((__force void *)dst, src, size);
16276 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16277 }
16278 }
16279
16280 static __always_inline __must_check
16281 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16282 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16283 {
16284 - int ret = 0;
16285 + unsigned ret = 0;
16286
16287 might_fault();
16288 +
16289 + if (size > INT_MAX)
16290 + return size;
16291 +
16292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16293 + if (!__access_ok(VERIFY_READ, src, size))
16294 + return size;
16295 + if (!__access_ok(VERIFY_WRITE, dst, size))
16296 + return size;
16297 +#endif
16298 +
16299 if (!__builtin_constant_p(size))
16300 - return copy_user_generic((__force void *)dst,
16301 - (__force void *)src, size);
16302 + return copy_user_generic((__force_kernel void *)____m(dst),
16303 + (__force_kernel const void *)____m(src), size);
16304 switch (size) {
16305 case 1: {
16306 u8 tmp;
16307 - __get_user_asm(tmp, (u8 __user *)src,
16308 + __get_user_asm(tmp, (const u8 __user *)src,
16309 ret, "b", "b", "=q", 1);
16310 if (likely(!ret))
16311 __put_user_asm(tmp, (u8 __user *)dst,
16312 @@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16313 }
16314 case 2: {
16315 u16 tmp;
16316 - __get_user_asm(tmp, (u16 __user *)src,
16317 + __get_user_asm(tmp, (const u16 __user *)src,
16318 ret, "w", "w", "=r", 2);
16319 if (likely(!ret))
16320 __put_user_asm(tmp, (u16 __user *)dst,
16321 @@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16322
16323 case 4: {
16324 u32 tmp;
16325 - __get_user_asm(tmp, (u32 __user *)src,
16326 + __get_user_asm(tmp, (const u32 __user *)src,
16327 ret, "l", "k", "=r", 4);
16328 if (likely(!ret))
16329 __put_user_asm(tmp, (u32 __user *)dst,
16330 @@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16331 }
16332 case 8: {
16333 u64 tmp;
16334 - __get_user_asm(tmp, (u64 __user *)src,
16335 + __get_user_asm(tmp, (const u64 __user *)src,
16336 ret, "q", "", "=r", 8);
16337 if (likely(!ret))
16338 __put_user_asm(tmp, (u64 __user *)dst,
16339 @@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16340 return ret;
16341 }
16342 default:
16343 - return copy_user_generic((__force void *)dst,
16344 - (__force void *)src, size);
16345 + return copy_user_generic((__force_kernel void *)____m(dst),
16346 + (__force_kernel const void *)____m(src), size);
16347 }
16348 }
16349
16350 static __must_check __always_inline int
16351 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16352 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16353 {
16354 - return copy_user_generic(dst, (__force const void *)src, size);
16355 + if (size > INT_MAX)
16356 + return size;
16357 +
16358 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16359 + if (!__access_ok(VERIFY_READ, src, size))
16360 + return size;
16361 +#endif
16362 +
16363 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16364 }
16365
16366 -static __must_check __always_inline int
16367 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16368 +static __must_check __always_inline unsigned long
16369 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16370 {
16371 - return copy_user_generic((__force void *)dst, src, size);
16372 + if (size > INT_MAX)
16373 + return size;
16374 +
16375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16376 + if (!__access_ok(VERIFY_WRITE, dst, size))
16377 + return size;
16378 +#endif
16379 +
16380 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16381 }
16382
16383 -extern long __copy_user_nocache(void *dst, const void __user *src,
16384 - unsigned size, int zerorest);
16385 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16386 + unsigned long size, int zerorest) __size_overflow(3);
16387
16388 -static inline int
16389 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16390 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16391 {
16392 might_sleep();
16393 +
16394 + if (size > INT_MAX)
16395 + return size;
16396 +
16397 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16398 + if (!__access_ok(VERIFY_READ, src, size))
16399 + return size;
16400 +#endif
16401 +
16402 return __copy_user_nocache(dst, src, size, 1);
16403 }
16404
16405 -static inline int
16406 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16407 - unsigned size)
16408 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16409 + unsigned long size)
16410 {
16411 + if (size > INT_MAX)
16412 + return size;
16413 +
16414 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16415 + if (!__access_ok(VERIFY_READ, src, size))
16416 + return size;
16417 +#endif
16418 +
16419 return __copy_user_nocache(dst, src, size, 0);
16420 }
16421
16422 -unsigned long
16423 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16424 +extern unsigned long
16425 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16426
16427 #endif /* _ASM_X86_UACCESS_64_H */
16428 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16429 index 5b238981..77fdd78 100644
16430 --- a/arch/x86/include/asm/word-at-a-time.h
16431 +++ b/arch/x86/include/asm/word-at-a-time.h
16432 @@ -11,7 +11,7 @@
16433 * and shift, for example.
16434 */
16435 struct word_at_a_time {
16436 - const unsigned long one_bits, high_bits;
16437 + unsigned long one_bits, high_bits;
16438 };
16439
16440 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16441 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16442 index d8d9922..bf6cecb 100644
16443 --- a/arch/x86/include/asm/x86_init.h
16444 +++ b/arch/x86/include/asm/x86_init.h
16445 @@ -129,7 +129,7 @@ struct x86_init_ops {
16446 struct x86_init_timers timers;
16447 struct x86_init_iommu iommu;
16448 struct x86_init_pci pci;
16449 -};
16450 +} __no_const;
16451
16452 /**
16453 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16454 @@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
16455 void (*setup_percpu_clockev)(void);
16456 void (*early_percpu_clock_init)(void);
16457 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16458 -};
16459 +} __no_const;
16460
16461 /**
16462 * struct x86_platform_ops - platform specific runtime functions
16463 @@ -166,7 +166,7 @@ struct x86_platform_ops {
16464 void (*save_sched_clock_state)(void);
16465 void (*restore_sched_clock_state)(void);
16466 void (*apic_post_init)(void);
16467 -};
16468 +} __no_const;
16469
16470 struct pci_dev;
16471 struct msi_msg;
16472 @@ -180,7 +180,7 @@ struct x86_msi_ops {
16473 void (*teardown_msi_irqs)(struct pci_dev *dev);
16474 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16475 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
16476 -};
16477 +} __no_const;
16478
16479 struct IO_APIC_route_entry;
16480 struct io_apic_irq_attr;
16481 @@ -201,7 +201,7 @@ struct x86_io_apic_ops {
16482 unsigned int destination, int vector,
16483 struct io_apic_irq_attr *attr);
16484 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
16485 -};
16486 +} __no_const;
16487
16488 extern struct x86_init_ops x86_init;
16489 extern struct x86_cpuinit_ops x86_cpuinit;
16490 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16491 index 0415cda..b43d877 100644
16492 --- a/arch/x86/include/asm/xsave.h
16493 +++ b/arch/x86/include/asm/xsave.h
16494 @@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16495 return -EFAULT;
16496
16497 __asm__ __volatile__(ASM_STAC "\n"
16498 - "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16499 + "1:"
16500 + __copyuser_seg
16501 + ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16502 "2: " ASM_CLAC "\n"
16503 ".section .fixup,\"ax\"\n"
16504 "3: movl $-1,%[err]\n"
16505 @@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16506 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16507 {
16508 int err;
16509 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16510 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16511 u32 lmask = mask;
16512 u32 hmask = mask >> 32;
16513
16514 __asm__ __volatile__(ASM_STAC "\n"
16515 - "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16516 + "1:"
16517 + __copyuser_seg
16518 + ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16519 "2: " ASM_CLAC "\n"
16520 ".section .fixup,\"ax\"\n"
16521 "3: movl $-1,%[err]\n"
16522 diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16523 index bbae024..e1528f9 100644
16524 --- a/arch/x86/include/uapi/asm/e820.h
16525 +++ b/arch/x86/include/uapi/asm/e820.h
16526 @@ -63,7 +63,7 @@ struct e820map {
16527 #define ISA_START_ADDRESS 0xa0000
16528 #define ISA_END_ADDRESS 0x100000
16529
16530 -#define BIOS_BEGIN 0x000a0000
16531 +#define BIOS_BEGIN 0x000c0000
16532 #define BIOS_END 0x00100000
16533
16534 #define BIOS_ROM_BASE 0xffe00000
16535 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16536 index 7bd3bd3..5dac791 100644
16537 --- a/arch/x86/kernel/Makefile
16538 +++ b/arch/x86/kernel/Makefile
16539 @@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16540 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16541 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16542 obj-y += probe_roms.o
16543 -obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16544 +obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16545 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16546 obj-y += syscall_$(BITS).o
16547 obj-$(CONFIG_X86_64) += vsyscall_64.o
16548 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16549 index 230c8ea..f915130 100644
16550 --- a/arch/x86/kernel/acpi/boot.c
16551 +++ b/arch/x86/kernel/acpi/boot.c
16552 @@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16553 * If your system is blacklisted here, but you find that acpi=force
16554 * works for you, please contact linux-acpi@vger.kernel.org
16555 */
16556 -static struct dmi_system_id __initdata acpi_dmi_table[] = {
16557 +static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16558 /*
16559 * Boxes that need ACPI disabled
16560 */
16561 @@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16562 };
16563
16564 /* second table for DMI checks that should run after early-quirks */
16565 -static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16566 +static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16567 /*
16568 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16569 * which includes some code which overrides all temperature
16570 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16571 index 0532f5d..36afc0a 100644
16572 --- a/arch/x86/kernel/acpi/sleep.c
16573 +++ b/arch/x86/kernel/acpi/sleep.c
16574 @@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16575 #else /* CONFIG_64BIT */
16576 #ifdef CONFIG_SMP
16577 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16578 +
16579 + pax_open_kernel();
16580 early_gdt_descr.address =
16581 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16582 + pax_close_kernel();
16583 +
16584 initial_gs = per_cpu_offset(smp_processor_id());
16585 #endif
16586 initial_code = (unsigned long)wakeup_long64;
16587 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16588 index 13ab720..95d5442 100644
16589 --- a/arch/x86/kernel/acpi/wakeup_32.S
16590 +++ b/arch/x86/kernel/acpi/wakeup_32.S
16591 @@ -30,13 +30,11 @@ wakeup_pmode_return:
16592 # and restore the stack ... but you need gdt for this to work
16593 movl saved_context_esp, %esp
16594
16595 - movl %cs:saved_magic, %eax
16596 - cmpl $0x12345678, %eax
16597 + cmpl $0x12345678, saved_magic
16598 jne bogus_magic
16599
16600 # jump to place where we left off
16601 - movl saved_eip, %eax
16602 - jmp *%eax
16603 + jmp *(saved_eip)
16604
16605 bogus_magic:
16606 jmp bogus_magic
16607 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16608 index ef5ccca..bd83949 100644
16609 --- a/arch/x86/kernel/alternative.c
16610 +++ b/arch/x86/kernel/alternative.c
16611 @@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16612 */
16613 for (a = start; a < end; a++) {
16614 instr = (u8 *)&a->instr_offset + a->instr_offset;
16615 +
16616 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16617 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16618 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16619 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16620 +#endif
16621 +
16622 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16623 BUG_ON(a->replacementlen > a->instrlen);
16624 BUG_ON(a->instrlen > sizeof(insnbuf));
16625 @@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16626 for (poff = start; poff < end; poff++) {
16627 u8 *ptr = (u8 *)poff + *poff;
16628
16629 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16630 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16631 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16632 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16633 +#endif
16634 +
16635 if (!*poff || ptr < text || ptr >= text_end)
16636 continue;
16637 /* turn DS segment override prefix into lock prefix */
16638 - if (*ptr == 0x3e)
16639 + if (*ktla_ktva(ptr) == 0x3e)
16640 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16641 }
16642 mutex_unlock(&text_mutex);
16643 @@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16644 for (poff = start; poff < end; poff++) {
16645 u8 *ptr = (u8 *)poff + *poff;
16646
16647 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16648 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16649 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16650 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16651 +#endif
16652 +
16653 if (!*poff || ptr < text || ptr >= text_end)
16654 continue;
16655 /* turn lock prefix into DS segment override prefix */
16656 - if (*ptr == 0xf0)
16657 + if (*ktla_ktva(ptr) == 0xf0)
16658 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16659 }
16660 mutex_unlock(&text_mutex);
16661 @@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16662
16663 BUG_ON(p->len > MAX_PATCH_LEN);
16664 /* prep the buffer with the original instructions */
16665 - memcpy(insnbuf, p->instr, p->len);
16666 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16667 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16668 (unsigned long)p->instr, p->len);
16669
16670 @@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16671 if (!uniproc_patched || num_possible_cpus() == 1)
16672 free_init_pages("SMP alternatives",
16673 (unsigned long)__smp_locks,
16674 - (unsigned long)__smp_locks_end);
16675 + PAGE_ALIGN((unsigned long)__smp_locks_end));
16676 #endif
16677
16678 apply_paravirt(__parainstructions, __parainstructions_end);
16679 @@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16680 * instructions. And on the local CPU you need to be protected again NMI or MCE
16681 * handlers seeing an inconsistent instruction while you patch.
16682 */
16683 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
16684 +void *__kprobes text_poke_early(void *addr, const void *opcode,
16685 size_t len)
16686 {
16687 unsigned long flags;
16688 local_irq_save(flags);
16689 - memcpy(addr, opcode, len);
16690 +
16691 + pax_open_kernel();
16692 + memcpy(ktla_ktva(addr), opcode, len);
16693 sync_core();
16694 + pax_close_kernel();
16695 +
16696 local_irq_restore(flags);
16697 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16698 that causes hangs on some VIA CPUs. */
16699 @@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16700 */
16701 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16702 {
16703 - unsigned long flags;
16704 - char *vaddr;
16705 + unsigned char *vaddr = ktla_ktva(addr);
16706 struct page *pages[2];
16707 - int i;
16708 + size_t i;
16709
16710 if (!core_kernel_text((unsigned long)addr)) {
16711 - pages[0] = vmalloc_to_page(addr);
16712 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16713 + pages[0] = vmalloc_to_page(vaddr);
16714 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16715 } else {
16716 - pages[0] = virt_to_page(addr);
16717 + pages[0] = virt_to_page(vaddr);
16718 WARN_ON(!PageReserved(pages[0]));
16719 - pages[1] = virt_to_page(addr + PAGE_SIZE);
16720 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16721 }
16722 BUG_ON(!pages[0]);
16723 - local_irq_save(flags);
16724 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16725 - if (pages[1])
16726 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16727 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16728 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16729 - clear_fixmap(FIX_TEXT_POKE0);
16730 - if (pages[1])
16731 - clear_fixmap(FIX_TEXT_POKE1);
16732 - local_flush_tlb();
16733 - sync_core();
16734 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
16735 - that causes hangs on some VIA CPUs. */
16736 + text_poke_early(addr, opcode, len);
16737 for (i = 0; i < len; i++)
16738 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16739 - local_irq_restore(flags);
16740 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16741 return addr;
16742 }
16743
16744 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16745 index 904611b..004dde6 100644
16746 --- a/arch/x86/kernel/apic/apic.c
16747 +++ b/arch/x86/kernel/apic/apic.c
16748 @@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16749 /*
16750 * Debug level, exported for io_apic.c
16751 */
16752 -unsigned int apic_verbosity;
16753 +int apic_verbosity;
16754
16755 int pic_mode;
16756
16757 @@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16758 apic_write(APIC_ESR, 0);
16759 v1 = apic_read(APIC_ESR);
16760 ack_APIC_irq();
16761 - atomic_inc(&irq_err_count);
16762 + atomic_inc_unchecked(&irq_err_count);
16763
16764 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16765 smp_processor_id(), v0 , v1);
16766 diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16767 index 00c77cf..2dc6a2d 100644
16768 --- a/arch/x86/kernel/apic/apic_flat_64.c
16769 +++ b/arch/x86/kernel/apic/apic_flat_64.c
16770 @@ -157,7 +157,7 @@ static int flat_probe(void)
16771 return 1;
16772 }
16773
16774 -static struct apic apic_flat = {
16775 +static struct apic apic_flat __read_only = {
16776 .name = "flat",
16777 .probe = flat_probe,
16778 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16779 @@ -271,7 +271,7 @@ static int physflat_probe(void)
16780 return 0;
16781 }
16782
16783 -static struct apic apic_physflat = {
16784 +static struct apic apic_physflat __read_only = {
16785
16786 .name = "physical flat",
16787 .probe = physflat_probe,
16788 diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16789 index e145f28..2752888 100644
16790 --- a/arch/x86/kernel/apic/apic_noop.c
16791 +++ b/arch/x86/kernel/apic/apic_noop.c
16792 @@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16793 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16794 }
16795
16796 -struct apic apic_noop = {
16797 +struct apic apic_noop __read_only = {
16798 .name = "noop",
16799 .probe = noop_probe,
16800 .acpi_madt_oem_check = NULL,
16801 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16802 index d50e364..543bee3 100644
16803 --- a/arch/x86/kernel/apic/bigsmp_32.c
16804 +++ b/arch/x86/kernel/apic/bigsmp_32.c
16805 @@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16806 return dmi_bigsmp;
16807 }
16808
16809 -static struct apic apic_bigsmp = {
16810 +static struct apic apic_bigsmp __read_only = {
16811
16812 .name = "bigsmp",
16813 .probe = probe_bigsmp,
16814 diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16815 index 0874799..a7a7892 100644
16816 --- a/arch/x86/kernel/apic/es7000_32.c
16817 +++ b/arch/x86/kernel/apic/es7000_32.c
16818 @@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16819 return ret && es7000_apic_is_cluster();
16820 }
16821
16822 -/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16823 -static struct apic __refdata apic_es7000_cluster = {
16824 +static struct apic apic_es7000_cluster __read_only = {
16825
16826 .name = "es7000",
16827 .probe = probe_es7000,
16828 @@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16829 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16830 };
16831
16832 -static struct apic __refdata apic_es7000 = {
16833 +static struct apic apic_es7000 __read_only = {
16834
16835 .name = "es7000",
16836 .probe = probe_es7000,
16837 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16838 index 9ed796c..e930fe4 100644
16839 --- a/arch/x86/kernel/apic/io_apic.c
16840 +++ b/arch/x86/kernel/apic/io_apic.c
16841 @@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16842 }
16843 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16844
16845 -void lock_vector_lock(void)
16846 +void lock_vector_lock(void) __acquires(vector_lock)
16847 {
16848 /* Used to the online set of cpus does not change
16849 * during assign_irq_vector.
16850 @@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
16851 raw_spin_lock(&vector_lock);
16852 }
16853
16854 -void unlock_vector_lock(void)
16855 +void unlock_vector_lock(void) __releases(vector_lock)
16856 {
16857 raw_spin_unlock(&vector_lock);
16858 }
16859 @@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
16860 ack_APIC_irq();
16861 }
16862
16863 -atomic_t irq_mis_count;
16864 +atomic_unchecked_t irq_mis_count;
16865
16866 #ifdef CONFIG_GENERIC_PENDING_IRQ
16867 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16868 @@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
16869 * at the cpu.
16870 */
16871 if (!(v & (1 << (i & 0x1f)))) {
16872 - atomic_inc(&irq_mis_count);
16873 + atomic_inc_unchecked(&irq_mis_count);
16874
16875 eoi_ioapic_irq(irq, cfg);
16876 }
16877 diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16878 index d661ee9..791fd33 100644
16879 --- a/arch/x86/kernel/apic/numaq_32.c
16880 +++ b/arch/x86/kernel/apic/numaq_32.c
16881 @@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16882 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16883 }
16884
16885 -/* Use __refdata to keep false positive warning calm. */
16886 -static struct apic __refdata apic_numaq = {
16887 +static struct apic apic_numaq __read_only = {
16888
16889 .name = "NUMAQ",
16890 .probe = probe_numaq,
16891 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16892 index eb35ef9..f184a21 100644
16893 --- a/arch/x86/kernel/apic/probe_32.c
16894 +++ b/arch/x86/kernel/apic/probe_32.c
16895 @@ -72,7 +72,7 @@ static int probe_default(void)
16896 return 1;
16897 }
16898
16899 -static struct apic apic_default = {
16900 +static struct apic apic_default __read_only = {
16901
16902 .name = "default",
16903 .probe = probe_default,
16904 diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16905 index 77c95c0..434f8a4 100644
16906 --- a/arch/x86/kernel/apic/summit_32.c
16907 +++ b/arch/x86/kernel/apic/summit_32.c
16908 @@ -486,7 +486,7 @@ void setup_summit(void)
16909 }
16910 #endif
16911
16912 -static struct apic apic_summit = {
16913 +static struct apic apic_summit __read_only = {
16914
16915 .name = "summit",
16916 .probe = probe_summit,
16917 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16918 index c88baa4..757aee1 100644
16919 --- a/arch/x86/kernel/apic/x2apic_cluster.c
16920 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
16921 @@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16922 return notifier_from_errno(err);
16923 }
16924
16925 -static struct notifier_block __refdata x2apic_cpu_notifier = {
16926 +static struct notifier_block x2apic_cpu_notifier = {
16927 .notifier_call = update_clusterinfo,
16928 };
16929
16930 @@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
16931 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
16932 }
16933
16934 -static struct apic apic_x2apic_cluster = {
16935 +static struct apic apic_x2apic_cluster __read_only = {
16936
16937 .name = "cluster x2apic",
16938 .probe = x2apic_cluster_probe,
16939 diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
16940 index 562a76d..a003c0f 100644
16941 --- a/arch/x86/kernel/apic/x2apic_phys.c
16942 +++ b/arch/x86/kernel/apic/x2apic_phys.c
16943 @@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
16944 return apic == &apic_x2apic_phys;
16945 }
16946
16947 -static struct apic apic_x2apic_phys = {
16948 +static struct apic apic_x2apic_phys __read_only = {
16949
16950 .name = "physical x2apic",
16951 .probe = x2apic_phys_probe,
16952 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
16953 index 794f6eb..67e1db2 100644
16954 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
16955 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
16956 @@ -342,7 +342,7 @@ static int uv_probe(void)
16957 return apic == &apic_x2apic_uv_x;
16958 }
16959
16960 -static struct apic __refdata apic_x2apic_uv_x = {
16961 +static struct apic apic_x2apic_uv_x __read_only = {
16962
16963 .name = "UV large system",
16964 .probe = uv_probe,
16965 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
16966 index 66b5faf..3442423 100644
16967 --- a/arch/x86/kernel/apm_32.c
16968 +++ b/arch/x86/kernel/apm_32.c
16969 @@ -434,7 +434,7 @@ static DEFINE_MUTEX(apm_mutex);
16970 * This is for buggy BIOS's that refer to (real mode) segment 0x40
16971 * even though they are called in protected mode.
16972 */
16973 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
16974 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
16975 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
16976
16977 static const char driver_version[] = "1.16ac"; /* no spaces */
16978 @@ -612,7 +612,10 @@ static long __apm_bios_call(void *_call)
16979 BUG_ON(cpu != 0);
16980 gdt = get_cpu_gdt_table(cpu);
16981 save_desc_40 = gdt[0x40 / 8];
16982 +
16983 + pax_open_kernel();
16984 gdt[0x40 / 8] = bad_bios_desc;
16985 + pax_close_kernel();
16986
16987 apm_irq_save(flags);
16988 APM_DO_SAVE_SEGS;
16989 @@ -621,7 +624,11 @@ static long __apm_bios_call(void *_call)
16990 &call->esi);
16991 APM_DO_RESTORE_SEGS;
16992 apm_irq_restore(flags);
16993 +
16994 + pax_open_kernel();
16995 gdt[0x40 / 8] = save_desc_40;
16996 + pax_close_kernel();
16997 +
16998 put_cpu();
16999
17000 return call->eax & 0xff;
17001 @@ -688,7 +695,10 @@ static long __apm_bios_call_simple(void *_call)
17002 BUG_ON(cpu != 0);
17003 gdt = get_cpu_gdt_table(cpu);
17004 save_desc_40 = gdt[0x40 / 8];
17005 +
17006 + pax_open_kernel();
17007 gdt[0x40 / 8] = bad_bios_desc;
17008 + pax_close_kernel();
17009
17010 apm_irq_save(flags);
17011 APM_DO_SAVE_SEGS;
17012 @@ -696,7 +706,11 @@ static long __apm_bios_call_simple(void *_call)
17013 &call->eax);
17014 APM_DO_RESTORE_SEGS;
17015 apm_irq_restore(flags);
17016 +
17017 + pax_open_kernel();
17018 gdt[0x40 / 8] = save_desc_40;
17019 + pax_close_kernel();
17020 +
17021 put_cpu();
17022 return error;
17023 }
17024 @@ -2363,12 +2377,15 @@ static int __init apm_init(void)
17025 * code to that CPU.
17026 */
17027 gdt = get_cpu_gdt_table(0);
17028 +
17029 + pax_open_kernel();
17030 set_desc_base(&gdt[APM_CS >> 3],
17031 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17032 set_desc_base(&gdt[APM_CS_16 >> 3],
17033 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17034 set_desc_base(&gdt[APM_DS >> 3],
17035 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17036 + pax_close_kernel();
17037
17038 proc_create("apm", 0, NULL, &apm_file_ops);
17039
17040 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17041 index 2861082..6d4718e 100644
17042 --- a/arch/x86/kernel/asm-offsets.c
17043 +++ b/arch/x86/kernel/asm-offsets.c
17044 @@ -33,6 +33,8 @@ void common(void) {
17045 OFFSET(TI_status, thread_info, status);
17046 OFFSET(TI_addr_limit, thread_info, addr_limit);
17047 OFFSET(TI_preempt_count, thread_info, preempt_count);
17048 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17049 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17050
17051 BLANK();
17052 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17053 @@ -53,8 +55,26 @@ void common(void) {
17054 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17055 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17056 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17057 +
17058 +#ifdef CONFIG_PAX_KERNEXEC
17059 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17060 #endif
17061
17062 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17063 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17064 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17065 +#ifdef CONFIG_X86_64
17066 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17067 +#endif
17068 +#endif
17069 +
17070 +#endif
17071 +
17072 + BLANK();
17073 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17074 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17075 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17076 +
17077 #ifdef CONFIG_XEN
17078 BLANK();
17079 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
17080 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17081 index 1b4754f..fbb4227 100644
17082 --- a/arch/x86/kernel/asm-offsets_64.c
17083 +++ b/arch/x86/kernel/asm-offsets_64.c
17084 @@ -76,6 +76,7 @@ int main(void)
17085 BLANK();
17086 #undef ENTRY
17087
17088 + DEFINE(TSS_size, sizeof(struct tss_struct));
17089 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17090 BLANK();
17091
17092 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17093 index a0e067d..9c7db16 100644
17094 --- a/arch/x86/kernel/cpu/Makefile
17095 +++ b/arch/x86/kernel/cpu/Makefile
17096 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17097 CFLAGS_REMOVE_perf_event.o = -pg
17098 endif
17099
17100 -# Make sure load_percpu_segment has no stackprotector
17101 -nostackp := $(call cc-option, -fno-stack-protector)
17102 -CFLAGS_common.o := $(nostackp)
17103 -
17104 obj-y := intel_cacheinfo.o scattered.o topology.o
17105 obj-y += proc.o capflags.o powerflags.o common.o
17106 obj-y += vmware.o hypervisor.o mshyperv.o
17107 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17108 index fa96eb0..03efe73 100644
17109 --- a/arch/x86/kernel/cpu/amd.c
17110 +++ b/arch/x86/kernel/cpu/amd.c
17111 @@ -737,7 +737,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17112 unsigned int size)
17113 {
17114 /* AMD errata T13 (order #21922) */
17115 - if ((c->x86 == 6)) {
17116 + if (c->x86 == 6) {
17117 /* Duron Rev A0 */
17118 if (c->x86_model == 3 && c->x86_mask == 0)
17119 size = 64;
17120 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17121 index d814772..c615653 100644
17122 --- a/arch/x86/kernel/cpu/common.c
17123 +++ b/arch/x86/kernel/cpu/common.c
17124 @@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17125
17126 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17127
17128 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17129 -#ifdef CONFIG_X86_64
17130 - /*
17131 - * We need valid kernel segments for data and code in long mode too
17132 - * IRET will check the segment types kkeil 2000/10/28
17133 - * Also sysret mandates a special GDT layout
17134 - *
17135 - * TLS descriptors are currently at a different place compared to i386.
17136 - * Hopefully nobody expects them at a fixed place (Wine?)
17137 - */
17138 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17139 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17140 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17141 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17142 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17143 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17144 -#else
17145 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17146 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17147 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17148 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17149 - /*
17150 - * Segments used for calling PnP BIOS have byte granularity.
17151 - * They code segments and data segments have fixed 64k limits,
17152 - * the transfer segment sizes are set at run time.
17153 - */
17154 - /* 32-bit code */
17155 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17156 - /* 16-bit code */
17157 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17158 - /* 16-bit data */
17159 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17160 - /* 16-bit data */
17161 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17162 - /* 16-bit data */
17163 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17164 - /*
17165 - * The APM segments have byte granularity and their bases
17166 - * are set at run time. All have 64k limits.
17167 - */
17168 - /* 32-bit code */
17169 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17170 - /* 16-bit code */
17171 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17172 - /* data */
17173 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17174 -
17175 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17176 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17177 - GDT_STACK_CANARY_INIT
17178 -#endif
17179 -} };
17180 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17181 -
17182 static int __init x86_xsave_setup(char *s)
17183 {
17184 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17185 @@ -386,7 +332,7 @@ void switch_to_new_gdt(int cpu)
17186 {
17187 struct desc_ptr gdt_descr;
17188
17189 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17190 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17191 gdt_descr.size = GDT_SIZE - 1;
17192 load_gdt(&gdt_descr);
17193 /* Reload the per-cpu base */
17194 @@ -882,6 +828,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17195 /* Filter out anything that depends on CPUID levels we don't have */
17196 filter_cpuid_features(c, true);
17197
17198 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17199 + setup_clear_cpu_cap(X86_FEATURE_SEP);
17200 +#endif
17201 +
17202 /* If the model name is still unset, do table lookup. */
17203 if (!c->x86_model_id[0]) {
17204 const char *p;
17205 @@ -1065,10 +1015,12 @@ static __init int setup_disablecpuid(char *arg)
17206 }
17207 __setup("clearcpuid=", setup_disablecpuid);
17208
17209 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17210 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
17211 +
17212 #ifdef CONFIG_X86_64
17213 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17214 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17215 - (unsigned long) nmi_idt_table };
17216 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17217
17218 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17219 irq_stack_union) __aligned(PAGE_SIZE);
17220 @@ -1082,7 +1034,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17221 EXPORT_PER_CPU_SYMBOL(current_task);
17222
17223 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17224 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17225 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17226 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17227
17228 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17229 @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
17230 load_ucode_ap();
17231
17232 cpu = stack_smp_processor_id();
17233 - t = &per_cpu(init_tss, cpu);
17234 + t = init_tss + cpu;
17235 oist = &per_cpu(orig_ist, cpu);
17236
17237 #ifdef CONFIG_NUMA
17238 @@ -1253,7 +1205,7 @@ void __cpuinit cpu_init(void)
17239 switch_to_new_gdt(cpu);
17240 loadsegment(fs, 0);
17241
17242 - load_idt((const struct desc_ptr *)&idt_descr);
17243 + load_idt(&idt_descr);
17244
17245 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17246 syscall_init();
17247 @@ -1262,7 +1214,6 @@ void __cpuinit cpu_init(void)
17248 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17249 barrier();
17250
17251 - x86_configure_nx();
17252 enable_x2apic();
17253
17254 /*
17255 @@ -1314,7 +1265,7 @@ void __cpuinit cpu_init(void)
17256 {
17257 int cpu = smp_processor_id();
17258 struct task_struct *curr = current;
17259 - struct tss_struct *t = &per_cpu(init_tss, cpu);
17260 + struct tss_struct *t = init_tss + cpu;
17261 struct thread_struct *thread = &curr->thread;
17262
17263 show_ucode_info_early();
17264 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
17265 index 1905ce9..a7ac587 100644
17266 --- a/arch/x86/kernel/cpu/intel.c
17267 +++ b/arch/x86/kernel/cpu/intel.c
17268 @@ -173,7 +173,7 @@ static void __cpuinit trap_init_f00f_bug(void)
17269 * Update the IDT descriptor and reload the IDT so that
17270 * it uses the read-only mapped virtual address.
17271 */
17272 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
17273 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
17274 load_idt(&idt_descr);
17275 }
17276 #endif
17277 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17278 index 7c6f7d5..8cac382 100644
17279 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17280 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17281 @@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17282 };
17283
17284 #ifdef CONFIG_AMD_NB
17285 +static struct attribute *default_attrs_amd_nb[] = {
17286 + &type.attr,
17287 + &level.attr,
17288 + &coherency_line_size.attr,
17289 + &physical_line_partition.attr,
17290 + &ways_of_associativity.attr,
17291 + &number_of_sets.attr,
17292 + &size.attr,
17293 + &shared_cpu_map.attr,
17294 + &shared_cpu_list.attr,
17295 + NULL,
17296 + NULL,
17297 + NULL,
17298 + NULL
17299 +};
17300 +
17301 static struct attribute ** __cpuinit amd_l3_attrs(void)
17302 {
17303 static struct attribute **attrs;
17304 @@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17305
17306 n = ARRAY_SIZE(default_attrs);
17307
17308 - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17309 - n += 2;
17310 -
17311 - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17312 - n += 1;
17313 -
17314 - attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17315 - if (attrs == NULL)
17316 - return attrs = default_attrs;
17317 -
17318 - for (n = 0; default_attrs[n]; n++)
17319 - attrs[n] = default_attrs[n];
17320 + attrs = default_attrs_amd_nb;
17321
17322 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17323 attrs[n++] = &cache_disable_0.attr;
17324 @@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17325 .default_attrs = default_attrs,
17326 };
17327
17328 +#ifdef CONFIG_AMD_NB
17329 +static struct kobj_type ktype_cache_amd_nb = {
17330 + .sysfs_ops = &sysfs_ops,
17331 + .default_attrs = default_attrs_amd_nb,
17332 +};
17333 +#endif
17334 +
17335 static struct kobj_type ktype_percpu_entry = {
17336 .sysfs_ops = &sysfs_ops,
17337 };
17338 @@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17339 return retval;
17340 }
17341
17342 +#ifdef CONFIG_AMD_NB
17343 + amd_l3_attrs();
17344 +#endif
17345 +
17346 for (i = 0; i < num_cache_leaves; i++) {
17347 + struct kobj_type *ktype;
17348 +
17349 this_object = INDEX_KOBJECT_PTR(cpu, i);
17350 this_object->cpu = cpu;
17351 this_object->index = i;
17352
17353 this_leaf = CPUID4_INFO_IDX(cpu, i);
17354
17355 - ktype_cache.default_attrs = default_attrs;
17356 + ktype = &ktype_cache;
17357 #ifdef CONFIG_AMD_NB
17358 if (this_leaf->base.nb)
17359 - ktype_cache.default_attrs = amd_l3_attrs();
17360 + ktype = &ktype_cache_amd_nb;
17361 #endif
17362 retval = kobject_init_and_add(&(this_object->kobj),
17363 - &ktype_cache,
17364 + ktype,
17365 per_cpu(ici_cache_kobject, cpu),
17366 "index%1lu", i);
17367 if (unlikely(retval)) {
17368 @@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17369 return NOTIFY_OK;
17370 }
17371
17372 -static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17373 +static struct notifier_block cacheinfo_cpu_notifier = {
17374 .notifier_call = cacheinfo_cpu_callback,
17375 };
17376
17377 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17378 index 7bc1263..ce2cbfb 100644
17379 --- a/arch/x86/kernel/cpu/mcheck/mce.c
17380 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
17381 @@ -45,6 +45,7 @@
17382 #include <asm/processor.h>
17383 #include <asm/mce.h>
17384 #include <asm/msr.h>
17385 +#include <asm/local.h>
17386
17387 #include "mce-internal.h"
17388
17389 @@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17390 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17391 m->cs, m->ip);
17392
17393 - if (m->cs == __KERNEL_CS)
17394 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17395 print_symbol("{%s}", m->ip);
17396 pr_cont("\n");
17397 }
17398 @@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17399
17400 #define PANIC_TIMEOUT 5 /* 5 seconds */
17401
17402 -static atomic_t mce_paniced;
17403 +static atomic_unchecked_t mce_paniced;
17404
17405 static int fake_panic;
17406 -static atomic_t mce_fake_paniced;
17407 +static atomic_unchecked_t mce_fake_paniced;
17408
17409 /* Panic in progress. Enable interrupts and wait for final IPI */
17410 static void wait_for_panic(void)
17411 @@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17412 /*
17413 * Make sure only one CPU runs in machine check panic
17414 */
17415 - if (atomic_inc_return(&mce_paniced) > 1)
17416 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17417 wait_for_panic();
17418 barrier();
17419
17420 @@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17421 console_verbose();
17422 } else {
17423 /* Don't log too much for fake panic */
17424 - if (atomic_inc_return(&mce_fake_paniced) > 1)
17425 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17426 return;
17427 }
17428 /* First print corrected ones that are still unlogged */
17429 @@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
17430 * might have been modified by someone else.
17431 */
17432 rmb();
17433 - if (atomic_read(&mce_paniced))
17434 + if (atomic_read_unchecked(&mce_paniced))
17435 wait_for_panic();
17436 if (!mca_cfg.monarch_timeout)
17437 goto out;
17438 @@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17439 }
17440
17441 /* Call the installed machine check handler for this CPU setup. */
17442 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
17443 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17444 unexpected_machine_check;
17445
17446 /*
17447 @@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17448 return;
17449 }
17450
17451 + pax_open_kernel();
17452 machine_check_vector = do_machine_check;
17453 + pax_close_kernel();
17454
17455 __mcheck_cpu_init_generic();
17456 __mcheck_cpu_init_vendor(c);
17457 @@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17458 */
17459
17460 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17461 -static int mce_chrdev_open_count; /* #times opened */
17462 +static local_t mce_chrdev_open_count; /* #times opened */
17463 static int mce_chrdev_open_exclu; /* already open exclusive? */
17464
17465 static int mce_chrdev_open(struct inode *inode, struct file *file)
17466 @@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17467 spin_lock(&mce_chrdev_state_lock);
17468
17469 if (mce_chrdev_open_exclu ||
17470 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17471 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17472 spin_unlock(&mce_chrdev_state_lock);
17473
17474 return -EBUSY;
17475 @@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17476
17477 if (file->f_flags & O_EXCL)
17478 mce_chrdev_open_exclu = 1;
17479 - mce_chrdev_open_count++;
17480 + local_inc(&mce_chrdev_open_count);
17481
17482 spin_unlock(&mce_chrdev_state_lock);
17483
17484 @@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17485 {
17486 spin_lock(&mce_chrdev_state_lock);
17487
17488 - mce_chrdev_open_count--;
17489 + local_dec(&mce_chrdev_open_count);
17490 mce_chrdev_open_exclu = 0;
17491
17492 spin_unlock(&mce_chrdev_state_lock);
17493 @@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17494 return NOTIFY_OK;
17495 }
17496
17497 -static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17498 +static struct notifier_block mce_cpu_notifier = {
17499 .notifier_call = mce_cpu_callback,
17500 };
17501
17502 @@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
17503
17504 for (i = 0; i < mca_cfg.banks; i++) {
17505 struct mce_bank *b = &mce_banks[i];
17506 - struct device_attribute *a = &b->attr;
17507 + device_attribute_no_const *a = &b->attr;
17508
17509 sysfs_attr_init(&a->attr);
17510 a->attr.name = b->attrname;
17511 @@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
17512 static void mce_reset(void)
17513 {
17514 cpu_missing = 0;
17515 - atomic_set(&mce_fake_paniced, 0);
17516 + atomic_set_unchecked(&mce_fake_paniced, 0);
17517 atomic_set(&mce_executing, 0);
17518 atomic_set(&mce_callin, 0);
17519 atomic_set(&global_nwo, 0);
17520 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17521 index 1c044b1..37a2a43 100644
17522 --- a/arch/x86/kernel/cpu/mcheck/p5.c
17523 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
17524 @@ -11,6 +11,7 @@
17525 #include <asm/processor.h>
17526 #include <asm/mce.h>
17527 #include <asm/msr.h>
17528 +#include <asm/pgtable.h>
17529
17530 /* By default disabled */
17531 int mce_p5_enabled __read_mostly;
17532 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17533 if (!cpu_has(c, X86_FEATURE_MCE))
17534 return;
17535
17536 + pax_open_kernel();
17537 machine_check_vector = pentium_machine_check;
17538 + pax_close_kernel();
17539 /* Make sure the vector pointer is visible before we enable MCEs: */
17540 wmb();
17541
17542 diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17543 index 47a1870..8c019a7 100644
17544 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17545 +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17546 @@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17547 return notifier_from_errno(err);
17548 }
17549
17550 -static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17551 +static struct notifier_block thermal_throttle_cpu_notifier =
17552 {
17553 .notifier_call = thermal_throttle_cpu_callback,
17554 };
17555 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17556 index e9a701a..35317d6 100644
17557 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
17558 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17559 @@ -10,6 +10,7 @@
17560 #include <asm/processor.h>
17561 #include <asm/mce.h>
17562 #include <asm/msr.h>
17563 +#include <asm/pgtable.h>
17564
17565 /* Machine check handler for WinChip C6: */
17566 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17567 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17568 {
17569 u32 lo, hi;
17570
17571 + pax_open_kernel();
17572 machine_check_vector = winchip_machine_check;
17573 + pax_close_kernel();
17574 /* Make sure the vector pointer is visible before we enable MCEs: */
17575 wmb();
17576
17577 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17578 index 726bf96..81f0526 100644
17579 --- a/arch/x86/kernel/cpu/mtrr/main.c
17580 +++ b/arch/x86/kernel/cpu/mtrr/main.c
17581 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17582 u64 size_or_mask, size_and_mask;
17583 static bool mtrr_aps_delayed_init;
17584
17585 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17586 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17587
17588 const struct mtrr_ops *mtrr_if;
17589
17590 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17591 index df5e41f..816c719 100644
17592 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17593 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17594 @@ -25,7 +25,7 @@ struct mtrr_ops {
17595 int (*validate_add_page)(unsigned long base, unsigned long size,
17596 unsigned int type);
17597 int (*have_wrcomb)(void);
17598 -};
17599 +} __do_const;
17600
17601 extern int generic_get_free_region(unsigned long base, unsigned long size,
17602 int replace_reg);
17603 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17604 index bf0f01a..9adfee1 100644
17605 --- a/arch/x86/kernel/cpu/perf_event.c
17606 +++ b/arch/x86/kernel/cpu/perf_event.c
17607 @@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17608 pr_info("no hardware sampling interrupt available.\n");
17609 }
17610
17611 -static struct attribute_group x86_pmu_format_group = {
17612 +static attribute_group_no_const x86_pmu_format_group = {
17613 .name = "format",
17614 .attrs = NULL,
17615 };
17616 @@ -1374,7 +1374,7 @@ static struct attribute *events_attr[] = {
17617 NULL,
17618 };
17619
17620 -static struct attribute_group x86_pmu_events_group = {
17621 +static attribute_group_no_const x86_pmu_events_group = {
17622 .name = "events",
17623 .attrs = events_attr,
17624 };
17625 @@ -1873,7 +1873,7 @@ static unsigned long get_segment_base(unsigned int segment)
17626 if (idx > GDT_ENTRIES)
17627 return 0;
17628
17629 - desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17630 + desc = get_cpu_gdt_table(smp_processor_id());
17631 }
17632
17633 return get_desc_base(desc + idx);
17634 @@ -1963,7 +1963,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17635 break;
17636
17637 perf_callchain_store(entry, frame.return_address);
17638 - fp = frame.next_frame;
17639 + fp = (const void __force_user *)frame.next_frame;
17640 }
17641 }
17642
17643 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17644 index 4a0a462..be3b204 100644
17645 --- a/arch/x86/kernel/cpu/perf_event_intel.c
17646 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
17647 @@ -1994,10 +1994,10 @@ __init int intel_pmu_init(void)
17648 * v2 and above have a perf capabilities MSR
17649 */
17650 if (version > 1) {
17651 - u64 capabilities;
17652 + u64 capabilities = x86_pmu.intel_cap.capabilities;
17653
17654 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17655 - x86_pmu.intel_cap.capabilities = capabilities;
17656 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17657 + x86_pmu.intel_cap.capabilities = capabilities;
17658 }
17659
17660 intel_ds_init();
17661 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17662 index 3e091f0..d2dc8d6 100644
17663 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17664 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17665 @@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17666 static int __init uncore_type_init(struct intel_uncore_type *type)
17667 {
17668 struct intel_uncore_pmu *pmus;
17669 - struct attribute_group *attr_group;
17670 + attribute_group_no_const *attr_group;
17671 struct attribute **attrs;
17672 int i, j;
17673
17674 @@ -2826,7 +2826,7 @@ static int
17675 return NOTIFY_OK;
17676 }
17677
17678 -static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17679 +static struct notifier_block uncore_cpu_nb = {
17680 .notifier_call = uncore_cpu_notifier,
17681 /*
17682 * to migrate uncore events, our notifier should be executed
17683 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17684 index e68a455..975a932 100644
17685 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17686 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17687 @@ -428,7 +428,7 @@ struct intel_uncore_box {
17688 struct uncore_event_desc {
17689 struct kobj_attribute attr;
17690 const char *config;
17691 -};
17692 +} __do_const;
17693
17694 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17695 { \
17696 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17697 index 1e4dbcf..b9a34c2 100644
17698 --- a/arch/x86/kernel/cpuid.c
17699 +++ b/arch/x86/kernel/cpuid.c
17700 @@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17701 return notifier_from_errno(err);
17702 }
17703
17704 -static struct notifier_block __refdata cpuid_class_cpu_notifier =
17705 +static struct notifier_block cpuid_class_cpu_notifier =
17706 {
17707 .notifier_call = cpuid_class_cpu_callback,
17708 };
17709 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17710 index 74467fe..18793d5 100644
17711 --- a/arch/x86/kernel/crash.c
17712 +++ b/arch/x86/kernel/crash.c
17713 @@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17714 {
17715 #ifdef CONFIG_X86_32
17716 struct pt_regs fixed_regs;
17717 -#endif
17718
17719 -#ifdef CONFIG_X86_32
17720 - if (!user_mode_vm(regs)) {
17721 + if (!user_mode(regs)) {
17722 crash_fixup_ss_esp(&fixed_regs, regs);
17723 regs = &fixed_regs;
17724 }
17725 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17726 index 37250fe..bf2ec74 100644
17727 --- a/arch/x86/kernel/doublefault_32.c
17728 +++ b/arch/x86/kernel/doublefault_32.c
17729 @@ -11,7 +11,7 @@
17730
17731 #define DOUBLEFAULT_STACKSIZE (1024)
17732 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17733 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17734 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17735
17736 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17737
17738 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
17739 unsigned long gdt, tss;
17740
17741 store_gdt(&gdt_desc);
17742 - gdt = gdt_desc.address;
17743 + gdt = (unsigned long)gdt_desc.address;
17744
17745 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17746
17747 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17748 /* 0x2 bit is always set */
17749 .flags = X86_EFLAGS_SF | 0x2,
17750 .sp = STACK_START,
17751 - .es = __USER_DS,
17752 + .es = __KERNEL_DS,
17753 .cs = __KERNEL_CS,
17754 .ss = __KERNEL_DS,
17755 - .ds = __USER_DS,
17756 + .ds = __KERNEL_DS,
17757 .fs = __KERNEL_PERCPU,
17758
17759 .__cr3 = __pa_nodebug(swapper_pg_dir),
17760 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17761 index c8797d5..c605e53 100644
17762 --- a/arch/x86/kernel/dumpstack.c
17763 +++ b/arch/x86/kernel/dumpstack.c
17764 @@ -2,6 +2,9 @@
17765 * Copyright (C) 1991, 1992 Linus Torvalds
17766 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17767 */
17768 +#ifdef CONFIG_GRKERNSEC_HIDESYM
17769 +#define __INCLUDED_BY_HIDESYM 1
17770 +#endif
17771 #include <linux/kallsyms.h>
17772 #include <linux/kprobes.h>
17773 #include <linux/uaccess.h>
17774 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17775 static void
17776 print_ftrace_graph_addr(unsigned long addr, void *data,
17777 const struct stacktrace_ops *ops,
17778 - struct thread_info *tinfo, int *graph)
17779 + struct task_struct *task, int *graph)
17780 {
17781 - struct task_struct *task;
17782 unsigned long ret_addr;
17783 int index;
17784
17785 if (addr != (unsigned long)return_to_handler)
17786 return;
17787
17788 - task = tinfo->task;
17789 index = task->curr_ret_stack;
17790
17791 if (!task->ret_stack || index < *graph)
17792 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17793 static inline void
17794 print_ftrace_graph_addr(unsigned long addr, void *data,
17795 const struct stacktrace_ops *ops,
17796 - struct thread_info *tinfo, int *graph)
17797 + struct task_struct *task, int *graph)
17798 { }
17799 #endif
17800
17801 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17802 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17803 */
17804
17805 -static inline int valid_stack_ptr(struct thread_info *tinfo,
17806 - void *p, unsigned int size, void *end)
17807 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17808 {
17809 - void *t = tinfo;
17810 if (end) {
17811 if (p < end && p >= (end-THREAD_SIZE))
17812 return 1;
17813 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17814 }
17815
17816 unsigned long
17817 -print_context_stack(struct thread_info *tinfo,
17818 +print_context_stack(struct task_struct *task, void *stack_start,
17819 unsigned long *stack, unsigned long bp,
17820 const struct stacktrace_ops *ops, void *data,
17821 unsigned long *end, int *graph)
17822 {
17823 struct stack_frame *frame = (struct stack_frame *)bp;
17824
17825 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17826 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17827 unsigned long addr;
17828
17829 addr = *stack;
17830 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17831 } else {
17832 ops->address(data, addr, 0);
17833 }
17834 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17835 + print_ftrace_graph_addr(addr, data, ops, task, graph);
17836 }
17837 stack++;
17838 }
17839 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17840 EXPORT_SYMBOL_GPL(print_context_stack);
17841
17842 unsigned long
17843 -print_context_stack_bp(struct thread_info *tinfo,
17844 +print_context_stack_bp(struct task_struct *task, void *stack_start,
17845 unsigned long *stack, unsigned long bp,
17846 const struct stacktrace_ops *ops, void *data,
17847 unsigned long *end, int *graph)
17848 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17849 struct stack_frame *frame = (struct stack_frame *)bp;
17850 unsigned long *ret_addr = &frame->return_address;
17851
17852 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17853 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17854 unsigned long addr = *ret_addr;
17855
17856 if (!__kernel_text_address(addr))
17857 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17858 ops->address(data, addr, 1);
17859 frame = frame->next_frame;
17860 ret_addr = &frame->return_address;
17861 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17862 + print_ftrace_graph_addr(addr, data, ops, task, graph);
17863 }
17864
17865 return (unsigned long)frame;
17866 @@ -189,7 +188,7 @@ void dump_stack(void)
17867
17868 bp = stack_frame(current, NULL);
17869 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
17870 - current->pid, current->comm, print_tainted(),
17871 + task_pid_nr(current), current->comm, print_tainted(),
17872 init_utsname()->release,
17873 (int)strcspn(init_utsname()->version, " "),
17874 init_utsname()->version);
17875 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
17876 }
17877 EXPORT_SYMBOL_GPL(oops_begin);
17878
17879 +extern void gr_handle_kernel_exploit(void);
17880 +
17881 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17882 {
17883 if (regs && kexec_should_crash(current))
17884 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17885 panic("Fatal exception in interrupt");
17886 if (panic_on_oops)
17887 panic("Fatal exception");
17888 - do_exit(signr);
17889 +
17890 + gr_handle_kernel_exploit();
17891 +
17892 + do_group_exit(signr);
17893 }
17894
17895 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17896 @@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17897 print_modules();
17898 show_regs(regs);
17899 #ifdef CONFIG_X86_32
17900 - if (user_mode_vm(regs)) {
17901 + if (user_mode(regs)) {
17902 sp = regs->sp;
17903 ss = regs->ss & 0xffff;
17904 } else {
17905 @@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17906 unsigned long flags = oops_begin();
17907 int sig = SIGSEGV;
17908
17909 - if (!user_mode_vm(regs))
17910 + if (!user_mode(regs))
17911 report_bug(regs->ip, regs);
17912
17913 if (__die(str, regs, err))
17914 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17915 index 1038a41..db2c12b 100644
17916 --- a/arch/x86/kernel/dumpstack_32.c
17917 +++ b/arch/x86/kernel/dumpstack_32.c
17918 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17919 bp = stack_frame(task, regs);
17920
17921 for (;;) {
17922 - struct thread_info *context;
17923 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17924
17925 - context = (struct thread_info *)
17926 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17927 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17928 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17929
17930 - stack = (unsigned long *)context->previous_esp;
17931 - if (!stack)
17932 + if (stack_start == task_stack_page(task))
17933 break;
17934 + stack = *(unsigned long **)stack_start;
17935 if (ops->stack(data, "IRQ") < 0)
17936 break;
17937 touch_nmi_watchdog();
17938 @@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
17939 {
17940 int i;
17941
17942 - __show_regs(regs, !user_mode_vm(regs));
17943 + __show_regs(regs, !user_mode(regs));
17944
17945 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
17946 TASK_COMM_LEN, current->comm, task_pid_nr(current),
17947 @@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
17948 * When in-kernel, we also print out the stack and code at the
17949 * time of the fault..
17950 */
17951 - if (!user_mode_vm(regs)) {
17952 + if (!user_mode(regs)) {
17953 unsigned int code_prologue = code_bytes * 43 / 64;
17954 unsigned int code_len = code_bytes;
17955 unsigned char c;
17956 u8 *ip;
17957 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
17958
17959 pr_emerg("Stack:\n");
17960 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
17961
17962 pr_emerg("Code:");
17963
17964 - ip = (u8 *)regs->ip - code_prologue;
17965 + ip = (u8 *)regs->ip - code_prologue + cs_base;
17966 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
17967 /* try starting at IP */
17968 - ip = (u8 *)regs->ip;
17969 + ip = (u8 *)regs->ip + cs_base;
17970 code_len = code_len - code_prologue + 1;
17971 }
17972 for (i = 0; i < code_len; i++, ip++) {
17973 @@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
17974 pr_cont(" Bad EIP value.");
17975 break;
17976 }
17977 - if (ip == (u8 *)regs->ip)
17978 + if (ip == (u8 *)regs->ip + cs_base)
17979 pr_cont(" <%02x>", c);
17980 else
17981 pr_cont(" %02x", c);
17982 @@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
17983 {
17984 unsigned short ud2;
17985
17986 + ip = ktla_ktva(ip);
17987 if (ip < PAGE_OFFSET)
17988 return 0;
17989 if (probe_kernel_address((unsigned short *)ip, ud2))
17990 @@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
17991
17992 return ud2 == 0x0b0f;
17993 }
17994 +
17995 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17996 +void pax_check_alloca(unsigned long size)
17997 +{
17998 + unsigned long sp = (unsigned long)&sp, stack_left;
17999 +
18000 + /* all kernel stacks are of the same size */
18001 + stack_left = sp & (THREAD_SIZE - 1);
18002 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
18003 +}
18004 +EXPORT_SYMBOL(pax_check_alloca);
18005 +#endif
18006 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18007 index b653675..51cc8c0 100644
18008 --- a/arch/x86/kernel/dumpstack_64.c
18009 +++ b/arch/x86/kernel/dumpstack_64.c
18010 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18011 unsigned long *irq_stack_end =
18012 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18013 unsigned used = 0;
18014 - struct thread_info *tinfo;
18015 int graph = 0;
18016 unsigned long dummy;
18017 + void *stack_start;
18018
18019 if (!task)
18020 task = current;
18021 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18022 * current stack address. If the stacks consist of nested
18023 * exceptions
18024 */
18025 - tinfo = task_thread_info(task);
18026 for (;;) {
18027 char *id;
18028 unsigned long *estack_end;
18029 +
18030 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18031 &used, &id);
18032
18033 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18034 if (ops->stack(data, id) < 0)
18035 break;
18036
18037 - bp = ops->walk_stack(tinfo, stack, bp, ops,
18038 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18039 data, estack_end, &graph);
18040 ops->stack(data, "<EOE>");
18041 /*
18042 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18043 * second-to-last pointer (index -2 to end) in the
18044 * exception stack:
18045 */
18046 + if ((u16)estack_end[-1] != __KERNEL_DS)
18047 + goto out;
18048 stack = (unsigned long *) estack_end[-2];
18049 continue;
18050 }
18051 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18052 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18053 if (ops->stack(data, "IRQ") < 0)
18054 break;
18055 - bp = ops->walk_stack(tinfo, stack, bp,
18056 + bp = ops->walk_stack(task, irq_stack, stack, bp,
18057 ops, data, irq_stack_end, &graph);
18058 /*
18059 * We link to the next stack (which would be
18060 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18061 /*
18062 * This handles the process stack:
18063 */
18064 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18065 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18066 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18067 +out:
18068 put_cpu();
18069 }
18070 EXPORT_SYMBOL(dump_trace);
18071 @@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
18072 {
18073 int i;
18074 unsigned long sp;
18075 - const int cpu = smp_processor_id();
18076 + const int cpu = raw_smp_processor_id();
18077 struct task_struct *cur = current;
18078
18079 sp = regs->sp;
18080 @@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
18081
18082 return ud2 == 0x0b0f;
18083 }
18084 +
18085 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18086 +void pax_check_alloca(unsigned long size)
18087 +{
18088 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18089 + unsigned cpu, used;
18090 + char *id;
18091 +
18092 + /* check the process stack first */
18093 + stack_start = (unsigned long)task_stack_page(current);
18094 + stack_end = stack_start + THREAD_SIZE;
18095 + if (likely(stack_start <= sp && sp < stack_end)) {
18096 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
18097 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
18098 + return;
18099 + }
18100 +
18101 + cpu = get_cpu();
18102 +
18103 + /* check the irq stacks */
18104 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18105 + stack_start = stack_end - IRQ_STACK_SIZE;
18106 + if (stack_start <= sp && sp < stack_end) {
18107 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18108 + put_cpu();
18109 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
18110 + return;
18111 + }
18112 +
18113 + /* check the exception stacks */
18114 + used = 0;
18115 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18116 + stack_start = stack_end - EXCEPTION_STKSZ;
18117 + if (stack_end && stack_start <= sp && sp < stack_end) {
18118 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18119 + put_cpu();
18120 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
18121 + return;
18122 + }
18123 +
18124 + put_cpu();
18125 +
18126 + /* unknown stack */
18127 + BUG();
18128 +}
18129 +EXPORT_SYMBOL(pax_check_alloca);
18130 +#endif
18131 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18132 index 9b9f18b..9fcaa04 100644
18133 --- a/arch/x86/kernel/early_printk.c
18134 +++ b/arch/x86/kernel/early_printk.c
18135 @@ -7,6 +7,7 @@
18136 #include <linux/pci_regs.h>
18137 #include <linux/pci_ids.h>
18138 #include <linux/errno.h>
18139 +#include <linux/sched.h>
18140 #include <asm/io.h>
18141 #include <asm/processor.h>
18142 #include <asm/fcntl.h>
18143 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18144 index 8f3e2de..934870f 100644
18145 --- a/arch/x86/kernel/entry_32.S
18146 +++ b/arch/x86/kernel/entry_32.S
18147 @@ -177,13 +177,153 @@
18148 /*CFI_REL_OFFSET gs, PT_GS*/
18149 .endm
18150 .macro SET_KERNEL_GS reg
18151 +
18152 +#ifdef CONFIG_CC_STACKPROTECTOR
18153 movl $(__KERNEL_STACK_CANARY), \reg
18154 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18155 + movl $(__USER_DS), \reg
18156 +#else
18157 + xorl \reg, \reg
18158 +#endif
18159 +
18160 movl \reg, %gs
18161 .endm
18162
18163 #endif /* CONFIG_X86_32_LAZY_GS */
18164
18165 -.macro SAVE_ALL
18166 +.macro pax_enter_kernel
18167 +#ifdef CONFIG_PAX_KERNEXEC
18168 + call pax_enter_kernel
18169 +#endif
18170 +.endm
18171 +
18172 +.macro pax_exit_kernel
18173 +#ifdef CONFIG_PAX_KERNEXEC
18174 + call pax_exit_kernel
18175 +#endif
18176 +.endm
18177 +
18178 +#ifdef CONFIG_PAX_KERNEXEC
18179 +ENTRY(pax_enter_kernel)
18180 +#ifdef CONFIG_PARAVIRT
18181 + pushl %eax
18182 + pushl %ecx
18183 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18184 + mov %eax, %esi
18185 +#else
18186 + mov %cr0, %esi
18187 +#endif
18188 + bts $16, %esi
18189 + jnc 1f
18190 + mov %cs, %esi
18191 + cmp $__KERNEL_CS, %esi
18192 + jz 3f
18193 + ljmp $__KERNEL_CS, $3f
18194 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18195 +2:
18196 +#ifdef CONFIG_PARAVIRT
18197 + mov %esi, %eax
18198 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18199 +#else
18200 + mov %esi, %cr0
18201 +#endif
18202 +3:
18203 +#ifdef CONFIG_PARAVIRT
18204 + popl %ecx
18205 + popl %eax
18206 +#endif
18207 + ret
18208 +ENDPROC(pax_enter_kernel)
18209 +
18210 +ENTRY(pax_exit_kernel)
18211 +#ifdef CONFIG_PARAVIRT
18212 + pushl %eax
18213 + pushl %ecx
18214 +#endif
18215 + mov %cs, %esi
18216 + cmp $__KERNEXEC_KERNEL_CS, %esi
18217 + jnz 2f
18218 +#ifdef CONFIG_PARAVIRT
18219 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18220 + mov %eax, %esi
18221 +#else
18222 + mov %cr0, %esi
18223 +#endif
18224 + btr $16, %esi
18225 + ljmp $__KERNEL_CS, $1f
18226 +1:
18227 +#ifdef CONFIG_PARAVIRT
18228 + mov %esi, %eax
18229 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18230 +#else
18231 + mov %esi, %cr0
18232 +#endif
18233 +2:
18234 +#ifdef CONFIG_PARAVIRT
18235 + popl %ecx
18236 + popl %eax
18237 +#endif
18238 + ret
18239 +ENDPROC(pax_exit_kernel)
18240 +#endif
18241 +
18242 +.macro pax_erase_kstack
18243 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18244 + call pax_erase_kstack
18245 +#endif
18246 +.endm
18247 +
18248 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18249 +/*
18250 + * ebp: thread_info
18251 + */
18252 +ENTRY(pax_erase_kstack)
18253 + pushl %edi
18254 + pushl %ecx
18255 + pushl %eax
18256 +
18257 + mov TI_lowest_stack(%ebp), %edi
18258 + mov $-0xBEEF, %eax
18259 + std
18260 +
18261 +1: mov %edi, %ecx
18262 + and $THREAD_SIZE_asm - 1, %ecx
18263 + shr $2, %ecx
18264 + repne scasl
18265 + jecxz 2f
18266 +
18267 + cmp $2*16, %ecx
18268 + jc 2f
18269 +
18270 + mov $2*16, %ecx
18271 + repe scasl
18272 + jecxz 2f
18273 + jne 1b
18274 +
18275 +2: cld
18276 + mov %esp, %ecx
18277 + sub %edi, %ecx
18278 +
18279 + cmp $THREAD_SIZE_asm, %ecx
18280 + jb 3f
18281 + ud2
18282 +3:
18283 +
18284 + shr $2, %ecx
18285 + rep stosl
18286 +
18287 + mov TI_task_thread_sp0(%ebp), %edi
18288 + sub $128, %edi
18289 + mov %edi, TI_lowest_stack(%ebp)
18290 +
18291 + popl %eax
18292 + popl %ecx
18293 + popl %edi
18294 + ret
18295 +ENDPROC(pax_erase_kstack)
18296 +#endif
18297 +
18298 +.macro __SAVE_ALL _DS
18299 cld
18300 PUSH_GS
18301 pushl_cfi %fs
18302 @@ -206,7 +346,7 @@
18303 CFI_REL_OFFSET ecx, 0
18304 pushl_cfi %ebx
18305 CFI_REL_OFFSET ebx, 0
18306 - movl $(__USER_DS), %edx
18307 + movl $\_DS, %edx
18308 movl %edx, %ds
18309 movl %edx, %es
18310 movl $(__KERNEL_PERCPU), %edx
18311 @@ -214,6 +354,15 @@
18312 SET_KERNEL_GS %edx
18313 .endm
18314
18315 +.macro SAVE_ALL
18316 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18317 + __SAVE_ALL __KERNEL_DS
18318 + pax_enter_kernel
18319 +#else
18320 + __SAVE_ALL __USER_DS
18321 +#endif
18322 +.endm
18323 +
18324 .macro RESTORE_INT_REGS
18325 popl_cfi %ebx
18326 CFI_RESTORE ebx
18327 @@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18328 popfl_cfi
18329 jmp syscall_exit
18330 CFI_ENDPROC
18331 -END(ret_from_fork)
18332 +ENDPROC(ret_from_fork)
18333
18334 ENTRY(ret_from_kernel_thread)
18335 CFI_STARTPROC
18336 @@ -344,7 +493,15 @@ ret_from_intr:
18337 andl $SEGMENT_RPL_MASK, %eax
18338 #endif
18339 cmpl $USER_RPL, %eax
18340 +
18341 +#ifdef CONFIG_PAX_KERNEXEC
18342 + jae resume_userspace
18343 +
18344 + pax_exit_kernel
18345 + jmp resume_kernel
18346 +#else
18347 jb resume_kernel # not returning to v8086 or userspace
18348 +#endif
18349
18350 ENTRY(resume_userspace)
18351 LOCKDEP_SYS_EXIT
18352 @@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18353 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18354 # int/exception return?
18355 jne work_pending
18356 - jmp restore_all
18357 -END(ret_from_exception)
18358 + jmp restore_all_pax
18359 +ENDPROC(ret_from_exception)
18360
18361 #ifdef CONFIG_PREEMPT
18362 ENTRY(resume_kernel)
18363 @@ -372,7 +529,7 @@ need_resched:
18364 jz restore_all
18365 call preempt_schedule_irq
18366 jmp need_resched
18367 -END(resume_kernel)
18368 +ENDPROC(resume_kernel)
18369 #endif
18370 CFI_ENDPROC
18371 /*
18372 @@ -406,30 +563,45 @@ sysenter_past_esp:
18373 /*CFI_REL_OFFSET cs, 0*/
18374 /*
18375 * Push current_thread_info()->sysenter_return to the stack.
18376 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18377 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
18378 */
18379 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18380 + pushl_cfi $0
18381 CFI_REL_OFFSET eip, 0
18382
18383 pushl_cfi %eax
18384 SAVE_ALL
18385 + GET_THREAD_INFO(%ebp)
18386 + movl TI_sysenter_return(%ebp),%ebp
18387 + movl %ebp,PT_EIP(%esp)
18388 ENABLE_INTERRUPTS(CLBR_NONE)
18389
18390 /*
18391 * Load the potential sixth argument from user stack.
18392 * Careful about security.
18393 */
18394 + movl PT_OLDESP(%esp),%ebp
18395 +
18396 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18397 + mov PT_OLDSS(%esp),%ds
18398 +1: movl %ds:(%ebp),%ebp
18399 + push %ss
18400 + pop %ds
18401 +#else
18402 cmpl $__PAGE_OFFSET-3,%ebp
18403 jae syscall_fault
18404 ASM_STAC
18405 1: movl (%ebp),%ebp
18406 ASM_CLAC
18407 +#endif
18408 +
18409 movl %ebp,PT_EBP(%esp)
18410 _ASM_EXTABLE(1b,syscall_fault)
18411
18412 GET_THREAD_INFO(%ebp)
18413
18414 +#ifdef CONFIG_PAX_RANDKSTACK
18415 + pax_erase_kstack
18416 +#endif
18417 +
18418 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18419 jnz sysenter_audit
18420 sysenter_do_call:
18421 @@ -444,12 +616,24 @@ sysenter_do_call:
18422 testl $_TIF_ALLWORK_MASK, %ecx
18423 jne sysexit_audit
18424 sysenter_exit:
18425 +
18426 +#ifdef CONFIG_PAX_RANDKSTACK
18427 + pushl_cfi %eax
18428 + movl %esp, %eax
18429 + call pax_randomize_kstack
18430 + popl_cfi %eax
18431 +#endif
18432 +
18433 + pax_erase_kstack
18434 +
18435 /* if something modifies registers it must also disable sysexit */
18436 movl PT_EIP(%esp), %edx
18437 movl PT_OLDESP(%esp), %ecx
18438 xorl %ebp,%ebp
18439 TRACE_IRQS_ON
18440 1: mov PT_FS(%esp), %fs
18441 +2: mov PT_DS(%esp), %ds
18442 +3: mov PT_ES(%esp), %es
18443 PTGS_TO_GS
18444 ENABLE_INTERRUPTS_SYSEXIT
18445
18446 @@ -466,6 +650,9 @@ sysenter_audit:
18447 movl %eax,%edx /* 2nd arg: syscall number */
18448 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18449 call __audit_syscall_entry
18450 +
18451 + pax_erase_kstack
18452 +
18453 pushl_cfi %ebx
18454 movl PT_EAX(%esp),%eax /* reload syscall number */
18455 jmp sysenter_do_call
18456 @@ -491,10 +678,16 @@ sysexit_audit:
18457
18458 CFI_ENDPROC
18459 .pushsection .fixup,"ax"
18460 -2: movl $0,PT_FS(%esp)
18461 +4: movl $0,PT_FS(%esp)
18462 + jmp 1b
18463 +5: movl $0,PT_DS(%esp)
18464 + jmp 1b
18465 +6: movl $0,PT_ES(%esp)
18466 jmp 1b
18467 .popsection
18468 - _ASM_EXTABLE(1b,2b)
18469 + _ASM_EXTABLE(1b,4b)
18470 + _ASM_EXTABLE(2b,5b)
18471 + _ASM_EXTABLE(3b,6b)
18472 PTGS_TO_GS_EX
18473 ENDPROC(ia32_sysenter_target)
18474
18475 @@ -509,6 +702,11 @@ ENTRY(system_call)
18476 pushl_cfi %eax # save orig_eax
18477 SAVE_ALL
18478 GET_THREAD_INFO(%ebp)
18479 +
18480 +#ifdef CONFIG_PAX_RANDKSTACK
18481 + pax_erase_kstack
18482 +#endif
18483 +
18484 # system call tracing in operation / emulation
18485 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18486 jnz syscall_trace_entry
18487 @@ -527,6 +725,15 @@ syscall_exit:
18488 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18489 jne syscall_exit_work
18490
18491 +restore_all_pax:
18492 +
18493 +#ifdef CONFIG_PAX_RANDKSTACK
18494 + movl %esp, %eax
18495 + call pax_randomize_kstack
18496 +#endif
18497 +
18498 + pax_erase_kstack
18499 +
18500 restore_all:
18501 TRACE_IRQS_IRET
18502 restore_all_notrace:
18503 @@ -583,14 +790,34 @@ ldt_ss:
18504 * compensating for the offset by changing to the ESPFIX segment with
18505 * a base address that matches for the difference.
18506 */
18507 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18508 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18509 mov %esp, %edx /* load kernel esp */
18510 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18511 mov %dx, %ax /* eax: new kernel esp */
18512 sub %eax, %edx /* offset (low word is 0) */
18513 +#ifdef CONFIG_SMP
18514 + movl PER_CPU_VAR(cpu_number), %ebx
18515 + shll $PAGE_SHIFT_asm, %ebx
18516 + addl $cpu_gdt_table, %ebx
18517 +#else
18518 + movl $cpu_gdt_table, %ebx
18519 +#endif
18520 shr $16, %edx
18521 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18522 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18523 +
18524 +#ifdef CONFIG_PAX_KERNEXEC
18525 + mov %cr0, %esi
18526 + btr $16, %esi
18527 + mov %esi, %cr0
18528 +#endif
18529 +
18530 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18531 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18532 +
18533 +#ifdef CONFIG_PAX_KERNEXEC
18534 + bts $16, %esi
18535 + mov %esi, %cr0
18536 +#endif
18537 +
18538 pushl_cfi $__ESPFIX_SS
18539 pushl_cfi %eax /* new kernel esp */
18540 /* Disable interrupts, but do not irqtrace this section: we
18541 @@ -619,20 +846,18 @@ work_resched:
18542 movl TI_flags(%ebp), %ecx
18543 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18544 # than syscall tracing?
18545 - jz restore_all
18546 + jz restore_all_pax
18547 testb $_TIF_NEED_RESCHED, %cl
18548 jnz work_resched
18549
18550 work_notifysig: # deal with pending signals and
18551 # notify-resume requests
18552 + movl %esp, %eax
18553 #ifdef CONFIG_VM86
18554 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18555 - movl %esp, %eax
18556 jne work_notifysig_v86 # returning to kernel-space or
18557 # vm86-space
18558 1:
18559 -#else
18560 - movl %esp, %eax
18561 #endif
18562 TRACE_IRQS_ON
18563 ENABLE_INTERRUPTS(CLBR_NONE)
18564 @@ -653,7 +878,7 @@ work_notifysig_v86:
18565 movl %eax, %esp
18566 jmp 1b
18567 #endif
18568 -END(work_pending)
18569 +ENDPROC(work_pending)
18570
18571 # perform syscall exit tracing
18572 ALIGN
18573 @@ -661,11 +886,14 @@ syscall_trace_entry:
18574 movl $-ENOSYS,PT_EAX(%esp)
18575 movl %esp, %eax
18576 call syscall_trace_enter
18577 +
18578 + pax_erase_kstack
18579 +
18580 /* What it returned is what we'll actually use. */
18581 cmpl $(NR_syscalls), %eax
18582 jnae syscall_call
18583 jmp syscall_exit
18584 -END(syscall_trace_entry)
18585 +ENDPROC(syscall_trace_entry)
18586
18587 # perform syscall exit tracing
18588 ALIGN
18589 @@ -678,21 +906,25 @@ syscall_exit_work:
18590 movl %esp, %eax
18591 call syscall_trace_leave
18592 jmp resume_userspace
18593 -END(syscall_exit_work)
18594 +ENDPROC(syscall_exit_work)
18595 CFI_ENDPROC
18596
18597 RING0_INT_FRAME # can't unwind into user space anyway
18598 syscall_fault:
18599 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18600 + push %ss
18601 + pop %ds
18602 +#endif
18603 ASM_CLAC
18604 GET_THREAD_INFO(%ebp)
18605 movl $-EFAULT,PT_EAX(%esp)
18606 jmp resume_userspace
18607 -END(syscall_fault)
18608 +ENDPROC(syscall_fault)
18609
18610 syscall_badsys:
18611 movl $-ENOSYS,PT_EAX(%esp)
18612 jmp resume_userspace
18613 -END(syscall_badsys)
18614 +ENDPROC(syscall_badsys)
18615 CFI_ENDPROC
18616 /*
18617 * End of kprobes section
18618 @@ -708,8 +940,15 @@ END(syscall_badsys)
18619 * normal stack and adjusts ESP with the matching offset.
18620 */
18621 /* fixup the stack */
18622 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18623 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18624 +#ifdef CONFIG_SMP
18625 + movl PER_CPU_VAR(cpu_number), %ebx
18626 + shll $PAGE_SHIFT_asm, %ebx
18627 + addl $cpu_gdt_table, %ebx
18628 +#else
18629 + movl $cpu_gdt_table, %ebx
18630 +#endif
18631 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18632 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18633 shl $16, %eax
18634 addl %esp, %eax /* the adjusted stack pointer */
18635 pushl_cfi $__KERNEL_DS
18636 @@ -762,7 +1001,7 @@ vector=vector+1
18637 .endr
18638 2: jmp common_interrupt
18639 .endr
18640 -END(irq_entries_start)
18641 +ENDPROC(irq_entries_start)
18642
18643 .previous
18644 END(interrupt)
18645 @@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
18646 pushl_cfi $do_coprocessor_error
18647 jmp error_code
18648 CFI_ENDPROC
18649 -END(coprocessor_error)
18650 +ENDPROC(coprocessor_error)
18651
18652 ENTRY(simd_coprocessor_error)
18653 RING0_INT_FRAME
18654 @@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
18655 #endif
18656 jmp error_code
18657 CFI_ENDPROC
18658 -END(simd_coprocessor_error)
18659 +ENDPROC(simd_coprocessor_error)
18660
18661 ENTRY(device_not_available)
18662 RING0_INT_FRAME
18663 @@ -844,18 +1083,18 @@ ENTRY(device_not_available)
18664 pushl_cfi $do_device_not_available
18665 jmp error_code
18666 CFI_ENDPROC
18667 -END(device_not_available)
18668 +ENDPROC(device_not_available)
18669
18670 #ifdef CONFIG_PARAVIRT
18671 ENTRY(native_iret)
18672 iret
18673 _ASM_EXTABLE(native_iret, iret_exc)
18674 -END(native_iret)
18675 +ENDPROC(native_iret)
18676
18677 ENTRY(native_irq_enable_sysexit)
18678 sti
18679 sysexit
18680 -END(native_irq_enable_sysexit)
18681 +ENDPROC(native_irq_enable_sysexit)
18682 #endif
18683
18684 ENTRY(overflow)
18685 @@ -865,7 +1104,7 @@ ENTRY(overflow)
18686 pushl_cfi $do_overflow
18687 jmp error_code
18688 CFI_ENDPROC
18689 -END(overflow)
18690 +ENDPROC(overflow)
18691
18692 ENTRY(bounds)
18693 RING0_INT_FRAME
18694 @@ -874,7 +1113,7 @@ ENTRY(bounds)
18695 pushl_cfi $do_bounds
18696 jmp error_code
18697 CFI_ENDPROC
18698 -END(bounds)
18699 +ENDPROC(bounds)
18700
18701 ENTRY(invalid_op)
18702 RING0_INT_FRAME
18703 @@ -883,7 +1122,7 @@ ENTRY(invalid_op)
18704 pushl_cfi $do_invalid_op
18705 jmp error_code
18706 CFI_ENDPROC
18707 -END(invalid_op)
18708 +ENDPROC(invalid_op)
18709
18710 ENTRY(coprocessor_segment_overrun)
18711 RING0_INT_FRAME
18712 @@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
18713 pushl_cfi $do_coprocessor_segment_overrun
18714 jmp error_code
18715 CFI_ENDPROC
18716 -END(coprocessor_segment_overrun)
18717 +ENDPROC(coprocessor_segment_overrun)
18718
18719 ENTRY(invalid_TSS)
18720 RING0_EC_FRAME
18721 @@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
18722 pushl_cfi $do_invalid_TSS
18723 jmp error_code
18724 CFI_ENDPROC
18725 -END(invalid_TSS)
18726 +ENDPROC(invalid_TSS)
18727
18728 ENTRY(segment_not_present)
18729 RING0_EC_FRAME
18730 @@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
18731 pushl_cfi $do_segment_not_present
18732 jmp error_code
18733 CFI_ENDPROC
18734 -END(segment_not_present)
18735 +ENDPROC(segment_not_present)
18736
18737 ENTRY(stack_segment)
18738 RING0_EC_FRAME
18739 @@ -916,7 +1155,7 @@ ENTRY(stack_segment)
18740 pushl_cfi $do_stack_segment
18741 jmp error_code
18742 CFI_ENDPROC
18743 -END(stack_segment)
18744 +ENDPROC(stack_segment)
18745
18746 ENTRY(alignment_check)
18747 RING0_EC_FRAME
18748 @@ -924,7 +1163,7 @@ ENTRY(alignment_check)
18749 pushl_cfi $do_alignment_check
18750 jmp error_code
18751 CFI_ENDPROC
18752 -END(alignment_check)
18753 +ENDPROC(alignment_check)
18754
18755 ENTRY(divide_error)
18756 RING0_INT_FRAME
18757 @@ -933,7 +1172,7 @@ ENTRY(divide_error)
18758 pushl_cfi $do_divide_error
18759 jmp error_code
18760 CFI_ENDPROC
18761 -END(divide_error)
18762 +ENDPROC(divide_error)
18763
18764 #ifdef CONFIG_X86_MCE
18765 ENTRY(machine_check)
18766 @@ -943,7 +1182,7 @@ ENTRY(machine_check)
18767 pushl_cfi machine_check_vector
18768 jmp error_code
18769 CFI_ENDPROC
18770 -END(machine_check)
18771 +ENDPROC(machine_check)
18772 #endif
18773
18774 ENTRY(spurious_interrupt_bug)
18775 @@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
18776 pushl_cfi $do_spurious_interrupt_bug
18777 jmp error_code
18778 CFI_ENDPROC
18779 -END(spurious_interrupt_bug)
18780 +ENDPROC(spurious_interrupt_bug)
18781 /*
18782 * End of kprobes section
18783 */
18784 @@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
18785
18786 ENTRY(mcount)
18787 ret
18788 -END(mcount)
18789 +ENDPROC(mcount)
18790
18791 ENTRY(ftrace_caller)
18792 cmpl $0, function_trace_stop
18793 @@ -1096,7 +1335,7 @@ ftrace_graph_call:
18794 .globl ftrace_stub
18795 ftrace_stub:
18796 ret
18797 -END(ftrace_caller)
18798 +ENDPROC(ftrace_caller)
18799
18800 ENTRY(ftrace_regs_caller)
18801 pushf /* push flags before compare (in cs location) */
18802 @@ -1197,7 +1436,7 @@ trace:
18803 popl %ecx
18804 popl %eax
18805 jmp ftrace_stub
18806 -END(mcount)
18807 +ENDPROC(mcount)
18808 #endif /* CONFIG_DYNAMIC_FTRACE */
18809 #endif /* CONFIG_FUNCTION_TRACER */
18810
18811 @@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
18812 popl %ecx
18813 popl %eax
18814 ret
18815 -END(ftrace_graph_caller)
18816 +ENDPROC(ftrace_graph_caller)
18817
18818 .globl return_to_handler
18819 return_to_handler:
18820 @@ -1271,15 +1510,18 @@ error_code:
18821 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18822 REG_TO_PTGS %ecx
18823 SET_KERNEL_GS %ecx
18824 - movl $(__USER_DS), %ecx
18825 + movl $(__KERNEL_DS), %ecx
18826 movl %ecx, %ds
18827 movl %ecx, %es
18828 +
18829 + pax_enter_kernel
18830 +
18831 TRACE_IRQS_OFF
18832 movl %esp,%eax # pt_regs pointer
18833 call *%edi
18834 jmp ret_from_exception
18835 CFI_ENDPROC
18836 -END(page_fault)
18837 +ENDPROC(page_fault)
18838
18839 /*
18840 * Debug traps and NMI can happen at the one SYSENTER instruction
18841 @@ -1322,7 +1564,7 @@ debug_stack_correct:
18842 call do_debug
18843 jmp ret_from_exception
18844 CFI_ENDPROC
18845 -END(debug)
18846 +ENDPROC(debug)
18847
18848 /*
18849 * NMI is doubly nasty. It can happen _while_ we're handling
18850 @@ -1360,6 +1602,9 @@ nmi_stack_correct:
18851 xorl %edx,%edx # zero error code
18852 movl %esp,%eax # pt_regs pointer
18853 call do_nmi
18854 +
18855 + pax_exit_kernel
18856 +
18857 jmp restore_all_notrace
18858 CFI_ENDPROC
18859
18860 @@ -1396,12 +1641,15 @@ nmi_espfix_stack:
18861 FIXUP_ESPFIX_STACK # %eax == %esp
18862 xorl %edx,%edx # zero error code
18863 call do_nmi
18864 +
18865 + pax_exit_kernel
18866 +
18867 RESTORE_REGS
18868 lss 12+4(%esp), %esp # back to espfix stack
18869 CFI_ADJUST_CFA_OFFSET -24
18870 jmp irq_return
18871 CFI_ENDPROC
18872 -END(nmi)
18873 +ENDPROC(nmi)
18874
18875 ENTRY(int3)
18876 RING0_INT_FRAME
18877 @@ -1414,14 +1662,14 @@ ENTRY(int3)
18878 call do_int3
18879 jmp ret_from_exception
18880 CFI_ENDPROC
18881 -END(int3)
18882 +ENDPROC(int3)
18883
18884 ENTRY(general_protection)
18885 RING0_EC_FRAME
18886 pushl_cfi $do_general_protection
18887 jmp error_code
18888 CFI_ENDPROC
18889 -END(general_protection)
18890 +ENDPROC(general_protection)
18891
18892 #ifdef CONFIG_KVM_GUEST
18893 ENTRY(async_page_fault)
18894 @@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
18895 pushl_cfi $do_async_page_fault
18896 jmp error_code
18897 CFI_ENDPROC
18898 -END(async_page_fault)
18899 +ENDPROC(async_page_fault)
18900 #endif
18901
18902 /*
18903 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18904 index c1d01e6..5625dce 100644
18905 --- a/arch/x86/kernel/entry_64.S
18906 +++ b/arch/x86/kernel/entry_64.S
18907 @@ -59,6 +59,8 @@
18908 #include <asm/context_tracking.h>
18909 #include <asm/smap.h>
18910 #include <linux/err.h>
18911 +#include <asm/pgtable.h>
18912 +#include <asm/alternative-asm.h>
18913
18914 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18915 #include <linux/elf-em.h>
18916 @@ -80,8 +82,9 @@
18917 #ifdef CONFIG_DYNAMIC_FTRACE
18918
18919 ENTRY(function_hook)
18920 + pax_force_retaddr
18921 retq
18922 -END(function_hook)
18923 +ENDPROC(function_hook)
18924
18925 /* skip is set if stack has been adjusted */
18926 .macro ftrace_caller_setup skip=0
18927 @@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
18928 #endif
18929
18930 GLOBAL(ftrace_stub)
18931 + pax_force_retaddr
18932 retq
18933 -END(ftrace_caller)
18934 +ENDPROC(ftrace_caller)
18935
18936 ENTRY(ftrace_regs_caller)
18937 /* Save the current flags before compare (in SS location)*/
18938 @@ -191,7 +195,7 @@ ftrace_restore_flags:
18939 popfq
18940 jmp ftrace_stub
18941
18942 -END(ftrace_regs_caller)
18943 +ENDPROC(ftrace_regs_caller)
18944
18945
18946 #else /* ! CONFIG_DYNAMIC_FTRACE */
18947 @@ -212,6 +216,7 @@ ENTRY(function_hook)
18948 #endif
18949
18950 GLOBAL(ftrace_stub)
18951 + pax_force_retaddr
18952 retq
18953
18954 trace:
18955 @@ -225,12 +230,13 @@ trace:
18956 #endif
18957 subq $MCOUNT_INSN_SIZE, %rdi
18958
18959 + pax_force_fptr ftrace_trace_function
18960 call *ftrace_trace_function
18961
18962 MCOUNT_RESTORE_FRAME
18963
18964 jmp ftrace_stub
18965 -END(function_hook)
18966 +ENDPROC(function_hook)
18967 #endif /* CONFIG_DYNAMIC_FTRACE */
18968 #endif /* CONFIG_FUNCTION_TRACER */
18969
18970 @@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
18971
18972 MCOUNT_RESTORE_FRAME
18973
18974 + pax_force_retaddr
18975 retq
18976 -END(ftrace_graph_caller)
18977 +ENDPROC(ftrace_graph_caller)
18978
18979 GLOBAL(return_to_handler)
18980 subq $24, %rsp
18981 @@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
18982 movq 8(%rsp), %rdx
18983 movq (%rsp), %rax
18984 addq $24, %rsp
18985 + pax_force_fptr %rdi
18986 jmp *%rdi
18987 +ENDPROC(return_to_handler)
18988 #endif
18989
18990
18991 @@ -284,6 +293,282 @@ ENTRY(native_usergs_sysret64)
18992 ENDPROC(native_usergs_sysret64)
18993 #endif /* CONFIG_PARAVIRT */
18994
18995 + .macro ljmpq sel, off
18996 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
18997 + .byte 0x48; ljmp *1234f(%rip)
18998 + .pushsection .rodata
18999 + .align 16
19000 + 1234: .quad \off; .word \sel
19001 + .popsection
19002 +#else
19003 + pushq $\sel
19004 + pushq $\off
19005 + lretq
19006 +#endif
19007 + .endm
19008 +
19009 + .macro pax_enter_kernel
19010 + pax_set_fptr_mask
19011 +#ifdef CONFIG_PAX_KERNEXEC
19012 + call pax_enter_kernel
19013 +#endif
19014 + .endm
19015 +
19016 + .macro pax_exit_kernel
19017 +#ifdef CONFIG_PAX_KERNEXEC
19018 + call pax_exit_kernel
19019 +#endif
19020 + .endm
19021 +
19022 +#ifdef CONFIG_PAX_KERNEXEC
19023 +ENTRY(pax_enter_kernel)
19024 + pushq %rdi
19025 +
19026 +#ifdef CONFIG_PARAVIRT
19027 + PV_SAVE_REGS(CLBR_RDI)
19028 +#endif
19029 +
19030 + GET_CR0_INTO_RDI
19031 + bts $16,%rdi
19032 + jnc 3f
19033 + mov %cs,%edi
19034 + cmp $__KERNEL_CS,%edi
19035 + jnz 2f
19036 +1:
19037 +
19038 +#ifdef CONFIG_PARAVIRT
19039 + PV_RESTORE_REGS(CLBR_RDI)
19040 +#endif
19041 +
19042 + popq %rdi
19043 + pax_force_retaddr
19044 + retq
19045 +
19046 +2: ljmpq __KERNEL_CS,1b
19047 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
19048 +4: SET_RDI_INTO_CR0
19049 + jmp 1b
19050 +ENDPROC(pax_enter_kernel)
19051 +
19052 +ENTRY(pax_exit_kernel)
19053 + pushq %rdi
19054 +
19055 +#ifdef CONFIG_PARAVIRT
19056 + PV_SAVE_REGS(CLBR_RDI)
19057 +#endif
19058 +
19059 + mov %cs,%rdi
19060 + cmp $__KERNEXEC_KERNEL_CS,%edi
19061 + jz 2f
19062 + GET_CR0_INTO_RDI
19063 + bts $16,%rdi
19064 + jnc 4f
19065 +1:
19066 +
19067 +#ifdef CONFIG_PARAVIRT
19068 + PV_RESTORE_REGS(CLBR_RDI);
19069 +#endif
19070 +
19071 + popq %rdi
19072 + pax_force_retaddr
19073 + retq
19074 +
19075 +2: GET_CR0_INTO_RDI
19076 + btr $16,%rdi
19077 + jnc 4f
19078 + ljmpq __KERNEL_CS,3f
19079 +3: SET_RDI_INTO_CR0
19080 + jmp 1b
19081 +4: ud2
19082 + jmp 4b
19083 +ENDPROC(pax_exit_kernel)
19084 +#endif
19085 +
19086 + .macro pax_enter_kernel_user
19087 + pax_set_fptr_mask
19088 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19089 + call pax_enter_kernel_user
19090 +#endif
19091 + .endm
19092 +
19093 + .macro pax_exit_kernel_user
19094 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19095 + call pax_exit_kernel_user
19096 +#endif
19097 +#ifdef CONFIG_PAX_RANDKSTACK
19098 + pushq %rax
19099 + call pax_randomize_kstack
19100 + popq %rax
19101 +#endif
19102 + .endm
19103 +
19104 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19105 +ENTRY(pax_enter_kernel_user)
19106 + pushq %rdi
19107 + pushq %rbx
19108 +
19109 +#ifdef CONFIG_PARAVIRT
19110 + PV_SAVE_REGS(CLBR_RDI)
19111 +#endif
19112 +
19113 + GET_CR3_INTO_RDI
19114 + mov %rdi,%rbx
19115 + add $__START_KERNEL_map,%rbx
19116 + sub phys_base(%rip),%rbx
19117 +
19118 +#ifdef CONFIG_PARAVIRT
19119 + pushq %rdi
19120 + cmpl $0, pv_info+PARAVIRT_enabled
19121 + jz 1f
19122 + i = 0
19123 + .rept USER_PGD_PTRS
19124 + mov i*8(%rbx),%rsi
19125 + mov $0,%sil
19126 + lea i*8(%rbx),%rdi
19127 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19128 + i = i + 1
19129 + .endr
19130 + jmp 2f
19131 +1:
19132 +#endif
19133 +
19134 + i = 0
19135 + .rept USER_PGD_PTRS
19136 + movb $0,i*8(%rbx)
19137 + i = i + 1
19138 + .endr
19139 +
19140 +#ifdef CONFIG_PARAVIRT
19141 +2: popq %rdi
19142 +#endif
19143 + SET_RDI_INTO_CR3
19144 +
19145 +#ifdef CONFIG_PAX_KERNEXEC
19146 + GET_CR0_INTO_RDI
19147 + bts $16,%rdi
19148 + SET_RDI_INTO_CR0
19149 +#endif
19150 +
19151 +#ifdef CONFIG_PARAVIRT
19152 + PV_RESTORE_REGS(CLBR_RDI)
19153 +#endif
19154 +
19155 + popq %rbx
19156 + popq %rdi
19157 + pax_force_retaddr
19158 + retq
19159 +ENDPROC(pax_enter_kernel_user)
19160 +
19161 +ENTRY(pax_exit_kernel_user)
19162 + push %rdi
19163 +
19164 +#ifdef CONFIG_PARAVIRT
19165 + pushq %rbx
19166 + PV_SAVE_REGS(CLBR_RDI)
19167 +#endif
19168 +
19169 +#ifdef CONFIG_PAX_KERNEXEC
19170 + GET_CR0_INTO_RDI
19171 + btr $16,%rdi
19172 + jnc 3f
19173 + SET_RDI_INTO_CR0
19174 +#endif
19175 +
19176 + GET_CR3_INTO_RDI
19177 + add $__START_KERNEL_map,%rdi
19178 + sub phys_base(%rip),%rdi
19179 +
19180 +#ifdef CONFIG_PARAVIRT
19181 + cmpl $0, pv_info+PARAVIRT_enabled
19182 + jz 1f
19183 + mov %rdi,%rbx
19184 + i = 0
19185 + .rept USER_PGD_PTRS
19186 + mov i*8(%rbx),%rsi
19187 + mov $0x67,%sil
19188 + lea i*8(%rbx),%rdi
19189 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19190 + i = i + 1
19191 + .endr
19192 + jmp 2f
19193 +1:
19194 +#endif
19195 +
19196 + i = 0
19197 + .rept USER_PGD_PTRS
19198 + movb $0x67,i*8(%rdi)
19199 + i = i + 1
19200 + .endr
19201 +
19202 +#ifdef CONFIG_PARAVIRT
19203 +2: PV_RESTORE_REGS(CLBR_RDI)
19204 + popq %rbx
19205 +#endif
19206 +
19207 + popq %rdi
19208 + pax_force_retaddr
19209 + retq
19210 +3: ud2
19211 + jmp 3b
19212 +ENDPROC(pax_exit_kernel_user)
19213 +#endif
19214 +
19215 +.macro pax_erase_kstack
19216 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19217 + call pax_erase_kstack
19218 +#endif
19219 +.endm
19220 +
19221 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19222 +ENTRY(pax_erase_kstack)
19223 + pushq %rdi
19224 + pushq %rcx
19225 + pushq %rax
19226 + pushq %r11
19227 +
19228 + GET_THREAD_INFO(%r11)
19229 + mov TI_lowest_stack(%r11), %rdi
19230 + mov $-0xBEEF, %rax
19231 + std
19232 +
19233 +1: mov %edi, %ecx
19234 + and $THREAD_SIZE_asm - 1, %ecx
19235 + shr $3, %ecx
19236 + repne scasq
19237 + jecxz 2f
19238 +
19239 + cmp $2*8, %ecx
19240 + jc 2f
19241 +
19242 + mov $2*8, %ecx
19243 + repe scasq
19244 + jecxz 2f
19245 + jne 1b
19246 +
19247 +2: cld
19248 + mov %esp, %ecx
19249 + sub %edi, %ecx
19250 +
19251 + cmp $THREAD_SIZE_asm, %rcx
19252 + jb 3f
19253 + ud2
19254 +3:
19255 +
19256 + shr $3, %ecx
19257 + rep stosq
19258 +
19259 + mov TI_task_thread_sp0(%r11), %rdi
19260 + sub $256, %rdi
19261 + mov %rdi, TI_lowest_stack(%r11)
19262 +
19263 + popq %r11
19264 + popq %rax
19265 + popq %rcx
19266 + popq %rdi
19267 + pax_force_retaddr
19268 + ret
19269 +ENDPROC(pax_erase_kstack)
19270 +#endif
19271
19272 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19273 #ifdef CONFIG_TRACE_IRQFLAGS
19274 @@ -375,8 +660,8 @@ ENDPROC(native_usergs_sysret64)
19275 .endm
19276
19277 .macro UNFAKE_STACK_FRAME
19278 - addq $8*6, %rsp
19279 - CFI_ADJUST_CFA_OFFSET -(6*8)
19280 + addq $8*6 + ARG_SKIP, %rsp
19281 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19282 .endm
19283
19284 /*
19285 @@ -463,7 +748,7 @@ ENDPROC(native_usergs_sysret64)
19286 movq %rsp, %rsi
19287
19288 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19289 - testl $3, CS-RBP(%rsi)
19290 + testb $3, CS-RBP(%rsi)
19291 je 1f
19292 SWAPGS
19293 /*
19294 @@ -498,9 +783,10 @@ ENTRY(save_rest)
19295 movq_cfi r15, R15+16
19296 movq %r11, 8(%rsp) /* return address */
19297 FIXUP_TOP_OF_STACK %r11, 16
19298 + pax_force_retaddr
19299 ret
19300 CFI_ENDPROC
19301 -END(save_rest)
19302 +ENDPROC(save_rest)
19303
19304 /* save complete stack frame */
19305 .pushsection .kprobes.text, "ax"
19306 @@ -529,9 +815,10 @@ ENTRY(save_paranoid)
19307 js 1f /* negative -> in kernel */
19308 SWAPGS
19309 xorl %ebx,%ebx
19310 -1: ret
19311 +1: pax_force_retaddr_bts
19312 + ret
19313 CFI_ENDPROC
19314 -END(save_paranoid)
19315 +ENDPROC(save_paranoid)
19316 .popsection
19317
19318 /*
19319 @@ -553,7 +840,7 @@ ENTRY(ret_from_fork)
19320
19321 RESTORE_REST
19322
19323 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19324 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19325 jz 1f
19326
19327 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19328 @@ -571,7 +858,7 @@ ENTRY(ret_from_fork)
19329 RESTORE_REST
19330 jmp int_ret_from_sys_call
19331 CFI_ENDPROC
19332 -END(ret_from_fork)
19333 +ENDPROC(ret_from_fork)
19334
19335 /*
19336 * System call entry. Up to 6 arguments in registers are supported.
19337 @@ -608,7 +895,7 @@ END(ret_from_fork)
19338 ENTRY(system_call)
19339 CFI_STARTPROC simple
19340 CFI_SIGNAL_FRAME
19341 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19342 + CFI_DEF_CFA rsp,0
19343 CFI_REGISTER rip,rcx
19344 /*CFI_REGISTER rflags,r11*/
19345 SWAPGS_UNSAFE_STACK
19346 @@ -621,16 +908,23 @@ GLOBAL(system_call_after_swapgs)
19347
19348 movq %rsp,PER_CPU_VAR(old_rsp)
19349 movq PER_CPU_VAR(kernel_stack),%rsp
19350 + SAVE_ARGS 8*6,0
19351 + pax_enter_kernel_user
19352 +
19353 +#ifdef CONFIG_PAX_RANDKSTACK
19354 + pax_erase_kstack
19355 +#endif
19356 +
19357 /*
19358 * No need to follow this irqs off/on section - it's straight
19359 * and short:
19360 */
19361 ENABLE_INTERRUPTS(CLBR_NONE)
19362 - SAVE_ARGS 8,0
19363 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19364 movq %rcx,RIP-ARGOFFSET(%rsp)
19365 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19366 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19367 + GET_THREAD_INFO(%rcx)
19368 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19369 jnz tracesys
19370 system_call_fastpath:
19371 #if __SYSCALL_MASK == ~0
19372 @@ -640,7 +934,7 @@ system_call_fastpath:
19373 cmpl $__NR_syscall_max,%eax
19374 #endif
19375 ja badsys
19376 - movq %r10,%rcx
19377 + movq R10-ARGOFFSET(%rsp),%rcx
19378 call *sys_call_table(,%rax,8) # XXX: rip relative
19379 movq %rax,RAX-ARGOFFSET(%rsp)
19380 /*
19381 @@ -654,10 +948,13 @@ sysret_check:
19382 LOCKDEP_SYS_EXIT
19383 DISABLE_INTERRUPTS(CLBR_NONE)
19384 TRACE_IRQS_OFF
19385 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19386 + GET_THREAD_INFO(%rcx)
19387 + movl TI_flags(%rcx),%edx
19388 andl %edi,%edx
19389 jnz sysret_careful
19390 CFI_REMEMBER_STATE
19391 + pax_exit_kernel_user
19392 + pax_erase_kstack
19393 /*
19394 * sysretq will re-enable interrupts:
19395 */
19396 @@ -709,14 +1006,18 @@ badsys:
19397 * jump back to the normal fast path.
19398 */
19399 auditsys:
19400 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
19401 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19402 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19403 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19404 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19405 movq %rax,%rsi /* 2nd arg: syscall number */
19406 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19407 call __audit_syscall_entry
19408 +
19409 + pax_erase_kstack
19410 +
19411 LOAD_ARGS 0 /* reload call-clobbered registers */
19412 + pax_set_fptr_mask
19413 jmp system_call_fastpath
19414
19415 /*
19416 @@ -737,7 +1038,7 @@ sysret_audit:
19417 /* Do syscall tracing */
19418 tracesys:
19419 #ifdef CONFIG_AUDITSYSCALL
19420 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19421 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19422 jz auditsys
19423 #endif
19424 SAVE_REST
19425 @@ -745,12 +1046,16 @@ tracesys:
19426 FIXUP_TOP_OF_STACK %rdi
19427 movq %rsp,%rdi
19428 call syscall_trace_enter
19429 +
19430 + pax_erase_kstack
19431 +
19432 /*
19433 * Reload arg registers from stack in case ptrace changed them.
19434 * We don't reload %rax because syscall_trace_enter() returned
19435 * the value it wants us to use in the table lookup.
19436 */
19437 LOAD_ARGS ARGOFFSET, 1
19438 + pax_set_fptr_mask
19439 RESTORE_REST
19440 #if __SYSCALL_MASK == ~0
19441 cmpq $__NR_syscall_max,%rax
19442 @@ -759,7 +1064,7 @@ tracesys:
19443 cmpl $__NR_syscall_max,%eax
19444 #endif
19445 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19446 - movq %r10,%rcx /* fixup for C */
19447 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19448 call *sys_call_table(,%rax,8)
19449 movq %rax,RAX-ARGOFFSET(%rsp)
19450 /* Use IRET because user could have changed frame */
19451 @@ -780,7 +1085,9 @@ GLOBAL(int_with_check)
19452 andl %edi,%edx
19453 jnz int_careful
19454 andl $~TS_COMPAT,TI_status(%rcx)
19455 - jmp retint_swapgs
19456 + pax_exit_kernel_user
19457 + pax_erase_kstack
19458 + jmp retint_swapgs_pax
19459
19460 /* Either reschedule or signal or syscall exit tracking needed. */
19461 /* First do a reschedule test. */
19462 @@ -826,7 +1133,7 @@ int_restore_rest:
19463 TRACE_IRQS_OFF
19464 jmp int_with_check
19465 CFI_ENDPROC
19466 -END(system_call)
19467 +ENDPROC(system_call)
19468
19469 .macro FORK_LIKE func
19470 ENTRY(stub_\func)
19471 @@ -839,9 +1146,10 @@ ENTRY(stub_\func)
19472 DEFAULT_FRAME 0 8 /* offset 8: return address */
19473 call sys_\func
19474 RESTORE_TOP_OF_STACK %r11, 8
19475 + pax_force_retaddr
19476 ret $REST_SKIP /* pop extended registers */
19477 CFI_ENDPROC
19478 -END(stub_\func)
19479 +ENDPROC(stub_\func)
19480 .endm
19481
19482 .macro FIXED_FRAME label,func
19483 @@ -851,9 +1159,10 @@ ENTRY(\label)
19484 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
19485 call \func
19486 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
19487 + pax_force_retaddr
19488 ret
19489 CFI_ENDPROC
19490 -END(\label)
19491 +ENDPROC(\label)
19492 .endm
19493
19494 FORK_LIKE clone
19495 @@ -870,9 +1179,10 @@ ENTRY(ptregscall_common)
19496 movq_cfi_restore R12+8, r12
19497 movq_cfi_restore RBP+8, rbp
19498 movq_cfi_restore RBX+8, rbx
19499 + pax_force_retaddr
19500 ret $REST_SKIP /* pop extended registers */
19501 CFI_ENDPROC
19502 -END(ptregscall_common)
19503 +ENDPROC(ptregscall_common)
19504
19505 ENTRY(stub_execve)
19506 CFI_STARTPROC
19507 @@ -885,7 +1195,7 @@ ENTRY(stub_execve)
19508 RESTORE_REST
19509 jmp int_ret_from_sys_call
19510 CFI_ENDPROC
19511 -END(stub_execve)
19512 +ENDPROC(stub_execve)
19513
19514 /*
19515 * sigreturn is special because it needs to restore all registers on return.
19516 @@ -902,7 +1212,7 @@ ENTRY(stub_rt_sigreturn)
19517 RESTORE_REST
19518 jmp int_ret_from_sys_call
19519 CFI_ENDPROC
19520 -END(stub_rt_sigreturn)
19521 +ENDPROC(stub_rt_sigreturn)
19522
19523 #ifdef CONFIG_X86_X32_ABI
19524 ENTRY(stub_x32_rt_sigreturn)
19525 @@ -916,7 +1226,7 @@ ENTRY(stub_x32_rt_sigreturn)
19526 RESTORE_REST
19527 jmp int_ret_from_sys_call
19528 CFI_ENDPROC
19529 -END(stub_x32_rt_sigreturn)
19530 +ENDPROC(stub_x32_rt_sigreturn)
19531
19532 ENTRY(stub_x32_execve)
19533 CFI_STARTPROC
19534 @@ -930,7 +1240,7 @@ ENTRY(stub_x32_execve)
19535 RESTORE_REST
19536 jmp int_ret_from_sys_call
19537 CFI_ENDPROC
19538 -END(stub_x32_execve)
19539 +ENDPROC(stub_x32_execve)
19540
19541 #endif
19542
19543 @@ -967,7 +1277,7 @@ vector=vector+1
19544 2: jmp common_interrupt
19545 .endr
19546 CFI_ENDPROC
19547 -END(irq_entries_start)
19548 +ENDPROC(irq_entries_start)
19549
19550 .previous
19551 END(interrupt)
19552 @@ -987,6 +1297,16 @@ END(interrupt)
19553 subq $ORIG_RAX-RBP, %rsp
19554 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19555 SAVE_ARGS_IRQ
19556 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19557 + testb $3, CS(%rdi)
19558 + jnz 1f
19559 + pax_enter_kernel
19560 + jmp 2f
19561 +1: pax_enter_kernel_user
19562 +2:
19563 +#else
19564 + pax_enter_kernel
19565 +#endif
19566 call \func
19567 .endm
19568
19569 @@ -1019,7 +1339,7 @@ ret_from_intr:
19570
19571 exit_intr:
19572 GET_THREAD_INFO(%rcx)
19573 - testl $3,CS-ARGOFFSET(%rsp)
19574 + testb $3,CS-ARGOFFSET(%rsp)
19575 je retint_kernel
19576
19577 /* Interrupt came from user space */
19578 @@ -1041,12 +1361,16 @@ retint_swapgs: /* return to user-space */
19579 * The iretq could re-enable interrupts:
19580 */
19581 DISABLE_INTERRUPTS(CLBR_ANY)
19582 + pax_exit_kernel_user
19583 +retint_swapgs_pax:
19584 TRACE_IRQS_IRETQ
19585 SWAPGS
19586 jmp restore_args
19587
19588 retint_restore_args: /* return to kernel space */
19589 DISABLE_INTERRUPTS(CLBR_ANY)
19590 + pax_exit_kernel
19591 + pax_force_retaddr (RIP-ARGOFFSET)
19592 /*
19593 * The iretq could re-enable interrupts:
19594 */
19595 @@ -1129,7 +1453,7 @@ ENTRY(retint_kernel)
19596 #endif
19597
19598 CFI_ENDPROC
19599 -END(common_interrupt)
19600 +ENDPROC(common_interrupt)
19601 /*
19602 * End of kprobes section
19603 */
19604 @@ -1147,7 +1471,7 @@ ENTRY(\sym)
19605 interrupt \do_sym
19606 jmp ret_from_intr
19607 CFI_ENDPROC
19608 -END(\sym)
19609 +ENDPROC(\sym)
19610 .endm
19611
19612 #ifdef CONFIG_SMP
19613 @@ -1203,12 +1527,22 @@ ENTRY(\sym)
19614 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19615 call error_entry
19616 DEFAULT_FRAME 0
19617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19618 + testb $3, CS(%rsp)
19619 + jnz 1f
19620 + pax_enter_kernel
19621 + jmp 2f
19622 +1: pax_enter_kernel_user
19623 +2:
19624 +#else
19625 + pax_enter_kernel
19626 +#endif
19627 movq %rsp,%rdi /* pt_regs pointer */
19628 xorl %esi,%esi /* no error code */
19629 call \do_sym
19630 jmp error_exit /* %ebx: no swapgs flag */
19631 CFI_ENDPROC
19632 -END(\sym)
19633 +ENDPROC(\sym)
19634 .endm
19635
19636 .macro paranoidzeroentry sym do_sym
19637 @@ -1221,15 +1555,25 @@ ENTRY(\sym)
19638 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19639 call save_paranoid
19640 TRACE_IRQS_OFF
19641 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19642 + testb $3, CS(%rsp)
19643 + jnz 1f
19644 + pax_enter_kernel
19645 + jmp 2f
19646 +1: pax_enter_kernel_user
19647 +2:
19648 +#else
19649 + pax_enter_kernel
19650 +#endif
19651 movq %rsp,%rdi /* pt_regs pointer */
19652 xorl %esi,%esi /* no error code */
19653 call \do_sym
19654 jmp paranoid_exit /* %ebx: no swapgs flag */
19655 CFI_ENDPROC
19656 -END(\sym)
19657 +ENDPROC(\sym)
19658 .endm
19659
19660 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19661 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19662 .macro paranoidzeroentry_ist sym do_sym ist
19663 ENTRY(\sym)
19664 INTR_FRAME
19665 @@ -1240,14 +1584,30 @@ ENTRY(\sym)
19666 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19667 call save_paranoid
19668 TRACE_IRQS_OFF_DEBUG
19669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19670 + testb $3, CS(%rsp)
19671 + jnz 1f
19672 + pax_enter_kernel
19673 + jmp 2f
19674 +1: pax_enter_kernel_user
19675 +2:
19676 +#else
19677 + pax_enter_kernel
19678 +#endif
19679 movq %rsp,%rdi /* pt_regs pointer */
19680 xorl %esi,%esi /* no error code */
19681 +#ifdef CONFIG_SMP
19682 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19683 + lea init_tss(%r12), %r12
19684 +#else
19685 + lea init_tss(%rip), %r12
19686 +#endif
19687 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19688 call \do_sym
19689 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19690 jmp paranoid_exit /* %ebx: no swapgs flag */
19691 CFI_ENDPROC
19692 -END(\sym)
19693 +ENDPROC(\sym)
19694 .endm
19695
19696 .macro errorentry sym do_sym
19697 @@ -1259,13 +1619,23 @@ ENTRY(\sym)
19698 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19699 call error_entry
19700 DEFAULT_FRAME 0
19701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19702 + testb $3, CS(%rsp)
19703 + jnz 1f
19704 + pax_enter_kernel
19705 + jmp 2f
19706 +1: pax_enter_kernel_user
19707 +2:
19708 +#else
19709 + pax_enter_kernel
19710 +#endif
19711 movq %rsp,%rdi /* pt_regs pointer */
19712 movq ORIG_RAX(%rsp),%rsi /* get error code */
19713 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19714 call \do_sym
19715 jmp error_exit /* %ebx: no swapgs flag */
19716 CFI_ENDPROC
19717 -END(\sym)
19718 +ENDPROC(\sym)
19719 .endm
19720
19721 /* error code is on the stack already */
19722 @@ -1279,13 +1649,23 @@ ENTRY(\sym)
19723 call save_paranoid
19724 DEFAULT_FRAME 0
19725 TRACE_IRQS_OFF
19726 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19727 + testb $3, CS(%rsp)
19728 + jnz 1f
19729 + pax_enter_kernel
19730 + jmp 2f
19731 +1: pax_enter_kernel_user
19732 +2:
19733 +#else
19734 + pax_enter_kernel
19735 +#endif
19736 movq %rsp,%rdi /* pt_regs pointer */
19737 movq ORIG_RAX(%rsp),%rsi /* get error code */
19738 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19739 call \do_sym
19740 jmp paranoid_exit /* %ebx: no swapgs flag */
19741 CFI_ENDPROC
19742 -END(\sym)
19743 +ENDPROC(\sym)
19744 .endm
19745
19746 zeroentry divide_error do_divide_error
19747 @@ -1315,9 +1695,10 @@ gs_change:
19748 2: mfence /* workaround */
19749 SWAPGS
19750 popfq_cfi
19751 + pax_force_retaddr
19752 ret
19753 CFI_ENDPROC
19754 -END(native_load_gs_index)
19755 +ENDPROC(native_load_gs_index)
19756
19757 _ASM_EXTABLE(gs_change,bad_gs)
19758 .section .fixup,"ax"
19759 @@ -1345,9 +1726,10 @@ ENTRY(call_softirq)
19760 CFI_DEF_CFA_REGISTER rsp
19761 CFI_ADJUST_CFA_OFFSET -8
19762 decl PER_CPU_VAR(irq_count)
19763 + pax_force_retaddr
19764 ret
19765 CFI_ENDPROC
19766 -END(call_softirq)
19767 +ENDPROC(call_softirq)
19768
19769 #ifdef CONFIG_XEN
19770 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19771 @@ -1385,7 +1767,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19772 decl PER_CPU_VAR(irq_count)
19773 jmp error_exit
19774 CFI_ENDPROC
19775 -END(xen_do_hypervisor_callback)
19776 +ENDPROC(xen_do_hypervisor_callback)
19777
19778 /*
19779 * Hypervisor uses this for application faults while it executes.
19780 @@ -1444,7 +1826,7 @@ ENTRY(xen_failsafe_callback)
19781 SAVE_ALL
19782 jmp error_exit
19783 CFI_ENDPROC
19784 -END(xen_failsafe_callback)
19785 +ENDPROC(xen_failsafe_callback)
19786
19787 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
19788 xen_hvm_callback_vector xen_evtchn_do_upcall
19789 @@ -1498,16 +1880,31 @@ ENTRY(paranoid_exit)
19790 TRACE_IRQS_OFF_DEBUG
19791 testl %ebx,%ebx /* swapgs needed? */
19792 jnz paranoid_restore
19793 - testl $3,CS(%rsp)
19794 + testb $3,CS(%rsp)
19795 jnz paranoid_userspace
19796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19797 + pax_exit_kernel
19798 + TRACE_IRQS_IRETQ 0
19799 + SWAPGS_UNSAFE_STACK
19800 + RESTORE_ALL 8
19801 + pax_force_retaddr_bts
19802 + jmp irq_return
19803 +#endif
19804 paranoid_swapgs:
19805 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19806 + pax_exit_kernel_user
19807 +#else
19808 + pax_exit_kernel
19809 +#endif
19810 TRACE_IRQS_IRETQ 0
19811 SWAPGS_UNSAFE_STACK
19812 RESTORE_ALL 8
19813 jmp irq_return
19814 paranoid_restore:
19815 + pax_exit_kernel
19816 TRACE_IRQS_IRETQ_DEBUG 0
19817 RESTORE_ALL 8
19818 + pax_force_retaddr_bts
19819 jmp irq_return
19820 paranoid_userspace:
19821 GET_THREAD_INFO(%rcx)
19822 @@ -1536,7 +1933,7 @@ paranoid_schedule:
19823 TRACE_IRQS_OFF
19824 jmp paranoid_userspace
19825 CFI_ENDPROC
19826 -END(paranoid_exit)
19827 +ENDPROC(paranoid_exit)
19828
19829 /*
19830 * Exception entry point. This expects an error code/orig_rax on the stack.
19831 @@ -1563,12 +1960,13 @@ ENTRY(error_entry)
19832 movq_cfi r14, R14+8
19833 movq_cfi r15, R15+8
19834 xorl %ebx,%ebx
19835 - testl $3,CS+8(%rsp)
19836 + testb $3,CS+8(%rsp)
19837 je error_kernelspace
19838 error_swapgs:
19839 SWAPGS
19840 error_sti:
19841 TRACE_IRQS_OFF
19842 + pax_force_retaddr_bts
19843 ret
19844
19845 /*
19846 @@ -1595,7 +1993,7 @@ bstep_iret:
19847 movq %rcx,RIP+8(%rsp)
19848 jmp error_swapgs
19849 CFI_ENDPROC
19850 -END(error_entry)
19851 +ENDPROC(error_entry)
19852
19853
19854 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19855 @@ -1615,7 +2013,7 @@ ENTRY(error_exit)
19856 jnz retint_careful
19857 jmp retint_swapgs
19858 CFI_ENDPROC
19859 -END(error_exit)
19860 +ENDPROC(error_exit)
19861
19862 /*
19863 * Test if a given stack is an NMI stack or not.
19864 @@ -1673,9 +2071,11 @@ ENTRY(nmi)
19865 * If %cs was not the kernel segment, then the NMI triggered in user
19866 * space, which means it is definitely not nested.
19867 */
19868 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19869 + je 1f
19870 cmpl $__KERNEL_CS, 16(%rsp)
19871 jne first_nmi
19872 -
19873 +1:
19874 /*
19875 * Check the special variable on the stack to see if NMIs are
19876 * executing.
19877 @@ -1709,8 +2109,7 @@ nested_nmi:
19878
19879 1:
19880 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
19881 - leaq -1*8(%rsp), %rdx
19882 - movq %rdx, %rsp
19883 + subq $8, %rsp
19884 CFI_ADJUST_CFA_OFFSET 1*8
19885 leaq -10*8(%rsp), %rdx
19886 pushq_cfi $__KERNEL_DS
19887 @@ -1728,6 +2127,7 @@ nested_nmi_out:
19888 CFI_RESTORE rdx
19889
19890 /* No need to check faults here */
19891 + pax_force_retaddr_bts
19892 INTERRUPT_RETURN
19893
19894 CFI_RESTORE_STATE
19895 @@ -1844,6 +2244,17 @@ end_repeat_nmi:
19896 */
19897 movq %cr2, %r12
19898
19899 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19900 + testb $3, CS(%rsp)
19901 + jnz 1f
19902 + pax_enter_kernel
19903 + jmp 2f
19904 +1: pax_enter_kernel_user
19905 +2:
19906 +#else
19907 + pax_enter_kernel
19908 +#endif
19909 +
19910 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
19911 movq %rsp,%rdi
19912 movq $-1,%rsi
19913 @@ -1859,23 +2270,34 @@ end_repeat_nmi:
19914 testl %ebx,%ebx /* swapgs needed? */
19915 jnz nmi_restore
19916 nmi_swapgs:
19917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19918 + pax_exit_kernel_user
19919 +#else
19920 + pax_exit_kernel
19921 +#endif
19922 SWAPGS_UNSAFE_STACK
19923 + RESTORE_ALL 6*8
19924 + /* Clear the NMI executing stack variable */
19925 + movq $0, 5*8(%rsp)
19926 + jmp irq_return
19927 nmi_restore:
19928 + pax_exit_kernel
19929 /* Pop the extra iret frame at once */
19930 RESTORE_ALL 6*8
19931 + pax_force_retaddr_bts
19932
19933 /* Clear the NMI executing stack variable */
19934 movq $0, 5*8(%rsp)
19935 jmp irq_return
19936 CFI_ENDPROC
19937 -END(nmi)
19938 +ENDPROC(nmi)
19939
19940 ENTRY(ignore_sysret)
19941 CFI_STARTPROC
19942 mov $-ENOSYS,%eax
19943 sysret
19944 CFI_ENDPROC
19945 -END(ignore_sysret)
19946 +ENDPROC(ignore_sysret)
19947
19948 /*
19949 * End of kprobes section
19950 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
19951 index 42a392a..fbbd930 100644
19952 --- a/arch/x86/kernel/ftrace.c
19953 +++ b/arch/x86/kernel/ftrace.c
19954 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
19955 {
19956 unsigned char replaced[MCOUNT_INSN_SIZE];
19957
19958 + ip = ktla_ktva(ip);
19959 +
19960 /*
19961 * Note: Due to modules and __init, code can
19962 * disappear and change, we need to protect against faulting
19963 @@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19964 unsigned char old[MCOUNT_INSN_SIZE], *new;
19965 int ret;
19966
19967 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
19968 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
19969 new = ftrace_call_replace(ip, (unsigned long)func);
19970
19971 /* See comment above by declaration of modifying_ftrace_code */
19972 @@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19973 /* Also update the regs callback function */
19974 if (!ret) {
19975 ip = (unsigned long)(&ftrace_regs_call);
19976 - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
19977 + memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
19978 new = ftrace_call_replace(ip, (unsigned long)func);
19979 ret = ftrace_modify_code(ip, old, new);
19980 }
19981 @@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
19982 * kernel identity mapping to modify code.
19983 */
19984 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
19985 - ip = (unsigned long)__va(__pa_symbol(ip));
19986 + ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
19987
19988 return probe_kernel_write((void *)ip, val, size);
19989 }
19990 @@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
19991 unsigned char replaced[MCOUNT_INSN_SIZE];
19992 unsigned char brk = BREAKPOINT_INSTRUCTION;
19993
19994 - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
19995 + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
19996 return -EFAULT;
19997
19998 /* Make sure it is what we expect it to be */
19999 @@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20000 return ret;
20001
20002 fail_update:
20003 - probe_kernel_write((void *)ip, &old_code[0], 1);
20004 + probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20005 goto out;
20006 }
20007
20008 @@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20009 {
20010 unsigned char code[MCOUNT_INSN_SIZE];
20011
20012 + ip = ktla_ktva(ip);
20013 +
20014 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20015 return -EFAULT;
20016
20017 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
20018 index 1c68ccb..b4bc15c 100644
20019 --- a/arch/x86/kernel/head64.c
20020 +++ b/arch/x86/kernel/head64.c
20021 @@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
20022 if (console_loglevel == 10)
20023 early_printk("Kernel alive\n");
20024
20025 - clear_page(init_level4_pgt);
20026 /* set init_level4_pgt kernel high mapping*/
20027 init_level4_pgt[511] = early_level4_pgt[511];
20028
20029 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20030 index 73afd11..d1670f5 100644
20031 --- a/arch/x86/kernel/head_32.S
20032 +++ b/arch/x86/kernel/head_32.S
20033 @@ -26,6 +26,12 @@
20034 /* Physical address */
20035 #define pa(X) ((X) - __PAGE_OFFSET)
20036
20037 +#ifdef CONFIG_PAX_KERNEXEC
20038 +#define ta(X) (X)
20039 +#else
20040 +#define ta(X) ((X) - __PAGE_OFFSET)
20041 +#endif
20042 +
20043 /*
20044 * References to members of the new_cpu_data structure.
20045 */
20046 @@ -55,11 +61,7 @@
20047 * and small than max_low_pfn, otherwise will waste some page table entries
20048 */
20049
20050 -#if PTRS_PER_PMD > 1
20051 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20052 -#else
20053 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20054 -#endif
20055 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20056
20057 /* Number of possible pages in the lowmem region */
20058 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20059 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20060 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20061
20062 /*
20063 + * Real beginning of normal "text" segment
20064 + */
20065 +ENTRY(stext)
20066 +ENTRY(_stext)
20067 +
20068 +/*
20069 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20070 * %esi points to the real-mode code as a 32-bit pointer.
20071 * CS and DS must be 4 GB flat segments, but we don't depend on
20072 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20073 * can.
20074 */
20075 __HEAD
20076 +
20077 +#ifdef CONFIG_PAX_KERNEXEC
20078 + jmp startup_32
20079 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20080 +.fill PAGE_SIZE-5,1,0xcc
20081 +#endif
20082 +
20083 ENTRY(startup_32)
20084 movl pa(stack_start),%ecx
20085
20086 @@ -106,6 +121,59 @@ ENTRY(startup_32)
20087 2:
20088 leal -__PAGE_OFFSET(%ecx),%esp
20089
20090 +#ifdef CONFIG_SMP
20091 + movl $pa(cpu_gdt_table),%edi
20092 + movl $__per_cpu_load,%eax
20093 + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20094 + rorl $16,%eax
20095 + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20096 + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20097 + movl $__per_cpu_end - 1,%eax
20098 + subl $__per_cpu_start,%eax
20099 + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20100 +#endif
20101 +
20102 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20103 + movl $NR_CPUS,%ecx
20104 + movl $pa(cpu_gdt_table),%edi
20105 +1:
20106 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20107 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20108 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20109 + addl $PAGE_SIZE_asm,%edi
20110 + loop 1b
20111 +#endif
20112 +
20113 +#ifdef CONFIG_PAX_KERNEXEC
20114 + movl $pa(boot_gdt),%edi
20115 + movl $__LOAD_PHYSICAL_ADDR,%eax
20116 + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20117 + rorl $16,%eax
20118 + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20119 + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20120 + rorl $16,%eax
20121 +
20122 + ljmp $(__BOOT_CS),$1f
20123 +1:
20124 +
20125 + movl $NR_CPUS,%ecx
20126 + movl $pa(cpu_gdt_table),%edi
20127 + addl $__PAGE_OFFSET,%eax
20128 +1:
20129 + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20130 + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20131 + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20132 + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20133 + rorl $16,%eax
20134 + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20135 + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20136 + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20137 + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20138 + rorl $16,%eax
20139 + addl $PAGE_SIZE_asm,%edi
20140 + loop 1b
20141 +#endif
20142 +
20143 /*
20144 * Clear BSS first so that there are no surprises...
20145 */
20146 @@ -201,8 +269,11 @@ ENTRY(startup_32)
20147 movl %eax, pa(max_pfn_mapped)
20148
20149 /* Do early initialization of the fixmap area */
20150 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20151 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20152 +#ifdef CONFIG_COMPAT_VDSO
20153 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20154 +#else
20155 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20156 +#endif
20157 #else /* Not PAE */
20158
20159 page_pde_offset = (__PAGE_OFFSET >> 20);
20160 @@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20161 movl %eax, pa(max_pfn_mapped)
20162
20163 /* Do early initialization of the fixmap area */
20164 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20165 - movl %eax,pa(initial_page_table+0xffc)
20166 +#ifdef CONFIG_COMPAT_VDSO
20167 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20168 +#else
20169 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20170 +#endif
20171 #endif
20172
20173 #ifdef CONFIG_PARAVIRT
20174 @@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20175 cmpl $num_subarch_entries, %eax
20176 jae bad_subarch
20177
20178 - movl pa(subarch_entries)(,%eax,4), %eax
20179 - subl $__PAGE_OFFSET, %eax
20180 - jmp *%eax
20181 + jmp *pa(subarch_entries)(,%eax,4)
20182
20183 bad_subarch:
20184 WEAK(lguest_entry)
20185 @@ -261,10 +333,10 @@ WEAK(xen_entry)
20186 __INITDATA
20187
20188 subarch_entries:
20189 - .long default_entry /* normal x86/PC */
20190 - .long lguest_entry /* lguest hypervisor */
20191 - .long xen_entry /* Xen hypervisor */
20192 - .long default_entry /* Moorestown MID */
20193 + .long ta(default_entry) /* normal x86/PC */
20194 + .long ta(lguest_entry) /* lguest hypervisor */
20195 + .long ta(xen_entry) /* Xen hypervisor */
20196 + .long ta(default_entry) /* Moorestown MID */
20197 num_subarch_entries = (. - subarch_entries) / 4
20198 .previous
20199 #else
20200 @@ -355,6 +427,7 @@ default_entry:
20201 movl pa(mmu_cr4_features),%eax
20202 movl %eax,%cr4
20203
20204 +#ifdef CONFIG_X86_PAE
20205 testb $X86_CR4_PAE, %al # check if PAE is enabled
20206 jz enable_paging
20207
20208 @@ -383,6 +456,9 @@ default_entry:
20209 /* Make changes effective */
20210 wrmsr
20211
20212 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20213 +#endif
20214 +
20215 enable_paging:
20216
20217 /*
20218 @@ -451,14 +527,20 @@ is486:
20219 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20220 movl %eax,%ss # after changing gdt.
20221
20222 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
20223 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20224 movl %eax,%ds
20225 movl %eax,%es
20226
20227 movl $(__KERNEL_PERCPU), %eax
20228 movl %eax,%fs # set this cpu's percpu
20229
20230 +#ifdef CONFIG_CC_STACKPROTECTOR
20231 movl $(__KERNEL_STACK_CANARY),%eax
20232 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20233 + movl $(__USER_DS),%eax
20234 +#else
20235 + xorl %eax,%eax
20236 +#endif
20237 movl %eax,%gs
20238
20239 xorl %eax,%eax # Clear LDT
20240 @@ -534,8 +616,11 @@ setup_once:
20241 * relocation. Manually set base address in stack canary
20242 * segment descriptor.
20243 */
20244 - movl $gdt_page,%eax
20245 + movl $cpu_gdt_table,%eax
20246 movl $stack_canary,%ecx
20247 +#ifdef CONFIG_SMP
20248 + addl $__per_cpu_load,%ecx
20249 +#endif
20250 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20251 shrl $16, %ecx
20252 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20253 @@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
20254 /* This is global to keep gas from relaxing the jumps */
20255 ENTRY(early_idt_handler)
20256 cld
20257 - cmpl $2,%ss:early_recursion_flag
20258 + cmpl $1,%ss:early_recursion_flag
20259 je hlt_loop
20260 incl %ss:early_recursion_flag
20261
20262 @@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
20263 pushl (20+6*4)(%esp) /* trapno */
20264 pushl $fault_msg
20265 call printk
20266 -#endif
20267 call dump_stack
20268 +#endif
20269 hlt_loop:
20270 hlt
20271 jmp hlt_loop
20272 @@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
20273 /* This is the default interrupt "handler" :-) */
20274 ALIGN
20275 ignore_int:
20276 - cld
20277 #ifdef CONFIG_PRINTK
20278 + cmpl $2,%ss:early_recursion_flag
20279 + je hlt_loop
20280 + incl %ss:early_recursion_flag
20281 + cld
20282 pushl %eax
20283 pushl %ecx
20284 pushl %edx
20285 @@ -634,9 +722,6 @@ ignore_int:
20286 movl $(__KERNEL_DS),%eax
20287 movl %eax,%ds
20288 movl %eax,%es
20289 - cmpl $2,early_recursion_flag
20290 - je hlt_loop
20291 - incl early_recursion_flag
20292 pushl 16(%esp)
20293 pushl 24(%esp)
20294 pushl 32(%esp)
20295 @@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
20296 /*
20297 * BSS section
20298 */
20299 -__PAGE_ALIGNED_BSS
20300 - .align PAGE_SIZE
20301 #ifdef CONFIG_X86_PAE
20302 +.section .initial_pg_pmd,"a",@progbits
20303 initial_pg_pmd:
20304 .fill 1024*KPMDS,4,0
20305 #else
20306 +.section .initial_page_table,"a",@progbits
20307 ENTRY(initial_page_table)
20308 .fill 1024,4,0
20309 #endif
20310 +.section .initial_pg_fixmap,"a",@progbits
20311 initial_pg_fixmap:
20312 .fill 1024,4,0
20313 +.section .empty_zero_page,"a",@progbits
20314 ENTRY(empty_zero_page)
20315 .fill 4096,1,0
20316 +.section .swapper_pg_dir,"a",@progbits
20317 ENTRY(swapper_pg_dir)
20318 +#ifdef CONFIG_X86_PAE
20319 + .fill 4,8,0
20320 +#else
20321 .fill 1024,4,0
20322 +#endif
20323 +
20324 +/*
20325 + * The IDT has to be page-aligned to simplify the Pentium
20326 + * F0 0F bug workaround.. We have a special link segment
20327 + * for this.
20328 + */
20329 +.section .idt,"a",@progbits
20330 +ENTRY(idt_table)
20331 + .fill 256,8,0
20332
20333 /*
20334 * This starts the data section.
20335 */
20336 #ifdef CONFIG_X86_PAE
20337 -__PAGE_ALIGNED_DATA
20338 - /* Page-aligned for the benefit of paravirt? */
20339 - .align PAGE_SIZE
20340 +.section .initial_page_table,"a",@progbits
20341 ENTRY(initial_page_table)
20342 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20343 # if KPMDS == 3
20344 @@ -711,12 +810,20 @@ ENTRY(initial_page_table)
20345 # error "Kernel PMDs should be 1, 2 or 3"
20346 # endif
20347 .align PAGE_SIZE /* needs to be page-sized too */
20348 +
20349 +#ifdef CONFIG_PAX_PER_CPU_PGD
20350 +ENTRY(cpu_pgd)
20351 + .rept NR_CPUS
20352 + .fill 4,8,0
20353 + .endr
20354 +#endif
20355 +
20356 #endif
20357
20358 .data
20359 .balign 4
20360 ENTRY(stack_start)
20361 - .long init_thread_union+THREAD_SIZE
20362 + .long init_thread_union+THREAD_SIZE-8
20363
20364 __INITRODATA
20365 int_msg:
20366 @@ -744,7 +851,7 @@ fault_msg:
20367 * segment size, and 32-bit linear address value:
20368 */
20369
20370 - .data
20371 +.section .rodata,"a",@progbits
20372 .globl boot_gdt_descr
20373 .globl idt_descr
20374
20375 @@ -753,7 +860,7 @@ fault_msg:
20376 .word 0 # 32 bit align gdt_desc.address
20377 boot_gdt_descr:
20378 .word __BOOT_DS+7
20379 - .long boot_gdt - __PAGE_OFFSET
20380 + .long pa(boot_gdt)
20381
20382 .word 0 # 32-bit align idt_desc.address
20383 idt_descr:
20384 @@ -764,7 +871,7 @@ idt_descr:
20385 .word 0 # 32 bit align gdt_desc.address
20386 ENTRY(early_gdt_descr)
20387 .word GDT_ENTRIES*8-1
20388 - .long gdt_page /* Overwritten for secondary CPUs */
20389 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
20390
20391 /*
20392 * The boot_gdt must mirror the equivalent in setup.S and is
20393 @@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
20394 .align L1_CACHE_BYTES
20395 ENTRY(boot_gdt)
20396 .fill GDT_ENTRY_BOOT_CS,8,0
20397 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20398 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20399 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20400 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20401 +
20402 + .align PAGE_SIZE_asm
20403 +ENTRY(cpu_gdt_table)
20404 + .rept NR_CPUS
20405 + .quad 0x0000000000000000 /* NULL descriptor */
20406 + .quad 0x0000000000000000 /* 0x0b reserved */
20407 + .quad 0x0000000000000000 /* 0x13 reserved */
20408 + .quad 0x0000000000000000 /* 0x1b reserved */
20409 +
20410 +#ifdef CONFIG_PAX_KERNEXEC
20411 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20412 +#else
20413 + .quad 0x0000000000000000 /* 0x20 unused */
20414 +#endif
20415 +
20416 + .quad 0x0000000000000000 /* 0x28 unused */
20417 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20418 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20419 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20420 + .quad 0x0000000000000000 /* 0x4b reserved */
20421 + .quad 0x0000000000000000 /* 0x53 reserved */
20422 + .quad 0x0000000000000000 /* 0x5b reserved */
20423 +
20424 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20425 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20426 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20427 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20428 +
20429 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20430 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20431 +
20432 + /*
20433 + * Segments used for calling PnP BIOS have byte granularity.
20434 + * The code segments and data segments have fixed 64k limits,
20435 + * the transfer segment sizes are set at run time.
20436 + */
20437 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
20438 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
20439 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
20440 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
20441 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
20442 +
20443 + /*
20444 + * The APM segments have byte granularity and their bases
20445 + * are set at run time. All have 64k limits.
20446 + */
20447 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20448 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20449 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
20450 +
20451 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20452 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20453 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20454 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20455 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20456 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20457 +
20458 + /* Be sure this is zeroed to avoid false validations in Xen */
20459 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20460 + .endr
20461 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20462 index 08f7e80..40cbed5 100644
20463 --- a/arch/x86/kernel/head_64.S
20464 +++ b/arch/x86/kernel/head_64.S
20465 @@ -20,6 +20,8 @@
20466 #include <asm/processor-flags.h>
20467 #include <asm/percpu.h>
20468 #include <asm/nops.h>
20469 +#include <asm/cpufeature.h>
20470 +#include <asm/alternative-asm.h>
20471
20472 #ifdef CONFIG_PARAVIRT
20473 #include <asm/asm-offsets.h>
20474 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20475 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20476 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20477 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20478 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
20479 +L3_VMALLOC_START = pud_index(VMALLOC_START)
20480 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
20481 +L3_VMALLOC_END = pud_index(VMALLOC_END)
20482 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20483 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20484
20485 .text
20486 __HEAD
20487 @@ -89,11 +97,15 @@ startup_64:
20488 * Fixup the physical addresses in the page table
20489 */
20490 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
20491 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20492 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20493 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20494
20495 addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20496 addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20497
20498 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20499 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20500
20501 /*
20502 * Set up the identity mapping for the switchover. These
20503 @@ -175,8 +187,8 @@ ENTRY(secondary_startup_64)
20504 movq $(init_level4_pgt - __START_KERNEL_map), %rax
20505 1:
20506
20507 - /* Enable PAE mode and PGE */
20508 - movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
20509 + /* Enable PAE mode and PSE/PGE */
20510 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
20511 movq %rcx, %cr4
20512
20513 /* Setup early boot stage 4 level pagetables. */
20514 @@ -197,10 +209,18 @@ ENTRY(secondary_startup_64)
20515 movl $MSR_EFER, %ecx
20516 rdmsr
20517 btsl $_EFER_SCE, %eax /* Enable System Call */
20518 - btl $20,%edi /* No Execute supported? */
20519 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20520 jnc 1f
20521 btsl $_EFER_NX, %eax
20522 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
20523 + leaq init_level4_pgt(%rip), %rdi
20524 +#ifndef CONFIG_EFI
20525 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20526 +#endif
20527 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20528 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20529 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20530 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20531 1: wrmsr /* Make changes effective */
20532
20533 /* Setup cr0 */
20534 @@ -280,6 +300,7 @@ ENTRY(secondary_startup_64)
20535 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
20536 * address given in m16:64.
20537 */
20538 + pax_set_fptr_mask
20539 movq initial_code(%rip),%rax
20540 pushq $0 # fake return address to stop unwinder
20541 pushq $__KERNEL_CS # set correct cs
20542 @@ -386,7 +407,7 @@ ENTRY(early_idt_handler)
20543 call dump_stack
20544 #ifdef CONFIG_KALLSYMS
20545 leaq early_idt_ripmsg(%rip),%rdi
20546 - movq 40(%rsp),%rsi # %rip again
20547 + movq 88(%rsp),%rsi # %rip again
20548 call __print_symbol
20549 #endif
20550 #endif /* EARLY_PRINTK */
20551 @@ -414,6 +435,7 @@ ENDPROC(early_idt_handler)
20552 early_recursion_flag:
20553 .long 0
20554
20555 + .section .rodata,"a",@progbits
20556 #ifdef CONFIG_EARLY_PRINTK
20557 early_idt_msg:
20558 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20559 @@ -443,27 +465,50 @@ NEXT_PAGE(early_dynamic_pgts)
20560
20561 .data
20562
20563 -#ifndef CONFIG_XEN
20564 NEXT_PAGE(init_level4_pgt)
20565 - .fill 512,8,0
20566 -#else
20567 -NEXT_PAGE(init_level4_pgt)
20568 - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20569 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20570 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20571 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
20572 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20573 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
20574 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20575 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20576 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20577 .org init_level4_pgt + L4_START_KERNEL*8, 0
20578 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20579 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20580
20581 +#ifdef CONFIG_PAX_PER_CPU_PGD
20582 +NEXT_PAGE(cpu_pgd)
20583 + .rept NR_CPUS
20584 + .fill 512,8,0
20585 + .endr
20586 +#endif
20587 +
20588 NEXT_PAGE(level3_ident_pgt)
20589 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20590 +#ifdef CONFIG_XEN
20591 .fill 511, 8, 0
20592 +#else
20593 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20594 + .fill 510,8,0
20595 +#endif
20596 +
20597 +NEXT_PAGE(level3_vmalloc_start_pgt)
20598 + .fill 512,8,0
20599 +
20600 +NEXT_PAGE(level3_vmalloc_end_pgt)
20601 + .fill 512,8,0
20602 +
20603 +NEXT_PAGE(level3_vmemmap_pgt)
20604 + .fill L3_VMEMMAP_START,8,0
20605 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20606 +
20607 NEXT_PAGE(level2_ident_pgt)
20608 - /* Since I easily can, map the first 1G.
20609 + /* Since I easily can, map the first 2G.
20610 * Don't set NX because code runs from these pages.
20611 */
20612 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20613 -#endif
20614 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20615
20616 NEXT_PAGE(level3_kernel_pgt)
20617 .fill L3_START_KERNEL,8,0
20618 @@ -471,6 +516,9 @@ NEXT_PAGE(level3_kernel_pgt)
20619 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20620 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20621
20622 +NEXT_PAGE(level2_vmemmap_pgt)
20623 + .fill 512,8,0
20624 +
20625 NEXT_PAGE(level2_kernel_pgt)
20626 /*
20627 * 512 MB kernel mapping. We spend a full page on this pagetable
20628 @@ -486,38 +534,64 @@ NEXT_PAGE(level2_kernel_pgt)
20629 KERNEL_IMAGE_SIZE/PMD_SIZE)
20630
20631 NEXT_PAGE(level2_fixmap_pgt)
20632 - .fill 506,8,0
20633 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20634 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20635 - .fill 5,8,0
20636 + .fill 507,8,0
20637 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20638 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20639 + .fill 4,8,0
20640
20641 -NEXT_PAGE(level1_fixmap_pgt)
20642 +NEXT_PAGE(level1_vsyscall_pgt)
20643 .fill 512,8,0
20644
20645 #undef PMDS
20646
20647 - .data
20648 + .align PAGE_SIZE
20649 +ENTRY(cpu_gdt_table)
20650 + .rept NR_CPUS
20651 + .quad 0x0000000000000000 /* NULL descriptor */
20652 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20653 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
20654 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
20655 + .quad 0x00cffb000000ffff /* __USER32_CS */
20656 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20657 + .quad 0x00affb000000ffff /* __USER_CS */
20658 +
20659 +#ifdef CONFIG_PAX_KERNEXEC
20660 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20661 +#else
20662 + .quad 0x0 /* unused */
20663 +#endif
20664 +
20665 + .quad 0,0 /* TSS */
20666 + .quad 0,0 /* LDT */
20667 + .quad 0,0,0 /* three TLS descriptors */
20668 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
20669 + /* asm/segment.h:GDT_ENTRIES must match this */
20670 +
20671 + /* zero the remaining page */
20672 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20673 + .endr
20674 +
20675 .align 16
20676 .globl early_gdt_descr
20677 early_gdt_descr:
20678 .word GDT_ENTRIES*8-1
20679 early_gdt_descr_base:
20680 - .quad INIT_PER_CPU_VAR(gdt_page)
20681 + .quad cpu_gdt_table
20682
20683 ENTRY(phys_base)
20684 /* This must match the first entry in level2_kernel_pgt */
20685 .quad 0x0000000000000000
20686
20687 #include "../../x86/xen/xen-head.S"
20688 -
20689 - .section .bss, "aw", @nobits
20690 +
20691 + .section .rodata,"a",@progbits
20692 .align L1_CACHE_BYTES
20693 ENTRY(idt_table)
20694 - .skip IDT_ENTRIES * 16
20695 + .fill 512,8,0
20696
20697 .align L1_CACHE_BYTES
20698 ENTRY(nmi_idt_table)
20699 - .skip IDT_ENTRIES * 16
20700 + .fill 512,8,0
20701
20702 __PAGE_ALIGNED_BSS
20703 NEXT_PAGE(empty_zero_page)
20704 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20705 index 0fa6912..37fce70 100644
20706 --- a/arch/x86/kernel/i386_ksyms_32.c
20707 +++ b/arch/x86/kernel/i386_ksyms_32.c
20708 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20709 EXPORT_SYMBOL(cmpxchg8b_emu);
20710 #endif
20711
20712 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
20713 +
20714 /* Networking helper routines. */
20715 EXPORT_SYMBOL(csum_partial_copy_generic);
20716 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20717 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20718
20719 EXPORT_SYMBOL(__get_user_1);
20720 EXPORT_SYMBOL(__get_user_2);
20721 @@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
20722
20723 EXPORT_SYMBOL(csum_partial);
20724 EXPORT_SYMBOL(empty_zero_page);
20725 +
20726 +#ifdef CONFIG_PAX_KERNEXEC
20727 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20728 +#endif
20729 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20730 index 245a71d..89d9ce4 100644
20731 --- a/arch/x86/kernel/i387.c
20732 +++ b/arch/x86/kernel/i387.c
20733 @@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20734 static inline bool interrupted_user_mode(void)
20735 {
20736 struct pt_regs *regs = get_irq_regs();
20737 - return regs && user_mode_vm(regs);
20738 + return regs && user_mode(regs);
20739 }
20740
20741 /*
20742 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20743 index 9a5c460..84868423 100644
20744 --- a/arch/x86/kernel/i8259.c
20745 +++ b/arch/x86/kernel/i8259.c
20746 @@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
20747 static void make_8259A_irq(unsigned int irq)
20748 {
20749 disable_irq_nosync(irq);
20750 - io_apic_irqs &= ~(1<<irq);
20751 + io_apic_irqs &= ~(1UL<<irq);
20752 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
20753 i8259A_chip.name);
20754 enable_irq(irq);
20755 @@ -209,7 +209,7 @@ spurious_8259A_irq:
20756 "spurious 8259A interrupt: IRQ%d.\n", irq);
20757 spurious_irq_mask |= irqmask;
20758 }
20759 - atomic_inc(&irq_err_count);
20760 + atomic_inc_unchecked(&irq_err_count);
20761 /*
20762 * Theoretically we do not have to handle this IRQ,
20763 * but in Linux this does not cause problems and is
20764 @@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20765 /* (slave's support for AEOI in flat mode is to be investigated) */
20766 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20767
20768 + pax_open_kernel();
20769 if (auto_eoi)
20770 /*
20771 * In AEOI mode we just have to mask the interrupt
20772 * when acking.
20773 */
20774 - i8259A_chip.irq_mask_ack = disable_8259A_irq;
20775 + *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20776 else
20777 - i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20778 + *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20779 + pax_close_kernel();
20780
20781 udelay(100); /* wait for 8259A to initialize */
20782
20783 diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20784 index a979b5b..1d6db75 100644
20785 --- a/arch/x86/kernel/io_delay.c
20786 +++ b/arch/x86/kernel/io_delay.c
20787 @@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20788 * Quirk table for systems that misbehave (lock up, etc.) if port
20789 * 0x80 is used:
20790 */
20791 -static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20792 +static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20793 {
20794 .callback = dmi_io_delay_0xed_port,
20795 .ident = "Compaq Presario V6000",
20796 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20797 index 4ddaf66..6292f4e 100644
20798 --- a/arch/x86/kernel/ioport.c
20799 +++ b/arch/x86/kernel/ioport.c
20800 @@ -6,6 +6,7 @@
20801 #include <linux/sched.h>
20802 #include <linux/kernel.h>
20803 #include <linux/capability.h>
20804 +#include <linux/security.h>
20805 #include <linux/errno.h>
20806 #include <linux/types.h>
20807 #include <linux/ioport.h>
20808 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20809
20810 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20811 return -EINVAL;
20812 +#ifdef CONFIG_GRKERNSEC_IO
20813 + if (turn_on && grsec_disable_privio) {
20814 + gr_handle_ioperm();
20815 + return -EPERM;
20816 + }
20817 +#endif
20818 if (turn_on && !capable(CAP_SYS_RAWIO))
20819 return -EPERM;
20820
20821 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20822 * because the ->io_bitmap_max value must match the bitmap
20823 * contents:
20824 */
20825 - tss = &per_cpu(init_tss, get_cpu());
20826 + tss = init_tss + get_cpu();
20827
20828 if (turn_on)
20829 bitmap_clear(t->io_bitmap_ptr, from, num);
20830 @@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
20831 return -EINVAL;
20832 /* Trying to gain more privileges? */
20833 if (level > old) {
20834 +#ifdef CONFIG_GRKERNSEC_IO
20835 + if (grsec_disable_privio) {
20836 + gr_handle_iopl();
20837 + return -EPERM;
20838 + }
20839 +#endif
20840 if (!capable(CAP_SYS_RAWIO))
20841 return -EPERM;
20842 }
20843 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20844 index 84b7789..e65e8be 100644
20845 --- a/arch/x86/kernel/irq.c
20846 +++ b/arch/x86/kernel/irq.c
20847 @@ -18,7 +18,7 @@
20848 #include <asm/mce.h>
20849 #include <asm/hw_irq.h>
20850
20851 -atomic_t irq_err_count;
20852 +atomic_unchecked_t irq_err_count;
20853
20854 /* Function pointer for generic interrupt vector handling */
20855 void (*x86_platform_ipi_callback)(void) = NULL;
20856 @@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
20857 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
20858 seq_printf(p, " Machine check polls\n");
20859 #endif
20860 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
20861 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
20862 #if defined(CONFIG_X86_IO_APIC)
20863 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
20864 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
20865 #endif
20866 return 0;
20867 }
20868 @@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
20869
20870 u64 arch_irq_stat(void)
20871 {
20872 - u64 sum = atomic_read(&irq_err_count);
20873 + u64 sum = atomic_read_unchecked(&irq_err_count);
20874 return sum;
20875 }
20876
20877 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
20878 index 344faf8..355f60d 100644
20879 --- a/arch/x86/kernel/irq_32.c
20880 +++ b/arch/x86/kernel/irq_32.c
20881 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
20882 __asm__ __volatile__("andl %%esp,%0" :
20883 "=r" (sp) : "0" (THREAD_SIZE - 1));
20884
20885 - return sp < (sizeof(struct thread_info) + STACK_WARN);
20886 + return sp < STACK_WARN;
20887 }
20888
20889 static void print_stack_overflow(void)
20890 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
20891 * per-CPU IRQ handling contexts (thread information and stack)
20892 */
20893 union irq_ctx {
20894 - struct thread_info tinfo;
20895 - u32 stack[THREAD_SIZE/sizeof(u32)];
20896 + unsigned long previous_esp;
20897 + u32 stack[THREAD_SIZE/sizeof(u32)];
20898 } __attribute__((aligned(THREAD_SIZE)));
20899
20900 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
20901 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
20902 static inline int
20903 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20904 {
20905 - union irq_ctx *curctx, *irqctx;
20906 + union irq_ctx *irqctx;
20907 u32 *isp, arg1, arg2;
20908
20909 - curctx = (union irq_ctx *) current_thread_info();
20910 irqctx = __this_cpu_read(hardirq_ctx);
20911
20912 /*
20913 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20914 * handler) we can't do that and just have to keep using the
20915 * current stack (which is the irq stack already after all)
20916 */
20917 - if (unlikely(curctx == irqctx))
20918 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
20919 return 0;
20920
20921 /* build the stack frame on the IRQ stack */
20922 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20923 - irqctx->tinfo.task = curctx->tinfo.task;
20924 - irqctx->tinfo.previous_esp = current_stack_pointer;
20925 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20926 + irqctx->previous_esp = current_stack_pointer;
20927
20928 - /* Copy the preempt_count so that the [soft]irq checks work. */
20929 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
20930 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20931 + __set_fs(MAKE_MM_SEG(0));
20932 +#endif
20933
20934 if (unlikely(overflow))
20935 call_on_stack(print_stack_overflow, isp);
20936 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20937 : "0" (irq), "1" (desc), "2" (isp),
20938 "D" (desc->handle_irq)
20939 : "memory", "cc", "ecx");
20940 +
20941 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20942 + __set_fs(current_thread_info()->addr_limit);
20943 +#endif
20944 +
20945 return 1;
20946 }
20947
20948 @@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20949 */
20950 void __cpuinit irq_ctx_init(int cpu)
20951 {
20952 - union irq_ctx *irqctx;
20953 -
20954 if (per_cpu(hardirq_ctx, cpu))
20955 return;
20956
20957 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20958 - THREADINFO_GFP,
20959 - THREAD_SIZE_ORDER));
20960 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20961 - irqctx->tinfo.cpu = cpu;
20962 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
20963 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20964 -
20965 - per_cpu(hardirq_ctx, cpu) = irqctx;
20966 -
20967 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20968 - THREADINFO_GFP,
20969 - THREAD_SIZE_ORDER));
20970 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20971 - irqctx->tinfo.cpu = cpu;
20972 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20973 -
20974 - per_cpu(softirq_ctx, cpu) = irqctx;
20975 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20976 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20977 +
20978 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20979 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20980
20981 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20982 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20983 @@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
20984 asmlinkage void do_softirq(void)
20985 {
20986 unsigned long flags;
20987 - struct thread_info *curctx;
20988 union irq_ctx *irqctx;
20989 u32 *isp;
20990
20991 @@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
20992 local_irq_save(flags);
20993
20994 if (local_softirq_pending()) {
20995 - curctx = current_thread_info();
20996 irqctx = __this_cpu_read(softirq_ctx);
20997 - irqctx->tinfo.task = curctx->task;
20998 - irqctx->tinfo.previous_esp = current_stack_pointer;
20999 + irqctx->previous_esp = current_stack_pointer;
21000
21001 /* build the stack frame on the softirq stack */
21002 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21003 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21004 +
21005 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21006 + __set_fs(MAKE_MM_SEG(0));
21007 +#endif
21008
21009 call_on_stack(__do_softirq, isp);
21010 +
21011 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21012 + __set_fs(current_thread_info()->addr_limit);
21013 +#endif
21014 +
21015 /*
21016 * Shouldn't happen, we returned above if in_interrupt():
21017 */
21018 @@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21019 if (unlikely(!desc))
21020 return false;
21021
21022 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21023 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21024 if (unlikely(overflow))
21025 print_stack_overflow();
21026 desc->handle_irq(irq, desc);
21027 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21028 index d04d3ec..ea4b374 100644
21029 --- a/arch/x86/kernel/irq_64.c
21030 +++ b/arch/x86/kernel/irq_64.c
21031 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21032 u64 estack_top, estack_bottom;
21033 u64 curbase = (u64)task_stack_page(current);
21034
21035 - if (user_mode_vm(regs))
21036 + if (user_mode(regs))
21037 return;
21038
21039 if (regs->sp >= curbase + sizeof(struct thread_info) +
21040 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21041 index dc1404b..bbc43e7 100644
21042 --- a/arch/x86/kernel/kdebugfs.c
21043 +++ b/arch/x86/kernel/kdebugfs.c
21044 @@ -27,7 +27,7 @@ struct setup_data_node {
21045 u32 len;
21046 };
21047
21048 -static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21049 +static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21050 size_t count, loff_t *ppos)
21051 {
21052 struct setup_data_node *node = file->private_data;
21053 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21054 index 836f832..a8bda67 100644
21055 --- a/arch/x86/kernel/kgdb.c
21056 +++ b/arch/x86/kernel/kgdb.c
21057 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21058 #ifdef CONFIG_X86_32
21059 switch (regno) {
21060 case GDB_SS:
21061 - if (!user_mode_vm(regs))
21062 + if (!user_mode(regs))
21063 *(unsigned long *)mem = __KERNEL_DS;
21064 break;
21065 case GDB_SP:
21066 - if (!user_mode_vm(regs))
21067 + if (!user_mode(regs))
21068 *(unsigned long *)mem = kernel_stack_pointer(regs);
21069 break;
21070 case GDB_GS:
21071 @@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21072 bp->attr.bp_addr = breakinfo[breakno].addr;
21073 bp->attr.bp_len = breakinfo[breakno].len;
21074 bp->attr.bp_type = breakinfo[breakno].type;
21075 - info->address = breakinfo[breakno].addr;
21076 + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21077 + info->address = ktla_ktva(breakinfo[breakno].addr);
21078 + else
21079 + info->address = breakinfo[breakno].addr;
21080 info->len = breakinfo[breakno].len;
21081 info->type = breakinfo[breakno].type;
21082 val = arch_install_hw_breakpoint(bp);
21083 @@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21084 case 'k':
21085 /* clear the trace bit */
21086 linux_regs->flags &= ~X86_EFLAGS_TF;
21087 - atomic_set(&kgdb_cpu_doing_single_step, -1);
21088 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21089
21090 /* set the trace bit if we're stepping */
21091 if (remcomInBuffer[0] == 's') {
21092 linux_regs->flags |= X86_EFLAGS_TF;
21093 - atomic_set(&kgdb_cpu_doing_single_step,
21094 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21095 raw_smp_processor_id());
21096 }
21097
21098 @@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21099
21100 switch (cmd) {
21101 case DIE_DEBUG:
21102 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21103 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21104 if (user_mode(regs))
21105 return single_step_cont(regs, args);
21106 break;
21107 @@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21108 #endif /* CONFIG_DEBUG_RODATA */
21109
21110 bpt->type = BP_BREAKPOINT;
21111 - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21112 + err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21113 BREAK_INSTR_SIZE);
21114 if (err)
21115 return err;
21116 - err = probe_kernel_write((char *)bpt->bpt_addr,
21117 + err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21118 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21119 #ifdef CONFIG_DEBUG_RODATA
21120 if (!err)
21121 @@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21122 return -EBUSY;
21123 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21124 BREAK_INSTR_SIZE);
21125 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21126 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21127 if (err)
21128 return err;
21129 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21130 @@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21131 if (mutex_is_locked(&text_mutex))
21132 goto knl_write;
21133 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21134 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21135 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21136 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21137 goto knl_write;
21138 return err;
21139 knl_write:
21140 #endif /* CONFIG_DEBUG_RODATA */
21141 - return probe_kernel_write((char *)bpt->bpt_addr,
21142 + return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21143 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21144 }
21145
21146 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
21147 index 7bfe318..383d238 100644
21148 --- a/arch/x86/kernel/kprobes/core.c
21149 +++ b/arch/x86/kernel/kprobes/core.c
21150 @@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21151 s32 raddr;
21152 } __packed *insn;
21153
21154 - insn = (struct __arch_relative_insn *)from;
21155 + insn = (struct __arch_relative_insn *)ktla_ktva(from);
21156 +
21157 + pax_open_kernel();
21158 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21159 insn->op = op;
21160 + pax_close_kernel();
21161 }
21162
21163 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21164 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21165 kprobe_opcode_t opcode;
21166 kprobe_opcode_t *orig_opcodes = opcodes;
21167
21168 - if (search_exception_tables((unsigned long)opcodes))
21169 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21170 return 0; /* Page fault may occur on this address. */
21171
21172 retry:
21173 @@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21174 * for the first byte, we can recover the original instruction
21175 * from it and kp->opcode.
21176 */
21177 - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21178 + memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21179 buf[0] = kp->opcode;
21180 - return (unsigned long)buf;
21181 + return ktva_ktla((unsigned long)buf);
21182 }
21183
21184 /*
21185 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21186 /* Another subsystem puts a breakpoint, failed to recover */
21187 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21188 return 0;
21189 + pax_open_kernel();
21190 memcpy(dest, insn.kaddr, insn.length);
21191 + pax_close_kernel();
21192
21193 #ifdef CONFIG_X86_64
21194 if (insn_rip_relative(&insn)) {
21195 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21196 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
21197 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
21198 disp = (u8 *) dest + insn_offset_displacement(&insn);
21199 + pax_open_kernel();
21200 *(s32 *) disp = (s32) newdisp;
21201 + pax_close_kernel();
21202 }
21203 #endif
21204 return insn.length;
21205 @@ -488,7 +495,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21206 * nor set current_kprobe, because it doesn't use single
21207 * stepping.
21208 */
21209 - regs->ip = (unsigned long)p->ainsn.insn;
21210 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21211 preempt_enable_no_resched();
21212 return;
21213 }
21214 @@ -505,9 +512,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21215 regs->flags &= ~X86_EFLAGS_IF;
21216 /* single step inline if the instruction is an int3 */
21217 if (p->opcode == BREAKPOINT_INSTRUCTION)
21218 - regs->ip = (unsigned long)p->addr;
21219 + regs->ip = ktla_ktva((unsigned long)p->addr);
21220 else
21221 - regs->ip = (unsigned long)p->ainsn.insn;
21222 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21223 }
21224
21225 /*
21226 @@ -586,7 +593,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21227 setup_singlestep(p, regs, kcb, 0);
21228 return 1;
21229 }
21230 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
21231 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21232 /*
21233 * The breakpoint instruction was removed right
21234 * after we hit it. Another cpu has removed
21235 @@ -632,6 +639,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21236 " movq %rax, 152(%rsp)\n"
21237 RESTORE_REGS_STRING
21238 " popfq\n"
21239 +#ifdef KERNEXEC_PLUGIN
21240 + " btsq $63,(%rsp)\n"
21241 +#endif
21242 #else
21243 " pushf\n"
21244 SAVE_REGS_STRING
21245 @@ -769,7 +779,7 @@ static void __kprobes
21246 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21247 {
21248 unsigned long *tos = stack_addr(regs);
21249 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21250 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21251 unsigned long orig_ip = (unsigned long)p->addr;
21252 kprobe_opcode_t *insn = p->ainsn.insn;
21253
21254 @@ -951,7 +961,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21255 struct die_args *args = data;
21256 int ret = NOTIFY_DONE;
21257
21258 - if (args->regs && user_mode_vm(args->regs))
21259 + if (args->regs && user_mode(args->regs))
21260 return ret;
21261
21262 switch (val) {
21263 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
21264 index 76dc6f0..66bdfc3 100644
21265 --- a/arch/x86/kernel/kprobes/opt.c
21266 +++ b/arch/x86/kernel/kprobes/opt.c
21267 @@ -79,6 +79,7 @@ found:
21268 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
21269 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
21270 {
21271 + pax_open_kernel();
21272 #ifdef CONFIG_X86_64
21273 *addr++ = 0x48;
21274 *addr++ = 0xbf;
21275 @@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
21276 *addr++ = 0xb8;
21277 #endif
21278 *(unsigned long *)addr = val;
21279 + pax_close_kernel();
21280 }
21281
21282 static void __used __kprobes kprobes_optinsn_template_holder(void)
21283 @@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21284 * Verify if the address gap is in 2GB range, because this uses
21285 * a relative jump.
21286 */
21287 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21288 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21289 if (abs(rel) > 0x7fffffff)
21290 return -ERANGE;
21291
21292 @@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21293 op->optinsn.size = ret;
21294
21295 /* Copy arch-dep-instance from template */
21296 - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21297 + pax_open_kernel();
21298 + memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21299 + pax_close_kernel();
21300
21301 /* Set probe information */
21302 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21303
21304 /* Set probe function call */
21305 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21306 + synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21307
21308 /* Set returning jmp instruction at the tail of out-of-line buffer */
21309 - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21310 + synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21311 (u8 *)op->kp.addr + op->optinsn.size);
21312
21313 flush_icache_range((unsigned long) buf,
21314 @@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21315 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21316
21317 /* Backup instructions which will be replaced by jump address */
21318 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21319 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21320 RELATIVE_ADDR_SIZE);
21321
21322 insn_buf[0] = RELATIVEJUMP_OPCODE;
21323 @@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21324 /* This kprobe is really able to run optimized path. */
21325 op = container_of(p, struct optimized_kprobe, kp);
21326 /* Detour through copied instructions */
21327 - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21328 + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21329 if (!reenter)
21330 reset_current_kprobe();
21331 preempt_enable_no_resched();
21332 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21333 index b686a90..60d36fb 100644
21334 --- a/arch/x86/kernel/kvm.c
21335 +++ b/arch/x86/kernel/kvm.c
21336 @@ -453,7 +453,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21337 return NOTIFY_OK;
21338 }
21339
21340 -static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21341 +static struct notifier_block kvm_cpu_notifier = {
21342 .notifier_call = kvm_cpu_notify,
21343 };
21344 #endif
21345 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21346 index ebc9873..1b9724b 100644
21347 --- a/arch/x86/kernel/ldt.c
21348 +++ b/arch/x86/kernel/ldt.c
21349 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21350 if (reload) {
21351 #ifdef CONFIG_SMP
21352 preempt_disable();
21353 - load_LDT(pc);
21354 + load_LDT_nolock(pc);
21355 if (!cpumask_equal(mm_cpumask(current->mm),
21356 cpumask_of(smp_processor_id())))
21357 smp_call_function(flush_ldt, current->mm, 1);
21358 preempt_enable();
21359 #else
21360 - load_LDT(pc);
21361 + load_LDT_nolock(pc);
21362 #endif
21363 }
21364 if (oldsize) {
21365 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21366 return err;
21367
21368 for (i = 0; i < old->size; i++)
21369 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21370 + write_ldt_entry(new->ldt, i, old->ldt + i);
21371 return 0;
21372 }
21373
21374 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21375 retval = copy_ldt(&mm->context, &old_mm->context);
21376 mutex_unlock(&old_mm->context.lock);
21377 }
21378 +
21379 + if (tsk == current) {
21380 + mm->context.vdso = 0;
21381 +
21382 +#ifdef CONFIG_X86_32
21383 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21384 + mm->context.user_cs_base = 0UL;
21385 + mm->context.user_cs_limit = ~0UL;
21386 +
21387 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21388 + cpus_clear(mm->context.cpu_user_cs_mask);
21389 +#endif
21390 +
21391 +#endif
21392 +#endif
21393 +
21394 + }
21395 +
21396 return retval;
21397 }
21398
21399 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21400 }
21401 }
21402
21403 +#ifdef CONFIG_PAX_SEGMEXEC
21404 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21405 + error = -EINVAL;
21406 + goto out_unlock;
21407 + }
21408 +#endif
21409 +
21410 fill_ldt(&ldt, &ldt_info);
21411 if (oldmode)
21412 ldt.avl = 0;
21413 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21414 index 5b19e4d..6476a76 100644
21415 --- a/arch/x86/kernel/machine_kexec_32.c
21416 +++ b/arch/x86/kernel/machine_kexec_32.c
21417 @@ -26,7 +26,7 @@
21418 #include <asm/cacheflush.h>
21419 #include <asm/debugreg.h>
21420
21421 -static void set_idt(void *newidt, __u16 limit)
21422 +static void set_idt(struct desc_struct *newidt, __u16 limit)
21423 {
21424 struct desc_ptr curidt;
21425
21426 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21427 }
21428
21429
21430 -static void set_gdt(void *newgdt, __u16 limit)
21431 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21432 {
21433 struct desc_ptr curgdt;
21434
21435 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21436 }
21437
21438 control_page = page_address(image->control_code_page);
21439 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21440 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21441
21442 relocate_kernel_ptr = control_page;
21443 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21444 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21445 index 22db92b..d546bec 100644
21446 --- a/arch/x86/kernel/microcode_core.c
21447 +++ b/arch/x86/kernel/microcode_core.c
21448 @@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21449 return NOTIFY_OK;
21450 }
21451
21452 -static struct notifier_block __refdata mc_cpu_notifier = {
21453 +static struct notifier_block mc_cpu_notifier = {
21454 .notifier_call = mc_cpu_callback,
21455 };
21456
21457 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21458 index 5fb2ceb..3ae90bb 100644
21459 --- a/arch/x86/kernel/microcode_intel.c
21460 +++ b/arch/x86/kernel/microcode_intel.c
21461 @@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21462
21463 static int get_ucode_user(void *to, const void *from, size_t n)
21464 {
21465 - return copy_from_user(to, from, n);
21466 + return copy_from_user(to, (const void __force_user *)from, n);
21467 }
21468
21469 static enum ucode_state
21470 request_microcode_user(int cpu, const void __user *buf, size_t size)
21471 {
21472 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21473 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21474 }
21475
21476 static void microcode_fini_cpu(int cpu)
21477 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21478 index 216a4d7..228255a 100644
21479 --- a/arch/x86/kernel/module.c
21480 +++ b/arch/x86/kernel/module.c
21481 @@ -43,15 +43,60 @@ do { \
21482 } while (0)
21483 #endif
21484
21485 -void *module_alloc(unsigned long size)
21486 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21487 {
21488 - if (PAGE_ALIGN(size) > MODULES_LEN)
21489 + if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21490 return NULL;
21491 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21492 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21493 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21494 -1, __builtin_return_address(0));
21495 }
21496
21497 +void *module_alloc(unsigned long size)
21498 +{
21499 +
21500 +#ifdef CONFIG_PAX_KERNEXEC
21501 + return __module_alloc(size, PAGE_KERNEL);
21502 +#else
21503 + return __module_alloc(size, PAGE_KERNEL_EXEC);
21504 +#endif
21505 +
21506 +}
21507 +
21508 +#ifdef CONFIG_PAX_KERNEXEC
21509 +#ifdef CONFIG_X86_32
21510 +void *module_alloc_exec(unsigned long size)
21511 +{
21512 + struct vm_struct *area;
21513 +
21514 + if (size == 0)
21515 + return NULL;
21516 +
21517 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21518 + return area ? area->addr : NULL;
21519 +}
21520 +EXPORT_SYMBOL(module_alloc_exec);
21521 +
21522 +void module_free_exec(struct module *mod, void *module_region)
21523 +{
21524 + vunmap(module_region);
21525 +}
21526 +EXPORT_SYMBOL(module_free_exec);
21527 +#else
21528 +void module_free_exec(struct module *mod, void *module_region)
21529 +{
21530 + module_free(mod, module_region);
21531 +}
21532 +EXPORT_SYMBOL(module_free_exec);
21533 +
21534 +void *module_alloc_exec(unsigned long size)
21535 +{
21536 + return __module_alloc(size, PAGE_KERNEL_RX);
21537 +}
21538 +EXPORT_SYMBOL(module_alloc_exec);
21539 +#endif
21540 +#endif
21541 +
21542 #ifdef CONFIG_X86_32
21543 int apply_relocate(Elf32_Shdr *sechdrs,
21544 const char *strtab,
21545 @@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21546 unsigned int i;
21547 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21548 Elf32_Sym *sym;
21549 - uint32_t *location;
21550 + uint32_t *plocation, location;
21551
21552 DEBUGP("Applying relocate section %u to %u\n",
21553 relsec, sechdrs[relsec].sh_info);
21554 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21555 /* This is where to make the change */
21556 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21557 - + rel[i].r_offset;
21558 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21559 + location = (uint32_t)plocation;
21560 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21561 + plocation = ktla_ktva((void *)plocation);
21562 /* This is the symbol it is referring to. Note that all
21563 undefined symbols have been resolved. */
21564 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21565 @@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21566 switch (ELF32_R_TYPE(rel[i].r_info)) {
21567 case R_386_32:
21568 /* We add the value into the location given */
21569 - *location += sym->st_value;
21570 + pax_open_kernel();
21571 + *plocation += sym->st_value;
21572 + pax_close_kernel();
21573 break;
21574 case R_386_PC32:
21575 /* Add the value, subtract its position */
21576 - *location += sym->st_value - (uint32_t)location;
21577 + pax_open_kernel();
21578 + *plocation += sym->st_value - location;
21579 + pax_close_kernel();
21580 break;
21581 default:
21582 pr_err("%s: Unknown relocation: %u\n",
21583 @@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21584 case R_X86_64_NONE:
21585 break;
21586 case R_X86_64_64:
21587 + pax_open_kernel();
21588 *(u64 *)loc = val;
21589 + pax_close_kernel();
21590 break;
21591 case R_X86_64_32:
21592 + pax_open_kernel();
21593 *(u32 *)loc = val;
21594 + pax_close_kernel();
21595 if (val != *(u32 *)loc)
21596 goto overflow;
21597 break;
21598 case R_X86_64_32S:
21599 + pax_open_kernel();
21600 *(s32 *)loc = val;
21601 + pax_close_kernel();
21602 if ((s64)val != *(s32 *)loc)
21603 goto overflow;
21604 break;
21605 case R_X86_64_PC32:
21606 val -= (u64)loc;
21607 + pax_open_kernel();
21608 *(u32 *)loc = val;
21609 + pax_close_kernel();
21610 +
21611 #if 0
21612 if ((s64)val != *(s32 *)loc)
21613 goto overflow;
21614 diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21615 index ce13049..e2e9c3c 100644
21616 --- a/arch/x86/kernel/msr.c
21617 +++ b/arch/x86/kernel/msr.c
21618 @@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21619 return notifier_from_errno(err);
21620 }
21621
21622 -static struct notifier_block __refdata msr_class_cpu_notifier = {
21623 +static struct notifier_block msr_class_cpu_notifier = {
21624 .notifier_call = msr_class_cpu_callback,
21625 };
21626
21627 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21628 index 6030805..2d33f21 100644
21629 --- a/arch/x86/kernel/nmi.c
21630 +++ b/arch/x86/kernel/nmi.c
21631 @@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21632 return handled;
21633 }
21634
21635 -int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21636 +int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21637 {
21638 struct nmi_desc *desc = nmi_to_desc(type);
21639 unsigned long flags;
21640 @@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21641 * event confuses some handlers (kdump uses this flag)
21642 */
21643 if (action->flags & NMI_FLAG_FIRST)
21644 - list_add_rcu(&action->list, &desc->head);
21645 + pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21646 else
21647 - list_add_tail_rcu(&action->list, &desc->head);
21648 + pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21649
21650 spin_unlock_irqrestore(&desc->lock, flags);
21651 return 0;
21652 @@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21653 if (!strcmp(n->name, name)) {
21654 WARN(in_nmi(),
21655 "Trying to free NMI (%s) from NMI context!\n", n->name);
21656 - list_del_rcu(&n->list);
21657 + pax_list_del_rcu((struct list_head *)&n->list);
21658 break;
21659 }
21660 }
21661 @@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21662 dotraplinkage notrace __kprobes void
21663 do_nmi(struct pt_regs *regs, long error_code)
21664 {
21665 +
21666 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21667 + if (!user_mode(regs)) {
21668 + unsigned long cs = regs->cs & 0xFFFF;
21669 + unsigned long ip = ktva_ktla(regs->ip);
21670 +
21671 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21672 + regs->ip = ip;
21673 + }
21674 +#endif
21675 +
21676 nmi_nesting_preprocess(regs);
21677
21678 nmi_enter();
21679 diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21680 index 6d9582e..f746287 100644
21681 --- a/arch/x86/kernel/nmi_selftest.c
21682 +++ b/arch/x86/kernel/nmi_selftest.c
21683 @@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21684 {
21685 /* trap all the unknown NMIs we may generate */
21686 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21687 - __initdata);
21688 + __initconst);
21689 }
21690
21691 static void __init cleanup_nmi_testsuite(void)
21692 @@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21693 unsigned long timeout;
21694
21695 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21696 - NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21697 + NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21698 nmi_fail = FAILURE;
21699 return;
21700 }
21701 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21702 index 676b8c7..870ba04 100644
21703 --- a/arch/x86/kernel/paravirt-spinlocks.c
21704 +++ b/arch/x86/kernel/paravirt-spinlocks.c
21705 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21706 arch_spin_lock(lock);
21707 }
21708
21709 -struct pv_lock_ops pv_lock_ops = {
21710 +struct pv_lock_ops pv_lock_ops __read_only = {
21711 #ifdef CONFIG_SMP
21712 .spin_is_locked = __ticket_spin_is_locked,
21713 .spin_is_contended = __ticket_spin_is_contended,
21714 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21715 index 8bfb335..c1463c6 100644
21716 --- a/arch/x86/kernel/paravirt.c
21717 +++ b/arch/x86/kernel/paravirt.c
21718 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21719 {
21720 return x;
21721 }
21722 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21723 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21724 +#endif
21725
21726 void __init default_banner(void)
21727 {
21728 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21729 if (opfunc == NULL)
21730 /* If there's no function, patch it with a ud2a (BUG) */
21731 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21732 - else if (opfunc == _paravirt_nop)
21733 + else if (opfunc == (void *)_paravirt_nop)
21734 /* If the operation is a nop, then nop the callsite */
21735 ret = paravirt_patch_nop();
21736
21737 /* identity functions just return their single argument */
21738 - else if (opfunc == _paravirt_ident_32)
21739 + else if (opfunc == (void *)_paravirt_ident_32)
21740 ret = paravirt_patch_ident_32(insnbuf, len);
21741 - else if (opfunc == _paravirt_ident_64)
21742 + else if (opfunc == (void *)_paravirt_ident_64)
21743 ret = paravirt_patch_ident_64(insnbuf, len);
21744 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21745 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21746 + ret = paravirt_patch_ident_64(insnbuf, len);
21747 +#endif
21748
21749 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21750 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21751 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21752 if (insn_len > len || start == NULL)
21753 insn_len = len;
21754 else
21755 - memcpy(insnbuf, start, insn_len);
21756 + memcpy(insnbuf, ktla_ktva(start), insn_len);
21757
21758 return insn_len;
21759 }
21760 @@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
21761 return this_cpu_read(paravirt_lazy_mode);
21762 }
21763
21764 -struct pv_info pv_info = {
21765 +struct pv_info pv_info __read_only = {
21766 .name = "bare hardware",
21767 .paravirt_enabled = 0,
21768 .kernel_rpl = 0,
21769 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
21770 #endif
21771 };
21772
21773 -struct pv_init_ops pv_init_ops = {
21774 +struct pv_init_ops pv_init_ops __read_only = {
21775 .patch = native_patch,
21776 };
21777
21778 -struct pv_time_ops pv_time_ops = {
21779 +struct pv_time_ops pv_time_ops __read_only = {
21780 .sched_clock = native_sched_clock,
21781 .steal_clock = native_steal_clock,
21782 };
21783
21784 -struct pv_irq_ops pv_irq_ops = {
21785 +struct pv_irq_ops pv_irq_ops __read_only = {
21786 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21787 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21788 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21789 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21790 #endif
21791 };
21792
21793 -struct pv_cpu_ops pv_cpu_ops = {
21794 +struct pv_cpu_ops pv_cpu_ops __read_only = {
21795 .cpuid = native_cpuid,
21796 .get_debugreg = native_get_debugreg,
21797 .set_debugreg = native_set_debugreg,
21798 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21799 .end_context_switch = paravirt_nop,
21800 };
21801
21802 -struct pv_apic_ops pv_apic_ops = {
21803 +struct pv_apic_ops pv_apic_ops __read_only= {
21804 #ifdef CONFIG_X86_LOCAL_APIC
21805 .startup_ipi_hook = paravirt_nop,
21806 #endif
21807 };
21808
21809 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21810 +#ifdef CONFIG_X86_32
21811 +#ifdef CONFIG_X86_PAE
21812 +/* 64-bit pagetable entries */
21813 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21814 +#else
21815 /* 32-bit pagetable entries */
21816 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21817 +#endif
21818 #else
21819 /* 64-bit pagetable entries */
21820 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21821 #endif
21822
21823 -struct pv_mmu_ops pv_mmu_ops = {
21824 +struct pv_mmu_ops pv_mmu_ops __read_only = {
21825
21826 .read_cr2 = native_read_cr2,
21827 .write_cr2 = native_write_cr2,
21828 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21829 .make_pud = PTE_IDENT,
21830
21831 .set_pgd = native_set_pgd,
21832 + .set_pgd_batched = native_set_pgd_batched,
21833 #endif
21834 #endif /* PAGETABLE_LEVELS >= 3 */
21835
21836 @@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21837 },
21838
21839 .set_fixmap = native_set_fixmap,
21840 +
21841 +#ifdef CONFIG_PAX_KERNEXEC
21842 + .pax_open_kernel = native_pax_open_kernel,
21843 + .pax_close_kernel = native_pax_close_kernel,
21844 +#endif
21845 +
21846 };
21847
21848 EXPORT_SYMBOL_GPL(pv_time_ops);
21849 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
21850 index 299d493..2ccb0ee 100644
21851 --- a/arch/x86/kernel/pci-calgary_64.c
21852 +++ b/arch/x86/kernel/pci-calgary_64.c
21853 @@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
21854 tce_space = be64_to_cpu(readq(target));
21855 tce_space = tce_space & TAR_SW_BITS;
21856
21857 - tce_space = tce_space & (~specified_table_size);
21858 + tce_space = tce_space & (~(unsigned long)specified_table_size);
21859 info->tce_space = (u64 *)__va(tce_space);
21860 }
21861 }
21862 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
21863 index 35ccf75..7a15747 100644
21864 --- a/arch/x86/kernel/pci-iommu_table.c
21865 +++ b/arch/x86/kernel/pci-iommu_table.c
21866 @@ -2,7 +2,7 @@
21867 #include <asm/iommu_table.h>
21868 #include <linux/string.h>
21869 #include <linux/kallsyms.h>
21870 -
21871 +#include <linux/sched.h>
21872
21873 #define DEBUG 1
21874
21875 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
21876 index 6c483ba..d10ce2f 100644
21877 --- a/arch/x86/kernel/pci-swiotlb.c
21878 +++ b/arch/x86/kernel/pci-swiotlb.c
21879 @@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
21880 void *vaddr, dma_addr_t dma_addr,
21881 struct dma_attrs *attrs)
21882 {
21883 - swiotlb_free_coherent(dev, size, vaddr, dma_addr);
21884 + swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
21885 }
21886
21887 static struct dma_map_ops swiotlb_dma_ops = {
21888 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
21889 index 14ae100..752a4f6 100644
21890 --- a/arch/x86/kernel/process.c
21891 +++ b/arch/x86/kernel/process.c
21892 @@ -36,7 +36,8 @@
21893 * section. Since TSS's are completely CPU-local, we want them
21894 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
21895 */
21896 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
21897 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
21898 +EXPORT_SYMBOL(init_tss);
21899
21900 #ifdef CONFIG_X86_64
21901 static DEFINE_PER_CPU(unsigned char, is_idle);
21902 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
21903 task_xstate_cachep =
21904 kmem_cache_create("task_xstate", xstate_size,
21905 __alignof__(union thread_xstate),
21906 - SLAB_PANIC | SLAB_NOTRACK, NULL);
21907 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
21908 }
21909
21910 /*
21911 @@ -105,7 +106,7 @@ void exit_thread(void)
21912 unsigned long *bp = t->io_bitmap_ptr;
21913
21914 if (bp) {
21915 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
21916 + struct tss_struct *tss = init_tss + get_cpu();
21917
21918 t->io_bitmap_ptr = NULL;
21919 clear_thread_flag(TIF_IO_BITMAP);
21920 @@ -136,7 +137,7 @@ void show_regs_common(void)
21921 board = dmi_get_system_info(DMI_BOARD_NAME);
21922
21923 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
21924 - current->pid, current->comm, print_tainted(),
21925 + task_pid_nr(current), current->comm, print_tainted(),
21926 init_utsname()->release,
21927 (int)strcspn(init_utsname()->version, " "),
21928 init_utsname()->version,
21929 @@ -149,6 +150,9 @@ void flush_thread(void)
21930 {
21931 struct task_struct *tsk = current;
21932
21933 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
21934 + loadsegment(gs, 0);
21935 +#endif
21936 flush_ptrace_hw_breakpoint(tsk);
21937 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
21938 drop_init_fpu(tsk);
21939 @@ -295,7 +299,7 @@ static void __exit_idle(void)
21940 void exit_idle(void)
21941 {
21942 /* idle loop has pid 0 */
21943 - if (current->pid)
21944 + if (task_pid_nr(current))
21945 return;
21946 __exit_idle();
21947 }
21948 @@ -398,7 +402,7 @@ bool xen_set_default_idle(void)
21949 return ret;
21950 }
21951 #endif
21952 -void stop_this_cpu(void *dummy)
21953 +__noreturn void stop_this_cpu(void *dummy)
21954 {
21955 local_irq_disable();
21956 /*
21957 @@ -544,16 +548,37 @@ static int __init idle_setup(char *str)
21958 }
21959 early_param("idle", idle_setup);
21960
21961 -unsigned long arch_align_stack(unsigned long sp)
21962 +#ifdef CONFIG_PAX_RANDKSTACK
21963 +void pax_randomize_kstack(struct pt_regs *regs)
21964 {
21965 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
21966 - sp -= get_random_int() % 8192;
21967 - return sp & ~0xf;
21968 -}
21969 + struct thread_struct *thread = &current->thread;
21970 + unsigned long time;
21971
21972 -unsigned long arch_randomize_brk(struct mm_struct *mm)
21973 -{
21974 - unsigned long range_end = mm->brk + 0x02000000;
21975 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
21976 -}
21977 + if (!randomize_va_space)
21978 + return;
21979 +
21980 + if (v8086_mode(regs))
21981 + return;
21982
21983 + rdtscl(time);
21984 +
21985 + /* P4 seems to return a 0 LSB, ignore it */
21986 +#ifdef CONFIG_MPENTIUM4
21987 + time &= 0x3EUL;
21988 + time <<= 2;
21989 +#elif defined(CONFIG_X86_64)
21990 + time &= 0xFUL;
21991 + time <<= 4;
21992 +#else
21993 + time &= 0x1FUL;
21994 + time <<= 3;
21995 +#endif
21996 +
21997 + thread->sp0 ^= time;
21998 + load_sp0(init_tss + smp_processor_id(), thread);
21999 +
22000 +#ifdef CONFIG_X86_64
22001 + this_cpu_write(kernel_stack, thread->sp0);
22002 +#endif
22003 +}
22004 +#endif
22005 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22006 index b5a8905..d9cacac 100644
22007 --- a/arch/x86/kernel/process_32.c
22008 +++ b/arch/x86/kernel/process_32.c
22009 @@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22010 unsigned long thread_saved_pc(struct task_struct *tsk)
22011 {
22012 return ((unsigned long *)tsk->thread.sp)[3];
22013 +//XXX return tsk->thread.eip;
22014 }
22015
22016 void __show_regs(struct pt_regs *regs, int all)
22017 @@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
22018 unsigned long sp;
22019 unsigned short ss, gs;
22020
22021 - if (user_mode_vm(regs)) {
22022 + if (user_mode(regs)) {
22023 sp = regs->sp;
22024 ss = regs->ss & 0xffff;
22025 - gs = get_user_gs(regs);
22026 } else {
22027 sp = kernel_stack_pointer(regs);
22028 savesegment(ss, ss);
22029 - savesegment(gs, gs);
22030 }
22031 + gs = get_user_gs(regs);
22032
22033 show_regs_common();
22034
22035 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22036 (u16)regs->cs, regs->ip, regs->flags,
22037 - smp_processor_id());
22038 + raw_smp_processor_id());
22039 print_symbol("EIP is at %s\n", regs->ip);
22040
22041 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22042 @@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
22043 int copy_thread(unsigned long clone_flags, unsigned long sp,
22044 unsigned long arg, struct task_struct *p)
22045 {
22046 - struct pt_regs *childregs = task_pt_regs(p);
22047 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22048 struct task_struct *tsk;
22049 int err;
22050
22051 p->thread.sp = (unsigned long) childregs;
22052 p->thread.sp0 = (unsigned long) (childregs+1);
22053 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22054
22055 if (unlikely(p->flags & PF_KTHREAD)) {
22056 /* kernel thread */
22057 memset(childregs, 0, sizeof(struct pt_regs));
22058 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22059 - task_user_gs(p) = __KERNEL_STACK_CANARY;
22060 - childregs->ds = __USER_DS;
22061 - childregs->es = __USER_DS;
22062 + savesegment(gs, childregs->gs);
22063 + childregs->ds = __KERNEL_DS;
22064 + childregs->es = __KERNEL_DS;
22065 childregs->fs = __KERNEL_PERCPU;
22066 childregs->bx = sp; /* function */
22067 childregs->bp = arg;
22068 @@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22069 struct thread_struct *prev = &prev_p->thread,
22070 *next = &next_p->thread;
22071 int cpu = smp_processor_id();
22072 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22073 + struct tss_struct *tss = init_tss + cpu;
22074 fpu_switch_t fpu;
22075
22076 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22077 @@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22078 */
22079 lazy_save_gs(prev->gs);
22080
22081 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22082 + __set_fs(task_thread_info(next_p)->addr_limit);
22083 +#endif
22084 +
22085 /*
22086 * Load the per-thread Thread-Local Storage descriptor.
22087 */
22088 @@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22089 */
22090 arch_end_context_switch(next_p);
22091
22092 + this_cpu_write(current_task, next_p);
22093 + this_cpu_write(current_tinfo, &next_p->tinfo);
22094 +
22095 /*
22096 * Restore %gs if needed (which is common)
22097 */
22098 @@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22099
22100 switch_fpu_finish(next_p, fpu);
22101
22102 - this_cpu_write(current_task, next_p);
22103 -
22104 return prev_p;
22105 }
22106
22107 @@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
22108 } while (count++ < 16);
22109 return 0;
22110 }
22111 -
22112 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22113 index 0f49677..fcbf88c 100644
22114 --- a/arch/x86/kernel/process_64.c
22115 +++ b/arch/x86/kernel/process_64.c
22116 @@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22117 struct pt_regs *childregs;
22118 struct task_struct *me = current;
22119
22120 - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22121 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22122 childregs = task_pt_regs(p);
22123 p->thread.sp = (unsigned long) childregs;
22124 p->thread.usersp = me->thread.usersp;
22125 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22126 set_tsk_thread_flag(p, TIF_FORK);
22127 p->fpu_counter = 0;
22128 p->thread.io_bitmap_ptr = NULL;
22129 @@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22130 struct thread_struct *prev = &prev_p->thread;
22131 struct thread_struct *next = &next_p->thread;
22132 int cpu = smp_processor_id();
22133 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22134 + struct tss_struct *tss = init_tss + cpu;
22135 unsigned fsindex, gsindex;
22136 fpu_switch_t fpu;
22137
22138 @@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22139 prev->usersp = this_cpu_read(old_rsp);
22140 this_cpu_write(old_rsp, next->usersp);
22141 this_cpu_write(current_task, next_p);
22142 + this_cpu_write(current_tinfo, &next_p->tinfo);
22143
22144 - this_cpu_write(kernel_stack,
22145 - (unsigned long)task_stack_page(next_p) +
22146 - THREAD_SIZE - KERNEL_STACK_OFFSET);
22147 + this_cpu_write(kernel_stack, next->sp0);
22148
22149 /*
22150 * Now maybe reload the debug registers and handle I/O bitmaps
22151 @@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
22152 if (!p || p == current || p->state == TASK_RUNNING)
22153 return 0;
22154 stack = (unsigned long)task_stack_page(p);
22155 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22156 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22157 return 0;
22158 fp = *(u64 *)(p->thread.sp);
22159 do {
22160 - if (fp < (unsigned long)stack ||
22161 - fp >= (unsigned long)stack+THREAD_SIZE)
22162 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22163 return 0;
22164 ip = *(u64 *)(fp+8);
22165 if (!in_sched_functions(ip))
22166 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22167 index 29a8120..a50b5ee 100644
22168 --- a/arch/x86/kernel/ptrace.c
22169 +++ b/arch/x86/kernel/ptrace.c
22170 @@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22171 {
22172 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22173 unsigned long sp = (unsigned long)&regs->sp;
22174 - struct thread_info *tinfo;
22175
22176 - if (context == (sp & ~(THREAD_SIZE - 1)))
22177 + if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22178 return sp;
22179
22180 - tinfo = (struct thread_info *)context;
22181 - if (tinfo->previous_esp)
22182 - return tinfo->previous_esp;
22183 + sp = *(unsigned long *)context;
22184 + if (sp)
22185 + return sp;
22186
22187 return (unsigned long)regs;
22188 }
22189 @@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22190 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22191 {
22192 int i;
22193 - int dr7 = 0;
22194 + unsigned long dr7 = 0;
22195 struct arch_hw_breakpoint *info;
22196
22197 for (i = 0; i < HBP_NUM; i++) {
22198 @@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22199 unsigned long addr, unsigned long data)
22200 {
22201 int ret;
22202 - unsigned long __user *datap = (unsigned long __user *)data;
22203 + unsigned long __user *datap = (__force unsigned long __user *)data;
22204
22205 switch (request) {
22206 /* read the word at location addr in the USER area. */
22207 @@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22208 if ((int) addr < 0)
22209 return -EIO;
22210 ret = do_get_thread_area(child, addr,
22211 - (struct user_desc __user *)data);
22212 + (__force struct user_desc __user *) data);
22213 break;
22214
22215 case PTRACE_SET_THREAD_AREA:
22216 if ((int) addr < 0)
22217 return -EIO;
22218 ret = do_set_thread_area(child, addr,
22219 - (struct user_desc __user *)data, 0);
22220 + (__force struct user_desc __user *) data, 0);
22221 break;
22222 #endif
22223
22224 @@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22225
22226 #ifdef CONFIG_X86_64
22227
22228 -static struct user_regset x86_64_regsets[] __read_mostly = {
22229 +static user_regset_no_const x86_64_regsets[] __read_only = {
22230 [REGSET_GENERAL] = {
22231 .core_note_type = NT_PRSTATUS,
22232 .n = sizeof(struct user_regs_struct) / sizeof(long),
22233 @@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22234 #endif /* CONFIG_X86_64 */
22235
22236 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22237 -static struct user_regset x86_32_regsets[] __read_mostly = {
22238 +static user_regset_no_const x86_32_regsets[] __read_only = {
22239 [REGSET_GENERAL] = {
22240 .core_note_type = NT_PRSTATUS,
22241 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22242 @@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22243 */
22244 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22245
22246 -void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22247 +void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22248 {
22249 #ifdef CONFIG_X86_64
22250 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22251 @@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22252 memset(info, 0, sizeof(*info));
22253 info->si_signo = SIGTRAP;
22254 info->si_code = si_code;
22255 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22256 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22257 }
22258
22259 void user_single_step_siginfo(struct task_struct *tsk,
22260 @@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22261 # define IS_IA32 0
22262 #endif
22263
22264 +#ifdef CONFIG_GRKERNSEC_SETXID
22265 +extern void gr_delayed_cred_worker(void);
22266 +#endif
22267 +
22268 /*
22269 * We must return the syscall number to actually look up in the table.
22270 * This can be -1L to skip running any syscall at all.
22271 @@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22272
22273 user_exit();
22274
22275 +#ifdef CONFIG_GRKERNSEC_SETXID
22276 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22277 + gr_delayed_cred_worker();
22278 +#endif
22279 +
22280 /*
22281 * If we stepped into a sysenter/syscall insn, it trapped in
22282 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22283 @@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22284 */
22285 user_exit();
22286
22287 +#ifdef CONFIG_GRKERNSEC_SETXID
22288 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22289 + gr_delayed_cred_worker();
22290 +#endif
22291 +
22292 audit_syscall_exit(regs);
22293
22294 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22295 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22296 index 2cb9470..ff1fd80 100644
22297 --- a/arch/x86/kernel/pvclock.c
22298 +++ b/arch/x86/kernel/pvclock.c
22299 @@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22300 return pv_tsc_khz;
22301 }
22302
22303 -static atomic64_t last_value = ATOMIC64_INIT(0);
22304 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22305
22306 void pvclock_resume(void)
22307 {
22308 - atomic64_set(&last_value, 0);
22309 + atomic64_set_unchecked(&last_value, 0);
22310 }
22311
22312 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22313 @@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22314 * updating at the same time, and one of them could be slightly behind,
22315 * making the assumption that last_value always go forward fail to hold.
22316 */
22317 - last = atomic64_read(&last_value);
22318 + last = atomic64_read_unchecked(&last_value);
22319 do {
22320 if (ret < last)
22321 return last;
22322 - last = atomic64_cmpxchg(&last_value, last, ret);
22323 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22324 } while (unlikely(last != ret));
22325
22326 return ret;
22327 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22328 index 76fa1e9..abf09ea 100644
22329 --- a/arch/x86/kernel/reboot.c
22330 +++ b/arch/x86/kernel/reboot.c
22331 @@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22332 EXPORT_SYMBOL(pm_power_off);
22333
22334 static const struct desc_ptr no_idt = {};
22335 -static int reboot_mode;
22336 +static unsigned short reboot_mode;
22337 enum reboot_type reboot_type = BOOT_ACPI;
22338 int reboot_force;
22339
22340 @@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22341
22342 void __noreturn machine_real_restart(unsigned int type)
22343 {
22344 +
22345 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22346 + struct desc_struct *gdt;
22347 +#endif
22348 +
22349 local_irq_disable();
22350
22351 /*
22352 @@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22353
22354 /* Jump to the identity-mapped low memory code */
22355 #ifdef CONFIG_X86_32
22356 - asm volatile("jmpl *%0" : :
22357 +
22358 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22359 + gdt = get_cpu_gdt_table(smp_processor_id());
22360 + pax_open_kernel();
22361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22362 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22363 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22364 + loadsegment(ds, __KERNEL_DS);
22365 + loadsegment(es, __KERNEL_DS);
22366 + loadsegment(ss, __KERNEL_DS);
22367 +#endif
22368 +#ifdef CONFIG_PAX_KERNEXEC
22369 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22370 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22371 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22372 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22373 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22374 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22375 +#endif
22376 + pax_close_kernel();
22377 +#endif
22378 +
22379 + asm volatile("ljmpl *%0" : :
22380 "rm" (real_mode_header->machine_real_restart_asm),
22381 "a" (type));
22382 #else
22383 @@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22384 * try to force a triple fault and then cycle between hitting the keyboard
22385 * controller and doing that
22386 */
22387 -static void native_machine_emergency_restart(void)
22388 +static void __noreturn native_machine_emergency_restart(void)
22389 {
22390 int i;
22391 int attempt = 0;
22392 @@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22393 #endif
22394 }
22395
22396 -static void __machine_emergency_restart(int emergency)
22397 +static void __noreturn __machine_emergency_restart(int emergency)
22398 {
22399 reboot_emergency = emergency;
22400 machine_ops.emergency_restart();
22401 }
22402
22403 -static void native_machine_restart(char *__unused)
22404 +static void __noreturn native_machine_restart(char *__unused)
22405 {
22406 pr_notice("machine restart\n");
22407
22408 @@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22409 __machine_emergency_restart(0);
22410 }
22411
22412 -static void native_machine_halt(void)
22413 +static void __noreturn native_machine_halt(void)
22414 {
22415 /* Stop other cpus and apics */
22416 machine_shutdown();
22417 @@ -679,7 +706,7 @@ static void native_machine_halt(void)
22418 stop_this_cpu(NULL);
22419 }
22420
22421 -static void native_machine_power_off(void)
22422 +static void __noreturn native_machine_power_off(void)
22423 {
22424 if (pm_power_off) {
22425 if (!reboot_force)
22426 @@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22427 }
22428 /* A fallback in case there is no PM info available */
22429 tboot_shutdown(TB_SHUTDOWN_HALT);
22430 + unreachable();
22431 }
22432
22433 -struct machine_ops machine_ops = {
22434 +struct machine_ops machine_ops __read_only = {
22435 .power_off = native_machine_power_off,
22436 .shutdown = native_machine_shutdown,
22437 .emergency_restart = native_machine_emergency_restart,
22438 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22439 index 7a6f3b3..bed145d7 100644
22440 --- a/arch/x86/kernel/relocate_kernel_64.S
22441 +++ b/arch/x86/kernel/relocate_kernel_64.S
22442 @@ -11,6 +11,7 @@
22443 #include <asm/kexec.h>
22444 #include <asm/processor-flags.h>
22445 #include <asm/pgtable_types.h>
22446 +#include <asm/alternative-asm.h>
22447
22448 /*
22449 * Must be relocatable PIC code callable as a C function
22450 @@ -160,13 +161,14 @@ identity_mapped:
22451 xorq %rbp, %rbp
22452 xorq %r8, %r8
22453 xorq %r9, %r9
22454 - xorq %r10, %r9
22455 + xorq %r10, %r10
22456 xorq %r11, %r11
22457 xorq %r12, %r12
22458 xorq %r13, %r13
22459 xorq %r14, %r14
22460 xorq %r15, %r15
22461
22462 + pax_force_retaddr 0, 1
22463 ret
22464
22465 1:
22466 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22467 index fae9134..b7d4a57 100644
22468 --- a/arch/x86/kernel/setup.c
22469 +++ b/arch/x86/kernel/setup.c
22470 @@ -111,6 +111,7 @@
22471 #include <asm/mce.h>
22472 #include <asm/alternative.h>
22473 #include <asm/prom.h>
22474 +#include <asm/boot.h>
22475
22476 /*
22477 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
22478 @@ -447,7 +448,7 @@ static void __init parse_setup_data(void)
22479
22480 switch (data->type) {
22481 case SETUP_E820_EXT:
22482 - parse_e820_ext(data);
22483 + parse_e820_ext((struct setup_data __force_kernel *)data);
22484 break;
22485 case SETUP_DTB:
22486 add_dtb(pa_data);
22487 @@ -774,7 +775,7 @@ static void __init trim_bios_range(void)
22488 * area (640->1Mb) as ram even though it is not.
22489 * take them out.
22490 */
22491 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22492 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22493
22494 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22495 }
22496 @@ -844,8 +845,12 @@ static void __init trim_low_memory_range(void)
22497
22498 void __init setup_arch(char **cmdline_p)
22499 {
22500 +#ifdef CONFIG_X86_32
22501 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - ____LOAD_PHYSICAL_ADDR);
22502 +#else
22503 memblock_reserve(__pa_symbol(_text),
22504 (unsigned long)__bss_stop - (unsigned long)_text);
22505 +#endif
22506
22507 early_reserve_initrd();
22508
22509 @@ -937,14 +942,14 @@ void __init setup_arch(char **cmdline_p)
22510
22511 if (!boot_params.hdr.root_flags)
22512 root_mountflags &= ~MS_RDONLY;
22513 - init_mm.start_code = (unsigned long) _text;
22514 - init_mm.end_code = (unsigned long) _etext;
22515 + init_mm.start_code = ktla_ktva((unsigned long) _text);
22516 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
22517 init_mm.end_data = (unsigned long) _edata;
22518 init_mm.brk = _brk_end;
22519
22520 - code_resource.start = __pa_symbol(_text);
22521 - code_resource.end = __pa_symbol(_etext)-1;
22522 - data_resource.start = __pa_symbol(_etext);
22523 + code_resource.start = __pa_symbol(ktla_ktva(_text));
22524 + code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
22525 + data_resource.start = __pa_symbol(_sdata);
22526 data_resource.end = __pa_symbol(_edata)-1;
22527 bss_resource.start = __pa_symbol(__bss_start);
22528 bss_resource.end = __pa_symbol(__bss_stop)-1;
22529 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22530 index 5cdff03..80fa283 100644
22531 --- a/arch/x86/kernel/setup_percpu.c
22532 +++ b/arch/x86/kernel/setup_percpu.c
22533 @@ -21,19 +21,17 @@
22534 #include <asm/cpu.h>
22535 #include <asm/stackprotector.h>
22536
22537 -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22538 +#ifdef CONFIG_SMP
22539 +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22540 EXPORT_PER_CPU_SYMBOL(cpu_number);
22541 +#endif
22542
22543 -#ifdef CONFIG_X86_64
22544 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22545 -#else
22546 -#define BOOT_PERCPU_OFFSET 0
22547 -#endif
22548
22549 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22550 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22551
22552 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22553 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22554 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22555 };
22556 EXPORT_SYMBOL(__per_cpu_offset);
22557 @@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22558 {
22559 #ifdef CONFIG_NEED_MULTIPLE_NODES
22560 pg_data_t *last = NULL;
22561 - unsigned int cpu;
22562 + int cpu;
22563
22564 for_each_possible_cpu(cpu) {
22565 int node = early_cpu_to_node(cpu);
22566 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22567 {
22568 #ifdef CONFIG_X86_32
22569 struct desc_struct gdt;
22570 + unsigned long base = per_cpu_offset(cpu);
22571
22572 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22573 - 0x2 | DESCTYPE_S, 0x8);
22574 - gdt.s = 1;
22575 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22576 + 0x83 | DESCTYPE_S, 0xC);
22577 write_gdt_entry(get_cpu_gdt_table(cpu),
22578 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22579 #endif
22580 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22581 /* alrighty, percpu areas up and running */
22582 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22583 for_each_possible_cpu(cpu) {
22584 +#ifdef CONFIG_CC_STACKPROTECTOR
22585 +#ifdef CONFIG_X86_32
22586 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
22587 +#endif
22588 +#endif
22589 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22590 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22591 per_cpu(cpu_number, cpu) = cpu;
22592 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22593 */
22594 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22595 #endif
22596 +#ifdef CONFIG_CC_STACKPROTECTOR
22597 +#ifdef CONFIG_X86_32
22598 + if (!cpu)
22599 + per_cpu(stack_canary.canary, cpu) = canary;
22600 +#endif
22601 +#endif
22602 /*
22603 * Up to this point, the boot CPU has been using .init.data
22604 * area. Reload any changed state for the boot CPU.
22605 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22606 index 6956299..f20beae 100644
22607 --- a/arch/x86/kernel/signal.c
22608 +++ b/arch/x86/kernel/signal.c
22609 @@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22610 * Align the stack pointer according to the i386 ABI,
22611 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22612 */
22613 - sp = ((sp + 4) & -16ul) - 4;
22614 + sp = ((sp - 12) & -16ul) - 4;
22615 #else /* !CONFIG_X86_32 */
22616 sp = round_down(sp, 16) - 8;
22617 #endif
22618 @@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22619 }
22620
22621 if (current->mm->context.vdso)
22622 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22623 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22624 else
22625 - restorer = &frame->retcode;
22626 + restorer = (void __user *)&frame->retcode;
22627 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22628 restorer = ksig->ka.sa.sa_restorer;
22629
22630 @@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22631 * reasons and because gdb uses it as a signature to notice
22632 * signal handler stack frames.
22633 */
22634 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22635 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22636
22637 if (err)
22638 return -EFAULT;
22639 @@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22640 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22641
22642 /* Set up to return from userspace. */
22643 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22644 + if (current->mm->context.vdso)
22645 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22646 + else
22647 + restorer = (void __user *)&frame->retcode;
22648 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22649 restorer = ksig->ka.sa.sa_restorer;
22650 put_user_ex(restorer, &frame->pretcode);
22651 @@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22652 * reasons and because gdb uses it as a signature to notice
22653 * signal handler stack frames.
22654 */
22655 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22656 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22657 } put_user_catch(err);
22658
22659 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
22660 @@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22661 {
22662 int usig = signr_convert(ksig->sig);
22663 sigset_t *set = sigmask_to_save();
22664 - compat_sigset_t *cset = (compat_sigset_t *) set;
22665 + sigset_t sigcopy;
22666 + compat_sigset_t *cset;
22667 +
22668 + sigcopy = *set;
22669 +
22670 + cset = (compat_sigset_t *) &sigcopy;
22671
22672 /* Set up the stack frame */
22673 if (is_ia32_frame()) {
22674 @@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22675 } else if (is_x32_frame()) {
22676 return x32_setup_rt_frame(ksig, cset, regs);
22677 } else {
22678 - return __setup_rt_frame(ksig->sig, ksig, set, regs);
22679 + return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
22680 }
22681 }
22682
22683 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22684 index 48d2b7d..90d328a 100644
22685 --- a/arch/x86/kernel/smp.c
22686 +++ b/arch/x86/kernel/smp.c
22687 @@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22688
22689 __setup("nonmi_ipi", nonmi_ipi_setup);
22690
22691 -struct smp_ops smp_ops = {
22692 +struct smp_ops smp_ops __read_only = {
22693 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22694 .smp_prepare_cpus = native_smp_prepare_cpus,
22695 .smp_cpus_done = native_smp_cpus_done,
22696 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22697 index 9f190a2..90a0688 100644
22698 --- a/arch/x86/kernel/smpboot.c
22699 +++ b/arch/x86/kernel/smpboot.c
22700 @@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22701 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22702 (THREAD_SIZE + task_stack_page(idle))) - 1);
22703 per_cpu(current_task, cpu) = idle;
22704 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22705
22706 #ifdef CONFIG_X86_32
22707 /* Stack for startup_32 can be just as for start_secondary onwards */
22708 @@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22709 #else
22710 clear_tsk_thread_flag(idle, TIF_FORK);
22711 initial_gs = per_cpu_offset(cpu);
22712 - per_cpu(kernel_stack, cpu) =
22713 - (unsigned long)task_stack_page(idle) -
22714 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22715 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22716 #endif
22717 +
22718 + pax_open_kernel();
22719 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22720 + pax_close_kernel();
22721 +
22722 initial_code = (unsigned long)start_secondary;
22723 stack_start = idle->thread.sp;
22724
22725 @@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22726 /* the FPU context is blank, nobody can own it */
22727 __cpu_disable_lazy_restore(cpu);
22728
22729 +#ifdef CONFIG_PAX_PER_CPU_PGD
22730 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22731 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22732 + KERNEL_PGD_PTRS);
22733 +#endif
22734 +
22735 + /* the FPU context is blank, nobody can own it */
22736 + __cpu_disable_lazy_restore(cpu);
22737 +
22738 err = do_boot_cpu(apicid, cpu, tidle);
22739 if (err) {
22740 pr_debug("do_boot_cpu failed %d\n", err);
22741 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22742 index 9b4d51d..5d28b58 100644
22743 --- a/arch/x86/kernel/step.c
22744 +++ b/arch/x86/kernel/step.c
22745 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22746 struct desc_struct *desc;
22747 unsigned long base;
22748
22749 - seg &= ~7UL;
22750 + seg >>= 3;
22751
22752 mutex_lock(&child->mm->context.lock);
22753 - if (unlikely((seg >> 3) >= child->mm->context.size))
22754 + if (unlikely(seg >= child->mm->context.size))
22755 addr = -1L; /* bogus selector, access would fault */
22756 else {
22757 desc = child->mm->context.ldt + seg;
22758 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22759 addr += base;
22760 }
22761 mutex_unlock(&child->mm->context.lock);
22762 - }
22763 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22764 + addr = ktla_ktva(addr);
22765
22766 return addr;
22767 }
22768 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22769 unsigned char opcode[15];
22770 unsigned long addr = convert_ip_to_linear(child, regs);
22771
22772 + if (addr == -EINVAL)
22773 + return 0;
22774 +
22775 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22776 for (i = 0; i < copied; i++) {
22777 switch (opcode[i]) {
22778 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22779 new file mode 100644
22780 index 0000000..207bec6
22781 --- /dev/null
22782 +++ b/arch/x86/kernel/sys_i386_32.c
22783 @@ -0,0 +1,250 @@
22784 +/*
22785 + * This file contains various random system calls that
22786 + * have a non-standard calling sequence on the Linux/i386
22787 + * platform.
22788 + */
22789 +
22790 +#include <linux/errno.h>
22791 +#include <linux/sched.h>
22792 +#include <linux/mm.h>
22793 +#include <linux/fs.h>
22794 +#include <linux/smp.h>
22795 +#include <linux/sem.h>
22796 +#include <linux/msg.h>
22797 +#include <linux/shm.h>
22798 +#include <linux/stat.h>
22799 +#include <linux/syscalls.h>
22800 +#include <linux/mman.h>
22801 +#include <linux/file.h>
22802 +#include <linux/utsname.h>
22803 +#include <linux/ipc.h>
22804 +
22805 +#include <linux/uaccess.h>
22806 +#include <linux/unistd.h>
22807 +
22808 +#include <asm/syscalls.h>
22809 +
22810 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22811 +{
22812 + unsigned long pax_task_size = TASK_SIZE;
22813 +
22814 +#ifdef CONFIG_PAX_SEGMEXEC
22815 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22816 + pax_task_size = SEGMEXEC_TASK_SIZE;
22817 +#endif
22818 +
22819 + if (flags & MAP_FIXED)
22820 + if (len > pax_task_size || addr > pax_task_size - len)
22821 + return -EINVAL;
22822 +
22823 + return 0;
22824 +}
22825 +
22826 +unsigned long
22827 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
22828 + unsigned long len, unsigned long pgoff, unsigned long flags)
22829 +{
22830 + struct mm_struct *mm = current->mm;
22831 + struct vm_area_struct *vma;
22832 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22833 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22834 +
22835 +#ifdef CONFIG_PAX_SEGMEXEC
22836 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22837 + pax_task_size = SEGMEXEC_TASK_SIZE;
22838 +#endif
22839 +
22840 + pax_task_size -= PAGE_SIZE;
22841 +
22842 + if (len > pax_task_size)
22843 + return -ENOMEM;
22844 +
22845 + if (flags & MAP_FIXED)
22846 + return addr;
22847 +
22848 +#ifdef CONFIG_PAX_RANDMMAP
22849 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22850 +#endif
22851 +
22852 + if (addr) {
22853 + addr = PAGE_ALIGN(addr);
22854 + if (pax_task_size - len >= addr) {
22855 + vma = find_vma(mm, addr);
22856 + if (check_heap_stack_gap(vma, addr, len, offset))
22857 + return addr;
22858 + }
22859 + }
22860 + if (len > mm->cached_hole_size) {
22861 + start_addr = addr = mm->free_area_cache;
22862 + } else {
22863 + start_addr = addr = mm->mmap_base;
22864 + mm->cached_hole_size = 0;
22865 + }
22866 +
22867 +#ifdef CONFIG_PAX_PAGEEXEC
22868 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
22869 + start_addr = 0x00110000UL;
22870 +
22871 +#ifdef CONFIG_PAX_RANDMMAP
22872 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22873 + start_addr += mm->delta_mmap & 0x03FFF000UL;
22874 +#endif
22875 +
22876 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
22877 + start_addr = addr = mm->mmap_base;
22878 + else
22879 + addr = start_addr;
22880 + }
22881 +#endif
22882 +
22883 +full_search:
22884 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22885 + /* At this point: (!vma || addr < vma->vm_end). */
22886 + if (pax_task_size - len < addr) {
22887 + /*
22888 + * Start a new search - just in case we missed
22889 + * some holes.
22890 + */
22891 + if (start_addr != mm->mmap_base) {
22892 + start_addr = addr = mm->mmap_base;
22893 + mm->cached_hole_size = 0;
22894 + goto full_search;
22895 + }
22896 + return -ENOMEM;
22897 + }
22898 + if (check_heap_stack_gap(vma, addr, len, offset))
22899 + break;
22900 + if (addr + mm->cached_hole_size < vma->vm_start)
22901 + mm->cached_hole_size = vma->vm_start - addr;
22902 + addr = vma->vm_end;
22903 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
22904 + start_addr = addr = mm->mmap_base;
22905 + mm->cached_hole_size = 0;
22906 + goto full_search;
22907 + }
22908 + }
22909 +
22910 + /*
22911 + * Remember the place where we stopped the search:
22912 + */
22913 + mm->free_area_cache = addr + len;
22914 + return addr;
22915 +}
22916 +
22917 +unsigned long
22918 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22919 + const unsigned long len, const unsigned long pgoff,
22920 + const unsigned long flags)
22921 +{
22922 + struct vm_area_struct *vma;
22923 + struct mm_struct *mm = current->mm;
22924 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
22925 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22926 +
22927 +#ifdef CONFIG_PAX_SEGMEXEC
22928 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22929 + pax_task_size = SEGMEXEC_TASK_SIZE;
22930 +#endif
22931 +
22932 + pax_task_size -= PAGE_SIZE;
22933 +
22934 + /* requested length too big for entire address space */
22935 + if (len > pax_task_size)
22936 + return -ENOMEM;
22937 +
22938 + if (flags & MAP_FIXED)
22939 + return addr;
22940 +
22941 +#ifdef CONFIG_PAX_PAGEEXEC
22942 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
22943 + goto bottomup;
22944 +#endif
22945 +
22946 +#ifdef CONFIG_PAX_RANDMMAP
22947 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22948 +#endif
22949 +
22950 + /* requesting a specific address */
22951 + if (addr) {
22952 + addr = PAGE_ALIGN(addr);
22953 + if (pax_task_size - len >= addr) {
22954 + vma = find_vma(mm, addr);
22955 + if (check_heap_stack_gap(vma, addr, len, offset))
22956 + return addr;
22957 + }
22958 + }
22959 +
22960 + /* check if free_area_cache is useful for us */
22961 + if (len <= mm->cached_hole_size) {
22962 + mm->cached_hole_size = 0;
22963 + mm->free_area_cache = mm->mmap_base;
22964 + }
22965 +
22966 + /* either no address requested or can't fit in requested address hole */
22967 + addr = mm->free_area_cache;
22968 +
22969 + /* make sure it can fit in the remaining address space */
22970 + if (addr > len) {
22971 + vma = find_vma(mm, addr-len);
22972 + if (check_heap_stack_gap(vma, addr - len, len, offset))
22973 + /* remember the address as a hint for next time */
22974 + return (mm->free_area_cache = addr-len);
22975 + }
22976 +
22977 + if (mm->mmap_base < len)
22978 + goto bottomup;
22979 +
22980 + addr = mm->mmap_base-len;
22981 +
22982 + do {
22983 + /*
22984 + * Lookup failure means no vma is above this address,
22985 + * else if new region fits below vma->vm_start,
22986 + * return with success:
22987 + */
22988 + vma = find_vma(mm, addr);
22989 + if (check_heap_stack_gap(vma, addr, len, offset))
22990 + /* remember the address as a hint for next time */
22991 + return (mm->free_area_cache = addr);
22992 +
22993 + /* remember the largest hole we saw so far */
22994 + if (addr + mm->cached_hole_size < vma->vm_start)
22995 + mm->cached_hole_size = vma->vm_start - addr;
22996 +
22997 + /* try just below the current vma->vm_start */
22998 + addr = skip_heap_stack_gap(vma, len, offset);
22999 + } while (!IS_ERR_VALUE(addr));
23000 +
23001 +bottomup:
23002 + /*
23003 + * A failed mmap() very likely causes application failure,
23004 + * so fall back to the bottom-up function here. This scenario
23005 + * can happen with large stack limits and large mmap()
23006 + * allocations.
23007 + */
23008 +
23009 +#ifdef CONFIG_PAX_SEGMEXEC
23010 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23011 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23012 + else
23013 +#endif
23014 +
23015 + mm->mmap_base = TASK_UNMAPPED_BASE;
23016 +
23017 +#ifdef CONFIG_PAX_RANDMMAP
23018 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23019 + mm->mmap_base += mm->delta_mmap;
23020 +#endif
23021 +
23022 + mm->free_area_cache = mm->mmap_base;
23023 + mm->cached_hole_size = ~0UL;
23024 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23025 + /*
23026 + * Restore the topdown base:
23027 + */
23028 + mm->mmap_base = base;
23029 + mm->free_area_cache = base;
23030 + mm->cached_hole_size = ~0UL;
23031 +
23032 + return addr;
23033 +}
23034 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23035 index dbded5a..ace2781 100644
23036 --- a/arch/x86/kernel/sys_x86_64.c
23037 +++ b/arch/x86/kernel/sys_x86_64.c
23038 @@ -81,8 +81,8 @@ out:
23039 return error;
23040 }
23041
23042 -static void find_start_end(unsigned long flags, unsigned long *begin,
23043 - unsigned long *end)
23044 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
23045 + unsigned long *begin, unsigned long *end)
23046 {
23047 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23048 unsigned long new_begin;
23049 @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23050 *begin = new_begin;
23051 }
23052 } else {
23053 - *begin = TASK_UNMAPPED_BASE;
23054 + *begin = mm->mmap_base;
23055 *end = TASK_SIZE;
23056 }
23057 }
23058 @@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23059 struct vm_area_struct *vma;
23060 struct vm_unmapped_area_info info;
23061 unsigned long begin, end;
23062 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23063
23064 if (flags & MAP_FIXED)
23065 return addr;
23066
23067 - find_start_end(flags, &begin, &end);
23068 + find_start_end(mm, flags, &begin, &end);
23069
23070 if (len > end)
23071 return -ENOMEM;
23072
23073 +#ifdef CONFIG_PAX_RANDMMAP
23074 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23075 +#endif
23076 +
23077 if (addr) {
23078 addr = PAGE_ALIGN(addr);
23079 vma = find_vma(mm, addr);
23080 - if (end - len >= addr &&
23081 - (!vma || addr + len <= vma->vm_start))
23082 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23083 return addr;
23084 }
23085
23086 @@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23087 info.high_limit = end;
23088 info.align_mask = filp ? get_align_mask() : 0;
23089 info.align_offset = pgoff << PAGE_SHIFT;
23090 + info.threadstack_offset = offset;
23091 return vm_unmapped_area(&info);
23092 }
23093
23094 @@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23095 struct mm_struct *mm = current->mm;
23096 unsigned long addr = addr0;
23097 struct vm_unmapped_area_info info;
23098 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23099
23100 /* requested length too big for entire address space */
23101 if (len > TASK_SIZE)
23102 @@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23103 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23104 goto bottomup;
23105
23106 +#ifdef CONFIG_PAX_RANDMMAP
23107 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23108 +#endif
23109 +
23110 /* requesting a specific address */
23111 if (addr) {
23112 addr = PAGE_ALIGN(addr);
23113 vma = find_vma(mm, addr);
23114 - if (TASK_SIZE - len >= addr &&
23115 - (!vma || addr + len <= vma->vm_start))
23116 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23117 return addr;
23118 }
23119
23120 @@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23121 info.high_limit = mm->mmap_base;
23122 info.align_mask = filp ? get_align_mask() : 0;
23123 info.align_offset = pgoff << PAGE_SHIFT;
23124 + info.threadstack_offset = offset;
23125 addr = vm_unmapped_area(&info);
23126 if (!(addr & ~PAGE_MASK))
23127 return addr;
23128 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23129 index f84fe00..f41d9f1 100644
23130 --- a/arch/x86/kernel/tboot.c
23131 +++ b/arch/x86/kernel/tboot.c
23132 @@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23133
23134 void tboot_shutdown(u32 shutdown_type)
23135 {
23136 - void (*shutdown)(void);
23137 + void (* __noreturn shutdown)(void);
23138
23139 if (!tboot_enabled())
23140 return;
23141 @@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23142
23143 switch_to_tboot_pt();
23144
23145 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23146 + shutdown = (void *)tboot->shutdown_entry;
23147 shutdown();
23148
23149 /* should not reach here */
23150 @@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23151 return 0;
23152 }
23153
23154 -static atomic_t ap_wfs_count;
23155 +static atomic_unchecked_t ap_wfs_count;
23156
23157 static int tboot_wait_for_aps(int num_aps)
23158 {
23159 @@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23160 {
23161 switch (action) {
23162 case CPU_DYING:
23163 - atomic_inc(&ap_wfs_count);
23164 + atomic_inc_unchecked(&ap_wfs_count);
23165 if (num_online_cpus() == 1)
23166 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23167 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23168 return NOTIFY_BAD;
23169 break;
23170 }
23171 return NOTIFY_OK;
23172 }
23173
23174 -static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23175 +static struct notifier_block tboot_cpu_notifier =
23176 {
23177 .notifier_call = tboot_cpu_callback,
23178 };
23179 @@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23180
23181 tboot_create_trampoline();
23182
23183 - atomic_set(&ap_wfs_count, 0);
23184 + atomic_set_unchecked(&ap_wfs_count, 0);
23185 register_hotcpu_notifier(&tboot_cpu_notifier);
23186
23187 acpi_os_set_prepare_sleep(&tboot_sleep);
23188 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23189 index 24d3c91..d06b473 100644
23190 --- a/arch/x86/kernel/time.c
23191 +++ b/arch/x86/kernel/time.c
23192 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23193 {
23194 unsigned long pc = instruction_pointer(regs);
23195
23196 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23197 + if (!user_mode(regs) && in_lock_functions(pc)) {
23198 #ifdef CONFIG_FRAME_POINTER
23199 - return *(unsigned long *)(regs->bp + sizeof(long));
23200 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23201 #else
23202 unsigned long *sp =
23203 (unsigned long *)kernel_stack_pointer(regs);
23204 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23205 * or above a saved flags. Eflags has bits 22-31 zero,
23206 * kernel addresses don't.
23207 */
23208 +
23209 +#ifdef CONFIG_PAX_KERNEXEC
23210 + return ktla_ktva(sp[0]);
23211 +#else
23212 if (sp[0] >> 22)
23213 return sp[0];
23214 if (sp[1] >> 22)
23215 return sp[1];
23216 #endif
23217 +
23218 +#endif
23219 }
23220 return pc;
23221 }
23222 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23223 index 9d9d2f9..cad418a 100644
23224 --- a/arch/x86/kernel/tls.c
23225 +++ b/arch/x86/kernel/tls.c
23226 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23227 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23228 return -EINVAL;
23229
23230 +#ifdef CONFIG_PAX_SEGMEXEC
23231 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23232 + return -EINVAL;
23233 +#endif
23234 +
23235 set_tls_desc(p, idx, &info, 1);
23236
23237 return 0;
23238 @@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23239
23240 if (kbuf)
23241 info = kbuf;
23242 - else if (__copy_from_user(infobuf, ubuf, count))
23243 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23244 return -EFAULT;
23245 else
23246 info = infobuf;
23247 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23248 index 68bda7a..3ec7bb7 100644
23249 --- a/arch/x86/kernel/traps.c
23250 +++ b/arch/x86/kernel/traps.c
23251 @@ -68,12 +68,6 @@
23252 #include <asm/setup.h>
23253
23254 asmlinkage int system_call(void);
23255 -
23256 -/*
23257 - * The IDT has to be page-aligned to simplify the Pentium
23258 - * F0 0F bug workaround.
23259 - */
23260 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23261 #endif
23262
23263 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23264 @@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23265 }
23266
23267 static int __kprobes
23268 -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23269 +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23270 struct pt_regs *regs, long error_code)
23271 {
23272 #ifdef CONFIG_X86_32
23273 - if (regs->flags & X86_VM_MASK) {
23274 + if (v8086_mode(regs)) {
23275 /*
23276 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23277 * On nmi (interrupt 2), do_trap should not be called.
23278 @@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23279 return -1;
23280 }
23281 #endif
23282 - if (!user_mode(regs)) {
23283 + if (!user_mode_novm(regs)) {
23284 if (!fixup_exception(regs)) {
23285 tsk->thread.error_code = error_code;
23286 tsk->thread.trap_nr = trapnr;
23287 +
23288 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23289 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23290 + str = "PAX: suspicious stack segment fault";
23291 +#endif
23292 +
23293 die(str, regs, error_code);
23294 }
23295 +
23296 +#ifdef CONFIG_PAX_REFCOUNT
23297 + if (trapnr == 4)
23298 + pax_report_refcount_overflow(regs);
23299 +#endif
23300 +
23301 return 0;
23302 }
23303
23304 @@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23305 }
23306
23307 static void __kprobes
23308 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23309 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23310 long error_code, siginfo_t *info)
23311 {
23312 struct task_struct *tsk = current;
23313 @@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23314 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23315 printk_ratelimit()) {
23316 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23317 - tsk->comm, tsk->pid, str,
23318 + tsk->comm, task_pid_nr(tsk), str,
23319 regs->ip, regs->sp, error_code);
23320 print_vma_addr(" in ", regs->ip);
23321 pr_cont("\n");
23322 @@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23323 conditional_sti(regs);
23324
23325 #ifdef CONFIG_X86_32
23326 - if (regs->flags & X86_VM_MASK) {
23327 + if (v8086_mode(regs)) {
23328 local_irq_enable();
23329 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23330 goto exit;
23331 @@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23332 #endif
23333
23334 tsk = current;
23335 - if (!user_mode(regs)) {
23336 + if (!user_mode_novm(regs)) {
23337 if (fixup_exception(regs))
23338 goto exit;
23339
23340 tsk->thread.error_code = error_code;
23341 tsk->thread.trap_nr = X86_TRAP_GP;
23342 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23343 - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23344 + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23345 +
23346 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23347 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23348 + die("PAX: suspicious general protection fault", regs, error_code);
23349 + else
23350 +#endif
23351 +
23352 die("general protection fault", regs, error_code);
23353 + }
23354 goto exit;
23355 }
23356
23357 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23358 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23359 + struct mm_struct *mm = tsk->mm;
23360 + unsigned long limit;
23361 +
23362 + down_write(&mm->mmap_sem);
23363 + limit = mm->context.user_cs_limit;
23364 + if (limit < TASK_SIZE) {
23365 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23366 + up_write(&mm->mmap_sem);
23367 + return;
23368 + }
23369 + up_write(&mm->mmap_sem);
23370 + }
23371 +#endif
23372 +
23373 tsk->thread.error_code = error_code;
23374 tsk->thread.trap_nr = X86_TRAP_GP;
23375
23376 @@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23377 /* It's safe to allow irq's after DR6 has been saved */
23378 preempt_conditional_sti(regs);
23379
23380 - if (regs->flags & X86_VM_MASK) {
23381 + if (v8086_mode(regs)) {
23382 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23383 X86_TRAP_DB);
23384 preempt_conditional_cli(regs);
23385 @@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23386 * We already checked v86 mode above, so we can check for kernel mode
23387 * by just checking the CPL of CS.
23388 */
23389 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
23390 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23391 tsk->thread.debugreg6 &= ~DR_STEP;
23392 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23393 regs->flags &= ~X86_EFLAGS_TF;
23394 @@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23395 return;
23396 conditional_sti(regs);
23397
23398 - if (!user_mode_vm(regs))
23399 + if (!user_mode(regs))
23400 {
23401 if (!fixup_exception(regs)) {
23402 task->thread.error_code = error_code;
23403 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23404 index 0ba4cfb..4596bec 100644
23405 --- a/arch/x86/kernel/uprobes.c
23406 +++ b/arch/x86/kernel/uprobes.c
23407 @@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23408 int ret = NOTIFY_DONE;
23409
23410 /* We are only interested in userspace traps */
23411 - if (regs && !user_mode_vm(regs))
23412 + if (regs && !user_mode(regs))
23413 return NOTIFY_DONE;
23414
23415 switch (val) {
23416 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23417 index b9242ba..50c5edd 100644
23418 --- a/arch/x86/kernel/verify_cpu.S
23419 +++ b/arch/x86/kernel/verify_cpu.S
23420 @@ -20,6 +20,7 @@
23421 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23422 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23423 * arch/x86/kernel/head_32.S: processor startup
23424 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23425 *
23426 * verify_cpu, returns the status of longmode and SSE in register %eax.
23427 * 0: Success 1: Failure
23428 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23429 index 1cf5766..c0d9de7 100644
23430 --- a/arch/x86/kernel/vm86_32.c
23431 +++ b/arch/x86/kernel/vm86_32.c
23432 @@ -43,6 +43,7 @@
23433 #include <linux/ptrace.h>
23434 #include <linux/audit.h>
23435 #include <linux/stddef.h>
23436 +#include <linux/grsecurity.h>
23437
23438 #include <asm/uaccess.h>
23439 #include <asm/io.h>
23440 @@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23441 do_exit(SIGSEGV);
23442 }
23443
23444 - tss = &per_cpu(init_tss, get_cpu());
23445 + tss = init_tss + get_cpu();
23446 current->thread.sp0 = current->thread.saved_sp0;
23447 current->thread.sysenter_cs = __KERNEL_CS;
23448 load_sp0(tss, &current->thread);
23449 @@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86)
23450 struct task_struct *tsk;
23451 int tmp, ret = -EPERM;
23452
23453 +#ifdef CONFIG_GRKERNSEC_VM86
23454 + if (!capable(CAP_SYS_RAWIO)) {
23455 + gr_handle_vm86();
23456 + goto out;
23457 + }
23458 +#endif
23459 +
23460 tsk = current;
23461 if (tsk->thread.saved_sp0)
23462 goto out;
23463 @@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
23464 int tmp, ret;
23465 struct vm86plus_struct __user *v86;
23466
23467 +#ifdef CONFIG_GRKERNSEC_VM86
23468 + if (!capable(CAP_SYS_RAWIO)) {
23469 + gr_handle_vm86();
23470 + ret = -EPERM;
23471 + goto out;
23472 + }
23473 +#endif
23474 +
23475 tsk = current;
23476 switch (cmd) {
23477 case VM86_REQUEST_IRQ:
23478 @@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23479 tsk->thread.saved_fs = info->regs32->fs;
23480 tsk->thread.saved_gs = get_user_gs(info->regs32);
23481
23482 - tss = &per_cpu(init_tss, get_cpu());
23483 + tss = init_tss + get_cpu();
23484 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23485 if (cpu_has_sep)
23486 tsk->thread.sysenter_cs = 0;
23487 @@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23488 goto cannot_handle;
23489 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23490 goto cannot_handle;
23491 - intr_ptr = (unsigned long __user *) (i << 2);
23492 + intr_ptr = (__force unsigned long __user *) (i << 2);
23493 if (get_user(segoffs, intr_ptr))
23494 goto cannot_handle;
23495 if ((segoffs >> 16) == BIOSSEG)
23496 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23497 index 22a1530..8fbaaad 100644
23498 --- a/arch/x86/kernel/vmlinux.lds.S
23499 +++ b/arch/x86/kernel/vmlinux.lds.S
23500 @@ -26,6 +26,13 @@
23501 #include <asm/page_types.h>
23502 #include <asm/cache.h>
23503 #include <asm/boot.h>
23504 +#include <asm/segment.h>
23505 +
23506 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23507 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23508 +#else
23509 +#define __KERNEL_TEXT_OFFSET 0
23510 +#endif
23511
23512 #undef i386 /* in case the preprocessor is a 32bit one */
23513
23514 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23515
23516 PHDRS {
23517 text PT_LOAD FLAGS(5); /* R_E */
23518 +#ifdef CONFIG_X86_32
23519 + module PT_LOAD FLAGS(5); /* R_E */
23520 +#endif
23521 +#ifdef CONFIG_XEN
23522 + rodata PT_LOAD FLAGS(5); /* R_E */
23523 +#else
23524 + rodata PT_LOAD FLAGS(4); /* R__ */
23525 +#endif
23526 data PT_LOAD FLAGS(6); /* RW_ */
23527 -#ifdef CONFIG_X86_64
23528 + init.begin PT_LOAD FLAGS(6); /* RW_ */
23529 #ifdef CONFIG_SMP
23530 percpu PT_LOAD FLAGS(6); /* RW_ */
23531 #endif
23532 + text.init PT_LOAD FLAGS(5); /* R_E */
23533 + text.exit PT_LOAD FLAGS(5); /* R_E */
23534 init PT_LOAD FLAGS(7); /* RWE */
23535 -#endif
23536 note PT_NOTE FLAGS(0); /* ___ */
23537 }
23538
23539 SECTIONS
23540 {
23541 #ifdef CONFIG_X86_32
23542 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23543 - phys_startup_32 = startup_32 - LOAD_OFFSET;
23544 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23545 #else
23546 - . = __START_KERNEL;
23547 - phys_startup_64 = startup_64 - LOAD_OFFSET;
23548 + . = __START_KERNEL;
23549 #endif
23550
23551 /* Text and read-only data */
23552 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
23553 - _text = .;
23554 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23555 /* bootstrapping code */
23556 +#ifdef CONFIG_X86_32
23557 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23558 +#else
23559 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23560 +#endif
23561 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23562 + _text = .;
23563 HEAD_TEXT
23564 #ifdef CONFIG_X86_32
23565 . = ALIGN(PAGE_SIZE);
23566 @@ -108,13 +128,48 @@ SECTIONS
23567 IRQENTRY_TEXT
23568 *(.fixup)
23569 *(.gnu.warning)
23570 - /* End of text section */
23571 - _etext = .;
23572 } :text = 0x9090
23573
23574 - NOTES :text :note
23575 + . += __KERNEL_TEXT_OFFSET;
23576
23577 - EXCEPTION_TABLE(16) :text = 0x9090
23578 +#ifdef CONFIG_X86_32
23579 + . = ALIGN(PAGE_SIZE);
23580 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23581 +
23582 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23583 + MODULES_EXEC_VADDR = .;
23584 + BYTE(0)
23585 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23586 + . = ALIGN(HPAGE_SIZE) - 1;
23587 + MODULES_EXEC_END = .;
23588 +#endif
23589 +
23590 + } :module
23591 +#endif
23592 +
23593 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23594 + /* End of text section */
23595 + BYTE(0)
23596 + _etext = . - __KERNEL_TEXT_OFFSET;
23597 + }
23598 +
23599 +#ifdef CONFIG_X86_32
23600 + . = ALIGN(PAGE_SIZE);
23601 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23602 + *(.idt)
23603 + . = ALIGN(PAGE_SIZE);
23604 + *(.empty_zero_page)
23605 + *(.initial_pg_fixmap)
23606 + *(.initial_pg_pmd)
23607 + *(.initial_page_table)
23608 + *(.swapper_pg_dir)
23609 + } :rodata
23610 +#endif
23611 +
23612 + . = ALIGN(PAGE_SIZE);
23613 + NOTES :rodata :note
23614 +
23615 + EXCEPTION_TABLE(16) :rodata
23616
23617 #if defined(CONFIG_DEBUG_RODATA)
23618 /* .text should occupy whole number of pages */
23619 @@ -126,16 +181,20 @@ SECTIONS
23620
23621 /* Data */
23622 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23623 +
23624 +#ifdef CONFIG_PAX_KERNEXEC
23625 + . = ALIGN(HPAGE_SIZE);
23626 +#else
23627 + . = ALIGN(PAGE_SIZE);
23628 +#endif
23629 +
23630 /* Start of data section */
23631 _sdata = .;
23632
23633 /* init_task */
23634 INIT_TASK_DATA(THREAD_SIZE)
23635
23636 -#ifdef CONFIG_X86_32
23637 - /* 32 bit has nosave before _edata */
23638 NOSAVE_DATA
23639 -#endif
23640
23641 PAGE_ALIGNED_DATA(PAGE_SIZE)
23642
23643 @@ -176,12 +235,19 @@ SECTIONS
23644 #endif /* CONFIG_X86_64 */
23645
23646 /* Init code and data - will be freed after init */
23647 - . = ALIGN(PAGE_SIZE);
23648 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23649 + BYTE(0)
23650 +
23651 +#ifdef CONFIG_PAX_KERNEXEC
23652 + . = ALIGN(HPAGE_SIZE);
23653 +#else
23654 + . = ALIGN(PAGE_SIZE);
23655 +#endif
23656 +
23657 __init_begin = .; /* paired with __init_end */
23658 - }
23659 + } :init.begin
23660
23661 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23662 +#ifdef CONFIG_SMP
23663 /*
23664 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23665 * output PHDR, so the next output section - .init.text - should
23666 @@ -190,12 +256,27 @@ SECTIONS
23667 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23668 #endif
23669
23670 - INIT_TEXT_SECTION(PAGE_SIZE)
23671 -#ifdef CONFIG_X86_64
23672 - :init
23673 -#endif
23674 + . = ALIGN(PAGE_SIZE);
23675 + init_begin = .;
23676 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23677 + VMLINUX_SYMBOL(_sinittext) = .;
23678 + INIT_TEXT
23679 + VMLINUX_SYMBOL(_einittext) = .;
23680 + . = ALIGN(PAGE_SIZE);
23681 + } :text.init
23682
23683 - INIT_DATA_SECTION(16)
23684 + /*
23685 + * .exit.text is discard at runtime, not link time, to deal with
23686 + * references from .altinstructions and .eh_frame
23687 + */
23688 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23689 + EXIT_TEXT
23690 + . = ALIGN(16);
23691 + } :text.exit
23692 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23693 +
23694 + . = ALIGN(PAGE_SIZE);
23695 + INIT_DATA_SECTION(16) :init
23696
23697 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23698 __x86_cpu_dev_start = .;
23699 @@ -257,19 +338,12 @@ SECTIONS
23700 }
23701
23702 . = ALIGN(8);
23703 - /*
23704 - * .exit.text is discard at runtime, not link time, to deal with
23705 - * references from .altinstructions and .eh_frame
23706 - */
23707 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23708 - EXIT_TEXT
23709 - }
23710
23711 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23712 EXIT_DATA
23713 }
23714
23715 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23716 +#ifndef CONFIG_SMP
23717 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23718 #endif
23719
23720 @@ -288,16 +362,10 @@ SECTIONS
23721 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23722 __smp_locks = .;
23723 *(.smp_locks)
23724 - . = ALIGN(PAGE_SIZE);
23725 __smp_locks_end = .;
23726 + . = ALIGN(PAGE_SIZE);
23727 }
23728
23729 -#ifdef CONFIG_X86_64
23730 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23731 - NOSAVE_DATA
23732 - }
23733 -#endif
23734 -
23735 /* BSS */
23736 . = ALIGN(PAGE_SIZE);
23737 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23738 @@ -313,6 +381,7 @@ SECTIONS
23739 __brk_base = .;
23740 . += 64 * 1024; /* 64k alignment slop space */
23741 *(.brk_reservation) /* areas brk users have reserved */
23742 + . = ALIGN(HPAGE_SIZE);
23743 __brk_limit = .;
23744 }
23745
23746 @@ -339,13 +408,12 @@ SECTIONS
23747 * for the boot processor.
23748 */
23749 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23750 -INIT_PER_CPU(gdt_page);
23751 INIT_PER_CPU(irq_stack_union);
23752
23753 /*
23754 * Build-time check on the image size:
23755 */
23756 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23757 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23758 "kernel image bigger than KERNEL_IMAGE_SIZE");
23759
23760 #ifdef CONFIG_SMP
23761 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23762 index 9a907a6..f83f921 100644
23763 --- a/arch/x86/kernel/vsyscall_64.c
23764 +++ b/arch/x86/kernel/vsyscall_64.c
23765 @@ -56,15 +56,13 @@
23766 DEFINE_VVAR(int, vgetcpu_mode);
23767 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23768
23769 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23770 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23771
23772 static int __init vsyscall_setup(char *str)
23773 {
23774 if (str) {
23775 if (!strcmp("emulate", str))
23776 vsyscall_mode = EMULATE;
23777 - else if (!strcmp("native", str))
23778 - vsyscall_mode = NATIVE;
23779 else if (!strcmp("none", str))
23780 vsyscall_mode = NONE;
23781 else
23782 @@ -323,8 +321,7 @@ do_ret:
23783 return true;
23784
23785 sigsegv:
23786 - force_sig(SIGSEGV, current);
23787 - return true;
23788 + do_group_exit(SIGKILL);
23789 }
23790
23791 /*
23792 @@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23793 extern char __vvar_page;
23794 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23795
23796 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23797 - vsyscall_mode == NATIVE
23798 - ? PAGE_KERNEL_VSYSCALL
23799 - : PAGE_KERNEL_VVAR);
23800 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23801 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23802 (unsigned long)VSYSCALL_START);
23803
23804 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23805 index b014d94..6d6ca7b 100644
23806 --- a/arch/x86/kernel/x8664_ksyms_64.c
23807 +++ b/arch/x86/kernel/x8664_ksyms_64.c
23808 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23809 EXPORT_SYMBOL(copy_user_generic_unrolled);
23810 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23811 EXPORT_SYMBOL(__copy_user_nocache);
23812 -EXPORT_SYMBOL(_copy_from_user);
23813 -EXPORT_SYMBOL(_copy_to_user);
23814
23815 EXPORT_SYMBOL(copy_page);
23816 EXPORT_SYMBOL(clear_page);
23817 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23818 index 45a14db..075bb9b 100644
23819 --- a/arch/x86/kernel/x86_init.c
23820 +++ b/arch/x86/kernel/x86_init.c
23821 @@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
23822 },
23823 };
23824
23825 -struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23826 +struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23827 .early_percpu_clock_init = x86_init_noop,
23828 .setup_percpu_clockev = setup_secondary_APIC_clock,
23829 };
23830 @@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23831 static void default_nmi_init(void) { };
23832 static int default_i8042_detect(void) { return 1; };
23833
23834 -struct x86_platform_ops x86_platform = {
23835 +struct x86_platform_ops x86_platform __read_only = {
23836 .calibrate_tsc = native_calibrate_tsc,
23837 .get_wallclock = mach_get_cmos_time,
23838 .set_wallclock = mach_set_rtc_mmss,
23839 @@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
23840 };
23841
23842 EXPORT_SYMBOL_GPL(x86_platform);
23843 -struct x86_msi_ops x86_msi = {
23844 +struct x86_msi_ops x86_msi __read_only = {
23845 .setup_msi_irqs = native_setup_msi_irqs,
23846 .compose_msi_msg = native_compose_msi_msg,
23847 .teardown_msi_irq = native_teardown_msi_irq,
23848 @@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
23849 .setup_hpet_msi = default_setup_hpet_msi,
23850 };
23851
23852 -struct x86_io_apic_ops x86_io_apic_ops = {
23853 +struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23854 .init = native_io_apic_init_mappings,
23855 .read = native_io_apic_read,
23856 .write = native_io_apic_write,
23857 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23858 index ada87a3..afea76d 100644
23859 --- a/arch/x86/kernel/xsave.c
23860 +++ b/arch/x86/kernel/xsave.c
23861 @@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23862 {
23863 int err;
23864
23865 + buf = (struct xsave_struct __user *)____m(buf);
23866 if (use_xsave())
23867 err = xsave_user(buf);
23868 else if (use_fxsr())
23869 @@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23870 */
23871 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23872 {
23873 + buf = (void __user *)____m(buf);
23874 if (use_xsave()) {
23875 if ((unsigned long)buf % 64 || fx_only) {
23876 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23877 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23878 index a20ecb5..d0e2194 100644
23879 --- a/arch/x86/kvm/cpuid.c
23880 +++ b/arch/x86/kvm/cpuid.c
23881 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23882 struct kvm_cpuid2 *cpuid,
23883 struct kvm_cpuid_entry2 __user *entries)
23884 {
23885 - int r;
23886 + int r, i;
23887
23888 r = -E2BIG;
23889 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23890 goto out;
23891 r = -EFAULT;
23892 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23893 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23894 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23895 goto out;
23896 + for (i = 0; i < cpuid->nent; ++i) {
23897 + struct kvm_cpuid_entry2 cpuid_entry;
23898 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23899 + goto out;
23900 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
23901 + }
23902 vcpu->arch.cpuid_nent = cpuid->nent;
23903 kvm_apic_set_version(vcpu);
23904 kvm_x86_ops->cpuid_update(vcpu);
23905 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
23906 struct kvm_cpuid2 *cpuid,
23907 struct kvm_cpuid_entry2 __user *entries)
23908 {
23909 - int r;
23910 + int r, i;
23911
23912 r = -E2BIG;
23913 if (cpuid->nent < vcpu->arch.cpuid_nent)
23914 goto out;
23915 r = -EFAULT;
23916 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
23917 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23918 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23919 goto out;
23920 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
23921 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
23922 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
23923 + goto out;
23924 + }
23925 return 0;
23926
23927 out:
23928 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
23929 index a9c9d3e..9fe214f 100644
23930 --- a/arch/x86/kvm/emulate.c
23931 +++ b/arch/x86/kvm/emulate.c
23932 @@ -326,6 +326,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23933
23934 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
23935 do { \
23936 + unsigned long _tmp; \
23937 __asm__ __volatile__ ( \
23938 _PRE_EFLAGS("0", "4", "2") \
23939 _op _suffix " %"_x"3,%1; " \
23940 @@ -340,8 +341,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23941 /* Raw emulation: instruction has two explicit operands. */
23942 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
23943 do { \
23944 - unsigned long _tmp; \
23945 - \
23946 switch ((ctxt)->dst.bytes) { \
23947 case 2: \
23948 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
23949 @@ -357,7 +356,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23950
23951 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
23952 do { \
23953 - unsigned long _tmp; \
23954 switch ((ctxt)->dst.bytes) { \
23955 case 1: \
23956 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
23957 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
23958 index f77df1c..6f20690 100644
23959 --- a/arch/x86/kvm/lapic.c
23960 +++ b/arch/x86/kvm/lapic.c
23961 @@ -55,7 +55,7 @@
23962 #define APIC_BUS_CYCLE_NS 1
23963
23964 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
23965 -#define apic_debug(fmt, arg...)
23966 +#define apic_debug(fmt, arg...) do {} while (0)
23967
23968 #define APIC_LVT_NUM 6
23969 /* 14 is the version for Xeon and Pentium 8.4.8*/
23970 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
23971 index 105dd5b..1b0ccc2 100644
23972 --- a/arch/x86/kvm/paging_tmpl.h
23973 +++ b/arch/x86/kvm/paging_tmpl.h
23974 @@ -208,7 +208,7 @@ retry_walk:
23975 if (unlikely(kvm_is_error_hva(host_addr)))
23976 goto error;
23977
23978 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
23979 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
23980 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
23981 goto error;
23982 walker->ptep_user[walker->level - 1] = ptep_user;
23983 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
23984 index e1b1ce2..f7b4b43 100644
23985 --- a/arch/x86/kvm/svm.c
23986 +++ b/arch/x86/kvm/svm.c
23987 @@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
23988 int cpu = raw_smp_processor_id();
23989
23990 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
23991 +
23992 + pax_open_kernel();
23993 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
23994 + pax_close_kernel();
23995 +
23996 load_TR_desc();
23997 }
23998
23999 @@ -3901,6 +3905,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24000 #endif
24001 #endif
24002
24003 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24004 + __set_fs(current_thread_info()->addr_limit);
24005 +#endif
24006 +
24007 reload_tss(vcpu);
24008
24009 local_irq_disable();
24010 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24011 index 6667042..b47ece4 100644
24012 --- a/arch/x86/kvm/vmx.c
24013 +++ b/arch/x86/kvm/vmx.c
24014 @@ -1184,12 +1184,12 @@ static void vmcs_write64(unsigned long field, u64 value)
24015 #endif
24016 }
24017
24018 -static void vmcs_clear_bits(unsigned long field, u32 mask)
24019 +static void vmcs_clear_bits(unsigned long field, unsigned long mask)
24020 {
24021 vmcs_writel(field, vmcs_readl(field) & ~mask);
24022 }
24023
24024 -static void vmcs_set_bits(unsigned long field, u32 mask)
24025 +static void vmcs_set_bits(unsigned long field, unsigned long mask)
24026 {
24027 vmcs_writel(field, vmcs_readl(field) | mask);
24028 }
24029 @@ -1390,7 +1390,11 @@ static void reload_tss(void)
24030 struct desc_struct *descs;
24031
24032 descs = (void *)gdt->address;
24033 +
24034 + pax_open_kernel();
24035 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24036 + pax_close_kernel();
24037 +
24038 load_TR_desc();
24039 }
24040
24041 @@ -1614,6 +1618,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24042 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24043 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24044
24045 +#ifdef CONFIG_PAX_PER_CPU_PGD
24046 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24047 +#endif
24048 +
24049 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24050 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24051 vmx->loaded_vmcs->cpu = cpu;
24052 @@ -2779,8 +2787,11 @@ static __init int hardware_setup(void)
24053 if (!cpu_has_vmx_flexpriority())
24054 flexpriority_enabled = 0;
24055
24056 - if (!cpu_has_vmx_tpr_shadow())
24057 - kvm_x86_ops->update_cr8_intercept = NULL;
24058 + if (!cpu_has_vmx_tpr_shadow()) {
24059 + pax_open_kernel();
24060 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24061 + pax_close_kernel();
24062 + }
24063
24064 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24065 kvm_disable_largepages();
24066 @@ -2792,10 +2803,12 @@ static __init int hardware_setup(void)
24067 !cpu_has_vmx_virtual_intr_delivery())
24068 enable_apicv_reg_vid = 0;
24069
24070 + pax_open_kernel();
24071 if (enable_apicv_reg_vid)
24072 - kvm_x86_ops->update_cr8_intercept = NULL;
24073 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24074 else
24075 - kvm_x86_ops->hwapic_irr_update = NULL;
24076 + *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
24077 + pax_close_kernel();
24078
24079 if (nested)
24080 nested_vmx_setup_ctls_msrs();
24081 @@ -3883,7 +3896,10 @@ static void vmx_set_constant_host_state(void)
24082
24083 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24084 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24085 +
24086 +#ifndef CONFIG_PAX_PER_CPU_PGD
24087 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24088 +#endif
24089
24090 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24091 #ifdef CONFIG_X86_64
24092 @@ -3904,7 +3920,7 @@ static void vmx_set_constant_host_state(void)
24093 native_store_idt(&dt);
24094 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24095
24096 - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24097 + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24098
24099 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24100 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24101 @@ -6574,6 +6590,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24102 "jmp 2f \n\t"
24103 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24104 "2: "
24105 +
24106 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24107 + "ljmp %[cs],$3f\n\t"
24108 + "3: "
24109 +#endif
24110 +
24111 /* Save guest registers, load host registers, keep flags */
24112 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24113 "pop %0 \n\t"
24114 @@ -6626,6 +6648,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24115 #endif
24116 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24117 [wordsize]"i"(sizeof(ulong))
24118 +
24119 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24120 + ,[cs]"i"(__KERNEL_CS)
24121 +#endif
24122 +
24123 : "cc", "memory"
24124 #ifdef CONFIG_X86_64
24125 , "rax", "rbx", "rdi", "rsi"
24126 @@ -6639,7 +6666,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24127 if (debugctlmsr)
24128 update_debugctlmsr(debugctlmsr);
24129
24130 -#ifndef CONFIG_X86_64
24131 +#ifdef CONFIG_X86_32
24132 /*
24133 * The sysexit path does not restore ds/es, so we must set them to
24134 * a reasonable value ourselves.
24135 @@ -6648,8 +6675,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24136 * may be executed in interrupt context, which saves and restore segments
24137 * around it, nullifying its effect.
24138 */
24139 - loadsegment(ds, __USER_DS);
24140 - loadsegment(es, __USER_DS);
24141 + loadsegment(ds, __KERNEL_DS);
24142 + loadsegment(es, __KERNEL_DS);
24143 + loadsegment(ss, __KERNEL_DS);
24144 +
24145 +#ifdef CONFIG_PAX_KERNEXEC
24146 + loadsegment(fs, __KERNEL_PERCPU);
24147 +#endif
24148 +
24149 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24150 + __set_fs(current_thread_info()->addr_limit);
24151 +#endif
24152 +
24153 #endif
24154
24155 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
24156 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24157 index e172132..c3d3e27 100644
24158 --- a/arch/x86/kvm/x86.c
24159 +++ b/arch/x86/kvm/x86.c
24160 @@ -1686,8 +1686,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24161 {
24162 struct kvm *kvm = vcpu->kvm;
24163 int lm = is_long_mode(vcpu);
24164 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24165 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24166 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24167 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24168 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24169 : kvm->arch.xen_hvm_config.blob_size_32;
24170 u32 page_num = data & ~PAGE_MASK;
24171 @@ -2567,6 +2567,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24172 if (n < msr_list.nmsrs)
24173 goto out;
24174 r = -EFAULT;
24175 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24176 + goto out;
24177 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24178 num_msrs_to_save * sizeof(u32)))
24179 goto out;
24180 @@ -2696,7 +2698,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
24181 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
24182 struct kvm_interrupt *irq)
24183 {
24184 - if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
24185 + if (irq->irq >= KVM_NR_INTERRUPTS)
24186 return -EINVAL;
24187 if (irqchip_in_kernel(vcpu->kvm))
24188 return -ENXIO;
24189 @@ -5247,7 +5249,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24190 };
24191 #endif
24192
24193 -int kvm_arch_init(void *opaque)
24194 +int kvm_arch_init(const void *opaque)
24195 {
24196 int r;
24197 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24198 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24199 index 7114c63..a1018fc 100644
24200 --- a/arch/x86/lguest/boot.c
24201 +++ b/arch/x86/lguest/boot.c
24202 @@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24203 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24204 * Launcher to reboot us.
24205 */
24206 -static void lguest_restart(char *reason)
24207 +static __noreturn void lguest_restart(char *reason)
24208 {
24209 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24210 + BUG();
24211 }
24212
24213 /*G:050
24214 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24215 index 00933d5..3a64af9 100644
24216 --- a/arch/x86/lib/atomic64_386_32.S
24217 +++ b/arch/x86/lib/atomic64_386_32.S
24218 @@ -48,6 +48,10 @@ BEGIN(read)
24219 movl (v), %eax
24220 movl 4(v), %edx
24221 RET_ENDP
24222 +BEGIN(read_unchecked)
24223 + movl (v), %eax
24224 + movl 4(v), %edx
24225 +RET_ENDP
24226 #undef v
24227
24228 #define v %esi
24229 @@ -55,6 +59,10 @@ BEGIN(set)
24230 movl %ebx, (v)
24231 movl %ecx, 4(v)
24232 RET_ENDP
24233 +BEGIN(set_unchecked)
24234 + movl %ebx, (v)
24235 + movl %ecx, 4(v)
24236 +RET_ENDP
24237 #undef v
24238
24239 #define v %esi
24240 @@ -70,6 +78,20 @@ RET_ENDP
24241 BEGIN(add)
24242 addl %eax, (v)
24243 adcl %edx, 4(v)
24244 +
24245 +#ifdef CONFIG_PAX_REFCOUNT
24246 + jno 0f
24247 + subl %eax, (v)
24248 + sbbl %edx, 4(v)
24249 + int $4
24250 +0:
24251 + _ASM_EXTABLE(0b, 0b)
24252 +#endif
24253 +
24254 +RET_ENDP
24255 +BEGIN(add_unchecked)
24256 + addl %eax, (v)
24257 + adcl %edx, 4(v)
24258 RET_ENDP
24259 #undef v
24260
24261 @@ -77,6 +99,24 @@ RET_ENDP
24262 BEGIN(add_return)
24263 addl (v), %eax
24264 adcl 4(v), %edx
24265 +
24266 +#ifdef CONFIG_PAX_REFCOUNT
24267 + into
24268 +1234:
24269 + _ASM_EXTABLE(1234b, 2f)
24270 +#endif
24271 +
24272 + movl %eax, (v)
24273 + movl %edx, 4(v)
24274 +
24275 +#ifdef CONFIG_PAX_REFCOUNT
24276 +2:
24277 +#endif
24278 +
24279 +RET_ENDP
24280 +BEGIN(add_return_unchecked)
24281 + addl (v), %eax
24282 + adcl 4(v), %edx
24283 movl %eax, (v)
24284 movl %edx, 4(v)
24285 RET_ENDP
24286 @@ -86,6 +126,20 @@ RET_ENDP
24287 BEGIN(sub)
24288 subl %eax, (v)
24289 sbbl %edx, 4(v)
24290 +
24291 +#ifdef CONFIG_PAX_REFCOUNT
24292 + jno 0f
24293 + addl %eax, (v)
24294 + adcl %edx, 4(v)
24295 + int $4
24296 +0:
24297 + _ASM_EXTABLE(0b, 0b)
24298 +#endif
24299 +
24300 +RET_ENDP
24301 +BEGIN(sub_unchecked)
24302 + subl %eax, (v)
24303 + sbbl %edx, 4(v)
24304 RET_ENDP
24305 #undef v
24306
24307 @@ -96,6 +150,27 @@ BEGIN(sub_return)
24308 sbbl $0, %edx
24309 addl (v), %eax
24310 adcl 4(v), %edx
24311 +
24312 +#ifdef CONFIG_PAX_REFCOUNT
24313 + into
24314 +1234:
24315 + _ASM_EXTABLE(1234b, 2f)
24316 +#endif
24317 +
24318 + movl %eax, (v)
24319 + movl %edx, 4(v)
24320 +
24321 +#ifdef CONFIG_PAX_REFCOUNT
24322 +2:
24323 +#endif
24324 +
24325 +RET_ENDP
24326 +BEGIN(sub_return_unchecked)
24327 + negl %edx
24328 + negl %eax
24329 + sbbl $0, %edx
24330 + addl (v), %eax
24331 + adcl 4(v), %edx
24332 movl %eax, (v)
24333 movl %edx, 4(v)
24334 RET_ENDP
24335 @@ -105,6 +180,20 @@ RET_ENDP
24336 BEGIN(inc)
24337 addl $1, (v)
24338 adcl $0, 4(v)
24339 +
24340 +#ifdef CONFIG_PAX_REFCOUNT
24341 + jno 0f
24342 + subl $1, (v)
24343 + sbbl $0, 4(v)
24344 + int $4
24345 +0:
24346 + _ASM_EXTABLE(0b, 0b)
24347 +#endif
24348 +
24349 +RET_ENDP
24350 +BEGIN(inc_unchecked)
24351 + addl $1, (v)
24352 + adcl $0, 4(v)
24353 RET_ENDP
24354 #undef v
24355
24356 @@ -114,6 +203,26 @@ BEGIN(inc_return)
24357 movl 4(v), %edx
24358 addl $1, %eax
24359 adcl $0, %edx
24360 +
24361 +#ifdef CONFIG_PAX_REFCOUNT
24362 + into
24363 +1234:
24364 + _ASM_EXTABLE(1234b, 2f)
24365 +#endif
24366 +
24367 + movl %eax, (v)
24368 + movl %edx, 4(v)
24369 +
24370 +#ifdef CONFIG_PAX_REFCOUNT
24371 +2:
24372 +#endif
24373 +
24374 +RET_ENDP
24375 +BEGIN(inc_return_unchecked)
24376 + movl (v), %eax
24377 + movl 4(v), %edx
24378 + addl $1, %eax
24379 + adcl $0, %edx
24380 movl %eax, (v)
24381 movl %edx, 4(v)
24382 RET_ENDP
24383 @@ -123,6 +232,20 @@ RET_ENDP
24384 BEGIN(dec)
24385 subl $1, (v)
24386 sbbl $0, 4(v)
24387 +
24388 +#ifdef CONFIG_PAX_REFCOUNT
24389 + jno 0f
24390 + addl $1, (v)
24391 + adcl $0, 4(v)
24392 + int $4
24393 +0:
24394 + _ASM_EXTABLE(0b, 0b)
24395 +#endif
24396 +
24397 +RET_ENDP
24398 +BEGIN(dec_unchecked)
24399 + subl $1, (v)
24400 + sbbl $0, 4(v)
24401 RET_ENDP
24402 #undef v
24403
24404 @@ -132,6 +255,26 @@ BEGIN(dec_return)
24405 movl 4(v), %edx
24406 subl $1, %eax
24407 sbbl $0, %edx
24408 +
24409 +#ifdef CONFIG_PAX_REFCOUNT
24410 + into
24411 +1234:
24412 + _ASM_EXTABLE(1234b, 2f)
24413 +#endif
24414 +
24415 + movl %eax, (v)
24416 + movl %edx, 4(v)
24417 +
24418 +#ifdef CONFIG_PAX_REFCOUNT
24419 +2:
24420 +#endif
24421 +
24422 +RET_ENDP
24423 +BEGIN(dec_return_unchecked)
24424 + movl (v), %eax
24425 + movl 4(v), %edx
24426 + subl $1, %eax
24427 + sbbl $0, %edx
24428 movl %eax, (v)
24429 movl %edx, 4(v)
24430 RET_ENDP
24431 @@ -143,6 +286,13 @@ BEGIN(add_unless)
24432 adcl %edx, %edi
24433 addl (v), %eax
24434 adcl 4(v), %edx
24435 +
24436 +#ifdef CONFIG_PAX_REFCOUNT
24437 + into
24438 +1234:
24439 + _ASM_EXTABLE(1234b, 2f)
24440 +#endif
24441 +
24442 cmpl %eax, %ecx
24443 je 3f
24444 1:
24445 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24446 1:
24447 addl $1, %eax
24448 adcl $0, %edx
24449 +
24450 +#ifdef CONFIG_PAX_REFCOUNT
24451 + into
24452 +1234:
24453 + _ASM_EXTABLE(1234b, 2f)
24454 +#endif
24455 +
24456 movl %eax, (v)
24457 movl %edx, 4(v)
24458 movl $1, %eax
24459 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24460 movl 4(v), %edx
24461 subl $1, %eax
24462 sbbl $0, %edx
24463 +
24464 +#ifdef CONFIG_PAX_REFCOUNT
24465 + into
24466 +1234:
24467 + _ASM_EXTABLE(1234b, 1f)
24468 +#endif
24469 +
24470 js 1f
24471 movl %eax, (v)
24472 movl %edx, 4(v)
24473 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24474 index f5cc9eb..51fa319 100644
24475 --- a/arch/x86/lib/atomic64_cx8_32.S
24476 +++ b/arch/x86/lib/atomic64_cx8_32.S
24477 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24478 CFI_STARTPROC
24479
24480 read64 %ecx
24481 + pax_force_retaddr
24482 ret
24483 CFI_ENDPROC
24484 ENDPROC(atomic64_read_cx8)
24485
24486 +ENTRY(atomic64_read_unchecked_cx8)
24487 + CFI_STARTPROC
24488 +
24489 + read64 %ecx
24490 + pax_force_retaddr
24491 + ret
24492 + CFI_ENDPROC
24493 +ENDPROC(atomic64_read_unchecked_cx8)
24494 +
24495 ENTRY(atomic64_set_cx8)
24496 CFI_STARTPROC
24497
24498 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24499 cmpxchg8b (%esi)
24500 jne 1b
24501
24502 + pax_force_retaddr
24503 ret
24504 CFI_ENDPROC
24505 ENDPROC(atomic64_set_cx8)
24506
24507 +ENTRY(atomic64_set_unchecked_cx8)
24508 + CFI_STARTPROC
24509 +
24510 +1:
24511 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
24512 + * are atomic on 586 and newer */
24513 + cmpxchg8b (%esi)
24514 + jne 1b
24515 +
24516 + pax_force_retaddr
24517 + ret
24518 + CFI_ENDPROC
24519 +ENDPROC(atomic64_set_unchecked_cx8)
24520 +
24521 ENTRY(atomic64_xchg_cx8)
24522 CFI_STARTPROC
24523
24524 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24525 cmpxchg8b (%esi)
24526 jne 1b
24527
24528 + pax_force_retaddr
24529 ret
24530 CFI_ENDPROC
24531 ENDPROC(atomic64_xchg_cx8)
24532
24533 -.macro addsub_return func ins insc
24534 -ENTRY(atomic64_\func\()_return_cx8)
24535 +.macro addsub_return func ins insc unchecked=""
24536 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24537 CFI_STARTPROC
24538 SAVE ebp
24539 SAVE ebx
24540 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24541 movl %edx, %ecx
24542 \ins\()l %esi, %ebx
24543 \insc\()l %edi, %ecx
24544 +
24545 +.ifb \unchecked
24546 +#ifdef CONFIG_PAX_REFCOUNT
24547 + into
24548 +2:
24549 + _ASM_EXTABLE(2b, 3f)
24550 +#endif
24551 +.endif
24552 +
24553 LOCK_PREFIX
24554 cmpxchg8b (%ebp)
24555 jne 1b
24556 -
24557 -10:
24558 movl %ebx, %eax
24559 movl %ecx, %edx
24560 +
24561 +.ifb \unchecked
24562 +#ifdef CONFIG_PAX_REFCOUNT
24563 +3:
24564 +#endif
24565 +.endif
24566 +
24567 RESTORE edi
24568 RESTORE esi
24569 RESTORE ebx
24570 RESTORE ebp
24571 + pax_force_retaddr
24572 ret
24573 CFI_ENDPROC
24574 -ENDPROC(atomic64_\func\()_return_cx8)
24575 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24576 .endm
24577
24578 addsub_return add add adc
24579 addsub_return sub sub sbb
24580 +addsub_return add add adc _unchecked
24581 +addsub_return sub sub sbb _unchecked
24582
24583 -.macro incdec_return func ins insc
24584 -ENTRY(atomic64_\func\()_return_cx8)
24585 +.macro incdec_return func ins insc unchecked=""
24586 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24587 CFI_STARTPROC
24588 SAVE ebx
24589
24590 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24591 movl %edx, %ecx
24592 \ins\()l $1, %ebx
24593 \insc\()l $0, %ecx
24594 +
24595 +.ifb \unchecked
24596 +#ifdef CONFIG_PAX_REFCOUNT
24597 + into
24598 +2:
24599 + _ASM_EXTABLE(2b, 3f)
24600 +#endif
24601 +.endif
24602 +
24603 LOCK_PREFIX
24604 cmpxchg8b (%esi)
24605 jne 1b
24606
24607 -10:
24608 movl %ebx, %eax
24609 movl %ecx, %edx
24610 +
24611 +.ifb \unchecked
24612 +#ifdef CONFIG_PAX_REFCOUNT
24613 +3:
24614 +#endif
24615 +.endif
24616 +
24617 RESTORE ebx
24618 + pax_force_retaddr
24619 ret
24620 CFI_ENDPROC
24621 -ENDPROC(atomic64_\func\()_return_cx8)
24622 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24623 .endm
24624
24625 incdec_return inc add adc
24626 incdec_return dec sub sbb
24627 +incdec_return inc add adc _unchecked
24628 +incdec_return dec sub sbb _unchecked
24629
24630 ENTRY(atomic64_dec_if_positive_cx8)
24631 CFI_STARTPROC
24632 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24633 movl %edx, %ecx
24634 subl $1, %ebx
24635 sbb $0, %ecx
24636 +
24637 +#ifdef CONFIG_PAX_REFCOUNT
24638 + into
24639 +1234:
24640 + _ASM_EXTABLE(1234b, 2f)
24641 +#endif
24642 +
24643 js 2f
24644 LOCK_PREFIX
24645 cmpxchg8b (%esi)
24646 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24647 movl %ebx, %eax
24648 movl %ecx, %edx
24649 RESTORE ebx
24650 + pax_force_retaddr
24651 ret
24652 CFI_ENDPROC
24653 ENDPROC(atomic64_dec_if_positive_cx8)
24654 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24655 movl %edx, %ecx
24656 addl %ebp, %ebx
24657 adcl %edi, %ecx
24658 +
24659 +#ifdef CONFIG_PAX_REFCOUNT
24660 + into
24661 +1234:
24662 + _ASM_EXTABLE(1234b, 3f)
24663 +#endif
24664 +
24665 LOCK_PREFIX
24666 cmpxchg8b (%esi)
24667 jne 1b
24668 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24669 CFI_ADJUST_CFA_OFFSET -8
24670 RESTORE ebx
24671 RESTORE ebp
24672 + pax_force_retaddr
24673 ret
24674 4:
24675 cmpl %edx, 4(%esp)
24676 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24677 xorl %ecx, %ecx
24678 addl $1, %ebx
24679 adcl %edx, %ecx
24680 +
24681 +#ifdef CONFIG_PAX_REFCOUNT
24682 + into
24683 +1234:
24684 + _ASM_EXTABLE(1234b, 3f)
24685 +#endif
24686 +
24687 LOCK_PREFIX
24688 cmpxchg8b (%esi)
24689 jne 1b
24690 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24691 movl $1, %eax
24692 3:
24693 RESTORE ebx
24694 + pax_force_retaddr
24695 ret
24696 CFI_ENDPROC
24697 ENDPROC(atomic64_inc_not_zero_cx8)
24698 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24699 index 2af5df3..62b1a5a 100644
24700 --- a/arch/x86/lib/checksum_32.S
24701 +++ b/arch/x86/lib/checksum_32.S
24702 @@ -29,7 +29,8 @@
24703 #include <asm/dwarf2.h>
24704 #include <asm/errno.h>
24705 #include <asm/asm.h>
24706 -
24707 +#include <asm/segment.h>
24708 +
24709 /*
24710 * computes a partial checksum, e.g. for TCP/UDP fragments
24711 */
24712 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24713
24714 #define ARGBASE 16
24715 #define FP 12
24716 -
24717 -ENTRY(csum_partial_copy_generic)
24718 +
24719 +ENTRY(csum_partial_copy_generic_to_user)
24720 CFI_STARTPROC
24721 +
24722 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24723 + pushl_cfi %gs
24724 + popl_cfi %es
24725 + jmp csum_partial_copy_generic
24726 +#endif
24727 +
24728 +ENTRY(csum_partial_copy_generic_from_user)
24729 +
24730 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24731 + pushl_cfi %gs
24732 + popl_cfi %ds
24733 +#endif
24734 +
24735 +ENTRY(csum_partial_copy_generic)
24736 subl $4,%esp
24737 CFI_ADJUST_CFA_OFFSET 4
24738 pushl_cfi %edi
24739 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24740 jmp 4f
24741 SRC(1: movw (%esi), %bx )
24742 addl $2, %esi
24743 -DST( movw %bx, (%edi) )
24744 +DST( movw %bx, %es:(%edi) )
24745 addl $2, %edi
24746 addw %bx, %ax
24747 adcl $0, %eax
24748 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24749 SRC(1: movl (%esi), %ebx )
24750 SRC( movl 4(%esi), %edx )
24751 adcl %ebx, %eax
24752 -DST( movl %ebx, (%edi) )
24753 +DST( movl %ebx, %es:(%edi) )
24754 adcl %edx, %eax
24755 -DST( movl %edx, 4(%edi) )
24756 +DST( movl %edx, %es:4(%edi) )
24757
24758 SRC( movl 8(%esi), %ebx )
24759 SRC( movl 12(%esi), %edx )
24760 adcl %ebx, %eax
24761 -DST( movl %ebx, 8(%edi) )
24762 +DST( movl %ebx, %es:8(%edi) )
24763 adcl %edx, %eax
24764 -DST( movl %edx, 12(%edi) )
24765 +DST( movl %edx, %es:12(%edi) )
24766
24767 SRC( movl 16(%esi), %ebx )
24768 SRC( movl 20(%esi), %edx )
24769 adcl %ebx, %eax
24770 -DST( movl %ebx, 16(%edi) )
24771 +DST( movl %ebx, %es:16(%edi) )
24772 adcl %edx, %eax
24773 -DST( movl %edx, 20(%edi) )
24774 +DST( movl %edx, %es:20(%edi) )
24775
24776 SRC( movl 24(%esi), %ebx )
24777 SRC( movl 28(%esi), %edx )
24778 adcl %ebx, %eax
24779 -DST( movl %ebx, 24(%edi) )
24780 +DST( movl %ebx, %es:24(%edi) )
24781 adcl %edx, %eax
24782 -DST( movl %edx, 28(%edi) )
24783 +DST( movl %edx, %es:28(%edi) )
24784
24785 lea 32(%esi), %esi
24786 lea 32(%edi), %edi
24787 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24788 shrl $2, %edx # This clears CF
24789 SRC(3: movl (%esi), %ebx )
24790 adcl %ebx, %eax
24791 -DST( movl %ebx, (%edi) )
24792 +DST( movl %ebx, %es:(%edi) )
24793 lea 4(%esi), %esi
24794 lea 4(%edi), %edi
24795 dec %edx
24796 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24797 jb 5f
24798 SRC( movw (%esi), %cx )
24799 leal 2(%esi), %esi
24800 -DST( movw %cx, (%edi) )
24801 +DST( movw %cx, %es:(%edi) )
24802 leal 2(%edi), %edi
24803 je 6f
24804 shll $16,%ecx
24805 SRC(5: movb (%esi), %cl )
24806 -DST( movb %cl, (%edi) )
24807 +DST( movb %cl, %es:(%edi) )
24808 6: addl %ecx, %eax
24809 adcl $0, %eax
24810 7:
24811 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24812
24813 6001:
24814 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24815 - movl $-EFAULT, (%ebx)
24816 + movl $-EFAULT, %ss:(%ebx)
24817
24818 # zero the complete destination - computing the rest
24819 # is too much work
24820 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24821
24822 6002:
24823 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24824 - movl $-EFAULT,(%ebx)
24825 + movl $-EFAULT,%ss:(%ebx)
24826 jmp 5000b
24827
24828 .previous
24829
24830 + pushl_cfi %ss
24831 + popl_cfi %ds
24832 + pushl_cfi %ss
24833 + popl_cfi %es
24834 popl_cfi %ebx
24835 CFI_RESTORE ebx
24836 popl_cfi %esi
24837 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24838 popl_cfi %ecx # equivalent to addl $4,%esp
24839 ret
24840 CFI_ENDPROC
24841 -ENDPROC(csum_partial_copy_generic)
24842 +ENDPROC(csum_partial_copy_generic_to_user)
24843
24844 #else
24845
24846 /* Version for PentiumII/PPro */
24847
24848 #define ROUND1(x) \
24849 + nop; nop; nop; \
24850 SRC(movl x(%esi), %ebx ) ; \
24851 addl %ebx, %eax ; \
24852 - DST(movl %ebx, x(%edi) ) ;
24853 + DST(movl %ebx, %es:x(%edi)) ;
24854
24855 #define ROUND(x) \
24856 + nop; nop; nop; \
24857 SRC(movl x(%esi), %ebx ) ; \
24858 adcl %ebx, %eax ; \
24859 - DST(movl %ebx, x(%edi) ) ;
24860 + DST(movl %ebx, %es:x(%edi)) ;
24861
24862 #define ARGBASE 12
24863 -
24864 -ENTRY(csum_partial_copy_generic)
24865 +
24866 +ENTRY(csum_partial_copy_generic_to_user)
24867 CFI_STARTPROC
24868 +
24869 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24870 + pushl_cfi %gs
24871 + popl_cfi %es
24872 + jmp csum_partial_copy_generic
24873 +#endif
24874 +
24875 +ENTRY(csum_partial_copy_generic_from_user)
24876 +
24877 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24878 + pushl_cfi %gs
24879 + popl_cfi %ds
24880 +#endif
24881 +
24882 +ENTRY(csum_partial_copy_generic)
24883 pushl_cfi %ebx
24884 CFI_REL_OFFSET ebx, 0
24885 pushl_cfi %edi
24886 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24887 subl %ebx, %edi
24888 lea -1(%esi),%edx
24889 andl $-32,%edx
24890 - lea 3f(%ebx,%ebx), %ebx
24891 + lea 3f(%ebx,%ebx,2), %ebx
24892 testl %esi, %esi
24893 jmp *%ebx
24894 1: addl $64,%esi
24895 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24896 jb 5f
24897 SRC( movw (%esi), %dx )
24898 leal 2(%esi), %esi
24899 -DST( movw %dx, (%edi) )
24900 +DST( movw %dx, %es:(%edi) )
24901 leal 2(%edi), %edi
24902 je 6f
24903 shll $16,%edx
24904 5:
24905 SRC( movb (%esi), %dl )
24906 -DST( movb %dl, (%edi) )
24907 +DST( movb %dl, %es:(%edi) )
24908 6: addl %edx, %eax
24909 adcl $0, %eax
24910 7:
24911 .section .fixup, "ax"
24912 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
24913 - movl $-EFAULT, (%ebx)
24914 + movl $-EFAULT, %ss:(%ebx)
24915 # zero the complete destination (computing the rest is too much work)
24916 movl ARGBASE+8(%esp),%edi # dst
24917 movl ARGBASE+12(%esp),%ecx # len
24918 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
24919 rep; stosb
24920 jmp 7b
24921 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24922 - movl $-EFAULT, (%ebx)
24923 + movl $-EFAULT, %ss:(%ebx)
24924 jmp 7b
24925 .previous
24926
24927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24928 + pushl_cfi %ss
24929 + popl_cfi %ds
24930 + pushl_cfi %ss
24931 + popl_cfi %es
24932 +#endif
24933 +
24934 popl_cfi %esi
24935 CFI_RESTORE esi
24936 popl_cfi %edi
24937 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
24938 CFI_RESTORE ebx
24939 ret
24940 CFI_ENDPROC
24941 -ENDPROC(csum_partial_copy_generic)
24942 +ENDPROC(csum_partial_copy_generic_to_user)
24943
24944 #undef ROUND
24945 #undef ROUND1
24946 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
24947 index f2145cf..cea889d 100644
24948 --- a/arch/x86/lib/clear_page_64.S
24949 +++ b/arch/x86/lib/clear_page_64.S
24950 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
24951 movl $4096/8,%ecx
24952 xorl %eax,%eax
24953 rep stosq
24954 + pax_force_retaddr
24955 ret
24956 CFI_ENDPROC
24957 ENDPROC(clear_page_c)
24958 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
24959 movl $4096,%ecx
24960 xorl %eax,%eax
24961 rep stosb
24962 + pax_force_retaddr
24963 ret
24964 CFI_ENDPROC
24965 ENDPROC(clear_page_c_e)
24966 @@ -43,6 +45,7 @@ ENTRY(clear_page)
24967 leaq 64(%rdi),%rdi
24968 jnz .Lloop
24969 nop
24970 + pax_force_retaddr
24971 ret
24972 CFI_ENDPROC
24973 .Lclear_page_end:
24974 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
24975
24976 #include <asm/cpufeature.h>
24977
24978 - .section .altinstr_replacement,"ax"
24979 + .section .altinstr_replacement,"a"
24980 1: .byte 0xeb /* jmp <disp8> */
24981 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
24982 2: .byte 0xeb /* jmp <disp8> */
24983 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
24984 index 1e572c5..2a162cd 100644
24985 --- a/arch/x86/lib/cmpxchg16b_emu.S
24986 +++ b/arch/x86/lib/cmpxchg16b_emu.S
24987 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
24988
24989 popf
24990 mov $1, %al
24991 + pax_force_retaddr
24992 ret
24993
24994 not_same:
24995 popf
24996 xor %al,%al
24997 + pax_force_retaddr
24998 ret
24999
25000 CFI_ENDPROC
25001 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25002 index 176cca6..1166c50 100644
25003 --- a/arch/x86/lib/copy_page_64.S
25004 +++ b/arch/x86/lib/copy_page_64.S
25005 @@ -9,6 +9,7 @@ copy_page_rep:
25006 CFI_STARTPROC
25007 movl $4096/8, %ecx
25008 rep movsq
25009 + pax_force_retaddr
25010 ret
25011 CFI_ENDPROC
25012 ENDPROC(copy_page_rep)
25013 @@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25014
25015 ENTRY(copy_page)
25016 CFI_STARTPROC
25017 - subq $2*8, %rsp
25018 - CFI_ADJUST_CFA_OFFSET 2*8
25019 + subq $3*8, %rsp
25020 + CFI_ADJUST_CFA_OFFSET 3*8
25021 movq %rbx, (%rsp)
25022 CFI_REL_OFFSET rbx, 0
25023 movq %r12, 1*8(%rsp)
25024 CFI_REL_OFFSET r12, 1*8
25025 + movq %r13, 2*8(%rsp)
25026 + CFI_REL_OFFSET r13, 2*8
25027
25028 movl $(4096/64)-5, %ecx
25029 .p2align 4
25030 @@ -36,7 +39,7 @@ ENTRY(copy_page)
25031 movq 0x8*2(%rsi), %rdx
25032 movq 0x8*3(%rsi), %r8
25033 movq 0x8*4(%rsi), %r9
25034 - movq 0x8*5(%rsi), %r10
25035 + movq 0x8*5(%rsi), %r13
25036 movq 0x8*6(%rsi), %r11
25037 movq 0x8*7(%rsi), %r12
25038
25039 @@ -47,7 +50,7 @@ ENTRY(copy_page)
25040 movq %rdx, 0x8*2(%rdi)
25041 movq %r8, 0x8*3(%rdi)
25042 movq %r9, 0x8*4(%rdi)
25043 - movq %r10, 0x8*5(%rdi)
25044 + movq %r13, 0x8*5(%rdi)
25045 movq %r11, 0x8*6(%rdi)
25046 movq %r12, 0x8*7(%rdi)
25047
25048 @@ -66,7 +69,7 @@ ENTRY(copy_page)
25049 movq 0x8*2(%rsi), %rdx
25050 movq 0x8*3(%rsi), %r8
25051 movq 0x8*4(%rsi), %r9
25052 - movq 0x8*5(%rsi), %r10
25053 + movq 0x8*5(%rsi), %r13
25054 movq 0x8*6(%rsi), %r11
25055 movq 0x8*7(%rsi), %r12
25056
25057 @@ -75,7 +78,7 @@ ENTRY(copy_page)
25058 movq %rdx, 0x8*2(%rdi)
25059 movq %r8, 0x8*3(%rdi)
25060 movq %r9, 0x8*4(%rdi)
25061 - movq %r10, 0x8*5(%rdi)
25062 + movq %r13, 0x8*5(%rdi)
25063 movq %r11, 0x8*6(%rdi)
25064 movq %r12, 0x8*7(%rdi)
25065
25066 @@ -87,8 +90,11 @@ ENTRY(copy_page)
25067 CFI_RESTORE rbx
25068 movq 1*8(%rsp), %r12
25069 CFI_RESTORE r12
25070 - addq $2*8, %rsp
25071 - CFI_ADJUST_CFA_OFFSET -2*8
25072 + movq 2*8(%rsp), %r13
25073 + CFI_RESTORE r13
25074 + addq $3*8, %rsp
25075 + CFI_ADJUST_CFA_OFFSET -3*8
25076 + pax_force_retaddr
25077 ret
25078 .Lcopy_page_end:
25079 CFI_ENDPROC
25080 @@ -99,7 +105,7 @@ ENDPROC(copy_page)
25081
25082 #include <asm/cpufeature.h>
25083
25084 - .section .altinstr_replacement,"ax"
25085 + .section .altinstr_replacement,"a"
25086 1: .byte 0xeb /* jmp <disp8> */
25087 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25088 2:
25089 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25090 index a30ca15..d25fab6 100644
25091 --- a/arch/x86/lib/copy_user_64.S
25092 +++ b/arch/x86/lib/copy_user_64.S
25093 @@ -18,6 +18,7 @@
25094 #include <asm/alternative-asm.h>
25095 #include <asm/asm.h>
25096 #include <asm/smap.h>
25097 +#include <asm/pgtable.h>
25098
25099 /*
25100 * By placing feature2 after feature1 in altinstructions section, we logically
25101 @@ -31,7 +32,7 @@
25102 .byte 0xe9 /* 32bit jump */
25103 .long \orig-1f /* by default jump to orig */
25104 1:
25105 - .section .altinstr_replacement,"ax"
25106 + .section .altinstr_replacement,"a"
25107 2: .byte 0xe9 /* near jump with 32bit immediate */
25108 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25109 3: .byte 0xe9 /* near jump with 32bit immediate */
25110 @@ -70,47 +71,20 @@
25111 #endif
25112 .endm
25113
25114 -/* Standard copy_to_user with segment limit checking */
25115 -ENTRY(_copy_to_user)
25116 - CFI_STARTPROC
25117 - GET_THREAD_INFO(%rax)
25118 - movq %rdi,%rcx
25119 - addq %rdx,%rcx
25120 - jc bad_to_user
25121 - cmpq TI_addr_limit(%rax),%rcx
25122 - ja bad_to_user
25123 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25124 - copy_user_generic_unrolled,copy_user_generic_string, \
25125 - copy_user_enhanced_fast_string
25126 - CFI_ENDPROC
25127 -ENDPROC(_copy_to_user)
25128 -
25129 -/* Standard copy_from_user with segment limit checking */
25130 -ENTRY(_copy_from_user)
25131 - CFI_STARTPROC
25132 - GET_THREAD_INFO(%rax)
25133 - movq %rsi,%rcx
25134 - addq %rdx,%rcx
25135 - jc bad_from_user
25136 - cmpq TI_addr_limit(%rax),%rcx
25137 - ja bad_from_user
25138 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25139 - copy_user_generic_unrolled,copy_user_generic_string, \
25140 - copy_user_enhanced_fast_string
25141 - CFI_ENDPROC
25142 -ENDPROC(_copy_from_user)
25143 -
25144 .section .fixup,"ax"
25145 /* must zero dest */
25146 ENTRY(bad_from_user)
25147 bad_from_user:
25148 CFI_STARTPROC
25149 + testl %edx,%edx
25150 + js bad_to_user
25151 movl %edx,%ecx
25152 xorl %eax,%eax
25153 rep
25154 stosb
25155 bad_to_user:
25156 movl %edx,%eax
25157 + pax_force_retaddr
25158 ret
25159 CFI_ENDPROC
25160 ENDPROC(bad_from_user)
25161 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25162 jz 17f
25163 1: movq (%rsi),%r8
25164 2: movq 1*8(%rsi),%r9
25165 -3: movq 2*8(%rsi),%r10
25166 +3: movq 2*8(%rsi),%rax
25167 4: movq 3*8(%rsi),%r11
25168 5: movq %r8,(%rdi)
25169 6: movq %r9,1*8(%rdi)
25170 -7: movq %r10,2*8(%rdi)
25171 +7: movq %rax,2*8(%rdi)
25172 8: movq %r11,3*8(%rdi)
25173 9: movq 4*8(%rsi),%r8
25174 10: movq 5*8(%rsi),%r9
25175 -11: movq 6*8(%rsi),%r10
25176 +11: movq 6*8(%rsi),%rax
25177 12: movq 7*8(%rsi),%r11
25178 13: movq %r8,4*8(%rdi)
25179 14: movq %r9,5*8(%rdi)
25180 -15: movq %r10,6*8(%rdi)
25181 +15: movq %rax,6*8(%rdi)
25182 16: movq %r11,7*8(%rdi)
25183 leaq 64(%rsi),%rsi
25184 leaq 64(%rdi),%rdi
25185 @@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25186 jnz 21b
25187 23: xor %eax,%eax
25188 ASM_CLAC
25189 + pax_force_retaddr
25190 ret
25191
25192 .section .fixup,"ax"
25193 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25194 movsb
25195 4: xorl %eax,%eax
25196 ASM_CLAC
25197 + pax_force_retaddr
25198 ret
25199
25200 .section .fixup,"ax"
25201 @@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25202 movsb
25203 2: xorl %eax,%eax
25204 ASM_CLAC
25205 + pax_force_retaddr
25206 ret
25207
25208 .section .fixup,"ax"
25209 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25210 index 6a4f43c..f08b4a2 100644
25211 --- a/arch/x86/lib/copy_user_nocache_64.S
25212 +++ b/arch/x86/lib/copy_user_nocache_64.S
25213 @@ -8,6 +8,7 @@
25214
25215 #include <linux/linkage.h>
25216 #include <asm/dwarf2.h>
25217 +#include <asm/alternative-asm.h>
25218
25219 #define FIX_ALIGNMENT 1
25220
25221 @@ -16,6 +17,7 @@
25222 #include <asm/thread_info.h>
25223 #include <asm/asm.h>
25224 #include <asm/smap.h>
25225 +#include <asm/pgtable.h>
25226
25227 .macro ALIGN_DESTINATION
25228 #ifdef FIX_ALIGNMENT
25229 @@ -49,6 +51,15 @@
25230 */
25231 ENTRY(__copy_user_nocache)
25232 CFI_STARTPROC
25233 +
25234 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25235 + mov pax_user_shadow_base,%rcx
25236 + cmp %rcx,%rsi
25237 + jae 1f
25238 + add %rcx,%rsi
25239 +1:
25240 +#endif
25241 +
25242 ASM_STAC
25243 cmpl $8,%edx
25244 jb 20f /* less then 8 bytes, go to byte copy loop */
25245 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25246 jz 17f
25247 1: movq (%rsi),%r8
25248 2: movq 1*8(%rsi),%r9
25249 -3: movq 2*8(%rsi),%r10
25250 +3: movq 2*8(%rsi),%rax
25251 4: movq 3*8(%rsi),%r11
25252 5: movnti %r8,(%rdi)
25253 6: movnti %r9,1*8(%rdi)
25254 -7: movnti %r10,2*8(%rdi)
25255 +7: movnti %rax,2*8(%rdi)
25256 8: movnti %r11,3*8(%rdi)
25257 9: movq 4*8(%rsi),%r8
25258 10: movq 5*8(%rsi),%r9
25259 -11: movq 6*8(%rsi),%r10
25260 +11: movq 6*8(%rsi),%rax
25261 12: movq 7*8(%rsi),%r11
25262 13: movnti %r8,4*8(%rdi)
25263 14: movnti %r9,5*8(%rdi)
25264 -15: movnti %r10,6*8(%rdi)
25265 +15: movnti %rax,6*8(%rdi)
25266 16: movnti %r11,7*8(%rdi)
25267 leaq 64(%rsi),%rsi
25268 leaq 64(%rdi),%rdi
25269 @@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25270 23: xorl %eax,%eax
25271 ASM_CLAC
25272 sfence
25273 + pax_force_retaddr
25274 ret
25275
25276 .section .fixup,"ax"
25277 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25278 index 2419d5f..953ee51 100644
25279 --- a/arch/x86/lib/csum-copy_64.S
25280 +++ b/arch/x86/lib/csum-copy_64.S
25281 @@ -9,6 +9,7 @@
25282 #include <asm/dwarf2.h>
25283 #include <asm/errno.h>
25284 #include <asm/asm.h>
25285 +#include <asm/alternative-asm.h>
25286
25287 /*
25288 * Checksum copy with exception handling.
25289 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25290 CFI_RESTORE rbp
25291 addq $7*8, %rsp
25292 CFI_ADJUST_CFA_OFFSET -7*8
25293 + pax_force_retaddr 0, 1
25294 ret
25295 CFI_RESTORE_STATE
25296
25297 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25298 index 25b7ae8..169fafc 100644
25299 --- a/arch/x86/lib/csum-wrappers_64.c
25300 +++ b/arch/x86/lib/csum-wrappers_64.c
25301 @@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25302 len -= 2;
25303 }
25304 }
25305 - isum = csum_partial_copy_generic((__force const void *)src,
25306 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25307 dst, len, isum, errp, NULL);
25308 if (unlikely(*errp))
25309 goto out_err;
25310 @@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25311 }
25312
25313 *errp = 0;
25314 - return csum_partial_copy_generic(src, (void __force *)dst,
25315 + return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25316 len, isum, NULL, errp);
25317 }
25318 EXPORT_SYMBOL(csum_partial_copy_to_user);
25319 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25320 index a451235..79fb5cf 100644
25321 --- a/arch/x86/lib/getuser.S
25322 +++ b/arch/x86/lib/getuser.S
25323 @@ -33,17 +33,40 @@
25324 #include <asm/thread_info.h>
25325 #include <asm/asm.h>
25326 #include <asm/smap.h>
25327 +#include <asm/segment.h>
25328 +#include <asm/pgtable.h>
25329 +#include <asm/alternative-asm.h>
25330 +
25331 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25332 +#define __copyuser_seg gs;
25333 +#else
25334 +#define __copyuser_seg
25335 +#endif
25336
25337 .text
25338 ENTRY(__get_user_1)
25339 CFI_STARTPROC
25340 +
25341 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25342 GET_THREAD_INFO(%_ASM_DX)
25343 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25344 jae bad_get_user
25345 ASM_STAC
25346 -1: movzbl (%_ASM_AX),%edx
25347 +
25348 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25349 + mov pax_user_shadow_base,%_ASM_DX
25350 + cmp %_ASM_DX,%_ASM_AX
25351 + jae 1234f
25352 + add %_ASM_DX,%_ASM_AX
25353 +1234:
25354 +#endif
25355 +
25356 +#endif
25357 +
25358 +1: __copyuser_seg movzbl (%_ASM_AX),%edx
25359 xor %eax,%eax
25360 ASM_CLAC
25361 + pax_force_retaddr
25362 ret
25363 CFI_ENDPROC
25364 ENDPROC(__get_user_1)
25365 @@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
25366 ENTRY(__get_user_2)
25367 CFI_STARTPROC
25368 add $1,%_ASM_AX
25369 +
25370 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25371 jc bad_get_user
25372 GET_THREAD_INFO(%_ASM_DX)
25373 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25374 jae bad_get_user
25375 ASM_STAC
25376 -2: movzwl -1(%_ASM_AX),%edx
25377 +
25378 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25379 + mov pax_user_shadow_base,%_ASM_DX
25380 + cmp %_ASM_DX,%_ASM_AX
25381 + jae 1234f
25382 + add %_ASM_DX,%_ASM_AX
25383 +1234:
25384 +#endif
25385 +
25386 +#endif
25387 +
25388 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25389 xor %eax,%eax
25390 ASM_CLAC
25391 + pax_force_retaddr
25392 ret
25393 CFI_ENDPROC
25394 ENDPROC(__get_user_2)
25395 @@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
25396 ENTRY(__get_user_4)
25397 CFI_STARTPROC
25398 add $3,%_ASM_AX
25399 +
25400 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25401 jc bad_get_user
25402 GET_THREAD_INFO(%_ASM_DX)
25403 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25404 jae bad_get_user
25405 ASM_STAC
25406 -3: movl -3(%_ASM_AX),%edx
25407 +
25408 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25409 + mov pax_user_shadow_base,%_ASM_DX
25410 + cmp %_ASM_DX,%_ASM_AX
25411 + jae 1234f
25412 + add %_ASM_DX,%_ASM_AX
25413 +1234:
25414 +#endif
25415 +
25416 +#endif
25417 +
25418 +3: __copyuser_seg movl -3(%_ASM_AX),%edx
25419 xor %eax,%eax
25420 ASM_CLAC
25421 + pax_force_retaddr
25422 ret
25423 CFI_ENDPROC
25424 ENDPROC(__get_user_4)
25425 @@ -86,10 +137,20 @@ ENTRY(__get_user_8)
25426 GET_THREAD_INFO(%_ASM_DX)
25427 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25428 jae bad_get_user
25429 +
25430 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25431 + mov pax_user_shadow_base,%_ASM_DX
25432 + cmp %_ASM_DX,%_ASM_AX
25433 + jae 1234f
25434 + add %_ASM_DX,%_ASM_AX
25435 +1234:
25436 +#endif
25437 +
25438 ASM_STAC
25439 4: movq -7(%_ASM_AX),%rdx
25440 xor %eax,%eax
25441 ASM_CLAC
25442 + pax_force_retaddr
25443 ret
25444 #else
25445 add $7,%_ASM_AX
25446 @@ -102,6 +163,7 @@ ENTRY(__get_user_8)
25447 5: movl -3(%_ASM_AX),%ecx
25448 xor %eax,%eax
25449 ASM_CLAC
25450 + pax_force_retaddr
25451 ret
25452 #endif
25453 CFI_ENDPROC
25454 @@ -113,6 +175,7 @@ bad_get_user:
25455 xor %edx,%edx
25456 mov $(-EFAULT),%_ASM_AX
25457 ASM_CLAC
25458 + pax_force_retaddr
25459 ret
25460 CFI_ENDPROC
25461 END(bad_get_user)
25462 @@ -124,6 +187,7 @@ bad_get_user_8:
25463 xor %ecx,%ecx
25464 mov $(-EFAULT),%_ASM_AX
25465 ASM_CLAC
25466 + pax_force_retaddr
25467 ret
25468 CFI_ENDPROC
25469 END(bad_get_user_8)
25470 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25471 index 54fcffe..7be149e 100644
25472 --- a/arch/x86/lib/insn.c
25473 +++ b/arch/x86/lib/insn.c
25474 @@ -20,8 +20,10 @@
25475
25476 #ifdef __KERNEL__
25477 #include <linux/string.h>
25478 +#include <asm/pgtable_types.h>
25479 #else
25480 #include <string.h>
25481 +#define ktla_ktva(addr) addr
25482 #endif
25483 #include <asm/inat.h>
25484 #include <asm/insn.h>
25485 @@ -53,8 +55,8 @@
25486 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25487 {
25488 memset(insn, 0, sizeof(*insn));
25489 - insn->kaddr = kaddr;
25490 - insn->next_byte = kaddr;
25491 + insn->kaddr = ktla_ktva(kaddr);
25492 + insn->next_byte = ktla_ktva(kaddr);
25493 insn->x86_64 = x86_64 ? 1 : 0;
25494 insn->opnd_bytes = 4;
25495 if (x86_64)
25496 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25497 index 05a95e7..326f2fa 100644
25498 --- a/arch/x86/lib/iomap_copy_64.S
25499 +++ b/arch/x86/lib/iomap_copy_64.S
25500 @@ -17,6 +17,7 @@
25501
25502 #include <linux/linkage.h>
25503 #include <asm/dwarf2.h>
25504 +#include <asm/alternative-asm.h>
25505
25506 /*
25507 * override generic version in lib/iomap_copy.c
25508 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25509 CFI_STARTPROC
25510 movl %edx,%ecx
25511 rep movsd
25512 + pax_force_retaddr
25513 ret
25514 CFI_ENDPROC
25515 ENDPROC(__iowrite32_copy)
25516 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25517 index 1c273be..da9cc0e 100644
25518 --- a/arch/x86/lib/memcpy_64.S
25519 +++ b/arch/x86/lib/memcpy_64.S
25520 @@ -33,6 +33,7 @@
25521 rep movsq
25522 movl %edx, %ecx
25523 rep movsb
25524 + pax_force_retaddr
25525 ret
25526 .Lmemcpy_e:
25527 .previous
25528 @@ -49,6 +50,7 @@
25529 movq %rdi, %rax
25530 movq %rdx, %rcx
25531 rep movsb
25532 + pax_force_retaddr
25533 ret
25534 .Lmemcpy_e_e:
25535 .previous
25536 @@ -76,13 +78,13 @@ ENTRY(memcpy)
25537 */
25538 movq 0*8(%rsi), %r8
25539 movq 1*8(%rsi), %r9
25540 - movq 2*8(%rsi), %r10
25541 + movq 2*8(%rsi), %rcx
25542 movq 3*8(%rsi), %r11
25543 leaq 4*8(%rsi), %rsi
25544
25545 movq %r8, 0*8(%rdi)
25546 movq %r9, 1*8(%rdi)
25547 - movq %r10, 2*8(%rdi)
25548 + movq %rcx, 2*8(%rdi)
25549 movq %r11, 3*8(%rdi)
25550 leaq 4*8(%rdi), %rdi
25551 jae .Lcopy_forward_loop
25552 @@ -105,12 +107,12 @@ ENTRY(memcpy)
25553 subq $0x20, %rdx
25554 movq -1*8(%rsi), %r8
25555 movq -2*8(%rsi), %r9
25556 - movq -3*8(%rsi), %r10
25557 + movq -3*8(%rsi), %rcx
25558 movq -4*8(%rsi), %r11
25559 leaq -4*8(%rsi), %rsi
25560 movq %r8, -1*8(%rdi)
25561 movq %r9, -2*8(%rdi)
25562 - movq %r10, -3*8(%rdi)
25563 + movq %rcx, -3*8(%rdi)
25564 movq %r11, -4*8(%rdi)
25565 leaq -4*8(%rdi), %rdi
25566 jae .Lcopy_backward_loop
25567 @@ -130,12 +132,13 @@ ENTRY(memcpy)
25568 */
25569 movq 0*8(%rsi), %r8
25570 movq 1*8(%rsi), %r9
25571 - movq -2*8(%rsi, %rdx), %r10
25572 + movq -2*8(%rsi, %rdx), %rcx
25573 movq -1*8(%rsi, %rdx), %r11
25574 movq %r8, 0*8(%rdi)
25575 movq %r9, 1*8(%rdi)
25576 - movq %r10, -2*8(%rdi, %rdx)
25577 + movq %rcx, -2*8(%rdi, %rdx)
25578 movq %r11, -1*8(%rdi, %rdx)
25579 + pax_force_retaddr
25580 retq
25581 .p2align 4
25582 .Lless_16bytes:
25583 @@ -148,6 +151,7 @@ ENTRY(memcpy)
25584 movq -1*8(%rsi, %rdx), %r9
25585 movq %r8, 0*8(%rdi)
25586 movq %r9, -1*8(%rdi, %rdx)
25587 + pax_force_retaddr
25588 retq
25589 .p2align 4
25590 .Lless_8bytes:
25591 @@ -161,6 +165,7 @@ ENTRY(memcpy)
25592 movl -4(%rsi, %rdx), %r8d
25593 movl %ecx, (%rdi)
25594 movl %r8d, -4(%rdi, %rdx)
25595 + pax_force_retaddr
25596 retq
25597 .p2align 4
25598 .Lless_3bytes:
25599 @@ -179,6 +184,7 @@ ENTRY(memcpy)
25600 movb %cl, (%rdi)
25601
25602 .Lend:
25603 + pax_force_retaddr
25604 retq
25605 CFI_ENDPROC
25606 ENDPROC(memcpy)
25607 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25608 index ee16461..c39c199 100644
25609 --- a/arch/x86/lib/memmove_64.S
25610 +++ b/arch/x86/lib/memmove_64.S
25611 @@ -61,13 +61,13 @@ ENTRY(memmove)
25612 5:
25613 sub $0x20, %rdx
25614 movq 0*8(%rsi), %r11
25615 - movq 1*8(%rsi), %r10
25616 + movq 1*8(%rsi), %rcx
25617 movq 2*8(%rsi), %r9
25618 movq 3*8(%rsi), %r8
25619 leaq 4*8(%rsi), %rsi
25620
25621 movq %r11, 0*8(%rdi)
25622 - movq %r10, 1*8(%rdi)
25623 + movq %rcx, 1*8(%rdi)
25624 movq %r9, 2*8(%rdi)
25625 movq %r8, 3*8(%rdi)
25626 leaq 4*8(%rdi), %rdi
25627 @@ -81,10 +81,10 @@ ENTRY(memmove)
25628 4:
25629 movq %rdx, %rcx
25630 movq -8(%rsi, %rdx), %r11
25631 - lea -8(%rdi, %rdx), %r10
25632 + lea -8(%rdi, %rdx), %r9
25633 shrq $3, %rcx
25634 rep movsq
25635 - movq %r11, (%r10)
25636 + movq %r11, (%r9)
25637 jmp 13f
25638 .Lmemmove_end_forward:
25639
25640 @@ -95,14 +95,14 @@ ENTRY(memmove)
25641 7:
25642 movq %rdx, %rcx
25643 movq (%rsi), %r11
25644 - movq %rdi, %r10
25645 + movq %rdi, %r9
25646 leaq -8(%rsi, %rdx), %rsi
25647 leaq -8(%rdi, %rdx), %rdi
25648 shrq $3, %rcx
25649 std
25650 rep movsq
25651 cld
25652 - movq %r11, (%r10)
25653 + movq %r11, (%r9)
25654 jmp 13f
25655
25656 /*
25657 @@ -127,13 +127,13 @@ ENTRY(memmove)
25658 8:
25659 subq $0x20, %rdx
25660 movq -1*8(%rsi), %r11
25661 - movq -2*8(%rsi), %r10
25662 + movq -2*8(%rsi), %rcx
25663 movq -3*8(%rsi), %r9
25664 movq -4*8(%rsi), %r8
25665 leaq -4*8(%rsi), %rsi
25666
25667 movq %r11, -1*8(%rdi)
25668 - movq %r10, -2*8(%rdi)
25669 + movq %rcx, -2*8(%rdi)
25670 movq %r9, -3*8(%rdi)
25671 movq %r8, -4*8(%rdi)
25672 leaq -4*8(%rdi), %rdi
25673 @@ -151,11 +151,11 @@ ENTRY(memmove)
25674 * Move data from 16 bytes to 31 bytes.
25675 */
25676 movq 0*8(%rsi), %r11
25677 - movq 1*8(%rsi), %r10
25678 + movq 1*8(%rsi), %rcx
25679 movq -2*8(%rsi, %rdx), %r9
25680 movq -1*8(%rsi, %rdx), %r8
25681 movq %r11, 0*8(%rdi)
25682 - movq %r10, 1*8(%rdi)
25683 + movq %rcx, 1*8(%rdi)
25684 movq %r9, -2*8(%rdi, %rdx)
25685 movq %r8, -1*8(%rdi, %rdx)
25686 jmp 13f
25687 @@ -167,9 +167,9 @@ ENTRY(memmove)
25688 * Move data from 8 bytes to 15 bytes.
25689 */
25690 movq 0*8(%rsi), %r11
25691 - movq -1*8(%rsi, %rdx), %r10
25692 + movq -1*8(%rsi, %rdx), %r9
25693 movq %r11, 0*8(%rdi)
25694 - movq %r10, -1*8(%rdi, %rdx)
25695 + movq %r9, -1*8(%rdi, %rdx)
25696 jmp 13f
25697 10:
25698 cmpq $4, %rdx
25699 @@ -178,9 +178,9 @@ ENTRY(memmove)
25700 * Move data from 4 bytes to 7 bytes.
25701 */
25702 movl (%rsi), %r11d
25703 - movl -4(%rsi, %rdx), %r10d
25704 + movl -4(%rsi, %rdx), %r9d
25705 movl %r11d, (%rdi)
25706 - movl %r10d, -4(%rdi, %rdx)
25707 + movl %r9d, -4(%rdi, %rdx)
25708 jmp 13f
25709 11:
25710 cmp $2, %rdx
25711 @@ -189,9 +189,9 @@ ENTRY(memmove)
25712 * Move data from 2 bytes to 3 bytes.
25713 */
25714 movw (%rsi), %r11w
25715 - movw -2(%rsi, %rdx), %r10w
25716 + movw -2(%rsi, %rdx), %r9w
25717 movw %r11w, (%rdi)
25718 - movw %r10w, -2(%rdi, %rdx)
25719 + movw %r9w, -2(%rdi, %rdx)
25720 jmp 13f
25721 12:
25722 cmp $1, %rdx
25723 @@ -202,6 +202,7 @@ ENTRY(memmove)
25724 movb (%rsi), %r11b
25725 movb %r11b, (%rdi)
25726 13:
25727 + pax_force_retaddr
25728 retq
25729 CFI_ENDPROC
25730
25731 @@ -210,6 +211,7 @@ ENTRY(memmove)
25732 /* Forward moving data. */
25733 movq %rdx, %rcx
25734 rep movsb
25735 + pax_force_retaddr
25736 retq
25737 .Lmemmove_end_forward_efs:
25738 .previous
25739 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25740 index 2dcb380..963660a 100644
25741 --- a/arch/x86/lib/memset_64.S
25742 +++ b/arch/x86/lib/memset_64.S
25743 @@ -30,6 +30,7 @@
25744 movl %edx,%ecx
25745 rep stosb
25746 movq %r9,%rax
25747 + pax_force_retaddr
25748 ret
25749 .Lmemset_e:
25750 .previous
25751 @@ -52,6 +53,7 @@
25752 movq %rdx,%rcx
25753 rep stosb
25754 movq %r9,%rax
25755 + pax_force_retaddr
25756 ret
25757 .Lmemset_e_e:
25758 .previous
25759 @@ -59,7 +61,7 @@
25760 ENTRY(memset)
25761 ENTRY(__memset)
25762 CFI_STARTPROC
25763 - movq %rdi,%r10
25764 + movq %rdi,%r11
25765
25766 /* expand byte value */
25767 movzbl %sil,%ecx
25768 @@ -117,7 +119,8 @@ ENTRY(__memset)
25769 jnz .Lloop_1
25770
25771 .Lende:
25772 - movq %r10,%rax
25773 + movq %r11,%rax
25774 + pax_force_retaddr
25775 ret
25776
25777 CFI_RESTORE_STATE
25778 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25779 index c9f2d9b..e7fd2c0 100644
25780 --- a/arch/x86/lib/mmx_32.c
25781 +++ b/arch/x86/lib/mmx_32.c
25782 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25783 {
25784 void *p;
25785 int i;
25786 + unsigned long cr0;
25787
25788 if (unlikely(in_interrupt()))
25789 return __memcpy(to, from, len);
25790 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25791 kernel_fpu_begin();
25792
25793 __asm__ __volatile__ (
25794 - "1: prefetch (%0)\n" /* This set is 28 bytes */
25795 - " prefetch 64(%0)\n"
25796 - " prefetch 128(%0)\n"
25797 - " prefetch 192(%0)\n"
25798 - " prefetch 256(%0)\n"
25799 + "1: prefetch (%1)\n" /* This set is 28 bytes */
25800 + " prefetch 64(%1)\n"
25801 + " prefetch 128(%1)\n"
25802 + " prefetch 192(%1)\n"
25803 + " prefetch 256(%1)\n"
25804 "2: \n"
25805 ".section .fixup, \"ax\"\n"
25806 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25807 + "3: \n"
25808 +
25809 +#ifdef CONFIG_PAX_KERNEXEC
25810 + " movl %%cr0, %0\n"
25811 + " movl %0, %%eax\n"
25812 + " andl $0xFFFEFFFF, %%eax\n"
25813 + " movl %%eax, %%cr0\n"
25814 +#endif
25815 +
25816 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25817 +
25818 +#ifdef CONFIG_PAX_KERNEXEC
25819 + " movl %0, %%cr0\n"
25820 +#endif
25821 +
25822 " jmp 2b\n"
25823 ".previous\n"
25824 _ASM_EXTABLE(1b, 3b)
25825 - : : "r" (from));
25826 + : "=&r" (cr0) : "r" (from) : "ax");
25827
25828 for ( ; i > 5; i--) {
25829 __asm__ __volatile__ (
25830 - "1: prefetch 320(%0)\n"
25831 - "2: movq (%0), %%mm0\n"
25832 - " movq 8(%0), %%mm1\n"
25833 - " movq 16(%0), %%mm2\n"
25834 - " movq 24(%0), %%mm3\n"
25835 - " movq %%mm0, (%1)\n"
25836 - " movq %%mm1, 8(%1)\n"
25837 - " movq %%mm2, 16(%1)\n"
25838 - " movq %%mm3, 24(%1)\n"
25839 - " movq 32(%0), %%mm0\n"
25840 - " movq 40(%0), %%mm1\n"
25841 - " movq 48(%0), %%mm2\n"
25842 - " movq 56(%0), %%mm3\n"
25843 - " movq %%mm0, 32(%1)\n"
25844 - " movq %%mm1, 40(%1)\n"
25845 - " movq %%mm2, 48(%1)\n"
25846 - " movq %%mm3, 56(%1)\n"
25847 + "1: prefetch 320(%1)\n"
25848 + "2: movq (%1), %%mm0\n"
25849 + " movq 8(%1), %%mm1\n"
25850 + " movq 16(%1), %%mm2\n"
25851 + " movq 24(%1), %%mm3\n"
25852 + " movq %%mm0, (%2)\n"
25853 + " movq %%mm1, 8(%2)\n"
25854 + " movq %%mm2, 16(%2)\n"
25855 + " movq %%mm3, 24(%2)\n"
25856 + " movq 32(%1), %%mm0\n"
25857 + " movq 40(%1), %%mm1\n"
25858 + " movq 48(%1), %%mm2\n"
25859 + " movq 56(%1), %%mm3\n"
25860 + " movq %%mm0, 32(%2)\n"
25861 + " movq %%mm1, 40(%2)\n"
25862 + " movq %%mm2, 48(%2)\n"
25863 + " movq %%mm3, 56(%2)\n"
25864 ".section .fixup, \"ax\"\n"
25865 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25866 + "3:\n"
25867 +
25868 +#ifdef CONFIG_PAX_KERNEXEC
25869 + " movl %%cr0, %0\n"
25870 + " movl %0, %%eax\n"
25871 + " andl $0xFFFEFFFF, %%eax\n"
25872 + " movl %%eax, %%cr0\n"
25873 +#endif
25874 +
25875 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25876 +
25877 +#ifdef CONFIG_PAX_KERNEXEC
25878 + " movl %0, %%cr0\n"
25879 +#endif
25880 +
25881 " jmp 2b\n"
25882 ".previous\n"
25883 _ASM_EXTABLE(1b, 3b)
25884 - : : "r" (from), "r" (to) : "memory");
25885 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25886
25887 from += 64;
25888 to += 64;
25889 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25890 static void fast_copy_page(void *to, void *from)
25891 {
25892 int i;
25893 + unsigned long cr0;
25894
25895 kernel_fpu_begin();
25896
25897 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25898 * but that is for later. -AV
25899 */
25900 __asm__ __volatile__(
25901 - "1: prefetch (%0)\n"
25902 - " prefetch 64(%0)\n"
25903 - " prefetch 128(%0)\n"
25904 - " prefetch 192(%0)\n"
25905 - " prefetch 256(%0)\n"
25906 + "1: prefetch (%1)\n"
25907 + " prefetch 64(%1)\n"
25908 + " prefetch 128(%1)\n"
25909 + " prefetch 192(%1)\n"
25910 + " prefetch 256(%1)\n"
25911 "2: \n"
25912 ".section .fixup, \"ax\"\n"
25913 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25914 + "3: \n"
25915 +
25916 +#ifdef CONFIG_PAX_KERNEXEC
25917 + " movl %%cr0, %0\n"
25918 + " movl %0, %%eax\n"
25919 + " andl $0xFFFEFFFF, %%eax\n"
25920 + " movl %%eax, %%cr0\n"
25921 +#endif
25922 +
25923 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25924 +
25925 +#ifdef CONFIG_PAX_KERNEXEC
25926 + " movl %0, %%cr0\n"
25927 +#endif
25928 +
25929 " jmp 2b\n"
25930 ".previous\n"
25931 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
25932 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25933
25934 for (i = 0; i < (4096-320)/64; i++) {
25935 __asm__ __volatile__ (
25936 - "1: prefetch 320(%0)\n"
25937 - "2: movq (%0), %%mm0\n"
25938 - " movntq %%mm0, (%1)\n"
25939 - " movq 8(%0), %%mm1\n"
25940 - " movntq %%mm1, 8(%1)\n"
25941 - " movq 16(%0), %%mm2\n"
25942 - " movntq %%mm2, 16(%1)\n"
25943 - " movq 24(%0), %%mm3\n"
25944 - " movntq %%mm3, 24(%1)\n"
25945 - " movq 32(%0), %%mm4\n"
25946 - " movntq %%mm4, 32(%1)\n"
25947 - " movq 40(%0), %%mm5\n"
25948 - " movntq %%mm5, 40(%1)\n"
25949 - " movq 48(%0), %%mm6\n"
25950 - " movntq %%mm6, 48(%1)\n"
25951 - " movq 56(%0), %%mm7\n"
25952 - " movntq %%mm7, 56(%1)\n"
25953 + "1: prefetch 320(%1)\n"
25954 + "2: movq (%1), %%mm0\n"
25955 + " movntq %%mm0, (%2)\n"
25956 + " movq 8(%1), %%mm1\n"
25957 + " movntq %%mm1, 8(%2)\n"
25958 + " movq 16(%1), %%mm2\n"
25959 + " movntq %%mm2, 16(%2)\n"
25960 + " movq 24(%1), %%mm3\n"
25961 + " movntq %%mm3, 24(%2)\n"
25962 + " movq 32(%1), %%mm4\n"
25963 + " movntq %%mm4, 32(%2)\n"
25964 + " movq 40(%1), %%mm5\n"
25965 + " movntq %%mm5, 40(%2)\n"
25966 + " movq 48(%1), %%mm6\n"
25967 + " movntq %%mm6, 48(%2)\n"
25968 + " movq 56(%1), %%mm7\n"
25969 + " movntq %%mm7, 56(%2)\n"
25970 ".section .fixup, \"ax\"\n"
25971 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25972 + "3:\n"
25973 +
25974 +#ifdef CONFIG_PAX_KERNEXEC
25975 + " movl %%cr0, %0\n"
25976 + " movl %0, %%eax\n"
25977 + " andl $0xFFFEFFFF, %%eax\n"
25978 + " movl %%eax, %%cr0\n"
25979 +#endif
25980 +
25981 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25982 +
25983 +#ifdef CONFIG_PAX_KERNEXEC
25984 + " movl %0, %%cr0\n"
25985 +#endif
25986 +
25987 " jmp 2b\n"
25988 ".previous\n"
25989 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
25990 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25991
25992 from += 64;
25993 to += 64;
25994 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
25995 static void fast_copy_page(void *to, void *from)
25996 {
25997 int i;
25998 + unsigned long cr0;
25999
26000 kernel_fpu_begin();
26001
26002 __asm__ __volatile__ (
26003 - "1: prefetch (%0)\n"
26004 - " prefetch 64(%0)\n"
26005 - " prefetch 128(%0)\n"
26006 - " prefetch 192(%0)\n"
26007 - " prefetch 256(%0)\n"
26008 + "1: prefetch (%1)\n"
26009 + " prefetch 64(%1)\n"
26010 + " prefetch 128(%1)\n"
26011 + " prefetch 192(%1)\n"
26012 + " prefetch 256(%1)\n"
26013 "2: \n"
26014 ".section .fixup, \"ax\"\n"
26015 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26016 + "3: \n"
26017 +
26018 +#ifdef CONFIG_PAX_KERNEXEC
26019 + " movl %%cr0, %0\n"
26020 + " movl %0, %%eax\n"
26021 + " andl $0xFFFEFFFF, %%eax\n"
26022 + " movl %%eax, %%cr0\n"
26023 +#endif
26024 +
26025 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26026 +
26027 +#ifdef CONFIG_PAX_KERNEXEC
26028 + " movl %0, %%cr0\n"
26029 +#endif
26030 +
26031 " jmp 2b\n"
26032 ".previous\n"
26033 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
26034 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26035
26036 for (i = 0; i < 4096/64; i++) {
26037 __asm__ __volatile__ (
26038 - "1: prefetch 320(%0)\n"
26039 - "2: movq (%0), %%mm0\n"
26040 - " movq 8(%0), %%mm1\n"
26041 - " movq 16(%0), %%mm2\n"
26042 - " movq 24(%0), %%mm3\n"
26043 - " movq %%mm0, (%1)\n"
26044 - " movq %%mm1, 8(%1)\n"
26045 - " movq %%mm2, 16(%1)\n"
26046 - " movq %%mm3, 24(%1)\n"
26047 - " movq 32(%0), %%mm0\n"
26048 - " movq 40(%0), %%mm1\n"
26049 - " movq 48(%0), %%mm2\n"
26050 - " movq 56(%0), %%mm3\n"
26051 - " movq %%mm0, 32(%1)\n"
26052 - " movq %%mm1, 40(%1)\n"
26053 - " movq %%mm2, 48(%1)\n"
26054 - " movq %%mm3, 56(%1)\n"
26055 + "1: prefetch 320(%1)\n"
26056 + "2: movq (%1), %%mm0\n"
26057 + " movq 8(%1), %%mm1\n"
26058 + " movq 16(%1), %%mm2\n"
26059 + " movq 24(%1), %%mm3\n"
26060 + " movq %%mm0, (%2)\n"
26061 + " movq %%mm1, 8(%2)\n"
26062 + " movq %%mm2, 16(%2)\n"
26063 + " movq %%mm3, 24(%2)\n"
26064 + " movq 32(%1), %%mm0\n"
26065 + " movq 40(%1), %%mm1\n"
26066 + " movq 48(%1), %%mm2\n"
26067 + " movq 56(%1), %%mm3\n"
26068 + " movq %%mm0, 32(%2)\n"
26069 + " movq %%mm1, 40(%2)\n"
26070 + " movq %%mm2, 48(%2)\n"
26071 + " movq %%mm3, 56(%2)\n"
26072 ".section .fixup, \"ax\"\n"
26073 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26074 + "3:\n"
26075 +
26076 +#ifdef CONFIG_PAX_KERNEXEC
26077 + " movl %%cr0, %0\n"
26078 + " movl %0, %%eax\n"
26079 + " andl $0xFFFEFFFF, %%eax\n"
26080 + " movl %%eax, %%cr0\n"
26081 +#endif
26082 +
26083 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26084 +
26085 +#ifdef CONFIG_PAX_KERNEXEC
26086 + " movl %0, %%cr0\n"
26087 +#endif
26088 +
26089 " jmp 2b\n"
26090 ".previous\n"
26091 _ASM_EXTABLE(1b, 3b)
26092 - : : "r" (from), "r" (to) : "memory");
26093 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26094
26095 from += 64;
26096 to += 64;
26097 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26098 index f6d13ee..aca5f0b 100644
26099 --- a/arch/x86/lib/msr-reg.S
26100 +++ b/arch/x86/lib/msr-reg.S
26101 @@ -3,6 +3,7 @@
26102 #include <asm/dwarf2.h>
26103 #include <asm/asm.h>
26104 #include <asm/msr.h>
26105 +#include <asm/alternative-asm.h>
26106
26107 #ifdef CONFIG_X86_64
26108 /*
26109 @@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26110 CFI_STARTPROC
26111 pushq_cfi %rbx
26112 pushq_cfi %rbp
26113 - movq %rdi, %r10 /* Save pointer */
26114 + movq %rdi, %r9 /* Save pointer */
26115 xorl %r11d, %r11d /* Return value */
26116 movl (%rdi), %eax
26117 movl 4(%rdi), %ecx
26118 @@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26119 movl 28(%rdi), %edi
26120 CFI_REMEMBER_STATE
26121 1: \op
26122 -2: movl %eax, (%r10)
26123 +2: movl %eax, (%r9)
26124 movl %r11d, %eax /* Return value */
26125 - movl %ecx, 4(%r10)
26126 - movl %edx, 8(%r10)
26127 - movl %ebx, 12(%r10)
26128 - movl %ebp, 20(%r10)
26129 - movl %esi, 24(%r10)
26130 - movl %edi, 28(%r10)
26131 + movl %ecx, 4(%r9)
26132 + movl %edx, 8(%r9)
26133 + movl %ebx, 12(%r9)
26134 + movl %ebp, 20(%r9)
26135 + movl %esi, 24(%r9)
26136 + movl %edi, 28(%r9)
26137 popq_cfi %rbp
26138 popq_cfi %rbx
26139 + pax_force_retaddr
26140 ret
26141 3:
26142 CFI_RESTORE_STATE
26143 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26144 index fc6ba17..d4d989d 100644
26145 --- a/arch/x86/lib/putuser.S
26146 +++ b/arch/x86/lib/putuser.S
26147 @@ -16,7 +16,9 @@
26148 #include <asm/errno.h>
26149 #include <asm/asm.h>
26150 #include <asm/smap.h>
26151 -
26152 +#include <asm/segment.h>
26153 +#include <asm/pgtable.h>
26154 +#include <asm/alternative-asm.h>
26155
26156 /*
26157 * __put_user_X
26158 @@ -30,57 +32,125 @@
26159 * as they get called from within inline assembly.
26160 */
26161
26162 -#define ENTER CFI_STARTPROC ; \
26163 - GET_THREAD_INFO(%_ASM_BX)
26164 -#define EXIT ASM_CLAC ; \
26165 - ret ; \
26166 +#define ENTER CFI_STARTPROC
26167 +#define EXIT ASM_CLAC ; \
26168 + pax_force_retaddr ; \
26169 + ret ; \
26170 CFI_ENDPROC
26171
26172 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26173 +#define _DEST %_ASM_CX,%_ASM_BX
26174 +#else
26175 +#define _DEST %_ASM_CX
26176 +#endif
26177 +
26178 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26179 +#define __copyuser_seg gs;
26180 +#else
26181 +#define __copyuser_seg
26182 +#endif
26183 +
26184 .text
26185 ENTRY(__put_user_1)
26186 ENTER
26187 +
26188 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26189 + GET_THREAD_INFO(%_ASM_BX)
26190 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26191 jae bad_put_user
26192 ASM_STAC
26193 -1: movb %al,(%_ASM_CX)
26194 +
26195 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26196 + mov pax_user_shadow_base,%_ASM_BX
26197 + cmp %_ASM_BX,%_ASM_CX
26198 + jb 1234f
26199 + xor %ebx,%ebx
26200 +1234:
26201 +#endif
26202 +
26203 +#endif
26204 +
26205 +1: __copyuser_seg movb %al,(_DEST)
26206 xor %eax,%eax
26207 EXIT
26208 ENDPROC(__put_user_1)
26209
26210 ENTRY(__put_user_2)
26211 ENTER
26212 +
26213 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26214 + GET_THREAD_INFO(%_ASM_BX)
26215 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26216 sub $1,%_ASM_BX
26217 cmp %_ASM_BX,%_ASM_CX
26218 jae bad_put_user
26219 ASM_STAC
26220 -2: movw %ax,(%_ASM_CX)
26221 +
26222 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26223 + mov pax_user_shadow_base,%_ASM_BX
26224 + cmp %_ASM_BX,%_ASM_CX
26225 + jb 1234f
26226 + xor %ebx,%ebx
26227 +1234:
26228 +#endif
26229 +
26230 +#endif
26231 +
26232 +2: __copyuser_seg movw %ax,(_DEST)
26233 xor %eax,%eax
26234 EXIT
26235 ENDPROC(__put_user_2)
26236
26237 ENTRY(__put_user_4)
26238 ENTER
26239 +
26240 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26241 + GET_THREAD_INFO(%_ASM_BX)
26242 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26243 sub $3,%_ASM_BX
26244 cmp %_ASM_BX,%_ASM_CX
26245 jae bad_put_user
26246 ASM_STAC
26247 -3: movl %eax,(%_ASM_CX)
26248 +
26249 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26250 + mov pax_user_shadow_base,%_ASM_BX
26251 + cmp %_ASM_BX,%_ASM_CX
26252 + jb 1234f
26253 + xor %ebx,%ebx
26254 +1234:
26255 +#endif
26256 +
26257 +#endif
26258 +
26259 +3: __copyuser_seg movl %eax,(_DEST)
26260 xor %eax,%eax
26261 EXIT
26262 ENDPROC(__put_user_4)
26263
26264 ENTRY(__put_user_8)
26265 ENTER
26266 +
26267 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26268 + GET_THREAD_INFO(%_ASM_BX)
26269 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26270 sub $7,%_ASM_BX
26271 cmp %_ASM_BX,%_ASM_CX
26272 jae bad_put_user
26273 ASM_STAC
26274 -4: mov %_ASM_AX,(%_ASM_CX)
26275 +
26276 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26277 + mov pax_user_shadow_base,%_ASM_BX
26278 + cmp %_ASM_BX,%_ASM_CX
26279 + jb 1234f
26280 + xor %ebx,%ebx
26281 +1234:
26282 +#endif
26283 +
26284 +#endif
26285 +
26286 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
26287 #ifdef CONFIG_X86_32
26288 -5: movl %edx,4(%_ASM_CX)
26289 +5: __copyuser_seg movl %edx,4(_DEST)
26290 #endif
26291 xor %eax,%eax
26292 EXIT
26293 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26294 index 1cad221..de671ee 100644
26295 --- a/arch/x86/lib/rwlock.S
26296 +++ b/arch/x86/lib/rwlock.S
26297 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26298 FRAME
26299 0: LOCK_PREFIX
26300 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26301 +
26302 +#ifdef CONFIG_PAX_REFCOUNT
26303 + jno 1234f
26304 + LOCK_PREFIX
26305 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26306 + int $4
26307 +1234:
26308 + _ASM_EXTABLE(1234b, 1234b)
26309 +#endif
26310 +
26311 1: rep; nop
26312 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26313 jne 1b
26314 LOCK_PREFIX
26315 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26316 +
26317 +#ifdef CONFIG_PAX_REFCOUNT
26318 + jno 1234f
26319 + LOCK_PREFIX
26320 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26321 + int $4
26322 +1234:
26323 + _ASM_EXTABLE(1234b, 1234b)
26324 +#endif
26325 +
26326 jnz 0b
26327 ENDFRAME
26328 + pax_force_retaddr
26329 ret
26330 CFI_ENDPROC
26331 END(__write_lock_failed)
26332 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26333 FRAME
26334 0: LOCK_PREFIX
26335 READ_LOCK_SIZE(inc) (%__lock_ptr)
26336 +
26337 +#ifdef CONFIG_PAX_REFCOUNT
26338 + jno 1234f
26339 + LOCK_PREFIX
26340 + READ_LOCK_SIZE(dec) (%__lock_ptr)
26341 + int $4
26342 +1234:
26343 + _ASM_EXTABLE(1234b, 1234b)
26344 +#endif
26345 +
26346 1: rep; nop
26347 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26348 js 1b
26349 LOCK_PREFIX
26350 READ_LOCK_SIZE(dec) (%__lock_ptr)
26351 +
26352 +#ifdef CONFIG_PAX_REFCOUNT
26353 + jno 1234f
26354 + LOCK_PREFIX
26355 + READ_LOCK_SIZE(inc) (%__lock_ptr)
26356 + int $4
26357 +1234:
26358 + _ASM_EXTABLE(1234b, 1234b)
26359 +#endif
26360 +
26361 js 0b
26362 ENDFRAME
26363 + pax_force_retaddr
26364 ret
26365 CFI_ENDPROC
26366 END(__read_lock_failed)
26367 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26368 index 5dff5f0..cadebf4 100644
26369 --- a/arch/x86/lib/rwsem.S
26370 +++ b/arch/x86/lib/rwsem.S
26371 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26372 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26373 CFI_RESTORE __ASM_REG(dx)
26374 restore_common_regs
26375 + pax_force_retaddr
26376 ret
26377 CFI_ENDPROC
26378 ENDPROC(call_rwsem_down_read_failed)
26379 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26380 movq %rax,%rdi
26381 call rwsem_down_write_failed
26382 restore_common_regs
26383 + pax_force_retaddr
26384 ret
26385 CFI_ENDPROC
26386 ENDPROC(call_rwsem_down_write_failed)
26387 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26388 movq %rax,%rdi
26389 call rwsem_wake
26390 restore_common_regs
26391 -1: ret
26392 +1: pax_force_retaddr
26393 + ret
26394 CFI_ENDPROC
26395 ENDPROC(call_rwsem_wake)
26396
26397 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26398 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26399 CFI_RESTORE __ASM_REG(dx)
26400 restore_common_regs
26401 + pax_force_retaddr
26402 ret
26403 CFI_ENDPROC
26404 ENDPROC(call_rwsem_downgrade_wake)
26405 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26406 index a63efd6..ccecad8 100644
26407 --- a/arch/x86/lib/thunk_64.S
26408 +++ b/arch/x86/lib/thunk_64.S
26409 @@ -8,6 +8,7 @@
26410 #include <linux/linkage.h>
26411 #include <asm/dwarf2.h>
26412 #include <asm/calling.h>
26413 +#include <asm/alternative-asm.h>
26414
26415 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26416 .macro THUNK name, func, put_ret_addr_in_rdi=0
26417 @@ -41,5 +42,6 @@
26418 SAVE_ARGS
26419 restore:
26420 RESTORE_ARGS
26421 + pax_force_retaddr
26422 ret
26423 CFI_ENDPROC
26424 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26425 index f0312d7..9c39d63 100644
26426 --- a/arch/x86/lib/usercopy_32.c
26427 +++ b/arch/x86/lib/usercopy_32.c
26428 @@ -42,11 +42,13 @@ do { \
26429 int __d0; \
26430 might_fault(); \
26431 __asm__ __volatile__( \
26432 + __COPYUSER_SET_ES \
26433 ASM_STAC "\n" \
26434 "0: rep; stosl\n" \
26435 " movl %2,%0\n" \
26436 "1: rep; stosb\n" \
26437 "2: " ASM_CLAC "\n" \
26438 + __COPYUSER_RESTORE_ES \
26439 ".section .fixup,\"ax\"\n" \
26440 "3: lea 0(%2,%0,4),%0\n" \
26441 " jmp 2b\n" \
26442 @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26443
26444 #ifdef CONFIG_X86_INTEL_USERCOPY
26445 static unsigned long
26446 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
26447 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26448 {
26449 int d0, d1;
26450 __asm__ __volatile__(
26451 @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26452 " .align 2,0x90\n"
26453 "3: movl 0(%4), %%eax\n"
26454 "4: movl 4(%4), %%edx\n"
26455 - "5: movl %%eax, 0(%3)\n"
26456 - "6: movl %%edx, 4(%3)\n"
26457 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26458 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26459 "7: movl 8(%4), %%eax\n"
26460 "8: movl 12(%4),%%edx\n"
26461 - "9: movl %%eax, 8(%3)\n"
26462 - "10: movl %%edx, 12(%3)\n"
26463 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26464 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26465 "11: movl 16(%4), %%eax\n"
26466 "12: movl 20(%4), %%edx\n"
26467 - "13: movl %%eax, 16(%3)\n"
26468 - "14: movl %%edx, 20(%3)\n"
26469 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26470 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26471 "15: movl 24(%4), %%eax\n"
26472 "16: movl 28(%4), %%edx\n"
26473 - "17: movl %%eax, 24(%3)\n"
26474 - "18: movl %%edx, 28(%3)\n"
26475 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26476 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26477 "19: movl 32(%4), %%eax\n"
26478 "20: movl 36(%4), %%edx\n"
26479 - "21: movl %%eax, 32(%3)\n"
26480 - "22: movl %%edx, 36(%3)\n"
26481 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26482 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26483 "23: movl 40(%4), %%eax\n"
26484 "24: movl 44(%4), %%edx\n"
26485 - "25: movl %%eax, 40(%3)\n"
26486 - "26: movl %%edx, 44(%3)\n"
26487 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26488 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26489 "27: movl 48(%4), %%eax\n"
26490 "28: movl 52(%4), %%edx\n"
26491 - "29: movl %%eax, 48(%3)\n"
26492 - "30: movl %%edx, 52(%3)\n"
26493 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26494 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26495 "31: movl 56(%4), %%eax\n"
26496 "32: movl 60(%4), %%edx\n"
26497 - "33: movl %%eax, 56(%3)\n"
26498 - "34: movl %%edx, 60(%3)\n"
26499 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26500 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26501 " addl $-64, %0\n"
26502 " addl $64, %4\n"
26503 " addl $64, %3\n"
26504 @@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26505 " shrl $2, %0\n"
26506 " andl $3, %%eax\n"
26507 " cld\n"
26508 + __COPYUSER_SET_ES
26509 "99: rep; movsl\n"
26510 "36: movl %%eax, %0\n"
26511 "37: rep; movsb\n"
26512 "100:\n"
26513 + __COPYUSER_RESTORE_ES
26514 ".section .fixup,\"ax\"\n"
26515 "101: lea 0(%%eax,%0,4),%0\n"
26516 " jmp 100b\n"
26517 @@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26518 }
26519
26520 static unsigned long
26521 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26522 +{
26523 + int d0, d1;
26524 + __asm__ __volatile__(
26525 + " .align 2,0x90\n"
26526 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26527 + " cmpl $67, %0\n"
26528 + " jbe 3f\n"
26529 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26530 + " .align 2,0x90\n"
26531 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26532 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26533 + "5: movl %%eax, 0(%3)\n"
26534 + "6: movl %%edx, 4(%3)\n"
26535 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26536 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26537 + "9: movl %%eax, 8(%3)\n"
26538 + "10: movl %%edx, 12(%3)\n"
26539 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26540 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26541 + "13: movl %%eax, 16(%3)\n"
26542 + "14: movl %%edx, 20(%3)\n"
26543 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26544 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26545 + "17: movl %%eax, 24(%3)\n"
26546 + "18: movl %%edx, 28(%3)\n"
26547 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26548 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26549 + "21: movl %%eax, 32(%3)\n"
26550 + "22: movl %%edx, 36(%3)\n"
26551 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26552 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26553 + "25: movl %%eax, 40(%3)\n"
26554 + "26: movl %%edx, 44(%3)\n"
26555 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26556 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26557 + "29: movl %%eax, 48(%3)\n"
26558 + "30: movl %%edx, 52(%3)\n"
26559 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26560 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26561 + "33: movl %%eax, 56(%3)\n"
26562 + "34: movl %%edx, 60(%3)\n"
26563 + " addl $-64, %0\n"
26564 + " addl $64, %4\n"
26565 + " addl $64, %3\n"
26566 + " cmpl $63, %0\n"
26567 + " ja 1b\n"
26568 + "35: movl %0, %%eax\n"
26569 + " shrl $2, %0\n"
26570 + " andl $3, %%eax\n"
26571 + " cld\n"
26572 + "99: rep; "__copyuser_seg" movsl\n"
26573 + "36: movl %%eax, %0\n"
26574 + "37: rep; "__copyuser_seg" movsb\n"
26575 + "100:\n"
26576 + ".section .fixup,\"ax\"\n"
26577 + "101: lea 0(%%eax,%0,4),%0\n"
26578 + " jmp 100b\n"
26579 + ".previous\n"
26580 + _ASM_EXTABLE(1b,100b)
26581 + _ASM_EXTABLE(2b,100b)
26582 + _ASM_EXTABLE(3b,100b)
26583 + _ASM_EXTABLE(4b,100b)
26584 + _ASM_EXTABLE(5b,100b)
26585 + _ASM_EXTABLE(6b,100b)
26586 + _ASM_EXTABLE(7b,100b)
26587 + _ASM_EXTABLE(8b,100b)
26588 + _ASM_EXTABLE(9b,100b)
26589 + _ASM_EXTABLE(10b,100b)
26590 + _ASM_EXTABLE(11b,100b)
26591 + _ASM_EXTABLE(12b,100b)
26592 + _ASM_EXTABLE(13b,100b)
26593 + _ASM_EXTABLE(14b,100b)
26594 + _ASM_EXTABLE(15b,100b)
26595 + _ASM_EXTABLE(16b,100b)
26596 + _ASM_EXTABLE(17b,100b)
26597 + _ASM_EXTABLE(18b,100b)
26598 + _ASM_EXTABLE(19b,100b)
26599 + _ASM_EXTABLE(20b,100b)
26600 + _ASM_EXTABLE(21b,100b)
26601 + _ASM_EXTABLE(22b,100b)
26602 + _ASM_EXTABLE(23b,100b)
26603 + _ASM_EXTABLE(24b,100b)
26604 + _ASM_EXTABLE(25b,100b)
26605 + _ASM_EXTABLE(26b,100b)
26606 + _ASM_EXTABLE(27b,100b)
26607 + _ASM_EXTABLE(28b,100b)
26608 + _ASM_EXTABLE(29b,100b)
26609 + _ASM_EXTABLE(30b,100b)
26610 + _ASM_EXTABLE(31b,100b)
26611 + _ASM_EXTABLE(32b,100b)
26612 + _ASM_EXTABLE(33b,100b)
26613 + _ASM_EXTABLE(34b,100b)
26614 + _ASM_EXTABLE(35b,100b)
26615 + _ASM_EXTABLE(36b,100b)
26616 + _ASM_EXTABLE(37b,100b)
26617 + _ASM_EXTABLE(99b,101b)
26618 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
26619 + : "1"(to), "2"(from), "0"(size)
26620 + : "eax", "edx", "memory");
26621 + return size;
26622 +}
26623 +
26624 +static unsigned long __size_overflow(3)
26625 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26626 {
26627 int d0, d1;
26628 __asm__ __volatile__(
26629 " .align 2,0x90\n"
26630 - "0: movl 32(%4), %%eax\n"
26631 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26632 " cmpl $67, %0\n"
26633 " jbe 2f\n"
26634 - "1: movl 64(%4), %%eax\n"
26635 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26636 " .align 2,0x90\n"
26637 - "2: movl 0(%4), %%eax\n"
26638 - "21: movl 4(%4), %%edx\n"
26639 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26640 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26641 " movl %%eax, 0(%3)\n"
26642 " movl %%edx, 4(%3)\n"
26643 - "3: movl 8(%4), %%eax\n"
26644 - "31: movl 12(%4),%%edx\n"
26645 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26646 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26647 " movl %%eax, 8(%3)\n"
26648 " movl %%edx, 12(%3)\n"
26649 - "4: movl 16(%4), %%eax\n"
26650 - "41: movl 20(%4), %%edx\n"
26651 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26652 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26653 " movl %%eax, 16(%3)\n"
26654 " movl %%edx, 20(%3)\n"
26655 - "10: movl 24(%4), %%eax\n"
26656 - "51: movl 28(%4), %%edx\n"
26657 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26658 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26659 " movl %%eax, 24(%3)\n"
26660 " movl %%edx, 28(%3)\n"
26661 - "11: movl 32(%4), %%eax\n"
26662 - "61: movl 36(%4), %%edx\n"
26663 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26664 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26665 " movl %%eax, 32(%3)\n"
26666 " movl %%edx, 36(%3)\n"
26667 - "12: movl 40(%4), %%eax\n"
26668 - "71: movl 44(%4), %%edx\n"
26669 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26670 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26671 " movl %%eax, 40(%3)\n"
26672 " movl %%edx, 44(%3)\n"
26673 - "13: movl 48(%4), %%eax\n"
26674 - "81: movl 52(%4), %%edx\n"
26675 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26676 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26677 " movl %%eax, 48(%3)\n"
26678 " movl %%edx, 52(%3)\n"
26679 - "14: movl 56(%4), %%eax\n"
26680 - "91: movl 60(%4), %%edx\n"
26681 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26682 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26683 " movl %%eax, 56(%3)\n"
26684 " movl %%edx, 60(%3)\n"
26685 " addl $-64, %0\n"
26686 @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26687 " shrl $2, %0\n"
26688 " andl $3, %%eax\n"
26689 " cld\n"
26690 - "6: rep; movsl\n"
26691 + "6: rep; "__copyuser_seg" movsl\n"
26692 " movl %%eax,%0\n"
26693 - "7: rep; movsb\n"
26694 + "7: rep; "__copyuser_seg" movsb\n"
26695 "8:\n"
26696 ".section .fixup,\"ax\"\n"
26697 "9: lea 0(%%eax,%0,4),%0\n"
26698 @@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26699 * hyoshiok@miraclelinux.com
26700 */
26701
26702 -static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26703 +static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26704 const void __user *from, unsigned long size)
26705 {
26706 int d0, d1;
26707
26708 __asm__ __volatile__(
26709 " .align 2,0x90\n"
26710 - "0: movl 32(%4), %%eax\n"
26711 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26712 " cmpl $67, %0\n"
26713 " jbe 2f\n"
26714 - "1: movl 64(%4), %%eax\n"
26715 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26716 " .align 2,0x90\n"
26717 - "2: movl 0(%4), %%eax\n"
26718 - "21: movl 4(%4), %%edx\n"
26719 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26720 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26721 " movnti %%eax, 0(%3)\n"
26722 " movnti %%edx, 4(%3)\n"
26723 - "3: movl 8(%4), %%eax\n"
26724 - "31: movl 12(%4),%%edx\n"
26725 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26726 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26727 " movnti %%eax, 8(%3)\n"
26728 " movnti %%edx, 12(%3)\n"
26729 - "4: movl 16(%4), %%eax\n"
26730 - "41: movl 20(%4), %%edx\n"
26731 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26732 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26733 " movnti %%eax, 16(%3)\n"
26734 " movnti %%edx, 20(%3)\n"
26735 - "10: movl 24(%4), %%eax\n"
26736 - "51: movl 28(%4), %%edx\n"
26737 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26738 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26739 " movnti %%eax, 24(%3)\n"
26740 " movnti %%edx, 28(%3)\n"
26741 - "11: movl 32(%4), %%eax\n"
26742 - "61: movl 36(%4), %%edx\n"
26743 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26744 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26745 " movnti %%eax, 32(%3)\n"
26746 " movnti %%edx, 36(%3)\n"
26747 - "12: movl 40(%4), %%eax\n"
26748 - "71: movl 44(%4), %%edx\n"
26749 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26750 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26751 " movnti %%eax, 40(%3)\n"
26752 " movnti %%edx, 44(%3)\n"
26753 - "13: movl 48(%4), %%eax\n"
26754 - "81: movl 52(%4), %%edx\n"
26755 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26756 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26757 " movnti %%eax, 48(%3)\n"
26758 " movnti %%edx, 52(%3)\n"
26759 - "14: movl 56(%4), %%eax\n"
26760 - "91: movl 60(%4), %%edx\n"
26761 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26762 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26763 " movnti %%eax, 56(%3)\n"
26764 " movnti %%edx, 60(%3)\n"
26765 " addl $-64, %0\n"
26766 @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26767 " shrl $2, %0\n"
26768 " andl $3, %%eax\n"
26769 " cld\n"
26770 - "6: rep; movsl\n"
26771 + "6: rep; "__copyuser_seg" movsl\n"
26772 " movl %%eax,%0\n"
26773 - "7: rep; movsb\n"
26774 + "7: rep; "__copyuser_seg" movsb\n"
26775 "8:\n"
26776 ".section .fixup,\"ax\"\n"
26777 "9: lea 0(%%eax,%0,4),%0\n"
26778 @@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26779 return size;
26780 }
26781
26782 -static unsigned long __copy_user_intel_nocache(void *to,
26783 +static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26784 const void __user *from, unsigned long size)
26785 {
26786 int d0, d1;
26787
26788 __asm__ __volatile__(
26789 " .align 2,0x90\n"
26790 - "0: movl 32(%4), %%eax\n"
26791 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26792 " cmpl $67, %0\n"
26793 " jbe 2f\n"
26794 - "1: movl 64(%4), %%eax\n"
26795 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26796 " .align 2,0x90\n"
26797 - "2: movl 0(%4), %%eax\n"
26798 - "21: movl 4(%4), %%edx\n"
26799 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26800 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26801 " movnti %%eax, 0(%3)\n"
26802 " movnti %%edx, 4(%3)\n"
26803 - "3: movl 8(%4), %%eax\n"
26804 - "31: movl 12(%4),%%edx\n"
26805 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26806 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26807 " movnti %%eax, 8(%3)\n"
26808 " movnti %%edx, 12(%3)\n"
26809 - "4: movl 16(%4), %%eax\n"
26810 - "41: movl 20(%4), %%edx\n"
26811 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26812 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26813 " movnti %%eax, 16(%3)\n"
26814 " movnti %%edx, 20(%3)\n"
26815 - "10: movl 24(%4), %%eax\n"
26816 - "51: movl 28(%4), %%edx\n"
26817 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26818 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26819 " movnti %%eax, 24(%3)\n"
26820 " movnti %%edx, 28(%3)\n"
26821 - "11: movl 32(%4), %%eax\n"
26822 - "61: movl 36(%4), %%edx\n"
26823 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26824 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26825 " movnti %%eax, 32(%3)\n"
26826 " movnti %%edx, 36(%3)\n"
26827 - "12: movl 40(%4), %%eax\n"
26828 - "71: movl 44(%4), %%edx\n"
26829 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26830 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26831 " movnti %%eax, 40(%3)\n"
26832 " movnti %%edx, 44(%3)\n"
26833 - "13: movl 48(%4), %%eax\n"
26834 - "81: movl 52(%4), %%edx\n"
26835 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26836 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26837 " movnti %%eax, 48(%3)\n"
26838 " movnti %%edx, 52(%3)\n"
26839 - "14: movl 56(%4), %%eax\n"
26840 - "91: movl 60(%4), %%edx\n"
26841 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26842 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26843 " movnti %%eax, 56(%3)\n"
26844 " movnti %%edx, 60(%3)\n"
26845 " addl $-64, %0\n"
26846 @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26847 " shrl $2, %0\n"
26848 " andl $3, %%eax\n"
26849 " cld\n"
26850 - "6: rep; movsl\n"
26851 + "6: rep; "__copyuser_seg" movsl\n"
26852 " movl %%eax,%0\n"
26853 - "7: rep; movsb\n"
26854 + "7: rep; "__copyuser_seg" movsb\n"
26855 "8:\n"
26856 ".section .fixup,\"ax\"\n"
26857 "9: lea 0(%%eax,%0,4),%0\n"
26858 @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26859 */
26860 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26861 unsigned long size);
26862 -unsigned long __copy_user_intel(void __user *to, const void *from,
26863 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26864 + unsigned long size);
26865 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26866 unsigned long size);
26867 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26868 const void __user *from, unsigned long size);
26869 #endif /* CONFIG_X86_INTEL_USERCOPY */
26870
26871 /* Generic arbitrary sized copy. */
26872 -#define __copy_user(to, from, size) \
26873 +#define __copy_user(to, from, size, prefix, set, restore) \
26874 do { \
26875 int __d0, __d1, __d2; \
26876 __asm__ __volatile__( \
26877 + set \
26878 " cmp $7,%0\n" \
26879 " jbe 1f\n" \
26880 " movl %1,%0\n" \
26881 " negl %0\n" \
26882 " andl $7,%0\n" \
26883 " subl %0,%3\n" \
26884 - "4: rep; movsb\n" \
26885 + "4: rep; "prefix"movsb\n" \
26886 " movl %3,%0\n" \
26887 " shrl $2,%0\n" \
26888 " andl $3,%3\n" \
26889 " .align 2,0x90\n" \
26890 - "0: rep; movsl\n" \
26891 + "0: rep; "prefix"movsl\n" \
26892 " movl %3,%0\n" \
26893 - "1: rep; movsb\n" \
26894 + "1: rep; "prefix"movsb\n" \
26895 "2:\n" \
26896 + restore \
26897 ".section .fixup,\"ax\"\n" \
26898 "5: addl %3,%0\n" \
26899 " jmp 2b\n" \
26900 @@ -538,14 +650,14 @@ do { \
26901 " negl %0\n" \
26902 " andl $7,%0\n" \
26903 " subl %0,%3\n" \
26904 - "4: rep; movsb\n" \
26905 + "4: rep; "__copyuser_seg"movsb\n" \
26906 " movl %3,%0\n" \
26907 " shrl $2,%0\n" \
26908 " andl $3,%3\n" \
26909 " .align 2,0x90\n" \
26910 - "0: rep; movsl\n" \
26911 + "0: rep; "__copyuser_seg"movsl\n" \
26912 " movl %3,%0\n" \
26913 - "1: rep; movsb\n" \
26914 + "1: rep; "__copyuser_seg"movsb\n" \
26915 "2:\n" \
26916 ".section .fixup,\"ax\"\n" \
26917 "5: addl %3,%0\n" \
26918 @@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
26919 {
26920 stac();
26921 if (movsl_is_ok(to, from, n))
26922 - __copy_user(to, from, n);
26923 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
26924 else
26925 - n = __copy_user_intel(to, from, n);
26926 + n = __generic_copy_to_user_intel(to, from, n);
26927 clac();
26928 return n;
26929 }
26930 @@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
26931 {
26932 stac();
26933 if (movsl_is_ok(to, from, n))
26934 - __copy_user(to, from, n);
26935 + __copy_user(to, from, n, __copyuser_seg, "", "");
26936 else
26937 - n = __copy_user_intel((void __user *)to,
26938 - (const void *)from, n);
26939 + n = __generic_copy_from_user_intel(to, from, n);
26940 clac();
26941 return n;
26942 }
26943 @@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
26944 if (n > 64 && cpu_has_xmm2)
26945 n = __copy_user_intel_nocache(to, from, n);
26946 else
26947 - __copy_user(to, from, n);
26948 + __copy_user(to, from, n, __copyuser_seg, "", "");
26949 #else
26950 - __copy_user(to, from, n);
26951 + __copy_user(to, from, n, __copyuser_seg, "", "");
26952 #endif
26953 clac();
26954 return n;
26955 }
26956 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
26957
26958 -/**
26959 - * copy_to_user: - Copy a block of data into user space.
26960 - * @to: Destination address, in user space.
26961 - * @from: Source address, in kernel space.
26962 - * @n: Number of bytes to copy.
26963 - *
26964 - * Context: User context only. This function may sleep.
26965 - *
26966 - * Copy data from kernel space to user space.
26967 - *
26968 - * Returns number of bytes that could not be copied.
26969 - * On success, this will be zero.
26970 - */
26971 -unsigned long
26972 -copy_to_user(void __user *to, const void *from, unsigned long n)
26973 -{
26974 - if (access_ok(VERIFY_WRITE, to, n))
26975 - n = __copy_to_user(to, from, n);
26976 - return n;
26977 -}
26978 -EXPORT_SYMBOL(copy_to_user);
26979 -
26980 -/**
26981 - * copy_from_user: - Copy a block of data from user space.
26982 - * @to: Destination address, in kernel space.
26983 - * @from: Source address, in user space.
26984 - * @n: Number of bytes to copy.
26985 - *
26986 - * Context: User context only. This function may sleep.
26987 - *
26988 - * Copy data from user space to kernel space.
26989 - *
26990 - * Returns number of bytes that could not be copied.
26991 - * On success, this will be zero.
26992 - *
26993 - * If some data could not be copied, this function will pad the copied
26994 - * data to the requested size using zero bytes.
26995 - */
26996 -unsigned long
26997 -_copy_from_user(void *to, const void __user *from, unsigned long n)
26998 -{
26999 - if (access_ok(VERIFY_READ, from, n))
27000 - n = __copy_from_user(to, from, n);
27001 - else
27002 - memset(to, 0, n);
27003 - return n;
27004 -}
27005 -EXPORT_SYMBOL(_copy_from_user);
27006 -
27007 void copy_from_user_overflow(void)
27008 {
27009 WARN(1, "Buffer overflow detected!\n");
27010 }
27011 EXPORT_SYMBOL(copy_from_user_overflow);
27012 +
27013 +void copy_to_user_overflow(void)
27014 +{
27015 + WARN(1, "Buffer overflow detected!\n");
27016 +}
27017 +EXPORT_SYMBOL(copy_to_user_overflow);
27018 +
27019 +#ifdef CONFIG_PAX_MEMORY_UDEREF
27020 +void __set_fs(mm_segment_t x)
27021 +{
27022 + switch (x.seg) {
27023 + case 0:
27024 + loadsegment(gs, 0);
27025 + break;
27026 + case TASK_SIZE_MAX:
27027 + loadsegment(gs, __USER_DS);
27028 + break;
27029 + case -1UL:
27030 + loadsegment(gs, __KERNEL_DS);
27031 + break;
27032 + default:
27033 + BUG();
27034 + }
27035 + return;
27036 +}
27037 +EXPORT_SYMBOL(__set_fs);
27038 +
27039 +void set_fs(mm_segment_t x)
27040 +{
27041 + current_thread_info()->addr_limit = x;
27042 + __set_fs(x);
27043 +}
27044 +EXPORT_SYMBOL(set_fs);
27045 +#endif
27046 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27047 index 906fea3..ee8a097 100644
27048 --- a/arch/x86/lib/usercopy_64.c
27049 +++ b/arch/x86/lib/usercopy_64.c
27050 @@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27051 _ASM_EXTABLE(0b,3b)
27052 _ASM_EXTABLE(1b,2b)
27053 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27054 - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27055 + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27056 [zero] "r" (0UL), [eight] "r" (8UL));
27057 clac();
27058 return size;
27059 @@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27060 }
27061 EXPORT_SYMBOL(clear_user);
27062
27063 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27064 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27065 {
27066 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27067 - return copy_user_generic((__force void *)to, (__force void *)from, len);
27068 - }
27069 - return len;
27070 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27071 + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27072 + return len;
27073 }
27074 EXPORT_SYMBOL(copy_in_user);
27075
27076 @@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27077 * it is not necessary to optimize tail handling.
27078 */
27079 unsigned long
27080 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27081 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27082 {
27083 char c;
27084 unsigned zero_len;
27085 @@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27086 clac();
27087 return len;
27088 }
27089 +
27090 +void copy_from_user_overflow(void)
27091 +{
27092 + WARN(1, "Buffer overflow detected!\n");
27093 +}
27094 +EXPORT_SYMBOL(copy_from_user_overflow);
27095 +
27096 +void copy_to_user_overflow(void)
27097 +{
27098 + WARN(1, "Buffer overflow detected!\n");
27099 +}
27100 +EXPORT_SYMBOL(copy_to_user_overflow);
27101 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27102 index 903ec1e..c4166b2 100644
27103 --- a/arch/x86/mm/extable.c
27104 +++ b/arch/x86/mm/extable.c
27105 @@ -6,12 +6,24 @@
27106 static inline unsigned long
27107 ex_insn_addr(const struct exception_table_entry *x)
27108 {
27109 - return (unsigned long)&x->insn + x->insn;
27110 + unsigned long reloc = 0;
27111 +
27112 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27113 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27114 +#endif
27115 +
27116 + return (unsigned long)&x->insn + x->insn + reloc;
27117 }
27118 static inline unsigned long
27119 ex_fixup_addr(const struct exception_table_entry *x)
27120 {
27121 - return (unsigned long)&x->fixup + x->fixup;
27122 + unsigned long reloc = 0;
27123 +
27124 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27125 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27126 +#endif
27127 +
27128 + return (unsigned long)&x->fixup + x->fixup + reloc;
27129 }
27130
27131 int fixup_exception(struct pt_regs *regs)
27132 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27133 unsigned long new_ip;
27134
27135 #ifdef CONFIG_PNPBIOS
27136 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27137 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27138 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27139 extern u32 pnp_bios_is_utter_crap;
27140 pnp_bios_is_utter_crap = 1;
27141 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27142 i += 4;
27143 p->fixup -= i;
27144 i += 4;
27145 +
27146 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27147 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27148 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27149 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27150 +#endif
27151 +
27152 }
27153 }
27154
27155 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27156 index 0e88336..2bb9777 100644
27157 --- a/arch/x86/mm/fault.c
27158 +++ b/arch/x86/mm/fault.c
27159 @@ -13,12 +13,19 @@
27160 #include <linux/perf_event.h> /* perf_sw_event */
27161 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27162 #include <linux/prefetch.h> /* prefetchw */
27163 +#include <linux/unistd.h>
27164 +#include <linux/compiler.h>
27165
27166 #include <asm/traps.h> /* dotraplinkage, ... */
27167 #include <asm/pgalloc.h> /* pgd_*(), ... */
27168 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27169 #include <asm/fixmap.h> /* VSYSCALL_START */
27170 #include <asm/context_tracking.h> /* exception_enter(), ... */
27171 +#include <asm/tlbflush.h>
27172 +
27173 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27174 +#include <asm/stacktrace.h>
27175 +#endif
27176
27177 /*
27178 * Page fault error code bits:
27179 @@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27180 int ret = 0;
27181
27182 /* kprobe_running() needs smp_processor_id() */
27183 - if (kprobes_built_in() && !user_mode_vm(regs)) {
27184 + if (kprobes_built_in() && !user_mode(regs)) {
27185 preempt_disable();
27186 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27187 ret = 1;
27188 @@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27189 return !instr_lo || (instr_lo>>1) == 1;
27190 case 0x00:
27191 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27192 - if (probe_kernel_address(instr, opcode))
27193 + if (user_mode(regs)) {
27194 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27195 + return 0;
27196 + } else if (probe_kernel_address(instr, opcode))
27197 return 0;
27198
27199 *prefetch = (instr_lo == 0xF) &&
27200 @@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27201 while (instr < max_instr) {
27202 unsigned char opcode;
27203
27204 - if (probe_kernel_address(instr, opcode))
27205 + if (user_mode(regs)) {
27206 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27207 + break;
27208 + } else if (probe_kernel_address(instr, opcode))
27209 break;
27210
27211 instr++;
27212 @@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27213 force_sig_info(si_signo, &info, tsk);
27214 }
27215
27216 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27217 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27218 +#endif
27219 +
27220 +#ifdef CONFIG_PAX_EMUTRAMP
27221 +static int pax_handle_fetch_fault(struct pt_regs *regs);
27222 +#endif
27223 +
27224 +#ifdef CONFIG_PAX_PAGEEXEC
27225 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27226 +{
27227 + pgd_t *pgd;
27228 + pud_t *pud;
27229 + pmd_t *pmd;
27230 +
27231 + pgd = pgd_offset(mm, address);
27232 + if (!pgd_present(*pgd))
27233 + return NULL;
27234 + pud = pud_offset(pgd, address);
27235 + if (!pud_present(*pud))
27236 + return NULL;
27237 + pmd = pmd_offset(pud, address);
27238 + if (!pmd_present(*pmd))
27239 + return NULL;
27240 + return pmd;
27241 +}
27242 +#endif
27243 +
27244 DEFINE_SPINLOCK(pgd_lock);
27245 LIST_HEAD(pgd_list);
27246
27247 @@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27248 for (address = VMALLOC_START & PMD_MASK;
27249 address >= TASK_SIZE && address < FIXADDR_TOP;
27250 address += PMD_SIZE) {
27251 +
27252 +#ifdef CONFIG_PAX_PER_CPU_PGD
27253 + unsigned long cpu;
27254 +#else
27255 struct page *page;
27256 +#endif
27257
27258 spin_lock(&pgd_lock);
27259 +
27260 +#ifdef CONFIG_PAX_PER_CPU_PGD
27261 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27262 + pgd_t *pgd = get_cpu_pgd(cpu);
27263 + pmd_t *ret;
27264 +#else
27265 list_for_each_entry(page, &pgd_list, lru) {
27266 + pgd_t *pgd;
27267 spinlock_t *pgt_lock;
27268 pmd_t *ret;
27269
27270 @@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27271 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27272
27273 spin_lock(pgt_lock);
27274 - ret = vmalloc_sync_one(page_address(page), address);
27275 + pgd = page_address(page);
27276 +#endif
27277 +
27278 + ret = vmalloc_sync_one(pgd, address);
27279 +
27280 +#ifndef CONFIG_PAX_PER_CPU_PGD
27281 spin_unlock(pgt_lock);
27282 +#endif
27283
27284 if (!ret)
27285 break;
27286 @@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27287 * an interrupt in the middle of a task switch..
27288 */
27289 pgd_paddr = read_cr3();
27290 +
27291 +#ifdef CONFIG_PAX_PER_CPU_PGD
27292 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27293 +#endif
27294 +
27295 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27296 if (!pmd_k)
27297 return -1;
27298 @@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27299 * happen within a race in page table update. In the later
27300 * case just flush:
27301 */
27302 +
27303 +#ifdef CONFIG_PAX_PER_CPU_PGD
27304 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27305 + pgd = pgd_offset_cpu(smp_processor_id(), address);
27306 +#else
27307 pgd = pgd_offset(current->active_mm, address);
27308 +#endif
27309 +
27310 pgd_ref = pgd_offset_k(address);
27311 if (pgd_none(*pgd_ref))
27312 return -1;
27313 @@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27314 static int is_errata100(struct pt_regs *regs, unsigned long address)
27315 {
27316 #ifdef CONFIG_X86_64
27317 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27318 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27319 return 1;
27320 #endif
27321 return 0;
27322 @@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27323 }
27324
27325 static const char nx_warning[] = KERN_CRIT
27326 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27327 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27328
27329 static void
27330 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27331 @@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27332 if (!oops_may_print())
27333 return;
27334
27335 - if (error_code & PF_INSTR) {
27336 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27337 unsigned int level;
27338
27339 pte_t *pte = lookup_address(address, &level);
27340
27341 if (pte && pte_present(*pte) && !pte_exec(*pte))
27342 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27343 + printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27344 }
27345
27346 +#ifdef CONFIG_PAX_KERNEXEC
27347 + if (init_mm.start_code <= address && address < init_mm.end_code) {
27348 + if (current->signal->curr_ip)
27349 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27350 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
27351 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27352 + else
27353 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27354 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27355 + }
27356 +#endif
27357 +
27358 printk(KERN_ALERT "BUG: unable to handle kernel ");
27359 if (address < PAGE_SIZE)
27360 printk(KERN_CONT "NULL pointer dereference");
27361 @@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27362 return;
27363 }
27364 #endif
27365 +
27366 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27367 + if (pax_is_fetch_fault(regs, error_code, address)) {
27368 +
27369 +#ifdef CONFIG_PAX_EMUTRAMP
27370 + switch (pax_handle_fetch_fault(regs)) {
27371 + case 2:
27372 + return;
27373 + }
27374 +#endif
27375 +
27376 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27377 + do_group_exit(SIGKILL);
27378 + }
27379 +#endif
27380 +
27381 /* Kernel addresses are always protection faults: */
27382 if (address >= TASK_SIZE)
27383 error_code |= PF_PROT;
27384 @@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27385 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27386 printk(KERN_ERR
27387 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27388 - tsk->comm, tsk->pid, address);
27389 + tsk->comm, task_pid_nr(tsk), address);
27390 code = BUS_MCEERR_AR;
27391 }
27392 #endif
27393 @@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27394 return 1;
27395 }
27396
27397 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27398 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27399 +{
27400 + pte_t *pte;
27401 + pmd_t *pmd;
27402 + spinlock_t *ptl;
27403 + unsigned char pte_mask;
27404 +
27405 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27406 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
27407 + return 0;
27408 +
27409 + /* PaX: it's our fault, let's handle it if we can */
27410 +
27411 + /* PaX: take a look at read faults before acquiring any locks */
27412 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27413 + /* instruction fetch attempt from a protected page in user mode */
27414 + up_read(&mm->mmap_sem);
27415 +
27416 +#ifdef CONFIG_PAX_EMUTRAMP
27417 + switch (pax_handle_fetch_fault(regs)) {
27418 + case 2:
27419 + return 1;
27420 + }
27421 +#endif
27422 +
27423 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27424 + do_group_exit(SIGKILL);
27425 + }
27426 +
27427 + pmd = pax_get_pmd(mm, address);
27428 + if (unlikely(!pmd))
27429 + return 0;
27430 +
27431 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27432 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27433 + pte_unmap_unlock(pte, ptl);
27434 + return 0;
27435 + }
27436 +
27437 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27438 + /* write attempt to a protected page in user mode */
27439 + pte_unmap_unlock(pte, ptl);
27440 + return 0;
27441 + }
27442 +
27443 +#ifdef CONFIG_SMP
27444 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27445 +#else
27446 + if (likely(address > get_limit(regs->cs)))
27447 +#endif
27448 + {
27449 + set_pte(pte, pte_mkread(*pte));
27450 + __flush_tlb_one(address);
27451 + pte_unmap_unlock(pte, ptl);
27452 + up_read(&mm->mmap_sem);
27453 + return 1;
27454 + }
27455 +
27456 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27457 +
27458 + /*
27459 + * PaX: fill DTLB with user rights and retry
27460 + */
27461 + __asm__ __volatile__ (
27462 + "orb %2,(%1)\n"
27463 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27464 +/*
27465 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
27466 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
27467 + * page fault when examined during a TLB load attempt. this is true not only
27468 + * for PTEs holding a non-present entry but also present entries that will
27469 + * raise a page fault (such as those set up by PaX, or the copy-on-write
27470 + * mechanism). in effect it means that we do *not* need to flush the TLBs
27471 + * for our target pages since their PTEs are simply not in the TLBs at all.
27472 +
27473 + * the best thing in omitting it is that we gain around 15-20% speed in the
27474 + * fast path of the page fault handler and can get rid of tracing since we
27475 + * can no longer flush unintended entries.
27476 + */
27477 + "invlpg (%0)\n"
27478 +#endif
27479 + __copyuser_seg"testb $0,(%0)\n"
27480 + "xorb %3,(%1)\n"
27481 + :
27482 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27483 + : "memory", "cc");
27484 + pte_unmap_unlock(pte, ptl);
27485 + up_read(&mm->mmap_sem);
27486 + return 1;
27487 +}
27488 +#endif
27489 +
27490 /*
27491 * Handle a spurious fault caused by a stale TLB entry.
27492 *
27493 @@ -964,6 +1156,9 @@ int show_unhandled_signals = 1;
27494 static inline int
27495 access_error(unsigned long error_code, struct vm_area_struct *vma)
27496 {
27497 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27498 + return 1;
27499 +
27500 if (error_code & PF_WRITE) {
27501 /* write, present and write, not present: */
27502 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27503 @@ -992,7 +1187,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27504 if (error_code & PF_USER)
27505 return false;
27506
27507 - if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27508 + if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27509 return false;
27510
27511 return true;
27512 @@ -1008,18 +1203,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27513 {
27514 struct vm_area_struct *vma;
27515 struct task_struct *tsk;
27516 - unsigned long address;
27517 struct mm_struct *mm;
27518 int fault;
27519 int write = error_code & PF_WRITE;
27520 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27521 (write ? FAULT_FLAG_WRITE : 0);
27522
27523 - tsk = current;
27524 - mm = tsk->mm;
27525 -
27526 /* Get the faulting address: */
27527 - address = read_cr2();
27528 + unsigned long address = read_cr2();
27529 +
27530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27531 + if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
27532 + if (!search_exception_tables(regs->ip)) {
27533 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27534 + bad_area_nosemaphore(regs, error_code, address);
27535 + return;
27536 + }
27537 + if (address < pax_user_shadow_base) {
27538 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27539 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27540 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27541 + } else
27542 + address -= pax_user_shadow_base;
27543 + }
27544 +#endif
27545 +
27546 + tsk = current;
27547 + mm = tsk->mm;
27548
27549 /*
27550 * Detect and handle instructions that would cause a page fault for
27551 @@ -1080,7 +1290,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27552 * User-mode registers count as a user access even for any
27553 * potential system fault or CPU buglet:
27554 */
27555 - if (user_mode_vm(regs)) {
27556 + if (user_mode(regs)) {
27557 local_irq_enable();
27558 error_code |= PF_USER;
27559 } else {
27560 @@ -1142,6 +1352,11 @@ retry:
27561 might_sleep();
27562 }
27563
27564 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27565 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27566 + return;
27567 +#endif
27568 +
27569 vma = find_vma(mm, address);
27570 if (unlikely(!vma)) {
27571 bad_area(regs, error_code, address);
27572 @@ -1153,18 +1368,24 @@ retry:
27573 bad_area(regs, error_code, address);
27574 return;
27575 }
27576 - if (error_code & PF_USER) {
27577 - /*
27578 - * Accessing the stack below %sp is always a bug.
27579 - * The large cushion allows instructions like enter
27580 - * and pusha to work. ("enter $65535, $31" pushes
27581 - * 32 pointers and then decrements %sp by 65535.)
27582 - */
27583 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27584 - bad_area(regs, error_code, address);
27585 - return;
27586 - }
27587 + /*
27588 + * Accessing the stack below %sp is always a bug.
27589 + * The large cushion allows instructions like enter
27590 + * and pusha to work. ("enter $65535, $31" pushes
27591 + * 32 pointers and then decrements %sp by 65535.)
27592 + */
27593 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27594 + bad_area(regs, error_code, address);
27595 + return;
27596 }
27597 +
27598 +#ifdef CONFIG_PAX_SEGMEXEC
27599 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27600 + bad_area(regs, error_code, address);
27601 + return;
27602 + }
27603 +#endif
27604 +
27605 if (unlikely(expand_stack(vma, address))) {
27606 bad_area(regs, error_code, address);
27607 return;
27608 @@ -1228,3 +1449,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27609 __do_page_fault(regs, error_code);
27610 exception_exit(regs);
27611 }
27612 +
27613 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27614 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27615 +{
27616 + struct mm_struct *mm = current->mm;
27617 + unsigned long ip = regs->ip;
27618 +
27619 + if (v8086_mode(regs))
27620 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27621 +
27622 +#ifdef CONFIG_PAX_PAGEEXEC
27623 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27624 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27625 + return true;
27626 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27627 + return true;
27628 + return false;
27629 + }
27630 +#endif
27631 +
27632 +#ifdef CONFIG_PAX_SEGMEXEC
27633 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27634 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27635 + return true;
27636 + return false;
27637 + }
27638 +#endif
27639 +
27640 + return false;
27641 +}
27642 +#endif
27643 +
27644 +#ifdef CONFIG_PAX_EMUTRAMP
27645 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27646 +{
27647 + int err;
27648 +
27649 + do { /* PaX: libffi trampoline emulation */
27650 + unsigned char mov, jmp;
27651 + unsigned int addr1, addr2;
27652 +
27653 +#ifdef CONFIG_X86_64
27654 + if ((regs->ip + 9) >> 32)
27655 + break;
27656 +#endif
27657 +
27658 + err = get_user(mov, (unsigned char __user *)regs->ip);
27659 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27660 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27661 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27662 +
27663 + if (err)
27664 + break;
27665 +
27666 + if (mov == 0xB8 && jmp == 0xE9) {
27667 + regs->ax = addr1;
27668 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27669 + return 2;
27670 + }
27671 + } while (0);
27672 +
27673 + do { /* PaX: gcc trampoline emulation #1 */
27674 + unsigned char mov1, mov2;
27675 + unsigned short jmp;
27676 + unsigned int addr1, addr2;
27677 +
27678 +#ifdef CONFIG_X86_64
27679 + if ((regs->ip + 11) >> 32)
27680 + break;
27681 +#endif
27682 +
27683 + err = get_user(mov1, (unsigned char __user *)regs->ip);
27684 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27685 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27686 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27687 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27688 +
27689 + if (err)
27690 + break;
27691 +
27692 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27693 + regs->cx = addr1;
27694 + regs->ax = addr2;
27695 + regs->ip = addr2;
27696 + return 2;
27697 + }
27698 + } while (0);
27699 +
27700 + do { /* PaX: gcc trampoline emulation #2 */
27701 + unsigned char mov, jmp;
27702 + unsigned int addr1, addr2;
27703 +
27704 +#ifdef CONFIG_X86_64
27705 + if ((regs->ip + 9) >> 32)
27706 + break;
27707 +#endif
27708 +
27709 + err = get_user(mov, (unsigned char __user *)regs->ip);
27710 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27711 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27712 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27713 +
27714 + if (err)
27715 + break;
27716 +
27717 + if (mov == 0xB9 && jmp == 0xE9) {
27718 + regs->cx = addr1;
27719 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27720 + return 2;
27721 + }
27722 + } while (0);
27723 +
27724 + return 1; /* PaX in action */
27725 +}
27726 +
27727 +#ifdef CONFIG_X86_64
27728 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27729 +{
27730 + int err;
27731 +
27732 + do { /* PaX: libffi trampoline emulation */
27733 + unsigned short mov1, mov2, jmp1;
27734 + unsigned char stcclc, jmp2;
27735 + unsigned long addr1, addr2;
27736 +
27737 + err = get_user(mov1, (unsigned short __user *)regs->ip);
27738 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27739 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27740 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27741 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27742 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27743 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27744 +
27745 + if (err)
27746 + break;
27747 +
27748 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27749 + regs->r11 = addr1;
27750 + regs->r10 = addr2;
27751 + if (stcclc == 0xF8)
27752 + regs->flags &= ~X86_EFLAGS_CF;
27753 + else
27754 + regs->flags |= X86_EFLAGS_CF;
27755 + regs->ip = addr1;
27756 + return 2;
27757 + }
27758 + } while (0);
27759 +
27760 + do { /* PaX: gcc trampoline emulation #1 */
27761 + unsigned short mov1, mov2, jmp1;
27762 + unsigned char jmp2;
27763 + unsigned int addr1;
27764 + unsigned long addr2;
27765 +
27766 + err = get_user(mov1, (unsigned short __user *)regs->ip);
27767 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27768 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27769 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27770 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27771 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27772 +
27773 + if (err)
27774 + break;
27775 +
27776 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27777 + regs->r11 = addr1;
27778 + regs->r10 = addr2;
27779 + regs->ip = addr1;
27780 + return 2;
27781 + }
27782 + } while (0);
27783 +
27784 + do { /* PaX: gcc trampoline emulation #2 */
27785 + unsigned short mov1, mov2, jmp1;
27786 + unsigned char jmp2;
27787 + unsigned long addr1, addr2;
27788 +
27789 + err = get_user(mov1, (unsigned short __user *)regs->ip);
27790 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27791 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27792 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27793 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27794 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27795 +
27796 + if (err)
27797 + break;
27798 +
27799 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27800 + regs->r11 = addr1;
27801 + regs->r10 = addr2;
27802 + regs->ip = addr1;
27803 + return 2;
27804 + }
27805 + } while (0);
27806 +
27807 + return 1; /* PaX in action */
27808 +}
27809 +#endif
27810 +
27811 +/*
27812 + * PaX: decide what to do with offenders (regs->ip = fault address)
27813 + *
27814 + * returns 1 when task should be killed
27815 + * 2 when gcc trampoline was detected
27816 + */
27817 +static int pax_handle_fetch_fault(struct pt_regs *regs)
27818 +{
27819 + if (v8086_mode(regs))
27820 + return 1;
27821 +
27822 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27823 + return 1;
27824 +
27825 +#ifdef CONFIG_X86_32
27826 + return pax_handle_fetch_fault_32(regs);
27827 +#else
27828 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27829 + return pax_handle_fetch_fault_32(regs);
27830 + else
27831 + return pax_handle_fetch_fault_64(regs);
27832 +#endif
27833 +}
27834 +#endif
27835 +
27836 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27837 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27838 +{
27839 + long i;
27840 +
27841 + printk(KERN_ERR "PAX: bytes at PC: ");
27842 + for (i = 0; i < 20; i++) {
27843 + unsigned char c;
27844 + if (get_user(c, (unsigned char __force_user *)pc+i))
27845 + printk(KERN_CONT "?? ");
27846 + else
27847 + printk(KERN_CONT "%02x ", c);
27848 + }
27849 + printk("\n");
27850 +
27851 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27852 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
27853 + unsigned long c;
27854 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
27855 +#ifdef CONFIG_X86_32
27856 + printk(KERN_CONT "???????? ");
27857 +#else
27858 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27859 + printk(KERN_CONT "???????? ???????? ");
27860 + else
27861 + printk(KERN_CONT "???????????????? ");
27862 +#endif
27863 + } else {
27864 +#ifdef CONFIG_X86_64
27865 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27866 + printk(KERN_CONT "%08x ", (unsigned int)c);
27867 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27868 + } else
27869 +#endif
27870 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27871 + }
27872 + }
27873 + printk("\n");
27874 +}
27875 +#endif
27876 +
27877 +/**
27878 + * probe_kernel_write(): safely attempt to write to a location
27879 + * @dst: address to write to
27880 + * @src: pointer to the data that shall be written
27881 + * @size: size of the data chunk
27882 + *
27883 + * Safely write to address @dst from the buffer at @src. If a kernel fault
27884 + * happens, handle that and return -EFAULT.
27885 + */
27886 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27887 +{
27888 + long ret;
27889 + mm_segment_t old_fs = get_fs();
27890 +
27891 + set_fs(KERNEL_DS);
27892 + pagefault_disable();
27893 + pax_open_kernel();
27894 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27895 + pax_close_kernel();
27896 + pagefault_enable();
27897 + set_fs(old_fs);
27898 +
27899 + return ret ? -EFAULT : 0;
27900 +}
27901 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27902 index dd74e46..7d26398 100644
27903 --- a/arch/x86/mm/gup.c
27904 +++ b/arch/x86/mm/gup.c
27905 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27906 addr = start;
27907 len = (unsigned long) nr_pages << PAGE_SHIFT;
27908 end = start + len;
27909 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27910 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27911 (void __user *)start, len)))
27912 return 0;
27913
27914 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27915 index 6f31ee5..8ee4164 100644
27916 --- a/arch/x86/mm/highmem_32.c
27917 +++ b/arch/x86/mm/highmem_32.c
27918 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27919 idx = type + KM_TYPE_NR*smp_processor_id();
27920 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27921 BUG_ON(!pte_none(*(kmap_pte-idx)));
27922 +
27923 + pax_open_kernel();
27924 set_pte(kmap_pte-idx, mk_pte(page, prot));
27925 + pax_close_kernel();
27926 +
27927 arch_flush_lazy_mmu_mode();
27928
27929 return (void *)vaddr;
27930 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27931 index ae1aa71..d9bea75 100644
27932 --- a/arch/x86/mm/hugetlbpage.c
27933 +++ b/arch/x86/mm/hugetlbpage.c
27934 @@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
27935 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
27936 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27937 unsigned long addr, unsigned long len,
27938 - unsigned long pgoff, unsigned long flags)
27939 + unsigned long pgoff, unsigned long flags, unsigned long offset)
27940 {
27941 struct hstate *h = hstate_file(file);
27942 struct vm_unmapped_area_info info;
27943 -
27944 +
27945 info.flags = 0;
27946 info.length = len;
27947 info.low_limit = TASK_UNMAPPED_BASE;
27948 +
27949 +#ifdef CONFIG_PAX_RANDMMAP
27950 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27951 + info.low_limit += current->mm->delta_mmap;
27952 +#endif
27953 +
27954 info.high_limit = TASK_SIZE;
27955 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27956 info.align_offset = 0;
27957 + info.threadstack_offset = offset;
27958 return vm_unmapped_area(&info);
27959 }
27960
27961 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27962 unsigned long addr0, unsigned long len,
27963 - unsigned long pgoff, unsigned long flags)
27964 + unsigned long pgoff, unsigned long flags, unsigned long offset)
27965 {
27966 struct hstate *h = hstate_file(file);
27967 struct vm_unmapped_area_info info;
27968 @@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27969 info.high_limit = current->mm->mmap_base;
27970 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27971 info.align_offset = 0;
27972 + info.threadstack_offset = offset;
27973 addr = vm_unmapped_area(&info);
27974
27975 /*
27976 @@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27977 VM_BUG_ON(addr != -ENOMEM);
27978 info.flags = 0;
27979 info.low_limit = TASK_UNMAPPED_BASE;
27980 +
27981 +#ifdef CONFIG_PAX_RANDMMAP
27982 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27983 + info.low_limit += current->mm->delta_mmap;
27984 +#endif
27985 +
27986 info.high_limit = TASK_SIZE;
27987 addr = vm_unmapped_area(&info);
27988 }
27989 @@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27990 struct hstate *h = hstate_file(file);
27991 struct mm_struct *mm = current->mm;
27992 struct vm_area_struct *vma;
27993 + unsigned long pax_task_size = TASK_SIZE;
27994 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
27995
27996 if (len & ~huge_page_mask(h))
27997 return -EINVAL;
27998 - if (len > TASK_SIZE)
27999 +
28000 +#ifdef CONFIG_PAX_SEGMEXEC
28001 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
28002 + pax_task_size = SEGMEXEC_TASK_SIZE;
28003 +#endif
28004 +
28005 + pax_task_size -= PAGE_SIZE;
28006 +
28007 + if (len > pax_task_size)
28008 return -ENOMEM;
28009
28010 if (flags & MAP_FIXED) {
28011 @@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28012 return addr;
28013 }
28014
28015 +#ifdef CONFIG_PAX_RANDMMAP
28016 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28017 +#endif
28018 +
28019 if (addr) {
28020 addr = ALIGN(addr, huge_page_size(h));
28021 vma = find_vma(mm, addr);
28022 - if (TASK_SIZE - len >= addr &&
28023 - (!vma || addr + len <= vma->vm_start))
28024 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28025 return addr;
28026 }
28027 if (mm->get_unmapped_area == arch_get_unmapped_area)
28028 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
28029 - pgoff, flags);
28030 + pgoff, flags, offset);
28031 else
28032 return hugetlb_get_unmapped_area_topdown(file, addr, len,
28033 - pgoff, flags);
28034 + pgoff, flags, offset);
28035 }
28036
28037 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
28038 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28039 index 59b7fc4..b1dd75f 100644
28040 --- a/arch/x86/mm/init.c
28041 +++ b/arch/x86/mm/init.c
28042 @@ -4,6 +4,7 @@
28043 #include <linux/swap.h>
28044 #include <linux/memblock.h>
28045 #include <linux/bootmem.h> /* for max_low_pfn */
28046 +#include <linux/tboot.h>
28047
28048 #include <asm/cacheflush.h>
28049 #include <asm/e820.h>
28050 @@ -17,6 +18,8 @@
28051 #include <asm/proto.h>
28052 #include <asm/dma.h> /* for MAX_DMA_PFN */
28053 #include <asm/microcode.h>
28054 +#include <asm/desc.h>
28055 +#include <asm/bios_ebda.h>
28056
28057 #include "mm_internal.h"
28058
28059 @@ -464,10 +467,40 @@ void __init init_mem_mapping(void)
28060 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28061 * mmio resources as well as potential bios/acpi data regions.
28062 */
28063 +
28064 +#ifdef CONFIG_GRKERNSEC_KMEM
28065 +static unsigned int ebda_start __read_only;
28066 +static unsigned int ebda_end __read_only;
28067 +#endif
28068 +
28069 int devmem_is_allowed(unsigned long pagenr)
28070 {
28071 - if (pagenr < 256)
28072 +#ifdef CONFIG_GRKERNSEC_KMEM
28073 + /* allow BDA */
28074 + if (!pagenr)
28075 return 1;
28076 + /* allow EBDA */
28077 + if (pagenr >= ebda_start && pagenr < ebda_end)
28078 + return 1;
28079 + /* if tboot is in use, allow access to its hardcoded serial log range */
28080 + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28081 + return 1;
28082 +#else
28083 + if (!pagenr)
28084 + return 1;
28085 +#ifdef CONFIG_VM86
28086 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28087 + return 1;
28088 +#endif
28089 +#endif
28090 +
28091 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28092 + return 1;
28093 +#ifdef CONFIG_GRKERNSEC_KMEM
28094 + /* throw out everything else below 1MB */
28095 + if (pagenr <= 256)
28096 + return 0;
28097 +#endif
28098 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28099 return 0;
28100 if (!page_is_ram(pagenr))
28101 @@ -524,8 +557,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28102 #endif
28103 }
28104
28105 +#ifdef CONFIG_GRKERNSEC_KMEM
28106 +static inline void gr_init_ebda(void)
28107 +{
28108 + unsigned int ebda_addr;
28109 + unsigned int ebda_size = 0;
28110 +
28111 + ebda_addr = get_bios_ebda();
28112 + if (ebda_addr) {
28113 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28114 + ebda_size <<= 10;
28115 + }
28116 + if (ebda_addr && ebda_size) {
28117 + ebda_start = ebda_addr >> PAGE_SHIFT;
28118 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28119 + } else {
28120 + ebda_start = 0x9f000 >> PAGE_SHIFT;
28121 + ebda_end = 0xa0000 >> PAGE_SHIFT;
28122 + }
28123 +}
28124 +#else
28125 +static inline void gr_init_ebda(void) { }
28126 +#endif
28127 +
28128 void free_initmem(void)
28129 {
28130 +#ifdef CONFIG_PAX_KERNEXEC
28131 +#ifdef CONFIG_X86_32
28132 + /* PaX: limit KERNEL_CS to actual size */
28133 + unsigned long addr, limit;
28134 + struct desc_struct d;
28135 + int cpu;
28136 +#else
28137 + pgd_t *pgd;
28138 + pud_t *pud;
28139 + pmd_t *pmd;
28140 + unsigned long addr, end;
28141 +#endif
28142 +#endif
28143 +
28144 + gr_init_ebda();
28145 +
28146 +#ifdef CONFIG_PAX_KERNEXEC
28147 +#ifdef CONFIG_X86_32
28148 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28149 + limit = (limit - 1UL) >> PAGE_SHIFT;
28150 +
28151 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28152 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28153 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28154 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28155 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28156 + }
28157 +
28158 + /* PaX: make KERNEL_CS read-only */
28159 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28160 + if (!paravirt_enabled())
28161 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28162 +/*
28163 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28164 + pgd = pgd_offset_k(addr);
28165 + pud = pud_offset(pgd, addr);
28166 + pmd = pmd_offset(pud, addr);
28167 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28168 + }
28169 +*/
28170 +#ifdef CONFIG_X86_PAE
28171 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28172 +/*
28173 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28174 + pgd = pgd_offset_k(addr);
28175 + pud = pud_offset(pgd, addr);
28176 + pmd = pmd_offset(pud, addr);
28177 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28178 + }
28179 +*/
28180 +#endif
28181 +
28182 +#ifdef CONFIG_MODULES
28183 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28184 +#endif
28185 +
28186 +#else
28187 + /* PaX: make kernel code/rodata read-only, rest non-executable */
28188 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28189 + pgd = pgd_offset_k(addr);
28190 + pud = pud_offset(pgd, addr);
28191 + pmd = pmd_offset(pud, addr);
28192 + if (!pmd_present(*pmd))
28193 + continue;
28194 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28195 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28196 + else
28197 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28198 + }
28199 +
28200 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28201 + end = addr + KERNEL_IMAGE_SIZE;
28202 + for (; addr < end; addr += PMD_SIZE) {
28203 + pgd = pgd_offset_k(addr);
28204 + pud = pud_offset(pgd, addr);
28205 + pmd = pmd_offset(pud, addr);
28206 + if (!pmd_present(*pmd))
28207 + continue;
28208 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28209 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28210 + }
28211 +#endif
28212 +
28213 + flush_tlb_all();
28214 +#endif
28215 +
28216 free_init_pages("unused kernel memory",
28217 (unsigned long)(&__init_begin),
28218 (unsigned long)(&__init_end));
28219 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28220 index 2d19001..6a1046c 100644
28221 --- a/arch/x86/mm/init_32.c
28222 +++ b/arch/x86/mm/init_32.c
28223 @@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
28224 bool __read_mostly __vmalloc_start_set = false;
28225
28226 /*
28227 - * Creates a middle page table and puts a pointer to it in the
28228 - * given global directory entry. This only returns the gd entry
28229 - * in non-PAE compilation mode, since the middle layer is folded.
28230 - */
28231 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
28232 -{
28233 - pud_t *pud;
28234 - pmd_t *pmd_table;
28235 -
28236 -#ifdef CONFIG_X86_PAE
28237 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28238 - pmd_table = (pmd_t *)alloc_low_page();
28239 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28240 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28241 - pud = pud_offset(pgd, 0);
28242 - BUG_ON(pmd_table != pmd_offset(pud, 0));
28243 -
28244 - return pmd_table;
28245 - }
28246 -#endif
28247 - pud = pud_offset(pgd, 0);
28248 - pmd_table = pmd_offset(pud, 0);
28249 -
28250 - return pmd_table;
28251 -}
28252 -
28253 -/*
28254 * Create a page table and place a pointer to it in a middle page
28255 * directory entry:
28256 */
28257 @@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28258 pte_t *page_table = (pte_t *)alloc_low_page();
28259
28260 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28261 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28262 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28263 +#else
28264 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28265 +#endif
28266 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28267 }
28268
28269 return pte_offset_kernel(pmd, 0);
28270 }
28271
28272 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
28273 +{
28274 + pud_t *pud;
28275 + pmd_t *pmd_table;
28276 +
28277 + pud = pud_offset(pgd, 0);
28278 + pmd_table = pmd_offset(pud, 0);
28279 +
28280 + return pmd_table;
28281 +}
28282 +
28283 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28284 {
28285 int pgd_idx = pgd_index(vaddr);
28286 @@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28287 int pgd_idx, pmd_idx;
28288 unsigned long vaddr;
28289 pgd_t *pgd;
28290 + pud_t *pud;
28291 pmd_t *pmd;
28292 pte_t *pte = NULL;
28293 unsigned long count = page_table_range_init_count(start, end);
28294 @@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28295 pgd = pgd_base + pgd_idx;
28296
28297 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28298 - pmd = one_md_table_init(pgd);
28299 - pmd = pmd + pmd_index(vaddr);
28300 + pud = pud_offset(pgd, vaddr);
28301 + pmd = pmd_offset(pud, vaddr);
28302 +
28303 +#ifdef CONFIG_X86_PAE
28304 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28305 +#endif
28306 +
28307 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28308 pmd++, pmd_idx++) {
28309 pte = page_table_kmap_check(one_page_table_init(pmd),
28310 @@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28311 }
28312 }
28313
28314 -static inline int is_kernel_text(unsigned long addr)
28315 +static inline int is_kernel_text(unsigned long start, unsigned long end)
28316 {
28317 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28318 - return 1;
28319 - return 0;
28320 + if ((start > ktla_ktva((unsigned long)_etext) ||
28321 + end <= ktla_ktva((unsigned long)_stext)) &&
28322 + (start > ktla_ktva((unsigned long)_einittext) ||
28323 + end <= ktla_ktva((unsigned long)_sinittext)) &&
28324 +
28325 +#ifdef CONFIG_ACPI_SLEEP
28326 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28327 +#endif
28328 +
28329 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28330 + return 0;
28331 + return 1;
28332 }
28333
28334 /*
28335 @@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
28336 unsigned long last_map_addr = end;
28337 unsigned long start_pfn, end_pfn;
28338 pgd_t *pgd_base = swapper_pg_dir;
28339 - int pgd_idx, pmd_idx, pte_ofs;
28340 + unsigned int pgd_idx, pmd_idx, pte_ofs;
28341 unsigned long pfn;
28342 pgd_t *pgd;
28343 + pud_t *pud;
28344 pmd_t *pmd;
28345 pte_t *pte;
28346 unsigned pages_2m, pages_4k;
28347 @@ -291,8 +295,13 @@ repeat:
28348 pfn = start_pfn;
28349 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28350 pgd = pgd_base + pgd_idx;
28351 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28352 - pmd = one_md_table_init(pgd);
28353 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28354 + pud = pud_offset(pgd, 0);
28355 + pmd = pmd_offset(pud, 0);
28356 +
28357 +#ifdef CONFIG_X86_PAE
28358 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28359 +#endif
28360
28361 if (pfn >= end_pfn)
28362 continue;
28363 @@ -304,14 +313,13 @@ repeat:
28364 #endif
28365 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28366 pmd++, pmd_idx++) {
28367 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28368 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28369
28370 /*
28371 * Map with big pages if possible, otherwise
28372 * create normal page tables:
28373 */
28374 if (use_pse) {
28375 - unsigned int addr2;
28376 pgprot_t prot = PAGE_KERNEL_LARGE;
28377 /*
28378 * first pass will use the same initial
28379 @@ -322,11 +330,7 @@ repeat:
28380 _PAGE_PSE);
28381
28382 pfn &= PMD_MASK >> PAGE_SHIFT;
28383 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28384 - PAGE_OFFSET + PAGE_SIZE-1;
28385 -
28386 - if (is_kernel_text(addr) ||
28387 - is_kernel_text(addr2))
28388 + if (is_kernel_text(address, address + PMD_SIZE))
28389 prot = PAGE_KERNEL_LARGE_EXEC;
28390
28391 pages_2m++;
28392 @@ -343,7 +347,7 @@ repeat:
28393 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28394 pte += pte_ofs;
28395 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28396 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28397 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28398 pgprot_t prot = PAGE_KERNEL;
28399 /*
28400 * first pass will use the same initial
28401 @@ -351,7 +355,7 @@ repeat:
28402 */
28403 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28404
28405 - if (is_kernel_text(addr))
28406 + if (is_kernel_text(address, address + PAGE_SIZE))
28407 prot = PAGE_KERNEL_EXEC;
28408
28409 pages_4k++;
28410 @@ -482,7 +486,7 @@ void __init native_pagetable_init(void)
28411
28412 pud = pud_offset(pgd, va);
28413 pmd = pmd_offset(pud, va);
28414 - if (!pmd_present(*pmd))
28415 + if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
28416 break;
28417
28418 /* should not be large page here */
28419 @@ -540,12 +544,10 @@ void __init early_ioremap_page_table_range_init(void)
28420
28421 static void __init pagetable_init(void)
28422 {
28423 - pgd_t *pgd_base = swapper_pg_dir;
28424 -
28425 - permanent_kmaps_init(pgd_base);
28426 + permanent_kmaps_init(swapper_pg_dir);
28427 }
28428
28429 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28430 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28431 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28432
28433 /* user-defined highmem size */
28434 @@ -752,6 +754,12 @@ void __init mem_init(void)
28435
28436 pci_iommu_alloc();
28437
28438 +#ifdef CONFIG_PAX_PER_CPU_PGD
28439 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28440 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28441 + KERNEL_PGD_PTRS);
28442 +#endif
28443 +
28444 #ifdef CONFIG_FLATMEM
28445 BUG_ON(!mem_map);
28446 #endif
28447 @@ -780,7 +788,7 @@ void __init mem_init(void)
28448 after_bootmem = 1;
28449
28450 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28451 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28452 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28453 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28454
28455 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28456 @@ -821,10 +829,10 @@ void __init mem_init(void)
28457 ((unsigned long)&__init_end -
28458 (unsigned long)&__init_begin) >> 10,
28459
28460 - (unsigned long)&_etext, (unsigned long)&_edata,
28461 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28462 + (unsigned long)&_sdata, (unsigned long)&_edata,
28463 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28464
28465 - (unsigned long)&_text, (unsigned long)&_etext,
28466 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28467 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28468
28469 /*
28470 @@ -914,6 +922,7 @@ void set_kernel_text_rw(void)
28471 if (!kernel_set_to_readonly)
28472 return;
28473
28474 + start = ktla_ktva(start);
28475 pr_debug("Set kernel text: %lx - %lx for read write\n",
28476 start, start+size);
28477
28478 @@ -928,6 +937,7 @@ void set_kernel_text_ro(void)
28479 if (!kernel_set_to_readonly)
28480 return;
28481
28482 + start = ktla_ktva(start);
28483 pr_debug("Set kernel text: %lx - %lx for read only\n",
28484 start, start+size);
28485
28486 @@ -956,6 +966,7 @@ void mark_rodata_ro(void)
28487 unsigned long start = PFN_ALIGN(_text);
28488 unsigned long size = PFN_ALIGN(_etext) - start;
28489
28490 + start = ktla_ktva(start);
28491 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28492 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28493 size >> 10);
28494 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28495 index 474e28f..647dd12 100644
28496 --- a/arch/x86/mm/init_64.c
28497 +++ b/arch/x86/mm/init_64.c
28498 @@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28499 * around without checking the pgd every time.
28500 */
28501
28502 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28503 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28504 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28505
28506 int force_personality32;
28507 @@ -183,12 +183,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28508
28509 for (address = start; address <= end; address += PGDIR_SIZE) {
28510 const pgd_t *pgd_ref = pgd_offset_k(address);
28511 +
28512 +#ifdef CONFIG_PAX_PER_CPU_PGD
28513 + unsigned long cpu;
28514 +#else
28515 struct page *page;
28516 +#endif
28517
28518 if (pgd_none(*pgd_ref))
28519 continue;
28520
28521 spin_lock(&pgd_lock);
28522 +
28523 +#ifdef CONFIG_PAX_PER_CPU_PGD
28524 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28525 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
28526 +#else
28527 list_for_each_entry(page, &pgd_list, lru) {
28528 pgd_t *pgd;
28529 spinlock_t *pgt_lock;
28530 @@ -197,6 +207,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28531 /* the pgt_lock only for Xen */
28532 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28533 spin_lock(pgt_lock);
28534 +#endif
28535
28536 if (pgd_none(*pgd))
28537 set_pgd(pgd, *pgd_ref);
28538 @@ -204,7 +215,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28539 BUG_ON(pgd_page_vaddr(*pgd)
28540 != pgd_page_vaddr(*pgd_ref));
28541
28542 +#ifndef CONFIG_PAX_PER_CPU_PGD
28543 spin_unlock(pgt_lock);
28544 +#endif
28545 +
28546 }
28547 spin_unlock(&pgd_lock);
28548 }
28549 @@ -237,7 +251,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28550 {
28551 if (pgd_none(*pgd)) {
28552 pud_t *pud = (pud_t *)spp_getpage();
28553 - pgd_populate(&init_mm, pgd, pud);
28554 + pgd_populate_kernel(&init_mm, pgd, pud);
28555 if (pud != pud_offset(pgd, 0))
28556 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28557 pud, pud_offset(pgd, 0));
28558 @@ -249,7 +263,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28559 {
28560 if (pud_none(*pud)) {
28561 pmd_t *pmd = (pmd_t *) spp_getpage();
28562 - pud_populate(&init_mm, pud, pmd);
28563 + pud_populate_kernel(&init_mm, pud, pmd);
28564 if (pmd != pmd_offset(pud, 0))
28565 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28566 pmd, pmd_offset(pud, 0));
28567 @@ -278,7 +292,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28568 pmd = fill_pmd(pud, vaddr);
28569 pte = fill_pte(pmd, vaddr);
28570
28571 + pax_open_kernel();
28572 set_pte(pte, new_pte);
28573 + pax_close_kernel();
28574
28575 /*
28576 * It's enough to flush this one mapping.
28577 @@ -337,14 +353,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28578 pgd = pgd_offset_k((unsigned long)__va(phys));
28579 if (pgd_none(*pgd)) {
28580 pud = (pud_t *) spp_getpage();
28581 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28582 - _PAGE_USER));
28583 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28584 }
28585 pud = pud_offset(pgd, (unsigned long)__va(phys));
28586 if (pud_none(*pud)) {
28587 pmd = (pmd_t *) spp_getpage();
28588 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28589 - _PAGE_USER));
28590 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28591 }
28592 pmd = pmd_offset(pud, phys);
28593 BUG_ON(!pmd_none(*pmd));
28594 @@ -585,7 +599,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28595 prot);
28596
28597 spin_lock(&init_mm.page_table_lock);
28598 - pud_populate(&init_mm, pud, pmd);
28599 + pud_populate_kernel(&init_mm, pud, pmd);
28600 spin_unlock(&init_mm.page_table_lock);
28601 }
28602 __flush_tlb_all();
28603 @@ -626,7 +640,7 @@ kernel_physical_mapping_init(unsigned long start,
28604 page_size_mask);
28605
28606 spin_lock(&init_mm.page_table_lock);
28607 - pgd_populate(&init_mm, pgd, pud);
28608 + pgd_populate_kernel(&init_mm, pgd, pud);
28609 spin_unlock(&init_mm.page_table_lock);
28610 pgd_changed = true;
28611 }
28612 @@ -1065,6 +1079,12 @@ void __init mem_init(void)
28613
28614 pci_iommu_alloc();
28615
28616 +#ifdef CONFIG_PAX_PER_CPU_PGD
28617 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28618 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28619 + KERNEL_PGD_PTRS);
28620 +#endif
28621 +
28622 /* clear_bss() already clear the empty_zero_page */
28623
28624 reservedpages = 0;
28625 @@ -1224,8 +1244,8 @@ int kern_addr_valid(unsigned long addr)
28626 static struct vm_area_struct gate_vma = {
28627 .vm_start = VSYSCALL_START,
28628 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28629 - .vm_page_prot = PAGE_READONLY_EXEC,
28630 - .vm_flags = VM_READ | VM_EXEC
28631 + .vm_page_prot = PAGE_READONLY,
28632 + .vm_flags = VM_READ
28633 };
28634
28635 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28636 @@ -1259,7 +1279,7 @@ int in_gate_area_no_mm(unsigned long addr)
28637
28638 const char *arch_vma_name(struct vm_area_struct *vma)
28639 {
28640 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28641 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28642 return "[vdso]";
28643 if (vma == &gate_vma)
28644 return "[vsyscall]";
28645 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28646 index 7b179b4..6bd17777 100644
28647 --- a/arch/x86/mm/iomap_32.c
28648 +++ b/arch/x86/mm/iomap_32.c
28649 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28650 type = kmap_atomic_idx_push();
28651 idx = type + KM_TYPE_NR * smp_processor_id();
28652 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28653 +
28654 + pax_open_kernel();
28655 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28656 + pax_close_kernel();
28657 +
28658 arch_flush_lazy_mmu_mode();
28659
28660 return (void *)vaddr;
28661 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28662 index 78fe3f1..73b95e2 100644
28663 --- a/arch/x86/mm/ioremap.c
28664 +++ b/arch/x86/mm/ioremap.c
28665 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28666 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28667 int is_ram = page_is_ram(pfn);
28668
28669 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28670 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28671 return NULL;
28672 WARN_ON_ONCE(is_ram);
28673 }
28674 @@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28675 *
28676 * Caller must ensure there is only one unmapping for the same pointer.
28677 */
28678 -void iounmap(volatile void __iomem *addr)
28679 +void iounmap(const volatile void __iomem *addr)
28680 {
28681 struct vm_struct *p, *o;
28682
28683 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28684
28685 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28686 if (page_is_ram(start >> PAGE_SHIFT))
28687 +#ifdef CONFIG_HIGHMEM
28688 + if ((start >> PAGE_SHIFT) < max_low_pfn)
28689 +#endif
28690 return __va(phys);
28691
28692 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28693 @@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28694 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28695 {
28696 if (page_is_ram(phys >> PAGE_SHIFT))
28697 +#ifdef CONFIG_HIGHMEM
28698 + if ((phys >> PAGE_SHIFT) < max_low_pfn)
28699 +#endif
28700 return;
28701
28702 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
28703 @@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
28704 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28705
28706 static __initdata int after_paging_init;
28707 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28708 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28709
28710 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28711 {
28712 @@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
28713 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28714
28715 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28716 - memset(bm_pte, 0, sizeof(bm_pte));
28717 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
28718 + pmd_populate_user(&init_mm, pmd, bm_pte);
28719
28720 /*
28721 * The boot-ioremap range spans multiple pmds, for which
28722 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28723 index d87dd6d..bf3fa66 100644
28724 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
28725 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28726 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28727 * memory (e.g. tracked pages)? For now, we need this to avoid
28728 * invoking kmemcheck for PnP BIOS calls.
28729 */
28730 - if (regs->flags & X86_VM_MASK)
28731 + if (v8086_mode(regs))
28732 return false;
28733 - if (regs->cs != __KERNEL_CS)
28734 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28735 return false;
28736
28737 pte = kmemcheck_pte_lookup(address);
28738 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28739 index 845df68..1d8d29f 100644
28740 --- a/arch/x86/mm/mmap.c
28741 +++ b/arch/x86/mm/mmap.c
28742 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28743 * Leave an at least ~128 MB hole with possible stack randomization.
28744 */
28745 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28746 -#define MAX_GAP (TASK_SIZE/6*5)
28747 +#define MAX_GAP (pax_task_size/6*5)
28748
28749 static int mmap_is_legacy(void)
28750 {
28751 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28752 return rnd << PAGE_SHIFT;
28753 }
28754
28755 -static unsigned long mmap_base(void)
28756 +static unsigned long mmap_base(struct mm_struct *mm)
28757 {
28758 unsigned long gap = rlimit(RLIMIT_STACK);
28759 + unsigned long pax_task_size = TASK_SIZE;
28760 +
28761 +#ifdef CONFIG_PAX_SEGMEXEC
28762 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
28763 + pax_task_size = SEGMEXEC_TASK_SIZE;
28764 +#endif
28765
28766 if (gap < MIN_GAP)
28767 gap = MIN_GAP;
28768 else if (gap > MAX_GAP)
28769 gap = MAX_GAP;
28770
28771 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28772 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28773 }
28774
28775 /*
28776 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28777 * does, but not when emulating X86_32
28778 */
28779 -static unsigned long mmap_legacy_base(void)
28780 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
28781 {
28782 - if (mmap_is_ia32())
28783 + if (mmap_is_ia32()) {
28784 +
28785 +#ifdef CONFIG_PAX_SEGMEXEC
28786 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
28787 + return SEGMEXEC_TASK_UNMAPPED_BASE;
28788 + else
28789 +#endif
28790 +
28791 return TASK_UNMAPPED_BASE;
28792 - else
28793 + } else
28794 return TASK_UNMAPPED_BASE + mmap_rnd();
28795 }
28796
28797 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28798 void arch_pick_mmap_layout(struct mm_struct *mm)
28799 {
28800 if (mmap_is_legacy()) {
28801 - mm->mmap_base = mmap_legacy_base();
28802 + mm->mmap_base = mmap_legacy_base(mm);
28803 +
28804 +#ifdef CONFIG_PAX_RANDMMAP
28805 + if (mm->pax_flags & MF_PAX_RANDMMAP)
28806 + mm->mmap_base += mm->delta_mmap;
28807 +#endif
28808 +
28809 mm->get_unmapped_area = arch_get_unmapped_area;
28810 mm->unmap_area = arch_unmap_area;
28811 } else {
28812 - mm->mmap_base = mmap_base();
28813 + mm->mmap_base = mmap_base(mm);
28814 +
28815 +#ifdef CONFIG_PAX_RANDMMAP
28816 + if (mm->pax_flags & MF_PAX_RANDMMAP)
28817 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28818 +#endif
28819 +
28820 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28821 mm->unmap_area = arch_unmap_area_topdown;
28822 }
28823 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28824 index dc0b727..f612039 100644
28825 --- a/arch/x86/mm/mmio-mod.c
28826 +++ b/arch/x86/mm/mmio-mod.c
28827 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28828 break;
28829 default:
28830 {
28831 - unsigned char *ip = (unsigned char *)instptr;
28832 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28833 my_trace->opcode = MMIO_UNKNOWN_OP;
28834 my_trace->width = 0;
28835 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28836 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28837 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28838 void __iomem *addr)
28839 {
28840 - static atomic_t next_id;
28841 + static atomic_unchecked_t next_id;
28842 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28843 /* These are page-unaligned. */
28844 struct mmiotrace_map map = {
28845 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28846 .private = trace
28847 },
28848 .phys = offset,
28849 - .id = atomic_inc_return(&next_id)
28850 + .id = atomic_inc_return_unchecked(&next_id)
28851 };
28852 map.map_id = trace->id;
28853
28854 @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28855 ioremap_trace_core(offset, size, addr);
28856 }
28857
28858 -static void iounmap_trace_core(volatile void __iomem *addr)
28859 +static void iounmap_trace_core(const volatile void __iomem *addr)
28860 {
28861 struct mmiotrace_map map = {
28862 .phys = 0,
28863 @@ -328,7 +328,7 @@ not_enabled:
28864 }
28865 }
28866
28867 -void mmiotrace_iounmap(volatile void __iomem *addr)
28868 +void mmiotrace_iounmap(const volatile void __iomem *addr)
28869 {
28870 might_sleep();
28871 if (is_enabled()) /* recheck and proper locking in *_core() */
28872 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
28873 index 72fe01e..f1a8daa 100644
28874 --- a/arch/x86/mm/numa.c
28875 +++ b/arch/x86/mm/numa.c
28876 @@ -477,7 +477,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
28877 return true;
28878 }
28879
28880 -static int __init numa_register_memblks(struct numa_meminfo *mi)
28881 +static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
28882 {
28883 unsigned long uninitialized_var(pfn_align);
28884 int i, nid;
28885 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28886 index 0e38951..4ca8458 100644
28887 --- a/arch/x86/mm/pageattr-test.c
28888 +++ b/arch/x86/mm/pageattr-test.c
28889 @@ -36,7 +36,7 @@ enum {
28890
28891 static int pte_testbit(pte_t pte)
28892 {
28893 - return pte_flags(pte) & _PAGE_UNUSED1;
28894 + return pte_flags(pte) & _PAGE_CPA_TEST;
28895 }
28896
28897 struct split_state {
28898 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28899 index fb4e73e..43f7238 100644
28900 --- a/arch/x86/mm/pageattr.c
28901 +++ b/arch/x86/mm/pageattr.c
28902 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28903 */
28904 #ifdef CONFIG_PCI_BIOS
28905 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28906 - pgprot_val(forbidden) |= _PAGE_NX;
28907 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28908 #endif
28909
28910 /*
28911 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28912 * Does not cover __inittext since that is gone later on. On
28913 * 64bit we do not enforce !NX on the low mapping
28914 */
28915 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
28916 - pgprot_val(forbidden) |= _PAGE_NX;
28917 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28918 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28919
28920 +#ifdef CONFIG_DEBUG_RODATA
28921 /*
28922 * The .rodata section needs to be read-only. Using the pfn
28923 * catches all aliases.
28924 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28925 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
28926 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
28927 pgprot_val(forbidden) |= _PAGE_RW;
28928 +#endif
28929
28930 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28931 /*
28932 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28933 }
28934 #endif
28935
28936 +#ifdef CONFIG_PAX_KERNEXEC
28937 + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28938 + pgprot_val(forbidden) |= _PAGE_RW;
28939 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28940 + }
28941 +#endif
28942 +
28943 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28944
28945 return prot;
28946 @@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
28947 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28948 {
28949 /* change init_mm */
28950 + pax_open_kernel();
28951 set_pte_atomic(kpte, pte);
28952 +
28953 #ifdef CONFIG_X86_32
28954 if (!SHARED_KERNEL_PMD) {
28955 +
28956 +#ifdef CONFIG_PAX_PER_CPU_PGD
28957 + unsigned long cpu;
28958 +#else
28959 struct page *page;
28960 +#endif
28961
28962 +#ifdef CONFIG_PAX_PER_CPU_PGD
28963 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28964 + pgd_t *pgd = get_cpu_pgd(cpu);
28965 +#else
28966 list_for_each_entry(page, &pgd_list, lru) {
28967 - pgd_t *pgd;
28968 + pgd_t *pgd = (pgd_t *)page_address(page);
28969 +#endif
28970 +
28971 pud_t *pud;
28972 pmd_t *pmd;
28973
28974 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
28975 + pgd += pgd_index(address);
28976 pud = pud_offset(pgd, address);
28977 pmd = pmd_offset(pud, address);
28978 set_pte_atomic((pte_t *)pmd, pte);
28979 }
28980 }
28981 #endif
28982 + pax_close_kernel();
28983 }
28984
28985 static int
28986 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
28987 index 6574388..87e9bef 100644
28988 --- a/arch/x86/mm/pat.c
28989 +++ b/arch/x86/mm/pat.c
28990 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
28991
28992 if (!entry) {
28993 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
28994 - current->comm, current->pid, start, end - 1);
28995 + current->comm, task_pid_nr(current), start, end - 1);
28996 return -EINVAL;
28997 }
28998
28999 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29000
29001 while (cursor < to) {
29002 if (!devmem_is_allowed(pfn)) {
29003 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29004 - current->comm, from, to - 1);
29005 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29006 + current->comm, from, to - 1, cursor);
29007 return 0;
29008 }
29009 cursor += PAGE_SIZE;
29010 @@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29011 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29012 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29013 "for [mem %#010Lx-%#010Lx]\n",
29014 - current->comm, current->pid,
29015 + current->comm, task_pid_nr(current),
29016 cattr_name(flags),
29017 base, (unsigned long long)(base + size-1));
29018 return -EINVAL;
29019 @@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29020 flags = lookup_memtype(paddr);
29021 if (want_flags != flags) {
29022 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29023 - current->comm, current->pid,
29024 + current->comm, task_pid_nr(current),
29025 cattr_name(want_flags),
29026 (unsigned long long)paddr,
29027 (unsigned long long)(paddr + size - 1),
29028 @@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29029 free_memtype(paddr, paddr + size);
29030 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29031 " for [mem %#010Lx-%#010Lx], got %s\n",
29032 - current->comm, current->pid,
29033 + current->comm, task_pid_nr(current),
29034 cattr_name(want_flags),
29035 (unsigned long long)paddr,
29036 (unsigned long long)(paddr + size - 1),
29037 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29038 index 9f0614d..92ae64a 100644
29039 --- a/arch/x86/mm/pf_in.c
29040 +++ b/arch/x86/mm/pf_in.c
29041 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29042 int i;
29043 enum reason_type rv = OTHERS;
29044
29045 - p = (unsigned char *)ins_addr;
29046 + p = (unsigned char *)ktla_ktva(ins_addr);
29047 p += skip_prefix(p, &prf);
29048 p += get_opcode(p, &opcode);
29049
29050 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29051 struct prefix_bits prf;
29052 int i;
29053
29054 - p = (unsigned char *)ins_addr;
29055 + p = (unsigned char *)ktla_ktva(ins_addr);
29056 p += skip_prefix(p, &prf);
29057 p += get_opcode(p, &opcode);
29058
29059 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29060 struct prefix_bits prf;
29061 int i;
29062
29063 - p = (unsigned char *)ins_addr;
29064 + p = (unsigned char *)ktla_ktva(ins_addr);
29065 p += skip_prefix(p, &prf);
29066 p += get_opcode(p, &opcode);
29067
29068 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29069 struct prefix_bits prf;
29070 int i;
29071
29072 - p = (unsigned char *)ins_addr;
29073 + p = (unsigned char *)ktla_ktva(ins_addr);
29074 p += skip_prefix(p, &prf);
29075 p += get_opcode(p, &opcode);
29076 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29077 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29078 struct prefix_bits prf;
29079 int i;
29080
29081 - p = (unsigned char *)ins_addr;
29082 + p = (unsigned char *)ktla_ktva(ins_addr);
29083 p += skip_prefix(p, &prf);
29084 p += get_opcode(p, &opcode);
29085 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
29086 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29087 index 17fda6a..489c74a 100644
29088 --- a/arch/x86/mm/pgtable.c
29089 +++ b/arch/x86/mm/pgtable.c
29090 @@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29091 list_del(&page->lru);
29092 }
29093
29094 -#define UNSHARED_PTRS_PER_PGD \
29095 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29096 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29097 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29098
29099 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29100 +{
29101 + unsigned int count = USER_PGD_PTRS;
29102
29103 + while (count--)
29104 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29105 +}
29106 +#endif
29107 +
29108 +#ifdef CONFIG_PAX_PER_CPU_PGD
29109 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29110 +{
29111 + unsigned int count = USER_PGD_PTRS;
29112 +
29113 + while (count--) {
29114 + pgd_t pgd;
29115 +
29116 +#ifdef CONFIG_X86_64
29117 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29118 +#else
29119 + pgd = *src++;
29120 +#endif
29121 +
29122 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29123 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29124 +#endif
29125 +
29126 + *dst++ = pgd;
29127 + }
29128 +
29129 +}
29130 +#endif
29131 +
29132 +#ifdef CONFIG_X86_64
29133 +#define pxd_t pud_t
29134 +#define pyd_t pgd_t
29135 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29136 +#define pxd_free(mm, pud) pud_free((mm), (pud))
29137 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29138 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
29139 +#define PYD_SIZE PGDIR_SIZE
29140 +#else
29141 +#define pxd_t pmd_t
29142 +#define pyd_t pud_t
29143 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29144 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
29145 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29146 +#define pyd_offset(mm, address) pud_offset((mm), (address))
29147 +#define PYD_SIZE PUD_SIZE
29148 +#endif
29149 +
29150 +#ifdef CONFIG_PAX_PER_CPU_PGD
29151 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29152 +static inline void pgd_dtor(pgd_t *pgd) {}
29153 +#else
29154 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29155 {
29156 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29157 @@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
29158 pgd_list_del(pgd);
29159 spin_unlock(&pgd_lock);
29160 }
29161 +#endif
29162
29163 /*
29164 * List of all pgd's needed for non-PAE so it can invalidate entries
29165 @@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
29166 * -- nyc
29167 */
29168
29169 -#ifdef CONFIG_X86_PAE
29170 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29171 /*
29172 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29173 * updating the top-level pagetable entries to guarantee the
29174 @@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
29175 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29176 * and initialize the kernel pmds here.
29177 */
29178 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29179 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29180
29181 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29182 {
29183 @@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29184 */
29185 flush_tlb_mm(mm);
29186 }
29187 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29188 +#define PREALLOCATED_PXDS USER_PGD_PTRS
29189 #else /* !CONFIG_X86_PAE */
29190
29191 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29192 -#define PREALLOCATED_PMDS 0
29193 +#define PREALLOCATED_PXDS 0
29194
29195 #endif /* CONFIG_X86_PAE */
29196
29197 -static void free_pmds(pmd_t *pmds[])
29198 +static void free_pxds(pxd_t *pxds[])
29199 {
29200 int i;
29201
29202 - for(i = 0; i < PREALLOCATED_PMDS; i++)
29203 - if (pmds[i])
29204 - free_page((unsigned long)pmds[i]);
29205 + for(i = 0; i < PREALLOCATED_PXDS; i++)
29206 + if (pxds[i])
29207 + free_page((unsigned long)pxds[i]);
29208 }
29209
29210 -static int preallocate_pmds(pmd_t *pmds[])
29211 +static int preallocate_pxds(pxd_t *pxds[])
29212 {
29213 int i;
29214 bool failed = false;
29215
29216 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
29217 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29218 - if (pmd == NULL)
29219 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
29220 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29221 + if (pxd == NULL)
29222 failed = true;
29223 - pmds[i] = pmd;
29224 + pxds[i] = pxd;
29225 }
29226
29227 if (failed) {
29228 - free_pmds(pmds);
29229 + free_pxds(pxds);
29230 return -ENOMEM;
29231 }
29232
29233 @@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29234 * preallocate which never got a corresponding vma will need to be
29235 * freed manually.
29236 */
29237 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29238 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29239 {
29240 int i;
29241
29242 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
29243 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
29244 pgd_t pgd = pgdp[i];
29245
29246 if (pgd_val(pgd) != 0) {
29247 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29248 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29249
29250 - pgdp[i] = native_make_pgd(0);
29251 + set_pgd(pgdp + i, native_make_pgd(0));
29252
29253 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29254 - pmd_free(mm, pmd);
29255 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29256 + pxd_free(mm, pxd);
29257 }
29258 }
29259 }
29260
29261 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29262 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29263 {
29264 - pud_t *pud;
29265 + pyd_t *pyd;
29266 unsigned long addr;
29267 int i;
29268
29269 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29270 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29271 return;
29272
29273 - pud = pud_offset(pgd, 0);
29274 +#ifdef CONFIG_X86_64
29275 + pyd = pyd_offset(mm, 0L);
29276 +#else
29277 + pyd = pyd_offset(pgd, 0L);
29278 +#endif
29279
29280 - for (addr = i = 0; i < PREALLOCATED_PMDS;
29281 - i++, pud++, addr += PUD_SIZE) {
29282 - pmd_t *pmd = pmds[i];
29283 + for (addr = i = 0; i < PREALLOCATED_PXDS;
29284 + i++, pyd++, addr += PYD_SIZE) {
29285 + pxd_t *pxd = pxds[i];
29286
29287 if (i >= KERNEL_PGD_BOUNDARY)
29288 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29289 - sizeof(pmd_t) * PTRS_PER_PMD);
29290 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29291 + sizeof(pxd_t) * PTRS_PER_PMD);
29292
29293 - pud_populate(mm, pud, pmd);
29294 + pyd_populate(mm, pyd, pxd);
29295 }
29296 }
29297
29298 pgd_t *pgd_alloc(struct mm_struct *mm)
29299 {
29300 pgd_t *pgd;
29301 - pmd_t *pmds[PREALLOCATED_PMDS];
29302 + pxd_t *pxds[PREALLOCATED_PXDS];
29303
29304 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29305
29306 @@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29307
29308 mm->pgd = pgd;
29309
29310 - if (preallocate_pmds(pmds) != 0)
29311 + if (preallocate_pxds(pxds) != 0)
29312 goto out_free_pgd;
29313
29314 if (paravirt_pgd_alloc(mm) != 0)
29315 - goto out_free_pmds;
29316 + goto out_free_pxds;
29317
29318 /*
29319 * Make sure that pre-populating the pmds is atomic with
29320 @@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29321 spin_lock(&pgd_lock);
29322
29323 pgd_ctor(mm, pgd);
29324 - pgd_prepopulate_pmd(mm, pgd, pmds);
29325 + pgd_prepopulate_pxd(mm, pgd, pxds);
29326
29327 spin_unlock(&pgd_lock);
29328
29329 return pgd;
29330
29331 -out_free_pmds:
29332 - free_pmds(pmds);
29333 +out_free_pxds:
29334 + free_pxds(pxds);
29335 out_free_pgd:
29336 free_page((unsigned long)pgd);
29337 out:
29338 @@ -302,7 +363,7 @@ out:
29339
29340 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29341 {
29342 - pgd_mop_up_pmds(mm, pgd);
29343 + pgd_mop_up_pxds(mm, pgd);
29344 pgd_dtor(pgd);
29345 paravirt_pgd_free(mm, pgd);
29346 free_page((unsigned long)pgd);
29347 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29348 index a69bcb8..19068ab 100644
29349 --- a/arch/x86/mm/pgtable_32.c
29350 +++ b/arch/x86/mm/pgtable_32.c
29351 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29352 return;
29353 }
29354 pte = pte_offset_kernel(pmd, vaddr);
29355 +
29356 + pax_open_kernel();
29357 if (pte_val(pteval))
29358 set_pte_at(&init_mm, vaddr, pte, pteval);
29359 else
29360 pte_clear(&init_mm, vaddr, pte);
29361 + pax_close_kernel();
29362
29363 /*
29364 * It's enough to flush this one mapping.
29365 diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29366 index e666cbb..61788c45 100644
29367 --- a/arch/x86/mm/physaddr.c
29368 +++ b/arch/x86/mm/physaddr.c
29369 @@ -10,7 +10,7 @@
29370 #ifdef CONFIG_X86_64
29371
29372 #ifdef CONFIG_DEBUG_VIRTUAL
29373 -unsigned long __phys_addr(unsigned long x)
29374 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29375 {
29376 unsigned long y = x - __START_KERNEL_map;
29377
29378 @@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29379 #else
29380
29381 #ifdef CONFIG_DEBUG_VIRTUAL
29382 -unsigned long __phys_addr(unsigned long x)
29383 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29384 {
29385 unsigned long phys_addr = x - PAGE_OFFSET;
29386 /* VMALLOC_* aren't constants */
29387 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29388 index 410531d..0f16030 100644
29389 --- a/arch/x86/mm/setup_nx.c
29390 +++ b/arch/x86/mm/setup_nx.c
29391 @@ -5,8 +5,10 @@
29392 #include <asm/pgtable.h>
29393 #include <asm/proto.h>
29394
29395 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29396 static int disable_nx __cpuinitdata;
29397
29398 +#ifndef CONFIG_PAX_PAGEEXEC
29399 /*
29400 * noexec = on|off
29401 *
29402 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29403 return 0;
29404 }
29405 early_param("noexec", noexec_setup);
29406 +#endif
29407 +
29408 +#endif
29409
29410 void __cpuinit x86_configure_nx(void)
29411 {
29412 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29413 if (cpu_has_nx && !disable_nx)
29414 __supported_pte_mask |= _PAGE_NX;
29415 else
29416 +#endif
29417 __supported_pte_mask &= ~_PAGE_NX;
29418 }
29419
29420 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29421 index 282375f..e03a98f 100644
29422 --- a/arch/x86/mm/tlb.c
29423 +++ b/arch/x86/mm/tlb.c
29424 @@ -48,7 +48,11 @@ void leave_mm(int cpu)
29425 BUG();
29426 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29427 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29428 +
29429 +#ifndef CONFIG_PAX_PER_CPU_PGD
29430 load_cr3(swapper_pg_dir);
29431 +#endif
29432 +
29433 }
29434 }
29435 EXPORT_SYMBOL_GPL(leave_mm);
29436 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29437 index 877b9a1..a8ecf42 100644
29438 --- a/arch/x86/net/bpf_jit.S
29439 +++ b/arch/x86/net/bpf_jit.S
29440 @@ -9,6 +9,7 @@
29441 */
29442 #include <linux/linkage.h>
29443 #include <asm/dwarf2.h>
29444 +#include <asm/alternative-asm.h>
29445
29446 /*
29447 * Calling convention :
29448 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29449 jle bpf_slow_path_word
29450 mov (SKBDATA,%rsi),%eax
29451 bswap %eax /* ntohl() */
29452 + pax_force_retaddr
29453 ret
29454
29455 sk_load_half:
29456 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29457 jle bpf_slow_path_half
29458 movzwl (SKBDATA,%rsi),%eax
29459 rol $8,%ax # ntohs()
29460 + pax_force_retaddr
29461 ret
29462
29463 sk_load_byte:
29464 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29465 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29466 jle bpf_slow_path_byte
29467 movzbl (SKBDATA,%rsi),%eax
29468 + pax_force_retaddr
29469 ret
29470
29471 /**
29472 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29473 movzbl (SKBDATA,%rsi),%ebx
29474 and $15,%bl
29475 shl $2,%bl
29476 + pax_force_retaddr
29477 ret
29478
29479 /* rsi contains offset and can be scratched */
29480 @@ -109,6 +114,7 @@ bpf_slow_path_word:
29481 js bpf_error
29482 mov -12(%rbp),%eax
29483 bswap %eax
29484 + pax_force_retaddr
29485 ret
29486
29487 bpf_slow_path_half:
29488 @@ -117,12 +123,14 @@ bpf_slow_path_half:
29489 mov -12(%rbp),%ax
29490 rol $8,%ax
29491 movzwl %ax,%eax
29492 + pax_force_retaddr
29493 ret
29494
29495 bpf_slow_path_byte:
29496 bpf_slow_path_common(1)
29497 js bpf_error
29498 movzbl -12(%rbp),%eax
29499 + pax_force_retaddr
29500 ret
29501
29502 bpf_slow_path_byte_msh:
29503 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29504 and $15,%al
29505 shl $2,%al
29506 xchg %eax,%ebx
29507 + pax_force_retaddr
29508 ret
29509
29510 #define sk_negative_common(SIZE) \
29511 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29512 sk_negative_common(4)
29513 mov (%rax), %eax
29514 bswap %eax
29515 + pax_force_retaddr
29516 ret
29517
29518 bpf_slow_path_half_neg:
29519 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29520 mov (%rax),%ax
29521 rol $8,%ax
29522 movzwl %ax,%eax
29523 + pax_force_retaddr
29524 ret
29525
29526 bpf_slow_path_byte_neg:
29527 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29528 .globl sk_load_byte_negative_offset
29529 sk_negative_common(1)
29530 movzbl (%rax), %eax
29531 + pax_force_retaddr
29532 ret
29533
29534 bpf_slow_path_byte_msh_neg:
29535 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29536 and $15,%al
29537 shl $2,%al
29538 xchg %eax,%ebx
29539 + pax_force_retaddr
29540 ret
29541
29542 bpf_error:
29543 @@ -197,4 +210,5 @@ bpf_error:
29544 xor %eax,%eax
29545 mov -8(%rbp),%rbx
29546 leaveq
29547 + pax_force_retaddr
29548 ret
29549 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29550 index 3cbe4538..fd756dc 100644
29551 --- a/arch/x86/net/bpf_jit_comp.c
29552 +++ b/arch/x86/net/bpf_jit_comp.c
29553 @@ -12,6 +12,7 @@
29554 #include <linux/netdevice.h>
29555 #include <linux/filter.h>
29556 #include <linux/if_vlan.h>
29557 +#include <linux/random.h>
29558
29559 /*
29560 * Conventions :
29561 @@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29562 return ptr + len;
29563 }
29564
29565 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29566 +#define MAX_INSTR_CODE_SIZE 96
29567 +#else
29568 +#define MAX_INSTR_CODE_SIZE 64
29569 +#endif
29570 +
29571 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29572
29573 #define EMIT1(b1) EMIT(b1, 1)
29574 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29575 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29576 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29577 +
29578 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29579 +/* original constant will appear in ecx */
29580 +#define DILUTE_CONST_SEQUENCE(_off, _key) \
29581 +do { \
29582 + /* mov ecx, randkey */ \
29583 + EMIT1(0xb9); \
29584 + EMIT(_key, 4); \
29585 + /* xor ecx, randkey ^ off */ \
29586 + EMIT2(0x81, 0xf1); \
29587 + EMIT((_key) ^ (_off), 4); \
29588 +} while (0)
29589 +
29590 +#define EMIT1_off32(b1, _off) \
29591 +do { \
29592 + switch (b1) { \
29593 + case 0x05: /* add eax, imm32 */ \
29594 + case 0x2d: /* sub eax, imm32 */ \
29595 + case 0x25: /* and eax, imm32 */ \
29596 + case 0x0d: /* or eax, imm32 */ \
29597 + case 0xb8: /* mov eax, imm32 */ \
29598 + case 0x3d: /* cmp eax, imm32 */ \
29599 + case 0xa9: /* test eax, imm32 */ \
29600 + DILUTE_CONST_SEQUENCE(_off, randkey); \
29601 + EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29602 + break; \
29603 + case 0xbb: /* mov ebx, imm32 */ \
29604 + DILUTE_CONST_SEQUENCE(_off, randkey); \
29605 + /* mov ebx, ecx */ \
29606 + EMIT2(0x89, 0xcb); \
29607 + break; \
29608 + case 0xbe: /* mov esi, imm32 */ \
29609 + DILUTE_CONST_SEQUENCE(_off, randkey); \
29610 + /* mov esi, ecx */ \
29611 + EMIT2(0x89, 0xce); \
29612 + break; \
29613 + case 0xe9: /* jmp rel imm32 */ \
29614 + EMIT1(b1); \
29615 + EMIT(_off, 4); \
29616 + /* prevent fall-through, we're not called if off = 0 */ \
29617 + EMIT(0xcccccccc, 4); \
29618 + EMIT(0xcccccccc, 4); \
29619 + break; \
29620 + default: \
29621 + EMIT1(b1); \
29622 + EMIT(_off, 4); \
29623 + } \
29624 +} while (0)
29625 +
29626 +#define EMIT2_off32(b1, b2, _off) \
29627 +do { \
29628 + if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29629 + EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29630 + EMIT(randkey, 4); \
29631 + EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29632 + EMIT((_off) - randkey, 4); \
29633 + } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29634 + DILUTE_CONST_SEQUENCE(_off, randkey); \
29635 + /* imul eax, ecx */ \
29636 + EMIT3(0x0f, 0xaf, 0xc1); \
29637 + } else { \
29638 + EMIT2(b1, b2); \
29639 + EMIT(_off, 4); \
29640 + } \
29641 +} while (0)
29642 +#else
29643 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29644 +#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29645 +#endif
29646
29647 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29648 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29649 @@ -90,6 +165,24 @@ do { \
29650 #define X86_JBE 0x76
29651 #define X86_JA 0x77
29652
29653 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29654 +#define APPEND_FLOW_VERIFY() \
29655 +do { \
29656 + /* mov ecx, randkey */ \
29657 + EMIT1(0xb9); \
29658 + EMIT(randkey, 4); \
29659 + /* cmp ecx, randkey */ \
29660 + EMIT2(0x81, 0xf9); \
29661 + EMIT(randkey, 4); \
29662 + /* jz after 8 int 3s */ \
29663 + EMIT2(0x74, 0x08); \
29664 + EMIT(0xcccccccc, 4); \
29665 + EMIT(0xcccccccc, 4); \
29666 +} while (0)
29667 +#else
29668 +#define APPEND_FLOW_VERIFY() do { } while (0)
29669 +#endif
29670 +
29671 #define EMIT_COND_JMP(op, offset) \
29672 do { \
29673 if (is_near(offset)) \
29674 @@ -97,6 +190,7 @@ do { \
29675 else { \
29676 EMIT2(0x0f, op + 0x10); \
29677 EMIT(offset, 4); /* jxx .+off32 */ \
29678 + APPEND_FLOW_VERIFY(); \
29679 } \
29680 } while (0)
29681
29682 @@ -121,6 +215,11 @@ static inline void bpf_flush_icache(void *start, void *end)
29683 set_fs(old_fs);
29684 }
29685
29686 +struct bpf_jit_work {
29687 + struct work_struct work;
29688 + void *image;
29689 +};
29690 +
29691 #define CHOOSE_LOAD_FUNC(K, func) \
29692 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29693
29694 @@ -146,7 +245,7 @@ static int pkt_type_offset(void)
29695
29696 void bpf_jit_compile(struct sk_filter *fp)
29697 {
29698 - u8 temp[64];
29699 + u8 temp[MAX_INSTR_CODE_SIZE];
29700 u8 *prog;
29701 unsigned int proglen, oldproglen = 0;
29702 int ilen, i;
29703 @@ -159,6 +258,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29704 unsigned int *addrs;
29705 const struct sock_filter *filter = fp->insns;
29706 int flen = fp->len;
29707 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29708 + unsigned int randkey;
29709 +#endif
29710
29711 if (!bpf_jit_enable)
29712 return;
29713 @@ -167,11 +269,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29714 if (addrs == NULL)
29715 return;
29716
29717 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29718 + if (!fp->work)
29719 + goto out;
29720 +
29721 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29722 + randkey = get_random_int();
29723 +#endif
29724 +
29725 /* Before first pass, make a rough estimation of addrs[]
29726 - * each bpf instruction is translated to less than 64 bytes
29727 + * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29728 */
29729 for (proglen = 0, i = 0; i < flen; i++) {
29730 - proglen += 64;
29731 + proglen += MAX_INSTR_CODE_SIZE;
29732 addrs[i] = proglen;
29733 }
29734 cleanup_addr = proglen; /* epilogue address */
29735 @@ -282,10 +392,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29736 case BPF_S_ALU_MUL_K: /* A *= K */
29737 if (is_imm8(K))
29738 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29739 - else {
29740 - EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29741 - EMIT(K, 4);
29742 - }
29743 + else
29744 + EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29745 break;
29746 case BPF_S_ALU_DIV_X: /* A /= X; */
29747 seen |= SEEN_XREG;
29748 @@ -325,13 +433,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29749 break;
29750 case BPF_S_ALU_MOD_K: /* A %= K; */
29751 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29752 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29753 + DILUTE_CONST_SEQUENCE(K, randkey);
29754 +#else
29755 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29756 +#endif
29757 EMIT2(0xf7, 0xf1); /* div %ecx */
29758 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29759 break;
29760 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29761 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29762 + DILUTE_CONST_SEQUENCE(K, randkey);
29763 + // imul rax, rcx
29764 + EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29765 +#else
29766 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29767 EMIT(K, 4);
29768 +#endif
29769 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29770 break;
29771 case BPF_S_ALU_AND_X:
29772 @@ -602,8 +720,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29773 if (is_imm8(K)) {
29774 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29775 } else {
29776 - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29777 - EMIT(K, 4);
29778 + EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29779 }
29780 } else {
29781 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29782 @@ -686,17 +803,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29783 break;
29784 default:
29785 /* hmm, too complex filter, give up with jit compiler */
29786 - goto out;
29787 + goto error;
29788 }
29789 ilen = prog - temp;
29790 if (image) {
29791 if (unlikely(proglen + ilen > oldproglen)) {
29792 pr_err("bpb_jit_compile fatal error\n");
29793 - kfree(addrs);
29794 - module_free(NULL, image);
29795 - return;
29796 + module_free_exec(NULL, image);
29797 + goto error;
29798 }
29799 + pax_open_kernel();
29800 memcpy(image + proglen, temp, ilen);
29801 + pax_close_kernel();
29802 }
29803 proglen += ilen;
29804 addrs[i] = proglen;
29805 @@ -717,11 +835,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29806 break;
29807 }
29808 if (proglen == oldproglen) {
29809 - image = module_alloc(max_t(unsigned int,
29810 - proglen,
29811 - sizeof(struct work_struct)));
29812 + image = module_alloc_exec(proglen);
29813 if (!image)
29814 - goto out;
29815 + goto error;
29816 }
29817 oldproglen = proglen;
29818 }
29819 @@ -737,7 +853,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29820 bpf_flush_icache(image, image + proglen);
29821
29822 fp->bpf_func = (void *)image;
29823 - }
29824 + } else
29825 +error:
29826 + kfree(fp->work);
29827 +
29828 out:
29829 kfree(addrs);
29830 return;
29831 @@ -745,18 +864,20 @@ out:
29832
29833 static void jit_free_defer(struct work_struct *arg)
29834 {
29835 - module_free(NULL, arg);
29836 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29837 + kfree(arg);
29838 }
29839
29840 /* run from softirq, we must use a work_struct to call
29841 - * module_free() from process context
29842 + * module_free_exec() from process context
29843 */
29844 void bpf_jit_free(struct sk_filter *fp)
29845 {
29846 if (fp->bpf_func != sk_run_filter) {
29847 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
29848 + struct work_struct *work = &fp->work->work;
29849
29850 INIT_WORK(work, jit_free_defer);
29851 + fp->work->image = fp->bpf_func;
29852 schedule_work(work);
29853 }
29854 }
29855 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
29856 index d6aa6e8..266395a 100644
29857 --- a/arch/x86/oprofile/backtrace.c
29858 +++ b/arch/x86/oprofile/backtrace.c
29859 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
29860 struct stack_frame_ia32 *fp;
29861 unsigned long bytes;
29862
29863 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29864 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29865 if (bytes != sizeof(bufhead))
29866 return NULL;
29867
29868 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
29869 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
29870
29871 oprofile_add_trace(bufhead[0].return_address);
29872
29873 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29874 struct stack_frame bufhead[2];
29875 unsigned long bytes;
29876
29877 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29878 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29879 if (bytes != sizeof(bufhead))
29880 return NULL;
29881
29882 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29883 {
29884 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29885
29886 - if (!user_mode_vm(regs)) {
29887 + if (!user_mode(regs)) {
29888 unsigned long stack = kernel_stack_pointer(regs);
29889 if (depth)
29890 dump_trace(NULL, regs, (unsigned long *)stack, 0,
29891 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
29892 index 48768df..ba9143c 100644
29893 --- a/arch/x86/oprofile/nmi_int.c
29894 +++ b/arch/x86/oprofile/nmi_int.c
29895 @@ -23,6 +23,7 @@
29896 #include <asm/nmi.h>
29897 #include <asm/msr.h>
29898 #include <asm/apic.h>
29899 +#include <asm/pgtable.h>
29900
29901 #include "op_counter.h"
29902 #include "op_x86_model.h"
29903 @@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
29904 if (ret)
29905 return ret;
29906
29907 - if (!model->num_virt_counters)
29908 - model->num_virt_counters = model->num_counters;
29909 + if (!model->num_virt_counters) {
29910 + pax_open_kernel();
29911 + *(unsigned int *)&model->num_virt_counters = model->num_counters;
29912 + pax_close_kernel();
29913 + }
29914
29915 mux_init(ops);
29916
29917 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
29918 index b2b9443..be58856 100644
29919 --- a/arch/x86/oprofile/op_model_amd.c
29920 +++ b/arch/x86/oprofile/op_model_amd.c
29921 @@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
29922 num_counters = AMD64_NUM_COUNTERS;
29923 }
29924
29925 - op_amd_spec.num_counters = num_counters;
29926 - op_amd_spec.num_controls = num_counters;
29927 - op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29928 + pax_open_kernel();
29929 + *(unsigned int *)&op_amd_spec.num_counters = num_counters;
29930 + *(unsigned int *)&op_amd_spec.num_controls = num_counters;
29931 + *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29932 + pax_close_kernel();
29933
29934 return 0;
29935 }
29936 diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
29937 index d90528e..0127e2b 100644
29938 --- a/arch/x86/oprofile/op_model_ppro.c
29939 +++ b/arch/x86/oprofile/op_model_ppro.c
29940 @@ -19,6 +19,7 @@
29941 #include <asm/msr.h>
29942 #include <asm/apic.h>
29943 #include <asm/nmi.h>
29944 +#include <asm/pgtable.h>
29945
29946 #include "op_x86_model.h"
29947 #include "op_counter.h"
29948 @@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
29949
29950 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
29951
29952 - op_arch_perfmon_spec.num_counters = num_counters;
29953 - op_arch_perfmon_spec.num_controls = num_counters;
29954 + pax_open_kernel();
29955 + *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
29956 + *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
29957 + pax_close_kernel();
29958 }
29959
29960 static int arch_perfmon_init(struct oprofile_operations *ignore)
29961 diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
29962 index 71e8a67..6a313bb 100644
29963 --- a/arch/x86/oprofile/op_x86_model.h
29964 +++ b/arch/x86/oprofile/op_x86_model.h
29965 @@ -52,7 +52,7 @@ struct op_x86_model_spec {
29966 void (*switch_ctrl)(struct op_x86_model_spec const *model,
29967 struct op_msrs const * const msrs);
29968 #endif
29969 -};
29970 +} __do_const;
29971
29972 struct op_counter_config;
29973
29974 diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
29975 index e9e6ed5..e47ae67 100644
29976 --- a/arch/x86/pci/amd_bus.c
29977 +++ b/arch/x86/pci/amd_bus.c
29978 @@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
29979 return NOTIFY_OK;
29980 }
29981
29982 -static struct notifier_block __cpuinitdata amd_cpu_notifier = {
29983 +static struct notifier_block amd_cpu_notifier = {
29984 .notifier_call = amd_cpu_notify,
29985 };
29986
29987 diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
29988 index 372e9b8..e775a6c 100644
29989 --- a/arch/x86/pci/irq.c
29990 +++ b/arch/x86/pci/irq.c
29991 @@ -50,7 +50,7 @@ struct irq_router {
29992 struct irq_router_handler {
29993 u16 vendor;
29994 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
29995 -};
29996 +} __do_const;
29997
29998 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
29999 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30000 @@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30001 return 0;
30002 }
30003
30004 -static __initdata struct irq_router_handler pirq_routers[] = {
30005 +static __initconst const struct irq_router_handler pirq_routers[] = {
30006 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30007 { PCI_VENDOR_ID_AL, ali_router_probe },
30008 { PCI_VENDOR_ID_ITE, ite_router_probe },
30009 @@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30010 static void __init pirq_find_router(struct irq_router *r)
30011 {
30012 struct irq_routing_table *rt = pirq_table;
30013 - struct irq_router_handler *h;
30014 + const struct irq_router_handler *h;
30015
30016 #ifdef CONFIG_PCI_BIOS
30017 if (!rt->signature) {
30018 @@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30019 return 0;
30020 }
30021
30022 -static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30023 +static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30024 {
30025 .callback = fix_broken_hp_bios_irq9,
30026 .ident = "HP Pavilion N5400 Series Laptop",
30027 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30028 index 6eb18c4..20d83de 100644
30029 --- a/arch/x86/pci/mrst.c
30030 +++ b/arch/x86/pci/mrst.c
30031 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30032 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30033 pci_mmcfg_late_init();
30034 pcibios_enable_irq = mrst_pci_irq_enable;
30035 - pci_root_ops = pci_mrst_ops;
30036 + pax_open_kernel();
30037 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30038 + pax_close_kernel();
30039 pci_soc_mode = 1;
30040 /* Continue with standard init */
30041 return 1;
30042 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30043 index c77b24a..c979855 100644
30044 --- a/arch/x86/pci/pcbios.c
30045 +++ b/arch/x86/pci/pcbios.c
30046 @@ -79,7 +79,7 @@ union bios32 {
30047 static struct {
30048 unsigned long address;
30049 unsigned short segment;
30050 -} bios32_indirect = { 0, __KERNEL_CS };
30051 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30052
30053 /*
30054 * Returns the entry point for the given service, NULL on error
30055 @@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30056 unsigned long length; /* %ecx */
30057 unsigned long entry; /* %edx */
30058 unsigned long flags;
30059 + struct desc_struct d, *gdt;
30060
30061 local_irq_save(flags);
30062 - __asm__("lcall *(%%edi); cld"
30063 +
30064 + gdt = get_cpu_gdt_table(smp_processor_id());
30065 +
30066 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30067 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30068 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30069 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30070 +
30071 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30072 : "=a" (return_code),
30073 "=b" (address),
30074 "=c" (length),
30075 "=d" (entry)
30076 : "0" (service),
30077 "1" (0),
30078 - "D" (&bios32_indirect));
30079 + "D" (&bios32_indirect),
30080 + "r"(__PCIBIOS_DS)
30081 + : "memory");
30082 +
30083 + pax_open_kernel();
30084 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30085 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30086 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30087 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30088 + pax_close_kernel();
30089 +
30090 local_irq_restore(flags);
30091
30092 switch (return_code) {
30093 - case 0:
30094 - return address + entry;
30095 - case 0x80: /* Not present */
30096 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30097 - return 0;
30098 - default: /* Shouldn't happen */
30099 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30100 - service, return_code);
30101 + case 0: {
30102 + int cpu;
30103 + unsigned char flags;
30104 +
30105 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30106 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30107 + printk(KERN_WARNING "bios32_service: not valid\n");
30108 return 0;
30109 + }
30110 + address = address + PAGE_OFFSET;
30111 + length += 16UL; /* some BIOSs underreport this... */
30112 + flags = 4;
30113 + if (length >= 64*1024*1024) {
30114 + length >>= PAGE_SHIFT;
30115 + flags |= 8;
30116 + }
30117 +
30118 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30119 + gdt = get_cpu_gdt_table(cpu);
30120 + pack_descriptor(&d, address, length, 0x9b, flags);
30121 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30122 + pack_descriptor(&d, address, length, 0x93, flags);
30123 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30124 + }
30125 + return entry;
30126 + }
30127 + case 0x80: /* Not present */
30128 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30129 + return 0;
30130 + default: /* Shouldn't happen */
30131 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30132 + service, return_code);
30133 + return 0;
30134 }
30135 }
30136
30137 static struct {
30138 unsigned long address;
30139 unsigned short segment;
30140 -} pci_indirect = { 0, __KERNEL_CS };
30141 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30142
30143 -static int pci_bios_present;
30144 +static int pci_bios_present __read_only;
30145
30146 static int check_pcibios(void)
30147 {
30148 @@ -131,11 +174,13 @@ static int check_pcibios(void)
30149 unsigned long flags, pcibios_entry;
30150
30151 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30152 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30153 + pci_indirect.address = pcibios_entry;
30154
30155 local_irq_save(flags);
30156 - __asm__(
30157 - "lcall *(%%edi); cld\n\t"
30158 + __asm__("movw %w6, %%ds\n\t"
30159 + "lcall *%%ss:(%%edi); cld\n\t"
30160 + "push %%ss\n\t"
30161 + "pop %%ds\n\t"
30162 "jc 1f\n\t"
30163 "xor %%ah, %%ah\n"
30164 "1:"
30165 @@ -144,7 +189,8 @@ static int check_pcibios(void)
30166 "=b" (ebx),
30167 "=c" (ecx)
30168 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30169 - "D" (&pci_indirect)
30170 + "D" (&pci_indirect),
30171 + "r" (__PCIBIOS_DS)
30172 : "memory");
30173 local_irq_restore(flags);
30174
30175 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30176
30177 switch (len) {
30178 case 1:
30179 - __asm__("lcall *(%%esi); cld\n\t"
30180 + __asm__("movw %w6, %%ds\n\t"
30181 + "lcall *%%ss:(%%esi); cld\n\t"
30182 + "push %%ss\n\t"
30183 + "pop %%ds\n\t"
30184 "jc 1f\n\t"
30185 "xor %%ah, %%ah\n"
30186 "1:"
30187 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30188 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30189 "b" (bx),
30190 "D" ((long)reg),
30191 - "S" (&pci_indirect));
30192 + "S" (&pci_indirect),
30193 + "r" (__PCIBIOS_DS));
30194 /*
30195 * Zero-extend the result beyond 8 bits, do not trust the
30196 * BIOS having done it:
30197 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30198 *value &= 0xff;
30199 break;
30200 case 2:
30201 - __asm__("lcall *(%%esi); cld\n\t"
30202 + __asm__("movw %w6, %%ds\n\t"
30203 + "lcall *%%ss:(%%esi); cld\n\t"
30204 + "push %%ss\n\t"
30205 + "pop %%ds\n\t"
30206 "jc 1f\n\t"
30207 "xor %%ah, %%ah\n"
30208 "1:"
30209 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30210 : "1" (PCIBIOS_READ_CONFIG_WORD),
30211 "b" (bx),
30212 "D" ((long)reg),
30213 - "S" (&pci_indirect));
30214 + "S" (&pci_indirect),
30215 + "r" (__PCIBIOS_DS));
30216 /*
30217 * Zero-extend the result beyond 16 bits, do not trust the
30218 * BIOS having done it:
30219 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30220 *value &= 0xffff;
30221 break;
30222 case 4:
30223 - __asm__("lcall *(%%esi); cld\n\t"
30224 + __asm__("movw %w6, %%ds\n\t"
30225 + "lcall *%%ss:(%%esi); cld\n\t"
30226 + "push %%ss\n\t"
30227 + "pop %%ds\n\t"
30228 "jc 1f\n\t"
30229 "xor %%ah, %%ah\n"
30230 "1:"
30231 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30232 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30233 "b" (bx),
30234 "D" ((long)reg),
30235 - "S" (&pci_indirect));
30236 + "S" (&pci_indirect),
30237 + "r" (__PCIBIOS_DS));
30238 break;
30239 }
30240
30241 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30242
30243 switch (len) {
30244 case 1:
30245 - __asm__("lcall *(%%esi); cld\n\t"
30246 + __asm__("movw %w6, %%ds\n\t"
30247 + "lcall *%%ss:(%%esi); cld\n\t"
30248 + "push %%ss\n\t"
30249 + "pop %%ds\n\t"
30250 "jc 1f\n\t"
30251 "xor %%ah, %%ah\n"
30252 "1:"
30253 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30254 "c" (value),
30255 "b" (bx),
30256 "D" ((long)reg),
30257 - "S" (&pci_indirect));
30258 + "S" (&pci_indirect),
30259 + "r" (__PCIBIOS_DS));
30260 break;
30261 case 2:
30262 - __asm__("lcall *(%%esi); cld\n\t"
30263 + __asm__("movw %w6, %%ds\n\t"
30264 + "lcall *%%ss:(%%esi); cld\n\t"
30265 + "push %%ss\n\t"
30266 + "pop %%ds\n\t"
30267 "jc 1f\n\t"
30268 "xor %%ah, %%ah\n"
30269 "1:"
30270 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30271 "c" (value),
30272 "b" (bx),
30273 "D" ((long)reg),
30274 - "S" (&pci_indirect));
30275 + "S" (&pci_indirect),
30276 + "r" (__PCIBIOS_DS));
30277 break;
30278 case 4:
30279 - __asm__("lcall *(%%esi); cld\n\t"
30280 + __asm__("movw %w6, %%ds\n\t"
30281 + "lcall *%%ss:(%%esi); cld\n\t"
30282 + "push %%ss\n\t"
30283 + "pop %%ds\n\t"
30284 "jc 1f\n\t"
30285 "xor %%ah, %%ah\n"
30286 "1:"
30287 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30288 "c" (value),
30289 "b" (bx),
30290 "D" ((long)reg),
30291 - "S" (&pci_indirect));
30292 + "S" (&pci_indirect),
30293 + "r" (__PCIBIOS_DS));
30294 break;
30295 }
30296
30297 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30298
30299 DBG("PCI: Fetching IRQ routing table... ");
30300 __asm__("push %%es\n\t"
30301 + "movw %w8, %%ds\n\t"
30302 "push %%ds\n\t"
30303 "pop %%es\n\t"
30304 - "lcall *(%%esi); cld\n\t"
30305 + "lcall *%%ss:(%%esi); cld\n\t"
30306 "pop %%es\n\t"
30307 + "push %%ss\n\t"
30308 + "pop %%ds\n"
30309 "jc 1f\n\t"
30310 "xor %%ah, %%ah\n"
30311 "1:"
30312 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30313 "1" (0),
30314 "D" ((long) &opt),
30315 "S" (&pci_indirect),
30316 - "m" (opt)
30317 + "m" (opt),
30318 + "r" (__PCIBIOS_DS)
30319 : "memory");
30320 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30321 if (ret & 0xff00)
30322 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30323 {
30324 int ret;
30325
30326 - __asm__("lcall *(%%esi); cld\n\t"
30327 + __asm__("movw %w5, %%ds\n\t"
30328 + "lcall *%%ss:(%%esi); cld\n\t"
30329 + "push %%ss\n\t"
30330 + "pop %%ds\n"
30331 "jc 1f\n\t"
30332 "xor %%ah, %%ah\n"
30333 "1:"
30334 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30335 : "0" (PCIBIOS_SET_PCI_HW_INT),
30336 "b" ((dev->bus->number << 8) | dev->devfn),
30337 "c" ((irq << 8) | (pin + 10)),
30338 - "S" (&pci_indirect));
30339 + "S" (&pci_indirect),
30340 + "r" (__PCIBIOS_DS));
30341 return !(ret & 0xff00);
30342 }
30343 EXPORT_SYMBOL(pcibios_set_irq_routing);
30344 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30345 index 40e4469..1ab536e 100644
30346 --- a/arch/x86/platform/efi/efi_32.c
30347 +++ b/arch/x86/platform/efi/efi_32.c
30348 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30349 {
30350 struct desc_ptr gdt_descr;
30351
30352 +#ifdef CONFIG_PAX_KERNEXEC
30353 + struct desc_struct d;
30354 +#endif
30355 +
30356 local_irq_save(efi_rt_eflags);
30357
30358 load_cr3(initial_page_table);
30359 __flush_tlb_all();
30360
30361 +#ifdef CONFIG_PAX_KERNEXEC
30362 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30363 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30364 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30365 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30366 +#endif
30367 +
30368 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30369 gdt_descr.size = GDT_SIZE - 1;
30370 load_gdt(&gdt_descr);
30371 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
30372 {
30373 struct desc_ptr gdt_descr;
30374
30375 +#ifdef CONFIG_PAX_KERNEXEC
30376 + struct desc_struct d;
30377 +
30378 + memset(&d, 0, sizeof d);
30379 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30380 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30381 +#endif
30382 +
30383 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30384 gdt_descr.size = GDT_SIZE - 1;
30385 load_gdt(&gdt_descr);
30386 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30387 index fbe66e6..eae5e38 100644
30388 --- a/arch/x86/platform/efi/efi_stub_32.S
30389 +++ b/arch/x86/platform/efi/efi_stub_32.S
30390 @@ -6,7 +6,9 @@
30391 */
30392
30393 #include <linux/linkage.h>
30394 +#include <linux/init.h>
30395 #include <asm/page_types.h>
30396 +#include <asm/segment.h>
30397
30398 /*
30399 * efi_call_phys(void *, ...) is a function with variable parameters.
30400 @@ -20,7 +22,7 @@
30401 * service functions will comply with gcc calling convention, too.
30402 */
30403
30404 -.text
30405 +__INIT
30406 ENTRY(efi_call_phys)
30407 /*
30408 * 0. The function can only be called in Linux kernel. So CS has been
30409 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30410 * The mapping of lower virtual memory has been created in prelog and
30411 * epilog.
30412 */
30413 - movl $1f, %edx
30414 - subl $__PAGE_OFFSET, %edx
30415 - jmp *%edx
30416 +#ifdef CONFIG_PAX_KERNEXEC
30417 + movl $(__KERNEXEC_EFI_DS), %edx
30418 + mov %edx, %ds
30419 + mov %edx, %es
30420 + mov %edx, %ss
30421 + addl $2f,(1f)
30422 + ljmp *(1f)
30423 +
30424 +__INITDATA
30425 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30426 +.previous
30427 +
30428 +2:
30429 + subl $2b,(1b)
30430 +#else
30431 + jmp 1f-__PAGE_OFFSET
30432 1:
30433 +#endif
30434
30435 /*
30436 * 2. Now on the top of stack is the return
30437 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30438 * parameter 2, ..., param n. To make things easy, we save the return
30439 * address of efi_call_phys in a global variable.
30440 */
30441 - popl %edx
30442 - movl %edx, saved_return_addr
30443 - /* get the function pointer into ECX*/
30444 - popl %ecx
30445 - movl %ecx, efi_rt_function_ptr
30446 - movl $2f, %edx
30447 - subl $__PAGE_OFFSET, %edx
30448 - pushl %edx
30449 + popl (saved_return_addr)
30450 + popl (efi_rt_function_ptr)
30451
30452 /*
30453 * 3. Clear PG bit in %CR0.
30454 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30455 /*
30456 * 5. Call the physical function.
30457 */
30458 - jmp *%ecx
30459 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
30460
30461 -2:
30462 /*
30463 * 6. After EFI runtime service returns, control will return to
30464 * following instruction. We'd better readjust stack pointer first.
30465 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30466 movl %cr0, %edx
30467 orl $0x80000000, %edx
30468 movl %edx, %cr0
30469 - jmp 1f
30470 -1:
30471 +
30472 /*
30473 * 8. Now restore the virtual mode from flat mode by
30474 * adding EIP with PAGE_OFFSET.
30475 */
30476 - movl $1f, %edx
30477 - jmp *%edx
30478 +#ifdef CONFIG_PAX_KERNEXEC
30479 + movl $(__KERNEL_DS), %edx
30480 + mov %edx, %ds
30481 + mov %edx, %es
30482 + mov %edx, %ss
30483 + ljmp $(__KERNEL_CS),$1f
30484 +#else
30485 + jmp 1f+__PAGE_OFFSET
30486 +#endif
30487 1:
30488
30489 /*
30490 * 9. Balance the stack. And because EAX contain the return value,
30491 * we'd better not clobber it.
30492 */
30493 - leal efi_rt_function_ptr, %edx
30494 - movl (%edx), %ecx
30495 - pushl %ecx
30496 + pushl (efi_rt_function_ptr)
30497
30498 /*
30499 - * 10. Push the saved return address onto the stack and return.
30500 + * 10. Return to the saved return address.
30501 */
30502 - leal saved_return_addr, %edx
30503 - movl (%edx), %ecx
30504 - pushl %ecx
30505 - ret
30506 + jmpl *(saved_return_addr)
30507 ENDPROC(efi_call_phys)
30508 .previous
30509
30510 -.data
30511 +__INITDATA
30512 saved_return_addr:
30513 .long 0
30514 efi_rt_function_ptr:
30515 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30516 index 4c07cca..2c8427d 100644
30517 --- a/arch/x86/platform/efi/efi_stub_64.S
30518 +++ b/arch/x86/platform/efi/efi_stub_64.S
30519 @@ -7,6 +7,7 @@
30520 */
30521
30522 #include <linux/linkage.h>
30523 +#include <asm/alternative-asm.h>
30524
30525 #define SAVE_XMM \
30526 mov %rsp, %rax; \
30527 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
30528 call *%rdi
30529 addq $32, %rsp
30530 RESTORE_XMM
30531 + pax_force_retaddr 0, 1
30532 ret
30533 ENDPROC(efi_call0)
30534
30535 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
30536 call *%rdi
30537 addq $32, %rsp
30538 RESTORE_XMM
30539 + pax_force_retaddr 0, 1
30540 ret
30541 ENDPROC(efi_call1)
30542
30543 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
30544 call *%rdi
30545 addq $32, %rsp
30546 RESTORE_XMM
30547 + pax_force_retaddr 0, 1
30548 ret
30549 ENDPROC(efi_call2)
30550
30551 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
30552 call *%rdi
30553 addq $32, %rsp
30554 RESTORE_XMM
30555 + pax_force_retaddr 0, 1
30556 ret
30557 ENDPROC(efi_call3)
30558
30559 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
30560 call *%rdi
30561 addq $32, %rsp
30562 RESTORE_XMM
30563 + pax_force_retaddr 0, 1
30564 ret
30565 ENDPROC(efi_call4)
30566
30567 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
30568 call *%rdi
30569 addq $48, %rsp
30570 RESTORE_XMM
30571 + pax_force_retaddr 0, 1
30572 ret
30573 ENDPROC(efi_call5)
30574
30575 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
30576 call *%rdi
30577 addq $48, %rsp
30578 RESTORE_XMM
30579 + pax_force_retaddr 0, 1
30580 ret
30581 ENDPROC(efi_call6)
30582 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30583 index e31bcd8..f12dc46 100644
30584 --- a/arch/x86/platform/mrst/mrst.c
30585 +++ b/arch/x86/platform/mrst/mrst.c
30586 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30587 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30588 int sfi_mrtc_num;
30589
30590 -static void mrst_power_off(void)
30591 +static __noreturn void mrst_power_off(void)
30592 {
30593 + BUG();
30594 }
30595
30596 -static void mrst_reboot(void)
30597 +static __noreturn void mrst_reboot(void)
30598 {
30599 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30600 + BUG();
30601 }
30602
30603 /* parse all the mtimer info to a static mtimer array */
30604 diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30605 index d6ee929..3637cb5 100644
30606 --- a/arch/x86/platform/olpc/olpc_dt.c
30607 +++ b/arch/x86/platform/olpc/olpc_dt.c
30608 @@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30609 return res;
30610 }
30611
30612 -static struct of_pdt_ops prom_olpc_ops __initdata = {
30613 +static struct of_pdt_ops prom_olpc_ops __initconst = {
30614 .nextprop = olpc_dt_nextprop,
30615 .getproplen = olpc_dt_getproplen,
30616 .getproperty = olpc_dt_getproperty,
30617 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30618 index 3c68768..07e82b8 100644
30619 --- a/arch/x86/power/cpu.c
30620 +++ b/arch/x86/power/cpu.c
30621 @@ -134,7 +134,7 @@ static void do_fpu_end(void)
30622 static void fix_processor_context(void)
30623 {
30624 int cpu = smp_processor_id();
30625 - struct tss_struct *t = &per_cpu(init_tss, cpu);
30626 + struct tss_struct *t = init_tss + cpu;
30627
30628 set_tss_desc(cpu, t); /*
30629 * This just modifies memory; should not be
30630 @@ -144,8 +144,6 @@ static void fix_processor_context(void)
30631 */
30632
30633 #ifdef CONFIG_X86_64
30634 - get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30635 -
30636 syscall_init(); /* This sets MSR_*STAR and related */
30637 #endif
30638 load_TR_desc(); /* This does ltr */
30639 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30640 index a44f457..9140171 100644
30641 --- a/arch/x86/realmode/init.c
30642 +++ b/arch/x86/realmode/init.c
30643 @@ -70,7 +70,13 @@ void __init setup_real_mode(void)
30644 __va(real_mode_header->trampoline_header);
30645
30646 #ifdef CONFIG_X86_32
30647 - trampoline_header->start = __pa_symbol(startup_32_smp);
30648 + trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
30649 +
30650 +#ifdef CONFIG_PAX_KERNEXEC
30651 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30652 +#endif
30653 +
30654 + trampoline_header->boot_cs = __BOOT_CS;
30655 trampoline_header->gdt_limit = __BOOT_DS + 7;
30656 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
30657 #else
30658 @@ -86,7 +92,7 @@ void __init setup_real_mode(void)
30659 *trampoline_cr4_features = read_cr4();
30660
30661 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
30662 - trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
30663 + trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
30664 trampoline_pgd[511] = init_level4_pgt[511].pgd;
30665 #endif
30666 }
30667 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30668 index 8869287..d577672 100644
30669 --- a/arch/x86/realmode/rm/Makefile
30670 +++ b/arch/x86/realmode/rm/Makefile
30671 @@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30672 $(call cc-option, -fno-unit-at-a-time)) \
30673 $(call cc-option, -fno-stack-protector) \
30674 $(call cc-option, -mpreferred-stack-boundary=2)
30675 +ifdef CONSTIFY_PLUGIN
30676 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30677 +endif
30678 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30679 GCOV_PROFILE := n
30680 diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30681 index a28221d..93c40f1 100644
30682 --- a/arch/x86/realmode/rm/header.S
30683 +++ b/arch/x86/realmode/rm/header.S
30684 @@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30685 #endif
30686 /* APM/BIOS reboot */
30687 .long pa_machine_real_restart_asm
30688 -#ifdef CONFIG_X86_64
30689 +#ifdef CONFIG_X86_32
30690 + .long __KERNEL_CS
30691 +#else
30692 .long __KERNEL32_CS
30693 #endif
30694 END(real_mode_header)
30695 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30696 index c1b2791..f9e31c7 100644
30697 --- a/arch/x86/realmode/rm/trampoline_32.S
30698 +++ b/arch/x86/realmode/rm/trampoline_32.S
30699 @@ -25,6 +25,12 @@
30700 #include <asm/page_types.h>
30701 #include "realmode.h"
30702
30703 +#ifdef CONFIG_PAX_KERNEXEC
30704 +#define ta(X) (X)
30705 +#else
30706 +#define ta(X) (pa_ ## X)
30707 +#endif
30708 +
30709 .text
30710 .code16
30711
30712 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30713
30714 cli # We should be safe anyway
30715
30716 - movl tr_start, %eax # where we need to go
30717 -
30718 movl $0xA5A5A5A5, trampoline_status
30719 # write marker for master knows we're running
30720
30721 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30722 movw $1, %dx # protected mode (PE) bit
30723 lmsw %dx # into protected mode
30724
30725 - ljmpl $__BOOT_CS, $pa_startup_32
30726 + ljmpl *(trampoline_header)
30727
30728 .section ".text32","ax"
30729 .code32
30730 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30731 .balign 8
30732 GLOBAL(trampoline_header)
30733 tr_start: .space 4
30734 - tr_gdt_pad: .space 2
30735 + tr_boot_cs: .space 2
30736 tr_gdt: .space 6
30737 END(trampoline_header)
30738
30739 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30740 index bb360dc..3e5945f 100644
30741 --- a/arch/x86/realmode/rm/trampoline_64.S
30742 +++ b/arch/x86/realmode/rm/trampoline_64.S
30743 @@ -107,7 +107,7 @@ ENTRY(startup_32)
30744 wrmsr
30745
30746 # Enable paging and in turn activate Long Mode
30747 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30748 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
30749 movl %eax, %cr0
30750
30751 /*
30752 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30753 index 79d67bd..c7e1b90 100644
30754 --- a/arch/x86/tools/relocs.c
30755 +++ b/arch/x86/tools/relocs.c
30756 @@ -12,10 +12,13 @@
30757 #include <regex.h>
30758 #include <tools/le_byteshift.h>
30759
30760 +#include "../../../include/generated/autoconf.h"
30761 +
30762 static void die(char *fmt, ...);
30763
30764 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
30765 static Elf32_Ehdr ehdr;
30766 +static Elf32_Phdr *phdr;
30767 static unsigned long reloc_count, reloc_idx;
30768 static unsigned long *relocs;
30769 static unsigned long reloc16_count, reloc16_idx;
30770 @@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
30771 }
30772 }
30773
30774 +static void read_phdrs(FILE *fp)
30775 +{
30776 + unsigned int i;
30777 +
30778 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
30779 + if (!phdr) {
30780 + die("Unable to allocate %d program headers\n",
30781 + ehdr.e_phnum);
30782 + }
30783 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30784 + die("Seek to %d failed: %s\n",
30785 + ehdr.e_phoff, strerror(errno));
30786 + }
30787 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30788 + die("Cannot read ELF program headers: %s\n",
30789 + strerror(errno));
30790 + }
30791 + for(i = 0; i < ehdr.e_phnum; i++) {
30792 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
30793 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
30794 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
30795 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
30796 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
30797 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
30798 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
30799 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
30800 + }
30801 +
30802 +}
30803 +
30804 static void read_shdrs(FILE *fp)
30805 {
30806 - int i;
30807 + unsigned int i;
30808 Elf32_Shdr shdr;
30809
30810 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30811 @@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
30812
30813 static void read_strtabs(FILE *fp)
30814 {
30815 - int i;
30816 + unsigned int i;
30817 for (i = 0; i < ehdr.e_shnum; i++) {
30818 struct section *sec = &secs[i];
30819 if (sec->shdr.sh_type != SHT_STRTAB) {
30820 @@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
30821
30822 static void read_symtabs(FILE *fp)
30823 {
30824 - int i,j;
30825 + unsigned int i,j;
30826 for (i = 0; i < ehdr.e_shnum; i++) {
30827 struct section *sec = &secs[i];
30828 if (sec->shdr.sh_type != SHT_SYMTAB) {
30829 @@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
30830 }
30831
30832
30833 -static void read_relocs(FILE *fp)
30834 +static void read_relocs(FILE *fp, int use_real_mode)
30835 {
30836 - int i,j;
30837 + unsigned int i,j;
30838 + uint32_t base;
30839 +
30840 for (i = 0; i < ehdr.e_shnum; i++) {
30841 struct section *sec = &secs[i];
30842 if (sec->shdr.sh_type != SHT_REL) {
30843 @@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
30844 die("Cannot read symbol table: %s\n",
30845 strerror(errno));
30846 }
30847 + base = 0;
30848 +
30849 +#ifdef CONFIG_X86_32
30850 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
30851 + if (phdr[j].p_type != PT_LOAD )
30852 + continue;
30853 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
30854 + continue;
30855 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
30856 + break;
30857 + }
30858 +#endif
30859 +
30860 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
30861 Elf32_Rel *rel = &sec->reltab[j];
30862 - rel->r_offset = elf32_to_cpu(rel->r_offset);
30863 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
30864 rel->r_info = elf32_to_cpu(rel->r_info);
30865 }
30866 }
30867 @@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
30868
30869 static void print_absolute_symbols(void)
30870 {
30871 - int i;
30872 + unsigned int i;
30873 printf("Absolute symbols\n");
30874 printf(" Num: Value Size Type Bind Visibility Name\n");
30875 for (i = 0; i < ehdr.e_shnum; i++) {
30876 struct section *sec = &secs[i];
30877 char *sym_strtab;
30878 - int j;
30879 + unsigned int j;
30880
30881 if (sec->shdr.sh_type != SHT_SYMTAB) {
30882 continue;
30883 @@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
30884
30885 static void print_absolute_relocs(void)
30886 {
30887 - int i, printed = 0;
30888 + unsigned int i, printed = 0;
30889
30890 for (i = 0; i < ehdr.e_shnum; i++) {
30891 struct section *sec = &secs[i];
30892 struct section *sec_applies, *sec_symtab;
30893 char *sym_strtab;
30894 Elf32_Sym *sh_symtab;
30895 - int j;
30896 + unsigned int j;
30897 if (sec->shdr.sh_type != SHT_REL) {
30898 continue;
30899 }
30900 @@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
30901 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30902 int use_real_mode)
30903 {
30904 - int i;
30905 + unsigned int i;
30906 /* Walk through the relocations */
30907 for (i = 0; i < ehdr.e_shnum; i++) {
30908 char *sym_strtab;
30909 Elf32_Sym *sh_symtab;
30910 struct section *sec_applies, *sec_symtab;
30911 - int j;
30912 + unsigned int j;
30913 struct section *sec = &secs[i];
30914
30915 if (sec->shdr.sh_type != SHT_REL) {
30916 @@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30917 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
30918 r_type = ELF32_R_TYPE(rel->r_info);
30919
30920 + if (!use_real_mode) {
30921 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
30922 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
30923 + continue;
30924 +
30925 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
30926 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
30927 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
30928 + continue;
30929 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
30930 + continue;
30931 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
30932 + continue;
30933 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
30934 + continue;
30935 +#endif
30936 + }
30937 +
30938 shn_abs = sym->st_shndx == SHN_ABS;
30939
30940 switch (r_type) {
30941 @@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
30942
30943 static void emit_relocs(int as_text, int use_real_mode)
30944 {
30945 - int i;
30946 + unsigned int i;
30947 /* Count how many relocations I have and allocate space for them. */
30948 reloc_count = 0;
30949 walk_relocs(count_reloc, use_real_mode);
30950 @@ -808,10 +874,11 @@ int main(int argc, char **argv)
30951 fname, strerror(errno));
30952 }
30953 read_ehdr(fp);
30954 + read_phdrs(fp);
30955 read_shdrs(fp);
30956 read_strtabs(fp);
30957 read_symtabs(fp);
30958 - read_relocs(fp);
30959 + read_relocs(fp, use_real_mode);
30960 if (show_absolute_syms) {
30961 print_absolute_symbols();
30962 goto out;
30963 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
30964 index fd14be1..e3c79c0 100644
30965 --- a/arch/x86/vdso/Makefile
30966 +++ b/arch/x86/vdso/Makefile
30967 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
30968 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
30969 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
30970
30971 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30972 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30973 GCOV_PROFILE := n
30974
30975 #
30976 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
30977 index 0faad64..39ef157 100644
30978 --- a/arch/x86/vdso/vdso32-setup.c
30979 +++ b/arch/x86/vdso/vdso32-setup.c
30980 @@ -25,6 +25,7 @@
30981 #include <asm/tlbflush.h>
30982 #include <asm/vdso.h>
30983 #include <asm/proto.h>
30984 +#include <asm/mman.h>
30985
30986 enum {
30987 VDSO_DISABLED = 0,
30988 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
30989 void enable_sep_cpu(void)
30990 {
30991 int cpu = get_cpu();
30992 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
30993 + struct tss_struct *tss = init_tss + cpu;
30994
30995 if (!boot_cpu_has(X86_FEATURE_SEP)) {
30996 put_cpu();
30997 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
30998 gate_vma.vm_start = FIXADDR_USER_START;
30999 gate_vma.vm_end = FIXADDR_USER_END;
31000 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31001 - gate_vma.vm_page_prot = __P101;
31002 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31003
31004 return 0;
31005 }
31006 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31007 if (compat)
31008 addr = VDSO_HIGH_BASE;
31009 else {
31010 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31011 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31012 if (IS_ERR_VALUE(addr)) {
31013 ret = addr;
31014 goto up_fail;
31015 }
31016 }
31017
31018 - current->mm->context.vdso = (void *)addr;
31019 + current->mm->context.vdso = addr;
31020
31021 if (compat_uses_vma || !compat) {
31022 /*
31023 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31024 }
31025
31026 current_thread_info()->sysenter_return =
31027 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31028 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31029
31030 up_fail:
31031 if (ret)
31032 - current->mm->context.vdso = NULL;
31033 + current->mm->context.vdso = 0;
31034
31035 up_write(&mm->mmap_sem);
31036
31037 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31038
31039 const char *arch_vma_name(struct vm_area_struct *vma)
31040 {
31041 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31042 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31043 return "[vdso]";
31044 +
31045 +#ifdef CONFIG_PAX_SEGMEXEC
31046 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31047 + return "[vdso]";
31048 +#endif
31049 +
31050 return NULL;
31051 }
31052
31053 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31054 * Check to see if the corresponding task was created in compat vdso
31055 * mode.
31056 */
31057 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31058 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31059 return &gate_vma;
31060 return NULL;
31061 }
31062 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31063 index 431e875..cbb23f3 100644
31064 --- a/arch/x86/vdso/vma.c
31065 +++ b/arch/x86/vdso/vma.c
31066 @@ -16,8 +16,6 @@
31067 #include <asm/vdso.h>
31068 #include <asm/page.h>
31069
31070 -unsigned int __read_mostly vdso_enabled = 1;
31071 -
31072 extern char vdso_start[], vdso_end[];
31073 extern unsigned short vdso_sync_cpuid;
31074
31075 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31076 * unaligned here as a result of stack start randomization.
31077 */
31078 addr = PAGE_ALIGN(addr);
31079 - addr = align_vdso_addr(addr);
31080
31081 return addr;
31082 }
31083 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31084 unsigned size)
31085 {
31086 struct mm_struct *mm = current->mm;
31087 - unsigned long addr;
31088 + unsigned long addr = 0;
31089 int ret;
31090
31091 - if (!vdso_enabled)
31092 - return 0;
31093 -
31094 down_write(&mm->mmap_sem);
31095 +
31096 +#ifdef CONFIG_PAX_RANDMMAP
31097 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31098 +#endif
31099 +
31100 addr = vdso_addr(mm->start_stack, size);
31101 + addr = align_vdso_addr(addr);
31102 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31103 if (IS_ERR_VALUE(addr)) {
31104 ret = addr;
31105 goto up_fail;
31106 }
31107
31108 - current->mm->context.vdso = (void *)addr;
31109 + mm->context.vdso = addr;
31110
31111 ret = install_special_mapping(mm, addr, size,
31112 VM_READ|VM_EXEC|
31113 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31114 pages);
31115 - if (ret) {
31116 - current->mm->context.vdso = NULL;
31117 - goto up_fail;
31118 - }
31119 + if (ret)
31120 + mm->context.vdso = 0;
31121
31122 up_fail:
31123 up_write(&mm->mmap_sem);
31124 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31125 vdsox32_size);
31126 }
31127 #endif
31128 -
31129 -static __init int vdso_setup(char *s)
31130 -{
31131 - vdso_enabled = simple_strtoul(s, NULL, 0);
31132 - return 0;
31133 -}
31134 -__setup("vdso=", vdso_setup);
31135 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31136 index 2363127..ec09d96 100644
31137 --- a/arch/x86/xen/enlighten.c
31138 +++ b/arch/x86/xen/enlighten.c
31139 @@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31140
31141 struct shared_info xen_dummy_shared_info;
31142
31143 -void *xen_initial_gdt;
31144 -
31145 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31146 __read_mostly int xen_have_vector_callback;
31147 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31148 @@ -496,8 +494,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31149 {
31150 unsigned long va = dtr->address;
31151 unsigned int size = dtr->size + 1;
31152 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31153 - unsigned long frames[pages];
31154 + unsigned long frames[65536 / PAGE_SIZE];
31155 int f;
31156
31157 /*
31158 @@ -545,8 +542,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31159 {
31160 unsigned long va = dtr->address;
31161 unsigned int size = dtr->size + 1;
31162 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31163 - unsigned long frames[pages];
31164 + unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
31165 int f;
31166
31167 /*
31168 @@ -554,7 +550,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31169 * 8-byte entries, or 16 4k pages..
31170 */
31171
31172 - BUG_ON(size > 65536);
31173 + BUG_ON(size > GDT_SIZE);
31174 BUG_ON(va & ~PAGE_MASK);
31175
31176 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
31177 @@ -939,7 +935,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31178 return 0;
31179 }
31180
31181 -static void set_xen_basic_apic_ops(void)
31182 +static void __init set_xen_basic_apic_ops(void)
31183 {
31184 apic->read = xen_apic_read;
31185 apic->write = xen_apic_write;
31186 @@ -1245,30 +1241,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31187 #endif
31188 };
31189
31190 -static void xen_reboot(int reason)
31191 +static __noreturn void xen_reboot(int reason)
31192 {
31193 struct sched_shutdown r = { .reason = reason };
31194
31195 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31196 - BUG();
31197 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31198 + BUG();
31199 }
31200
31201 -static void xen_restart(char *msg)
31202 +static __noreturn void xen_restart(char *msg)
31203 {
31204 xen_reboot(SHUTDOWN_reboot);
31205 }
31206
31207 -static void xen_emergency_restart(void)
31208 +static __noreturn void xen_emergency_restart(void)
31209 {
31210 xen_reboot(SHUTDOWN_reboot);
31211 }
31212
31213 -static void xen_machine_halt(void)
31214 +static __noreturn void xen_machine_halt(void)
31215 {
31216 xen_reboot(SHUTDOWN_poweroff);
31217 }
31218
31219 -static void xen_machine_power_off(void)
31220 +static __noreturn void xen_machine_power_off(void)
31221 {
31222 if (pm_power_off)
31223 pm_power_off();
31224 @@ -1370,7 +1366,17 @@ asmlinkage void __init xen_start_kernel(void)
31225 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31226
31227 /* Work out if we support NX */
31228 - x86_configure_nx();
31229 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31230 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31231 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31232 + unsigned l, h;
31233 +
31234 + __supported_pte_mask |= _PAGE_NX;
31235 + rdmsr(MSR_EFER, l, h);
31236 + l |= EFER_NX;
31237 + wrmsr(MSR_EFER, l, h);
31238 + }
31239 +#endif
31240
31241 xen_setup_features();
31242
31243 @@ -1401,13 +1407,6 @@ asmlinkage void __init xen_start_kernel(void)
31244
31245 machine_ops = xen_machine_ops;
31246
31247 - /*
31248 - * The only reliable way to retain the initial address of the
31249 - * percpu gdt_page is to remember it here, so we can go and
31250 - * mark it RW later, when the initial percpu area is freed.
31251 - */
31252 - xen_initial_gdt = &per_cpu(gdt_page, 0);
31253 -
31254 xen_smp_init();
31255
31256 #ifdef CONFIG_ACPI_NUMA
31257 @@ -1601,7 +1600,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31258 return NOTIFY_OK;
31259 }
31260
31261 -static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31262 +static struct notifier_block xen_hvm_cpu_notifier = {
31263 .notifier_call = xen_hvm_cpu_notify,
31264 };
31265
31266 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31267 index e006c18..b9a7d6c 100644
31268 --- a/arch/x86/xen/mmu.c
31269 +++ b/arch/x86/xen/mmu.c
31270 @@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31271 /* L3_k[510] -> level2_kernel_pgt
31272 * L3_i[511] -> level2_fixmap_pgt */
31273 convert_pfn_mfn(level3_kernel_pgt);
31274 + convert_pfn_mfn(level3_vmalloc_start_pgt);
31275 + convert_pfn_mfn(level3_vmalloc_end_pgt);
31276 + convert_pfn_mfn(level3_vmemmap_pgt);
31277
31278 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31279 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31280 @@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31281 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31282 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31283 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31284 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31285 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31286 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31287 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31288 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31289 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31290 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31291 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31292
31293 @@ -2110,6 +2117,7 @@ static void __init xen_post_allocator_init(void)
31294 pv_mmu_ops.set_pud = xen_set_pud;
31295 #if PAGETABLE_LEVELS == 4
31296 pv_mmu_ops.set_pgd = xen_set_pgd;
31297 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31298 #endif
31299
31300 /* This will work as long as patching hasn't happened yet
31301 @@ -2188,6 +2196,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31302 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31303 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31304 .set_pgd = xen_set_pgd_hyper,
31305 + .set_pgd_batched = xen_set_pgd_hyper,
31306
31307 .alloc_pud = xen_alloc_pmd_init,
31308 .release_pud = xen_release_pmd_init,
31309 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31310 index 22c800a..8915f1e 100644
31311 --- a/arch/x86/xen/smp.c
31312 +++ b/arch/x86/xen/smp.c
31313 @@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31314 {
31315 BUG_ON(smp_processor_id() != 0);
31316 native_smp_prepare_boot_cpu();
31317 -
31318 - /* We've switched to the "real" per-cpu gdt, so make sure the
31319 - old memory can be recycled */
31320 - make_lowmem_page_readwrite(xen_initial_gdt);
31321 -
31322 xen_filter_cpu_maps();
31323 xen_setup_vcpu_info_placement();
31324 }
31325 @@ -303,7 +298,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31326 ctxt->user_regs.ss = __KERNEL_DS;
31327 #ifdef CONFIG_X86_32
31328 ctxt->user_regs.fs = __KERNEL_PERCPU;
31329 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31330 + savesegment(gs, ctxt->user_regs.gs);
31331 #else
31332 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31333 #endif
31334 @@ -313,8 +308,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31335
31336 {
31337 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
31338 - ctxt->user_regs.ds = __USER_DS;
31339 - ctxt->user_regs.es = __USER_DS;
31340 + ctxt->user_regs.ds = __KERNEL_DS;
31341 + ctxt->user_regs.es = __KERNEL_DS;
31342
31343 xen_copy_trap_info(ctxt->trap_ctxt);
31344
31345 @@ -359,13 +354,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31346 int rc;
31347
31348 per_cpu(current_task, cpu) = idle;
31349 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
31350 #ifdef CONFIG_X86_32
31351 irq_ctx_init(cpu);
31352 #else
31353 clear_tsk_thread_flag(idle, TIF_FORK);
31354 - per_cpu(kernel_stack, cpu) =
31355 - (unsigned long)task_stack_page(idle) -
31356 - KERNEL_STACK_OFFSET + THREAD_SIZE;
31357 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31358 #endif
31359 xen_setup_runstate_info(cpu);
31360 xen_setup_timer(cpu);
31361 @@ -634,7 +628,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31362
31363 void __init xen_smp_init(void)
31364 {
31365 - smp_ops = xen_smp_ops;
31366 + memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31367 xen_fill_possible_map();
31368 xen_init_spinlocks();
31369 }
31370 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31371 index 33ca6e4..0ded929 100644
31372 --- a/arch/x86/xen/xen-asm_32.S
31373 +++ b/arch/x86/xen/xen-asm_32.S
31374 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
31375 ESP_OFFSET=4 # bytes pushed onto stack
31376
31377 /*
31378 - * Store vcpu_info pointer for easy access. Do it this way to
31379 - * avoid having to reload %fs
31380 + * Store vcpu_info pointer for easy access.
31381 */
31382 #ifdef CONFIG_SMP
31383 - GET_THREAD_INFO(%eax)
31384 - movl %ss:TI_cpu(%eax), %eax
31385 - movl %ss:__per_cpu_offset(,%eax,4), %eax
31386 - mov %ss:xen_vcpu(%eax), %eax
31387 + push %fs
31388 + mov $(__KERNEL_PERCPU), %eax
31389 + mov %eax, %fs
31390 + mov PER_CPU_VAR(xen_vcpu), %eax
31391 + pop %fs
31392 #else
31393 movl %ss:xen_vcpu, %eax
31394 #endif
31395 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31396 index 7faed58..ba4427c 100644
31397 --- a/arch/x86/xen/xen-head.S
31398 +++ b/arch/x86/xen/xen-head.S
31399 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
31400 #ifdef CONFIG_X86_32
31401 mov %esi,xen_start_info
31402 mov $init_thread_union+THREAD_SIZE,%esp
31403 +#ifdef CONFIG_SMP
31404 + movl $cpu_gdt_table,%edi
31405 + movl $__per_cpu_load,%eax
31406 + movw %ax,__KERNEL_PERCPU + 2(%edi)
31407 + rorl $16,%eax
31408 + movb %al,__KERNEL_PERCPU + 4(%edi)
31409 + movb %ah,__KERNEL_PERCPU + 7(%edi)
31410 + movl $__per_cpu_end - 1,%eax
31411 + subl $__per_cpu_start,%eax
31412 + movw %ax,__KERNEL_PERCPU + 0(%edi)
31413 +#endif
31414 #else
31415 mov %rsi,xen_start_info
31416 mov $init_thread_union+THREAD_SIZE,%rsp
31417 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31418 index a95b417..b6dbd0b 100644
31419 --- a/arch/x86/xen/xen-ops.h
31420 +++ b/arch/x86/xen/xen-ops.h
31421 @@ -10,8 +10,6 @@
31422 extern const char xen_hypervisor_callback[];
31423 extern const char xen_failsafe_callback[];
31424
31425 -extern void *xen_initial_gdt;
31426 -
31427 struct trap_info;
31428 void xen_copy_trap_info(struct trap_info *traps);
31429
31430 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31431 index 525bd3d..ef888b1 100644
31432 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
31433 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31434 @@ -119,9 +119,9 @@
31435 ----------------------------------------------------------------------*/
31436
31437 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31438 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31439 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31440 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31441 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31442
31443 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31444 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31445 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31446 index 2f33760..835e50a 100644
31447 --- a/arch/xtensa/variants/fsf/include/variant/core.h
31448 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
31449 @@ -11,6 +11,7 @@
31450 #ifndef _XTENSA_CORE_H
31451 #define _XTENSA_CORE_H
31452
31453 +#include <linux/const.h>
31454
31455 /****************************************************************************
31456 Parameters Useful for Any Code, USER or PRIVILEGED
31457 @@ -112,9 +113,9 @@
31458 ----------------------------------------------------------------------*/
31459
31460 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31461 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31462 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31463 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31464 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31465
31466 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31467 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31468 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31469 index af00795..2bb8105 100644
31470 --- a/arch/xtensa/variants/s6000/include/variant/core.h
31471 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
31472 @@ -11,6 +11,7 @@
31473 #ifndef _XTENSA_CORE_CONFIGURATION_H
31474 #define _XTENSA_CORE_CONFIGURATION_H
31475
31476 +#include <linux/const.h>
31477
31478 /****************************************************************************
31479 Parameters Useful for Any Code, USER or PRIVILEGED
31480 @@ -118,9 +119,9 @@
31481 ----------------------------------------------------------------------*/
31482
31483 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31484 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31485 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31486 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31487 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31488
31489 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31490 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
31491 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31492 index 58916af..eb9dbcf6 100644
31493 --- a/block/blk-iopoll.c
31494 +++ b/block/blk-iopoll.c
31495 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31496 }
31497 EXPORT_SYMBOL(blk_iopoll_complete);
31498
31499 -static void blk_iopoll_softirq(struct softirq_action *h)
31500 +static void blk_iopoll_softirq(void)
31501 {
31502 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31503 int rearm = 0, budget = blk_iopoll_budget;
31504 @@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31505 return NOTIFY_OK;
31506 }
31507
31508 -static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31509 +static struct notifier_block blk_iopoll_cpu_notifier = {
31510 .notifier_call = blk_iopoll_cpu_notify,
31511 };
31512
31513 diff --git a/block/blk-map.c b/block/blk-map.c
31514 index 623e1cd..ca1e109 100644
31515 --- a/block/blk-map.c
31516 +++ b/block/blk-map.c
31517 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31518 if (!len || !kbuf)
31519 return -EINVAL;
31520
31521 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31522 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31523 if (do_copy)
31524 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31525 else
31526 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31527 index 467c8de..f3628c5 100644
31528 --- a/block/blk-softirq.c
31529 +++ b/block/blk-softirq.c
31530 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31531 * Softirq action handler - move entries to local list and loop over them
31532 * while passing them to the queue registered handler.
31533 */
31534 -static void blk_done_softirq(struct softirq_action *h)
31535 +static void blk_done_softirq(void)
31536 {
31537 struct list_head *cpu_list, local_list;
31538
31539 @@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31540 return NOTIFY_OK;
31541 }
31542
31543 -static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31544 +static struct notifier_block blk_cpu_notifier = {
31545 .notifier_call = blk_cpu_notify,
31546 };
31547
31548 diff --git a/block/bsg.c b/block/bsg.c
31549 index 420a5a9..23834aa 100644
31550 --- a/block/bsg.c
31551 +++ b/block/bsg.c
31552 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31553 struct sg_io_v4 *hdr, struct bsg_device *bd,
31554 fmode_t has_write_perm)
31555 {
31556 + unsigned char tmpcmd[sizeof(rq->__cmd)];
31557 + unsigned char *cmdptr;
31558 +
31559 if (hdr->request_len > BLK_MAX_CDB) {
31560 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31561 if (!rq->cmd)
31562 return -ENOMEM;
31563 - }
31564 + cmdptr = rq->cmd;
31565 + } else
31566 + cmdptr = tmpcmd;
31567
31568 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31569 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31570 hdr->request_len))
31571 return -EFAULT;
31572
31573 + if (cmdptr != rq->cmd)
31574 + memcpy(rq->cmd, cmdptr, hdr->request_len);
31575 +
31576 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31577 if (blk_verify_command(rq->cmd, has_write_perm))
31578 return -EPERM;
31579 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31580 index 7c668c8..db3521c 100644
31581 --- a/block/compat_ioctl.c
31582 +++ b/block/compat_ioctl.c
31583 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31584 err |= __get_user(f->spec1, &uf->spec1);
31585 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31586 err |= __get_user(name, &uf->name);
31587 - f->name = compat_ptr(name);
31588 + f->name = (void __force_kernel *)compat_ptr(name);
31589 if (err) {
31590 err = -EFAULT;
31591 goto out;
31592 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31593 index ff5804e..a88acad 100644
31594 --- a/block/partitions/efi.c
31595 +++ b/block/partitions/efi.c
31596 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31597 if (!gpt)
31598 return NULL;
31599
31600 + if (!le32_to_cpu(gpt->num_partition_entries))
31601 + return NULL;
31602 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31603 + if (!pte)
31604 + return NULL;
31605 +
31606 count = le32_to_cpu(gpt->num_partition_entries) *
31607 le32_to_cpu(gpt->sizeof_partition_entry);
31608 - if (!count)
31609 - return NULL;
31610 - pte = kzalloc(count, GFP_KERNEL);
31611 - if (!pte)
31612 - return NULL;
31613 -
31614 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31615 (u8 *) pte,
31616 count) < count) {
31617 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31618 index 9a87daa..fb17486 100644
31619 --- a/block/scsi_ioctl.c
31620 +++ b/block/scsi_ioctl.c
31621 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31622 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31623 struct sg_io_hdr *hdr, fmode_t mode)
31624 {
31625 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31626 + unsigned char tmpcmd[sizeof(rq->__cmd)];
31627 + unsigned char *cmdptr;
31628 +
31629 + if (rq->cmd != rq->__cmd)
31630 + cmdptr = rq->cmd;
31631 + else
31632 + cmdptr = tmpcmd;
31633 +
31634 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31635 return -EFAULT;
31636 +
31637 + if (cmdptr != rq->cmd)
31638 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31639 +
31640 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31641 return -EPERM;
31642
31643 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31644 int err;
31645 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31646 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31647 + unsigned char tmpcmd[sizeof(rq->__cmd)];
31648 + unsigned char *cmdptr;
31649
31650 if (!sic)
31651 return -EINVAL;
31652 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31653 */
31654 err = -EFAULT;
31655 rq->cmd_len = cmdlen;
31656 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
31657 +
31658 + if (rq->cmd != rq->__cmd)
31659 + cmdptr = rq->cmd;
31660 + else
31661 + cmdptr = tmpcmd;
31662 +
31663 + if (copy_from_user(cmdptr, sic->data, cmdlen))
31664 goto error;
31665
31666 + if (rq->cmd != cmdptr)
31667 + memcpy(rq->cmd, cmdptr, cmdlen);
31668 +
31669 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31670 goto error;
31671
31672 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31673 index 7bdd61b..afec999 100644
31674 --- a/crypto/cryptd.c
31675 +++ b/crypto/cryptd.c
31676 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31677
31678 struct cryptd_blkcipher_request_ctx {
31679 crypto_completion_t complete;
31680 -};
31681 +} __no_const;
31682
31683 struct cryptd_hash_ctx {
31684 struct crypto_shash *child;
31685 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31686
31687 struct cryptd_aead_request_ctx {
31688 crypto_completion_t complete;
31689 -};
31690 +} __no_const;
31691
31692 static void cryptd_queue_worker(struct work_struct *work);
31693
31694 diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31695 index f220d64..d359ad6 100644
31696 --- a/drivers/acpi/apei/apei-internal.h
31697 +++ b/drivers/acpi/apei/apei-internal.h
31698 @@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31699 struct apei_exec_ins_type {
31700 u32 flags;
31701 apei_exec_ins_func_t run;
31702 -};
31703 +} __do_const;
31704
31705 struct apei_exec_context {
31706 u32 ip;
31707 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31708 index fefc2ca..12a535d 100644
31709 --- a/drivers/acpi/apei/cper.c
31710 +++ b/drivers/acpi/apei/cper.c
31711 @@ -39,12 +39,12 @@
31712 */
31713 u64 cper_next_record_id(void)
31714 {
31715 - static atomic64_t seq;
31716 + static atomic64_unchecked_t seq;
31717
31718 - if (!atomic64_read(&seq))
31719 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
31720 + if (!atomic64_read_unchecked(&seq))
31721 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31722
31723 - return atomic64_inc_return(&seq);
31724 + return atomic64_inc_return_unchecked(&seq);
31725 }
31726 EXPORT_SYMBOL_GPL(cper_next_record_id);
31727
31728 diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31729 index be60399..778b33e8 100644
31730 --- a/drivers/acpi/bgrt.c
31731 +++ b/drivers/acpi/bgrt.c
31732 @@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31733 return -ENODEV;
31734
31735 sysfs_bin_attr_init(&image_attr);
31736 - image_attr.private = bgrt_image;
31737 - image_attr.size = bgrt_image_size;
31738 + pax_open_kernel();
31739 + *(void **)&image_attr.private = bgrt_image;
31740 + *(size_t *)&image_attr.size = bgrt_image_size;
31741 + pax_close_kernel();
31742
31743 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31744 if (!bgrt_kobj)
31745 diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31746 index cb96296..b81293b 100644
31747 --- a/drivers/acpi/blacklist.c
31748 +++ b/drivers/acpi/blacklist.c
31749 @@ -52,7 +52,7 @@ struct acpi_blacklist_item {
31750 u32 is_critical_error;
31751 };
31752
31753 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
31754 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
31755
31756 /*
31757 * POLICY: If *anything* doesn't work, put it on the blacklist.
31758 @@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
31759 return 0;
31760 }
31761
31762 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
31763 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
31764 {
31765 .callback = dmi_disable_osi_vista,
31766 .ident = "Fujitsu Siemens",
31767 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
31768 index 7586544..636a2f0 100644
31769 --- a/drivers/acpi/ec_sys.c
31770 +++ b/drivers/acpi/ec_sys.c
31771 @@ -12,6 +12,7 @@
31772 #include <linux/acpi.h>
31773 #include <linux/debugfs.h>
31774 #include <linux/module.h>
31775 +#include <linux/uaccess.h>
31776 #include "internal.h"
31777
31778 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
31779 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31780 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
31781 */
31782 unsigned int size = EC_SPACE_SIZE;
31783 - u8 *data = (u8 *) buf;
31784 + u8 data;
31785 loff_t init_off = *off;
31786 int err = 0;
31787
31788 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31789 size = count;
31790
31791 while (size) {
31792 - err = ec_read(*off, &data[*off - init_off]);
31793 + err = ec_read(*off, &data);
31794 if (err)
31795 return err;
31796 + if (put_user(data, &buf[*off - init_off]))
31797 + return -EFAULT;
31798 *off += 1;
31799 size--;
31800 }
31801 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31802
31803 unsigned int size = count;
31804 loff_t init_off = *off;
31805 - u8 *data = (u8 *) buf;
31806 int err = 0;
31807
31808 if (*off >= EC_SPACE_SIZE)
31809 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31810 }
31811
31812 while (size) {
31813 - u8 byte_write = data[*off - init_off];
31814 + u8 byte_write;
31815 + if (get_user(byte_write, &buf[*off - init_off]))
31816 + return -EFAULT;
31817 err = ec_write(*off, byte_write);
31818 if (err)
31819 return err;
31820 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
31821 index ee255c6..747c68b 100644
31822 --- a/drivers/acpi/processor_idle.c
31823 +++ b/drivers/acpi/processor_idle.c
31824 @@ -986,7 +986,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
31825 {
31826 int i, count = CPUIDLE_DRIVER_STATE_START;
31827 struct acpi_processor_cx *cx;
31828 - struct cpuidle_state *state;
31829 + cpuidle_state_no_const *state;
31830 struct cpuidle_driver *drv = &acpi_idle_driver;
31831
31832 if (!pr->flags.power_setup_done)
31833 diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
31834 index 41c0504..f8c0836 100644
31835 --- a/drivers/acpi/sysfs.c
31836 +++ b/drivers/acpi/sysfs.c
31837 @@ -420,11 +420,11 @@ static u32 num_counters;
31838 static struct attribute **all_attrs;
31839 static u32 acpi_gpe_count;
31840
31841 -static struct attribute_group interrupt_stats_attr_group = {
31842 +static attribute_group_no_const interrupt_stats_attr_group = {
31843 .name = "interrupts",
31844 };
31845
31846 -static struct kobj_attribute *counter_attrs;
31847 +static kobj_attribute_no_const *counter_attrs;
31848
31849 static void delete_gpe_attr_array(void)
31850 {
31851 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
31852 index 34c8216..f56c828 100644
31853 --- a/drivers/ata/libahci.c
31854 +++ b/drivers/ata/libahci.c
31855 @@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
31856 }
31857 EXPORT_SYMBOL_GPL(ahci_kick_engine);
31858
31859 -static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
31860 +static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
31861 struct ata_taskfile *tf, int is_cmd, u16 flags,
31862 unsigned long timeout_msec)
31863 {
31864 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
31865 index 63c743b..0422dc6 100644
31866 --- a/drivers/ata/libata-core.c
31867 +++ b/drivers/ata/libata-core.c
31868 @@ -4786,7 +4786,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
31869 struct ata_port *ap;
31870 unsigned int tag;
31871
31872 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31873 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31874 ap = qc->ap;
31875
31876 qc->flags = 0;
31877 @@ -4802,7 +4802,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
31878 struct ata_port *ap;
31879 struct ata_link *link;
31880
31881 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31882 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31883 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
31884 ap = qc->ap;
31885 link = qc->dev->link;
31886 @@ -5920,6 +5920,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31887 return;
31888
31889 spin_lock(&lock);
31890 + pax_open_kernel();
31891
31892 for (cur = ops->inherits; cur; cur = cur->inherits) {
31893 void **inherit = (void **)cur;
31894 @@ -5933,8 +5934,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31895 if (IS_ERR(*pp))
31896 *pp = NULL;
31897
31898 - ops->inherits = NULL;
31899 + *(struct ata_port_operations **)&ops->inherits = NULL;
31900
31901 + pax_close_kernel();
31902 spin_unlock(&lock);
31903 }
31904
31905 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
31906 index 405022d..fb70e53 100644
31907 --- a/drivers/ata/pata_arasan_cf.c
31908 +++ b/drivers/ata/pata_arasan_cf.c
31909 @@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
31910 /* Handle platform specific quirks */
31911 if (pdata->quirk) {
31912 if (pdata->quirk & CF_BROKEN_PIO) {
31913 - ap->ops->set_piomode = NULL;
31914 + pax_open_kernel();
31915 + *(void **)&ap->ops->set_piomode = NULL;
31916 + pax_close_kernel();
31917 ap->pio_mask = 0;
31918 }
31919 if (pdata->quirk & CF_BROKEN_MWDMA)
31920 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31921 index f9b983a..887b9d8 100644
31922 --- a/drivers/atm/adummy.c
31923 +++ b/drivers/atm/adummy.c
31924 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31925 vcc->pop(vcc, skb);
31926 else
31927 dev_kfree_skb_any(skb);
31928 - atomic_inc(&vcc->stats->tx);
31929 + atomic_inc_unchecked(&vcc->stats->tx);
31930
31931 return 0;
31932 }
31933 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31934 index 77a7480..05cde58 100644
31935 --- a/drivers/atm/ambassador.c
31936 +++ b/drivers/atm/ambassador.c
31937 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31938 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31939
31940 // VC layer stats
31941 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31942 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31943
31944 // free the descriptor
31945 kfree (tx_descr);
31946 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31947 dump_skb ("<<<", vc, skb);
31948
31949 // VC layer stats
31950 - atomic_inc(&atm_vcc->stats->rx);
31951 + atomic_inc_unchecked(&atm_vcc->stats->rx);
31952 __net_timestamp(skb);
31953 // end of our responsibility
31954 atm_vcc->push (atm_vcc, skb);
31955 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31956 } else {
31957 PRINTK (KERN_INFO, "dropped over-size frame");
31958 // should we count this?
31959 - atomic_inc(&atm_vcc->stats->rx_drop);
31960 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31961 }
31962
31963 } else {
31964 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31965 }
31966
31967 if (check_area (skb->data, skb->len)) {
31968 - atomic_inc(&atm_vcc->stats->tx_err);
31969 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31970 return -ENOMEM; // ?
31971 }
31972
31973 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31974 index 0e3f8f9..765a7a5 100644
31975 --- a/drivers/atm/atmtcp.c
31976 +++ b/drivers/atm/atmtcp.c
31977 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31978 if (vcc->pop) vcc->pop(vcc,skb);
31979 else dev_kfree_skb(skb);
31980 if (dev_data) return 0;
31981 - atomic_inc(&vcc->stats->tx_err);
31982 + atomic_inc_unchecked(&vcc->stats->tx_err);
31983 return -ENOLINK;
31984 }
31985 size = skb->len+sizeof(struct atmtcp_hdr);
31986 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31987 if (!new_skb) {
31988 if (vcc->pop) vcc->pop(vcc,skb);
31989 else dev_kfree_skb(skb);
31990 - atomic_inc(&vcc->stats->tx_err);
31991 + atomic_inc_unchecked(&vcc->stats->tx_err);
31992 return -ENOBUFS;
31993 }
31994 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31995 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31996 if (vcc->pop) vcc->pop(vcc,skb);
31997 else dev_kfree_skb(skb);
31998 out_vcc->push(out_vcc,new_skb);
31999 - atomic_inc(&vcc->stats->tx);
32000 - atomic_inc(&out_vcc->stats->rx);
32001 + atomic_inc_unchecked(&vcc->stats->tx);
32002 + atomic_inc_unchecked(&out_vcc->stats->rx);
32003 return 0;
32004 }
32005
32006 @@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32007 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32008 read_unlock(&vcc_sklist_lock);
32009 if (!out_vcc) {
32010 - atomic_inc(&vcc->stats->tx_err);
32011 + atomic_inc_unchecked(&vcc->stats->tx_err);
32012 goto done;
32013 }
32014 skb_pull(skb,sizeof(struct atmtcp_hdr));
32015 @@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32016 __net_timestamp(new_skb);
32017 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32018 out_vcc->push(out_vcc,new_skb);
32019 - atomic_inc(&vcc->stats->tx);
32020 - atomic_inc(&out_vcc->stats->rx);
32021 + atomic_inc_unchecked(&vcc->stats->tx);
32022 + atomic_inc_unchecked(&out_vcc->stats->rx);
32023 done:
32024 if (vcc->pop) vcc->pop(vcc,skb);
32025 else dev_kfree_skb(skb);
32026 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32027 index b1955ba..b179940 100644
32028 --- a/drivers/atm/eni.c
32029 +++ b/drivers/atm/eni.c
32030 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32031 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32032 vcc->dev->number);
32033 length = 0;
32034 - atomic_inc(&vcc->stats->rx_err);
32035 + atomic_inc_unchecked(&vcc->stats->rx_err);
32036 }
32037 else {
32038 length = ATM_CELL_SIZE-1; /* no HEC */
32039 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32040 size);
32041 }
32042 eff = length = 0;
32043 - atomic_inc(&vcc->stats->rx_err);
32044 + atomic_inc_unchecked(&vcc->stats->rx_err);
32045 }
32046 else {
32047 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32048 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32049 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32050 vcc->dev->number,vcc->vci,length,size << 2,descr);
32051 length = eff = 0;
32052 - atomic_inc(&vcc->stats->rx_err);
32053 + atomic_inc_unchecked(&vcc->stats->rx_err);
32054 }
32055 }
32056 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32057 @@ -767,7 +767,7 @@ rx_dequeued++;
32058 vcc->push(vcc,skb);
32059 pushed++;
32060 }
32061 - atomic_inc(&vcc->stats->rx);
32062 + atomic_inc_unchecked(&vcc->stats->rx);
32063 }
32064 wake_up(&eni_dev->rx_wait);
32065 }
32066 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32067 PCI_DMA_TODEVICE);
32068 if (vcc->pop) vcc->pop(vcc,skb);
32069 else dev_kfree_skb_irq(skb);
32070 - atomic_inc(&vcc->stats->tx);
32071 + atomic_inc_unchecked(&vcc->stats->tx);
32072 wake_up(&eni_dev->tx_wait);
32073 dma_complete++;
32074 }
32075 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32076 index b41c948..a002b17 100644
32077 --- a/drivers/atm/firestream.c
32078 +++ b/drivers/atm/firestream.c
32079 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32080 }
32081 }
32082
32083 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32084 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32085
32086 fs_dprintk (FS_DEBUG_TXMEM, "i");
32087 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32088 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32089 #endif
32090 skb_put (skb, qe->p1 & 0xffff);
32091 ATM_SKB(skb)->vcc = atm_vcc;
32092 - atomic_inc(&atm_vcc->stats->rx);
32093 + atomic_inc_unchecked(&atm_vcc->stats->rx);
32094 __net_timestamp(skb);
32095 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32096 atm_vcc->push (atm_vcc, skb);
32097 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32098 kfree (pe);
32099 }
32100 if (atm_vcc)
32101 - atomic_inc(&atm_vcc->stats->rx_drop);
32102 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32103 break;
32104 case 0x1f: /* Reassembly abort: no buffers. */
32105 /* Silently increment error counter. */
32106 if (atm_vcc)
32107 - atomic_inc(&atm_vcc->stats->rx_drop);
32108 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32109 break;
32110 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32111 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32112 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32113 index 204814e..cede831 100644
32114 --- a/drivers/atm/fore200e.c
32115 +++ b/drivers/atm/fore200e.c
32116 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32117 #endif
32118 /* check error condition */
32119 if (*entry->status & STATUS_ERROR)
32120 - atomic_inc(&vcc->stats->tx_err);
32121 + atomic_inc_unchecked(&vcc->stats->tx_err);
32122 else
32123 - atomic_inc(&vcc->stats->tx);
32124 + atomic_inc_unchecked(&vcc->stats->tx);
32125 }
32126 }
32127
32128 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32129 if (skb == NULL) {
32130 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32131
32132 - atomic_inc(&vcc->stats->rx_drop);
32133 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32134 return -ENOMEM;
32135 }
32136
32137 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32138
32139 dev_kfree_skb_any(skb);
32140
32141 - atomic_inc(&vcc->stats->rx_drop);
32142 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32143 return -ENOMEM;
32144 }
32145
32146 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32147
32148 vcc->push(vcc, skb);
32149 - atomic_inc(&vcc->stats->rx);
32150 + atomic_inc_unchecked(&vcc->stats->rx);
32151
32152 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32153
32154 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32155 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32156 fore200e->atm_dev->number,
32157 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32158 - atomic_inc(&vcc->stats->rx_err);
32159 + atomic_inc_unchecked(&vcc->stats->rx_err);
32160 }
32161 }
32162
32163 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32164 goto retry_here;
32165 }
32166
32167 - atomic_inc(&vcc->stats->tx_err);
32168 + atomic_inc_unchecked(&vcc->stats->tx_err);
32169
32170 fore200e->tx_sat++;
32171 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32172 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32173 index d689126..e78e412 100644
32174 --- a/drivers/atm/he.c
32175 +++ b/drivers/atm/he.c
32176 @@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32177
32178 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32179 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32180 - atomic_inc(&vcc->stats->rx_drop);
32181 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32182 goto return_host_buffers;
32183 }
32184
32185 @@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32186 RBRQ_LEN_ERR(he_dev->rbrq_head)
32187 ? "LEN_ERR" : "",
32188 vcc->vpi, vcc->vci);
32189 - atomic_inc(&vcc->stats->rx_err);
32190 + atomic_inc_unchecked(&vcc->stats->rx_err);
32191 goto return_host_buffers;
32192 }
32193
32194 @@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32195 vcc->push(vcc, skb);
32196 spin_lock(&he_dev->global_lock);
32197
32198 - atomic_inc(&vcc->stats->rx);
32199 + atomic_inc_unchecked(&vcc->stats->rx);
32200
32201 return_host_buffers:
32202 ++pdus_assembled;
32203 @@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32204 tpd->vcc->pop(tpd->vcc, tpd->skb);
32205 else
32206 dev_kfree_skb_any(tpd->skb);
32207 - atomic_inc(&tpd->vcc->stats->tx_err);
32208 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32209 }
32210 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32211 return;
32212 @@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32213 vcc->pop(vcc, skb);
32214 else
32215 dev_kfree_skb_any(skb);
32216 - atomic_inc(&vcc->stats->tx_err);
32217 + atomic_inc_unchecked(&vcc->stats->tx_err);
32218 return -EINVAL;
32219 }
32220
32221 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32222 vcc->pop(vcc, skb);
32223 else
32224 dev_kfree_skb_any(skb);
32225 - atomic_inc(&vcc->stats->tx_err);
32226 + atomic_inc_unchecked(&vcc->stats->tx_err);
32227 return -EINVAL;
32228 }
32229 #endif
32230 @@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32231 vcc->pop(vcc, skb);
32232 else
32233 dev_kfree_skb_any(skb);
32234 - atomic_inc(&vcc->stats->tx_err);
32235 + atomic_inc_unchecked(&vcc->stats->tx_err);
32236 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32237 return -ENOMEM;
32238 }
32239 @@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32240 vcc->pop(vcc, skb);
32241 else
32242 dev_kfree_skb_any(skb);
32243 - atomic_inc(&vcc->stats->tx_err);
32244 + atomic_inc_unchecked(&vcc->stats->tx_err);
32245 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32246 return -ENOMEM;
32247 }
32248 @@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32249 __enqueue_tpd(he_dev, tpd, cid);
32250 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32251
32252 - atomic_inc(&vcc->stats->tx);
32253 + atomic_inc_unchecked(&vcc->stats->tx);
32254
32255 return 0;
32256 }
32257 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32258 index 1dc0519..1aadaf7 100644
32259 --- a/drivers/atm/horizon.c
32260 +++ b/drivers/atm/horizon.c
32261 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32262 {
32263 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32264 // VC layer stats
32265 - atomic_inc(&vcc->stats->rx);
32266 + atomic_inc_unchecked(&vcc->stats->rx);
32267 __net_timestamp(skb);
32268 // end of our responsibility
32269 vcc->push (vcc, skb);
32270 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32271 dev->tx_iovec = NULL;
32272
32273 // VC layer stats
32274 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32275 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32276
32277 // free the skb
32278 hrz_kfree_skb (skb);
32279 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32280 index 272f009..a18ba55 100644
32281 --- a/drivers/atm/idt77252.c
32282 +++ b/drivers/atm/idt77252.c
32283 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32284 else
32285 dev_kfree_skb(skb);
32286
32287 - atomic_inc(&vcc->stats->tx);
32288 + atomic_inc_unchecked(&vcc->stats->tx);
32289 }
32290
32291 atomic_dec(&scq->used);
32292 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32293 if ((sb = dev_alloc_skb(64)) == NULL) {
32294 printk("%s: Can't allocate buffers for aal0.\n",
32295 card->name);
32296 - atomic_add(i, &vcc->stats->rx_drop);
32297 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
32298 break;
32299 }
32300 if (!atm_charge(vcc, sb->truesize)) {
32301 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32302 card->name);
32303 - atomic_add(i - 1, &vcc->stats->rx_drop);
32304 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32305 dev_kfree_skb(sb);
32306 break;
32307 }
32308 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32309 ATM_SKB(sb)->vcc = vcc;
32310 __net_timestamp(sb);
32311 vcc->push(vcc, sb);
32312 - atomic_inc(&vcc->stats->rx);
32313 + atomic_inc_unchecked(&vcc->stats->rx);
32314
32315 cell += ATM_CELL_PAYLOAD;
32316 }
32317 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32318 "(CDC: %08x)\n",
32319 card->name, len, rpp->len, readl(SAR_REG_CDC));
32320 recycle_rx_pool_skb(card, rpp);
32321 - atomic_inc(&vcc->stats->rx_err);
32322 + atomic_inc_unchecked(&vcc->stats->rx_err);
32323 return;
32324 }
32325 if (stat & SAR_RSQE_CRC) {
32326 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32327 recycle_rx_pool_skb(card, rpp);
32328 - atomic_inc(&vcc->stats->rx_err);
32329 + atomic_inc_unchecked(&vcc->stats->rx_err);
32330 return;
32331 }
32332 if (skb_queue_len(&rpp->queue) > 1) {
32333 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32334 RXPRINTK("%s: Can't alloc RX skb.\n",
32335 card->name);
32336 recycle_rx_pool_skb(card, rpp);
32337 - atomic_inc(&vcc->stats->rx_err);
32338 + atomic_inc_unchecked(&vcc->stats->rx_err);
32339 return;
32340 }
32341 if (!atm_charge(vcc, skb->truesize)) {
32342 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32343 __net_timestamp(skb);
32344
32345 vcc->push(vcc, skb);
32346 - atomic_inc(&vcc->stats->rx);
32347 + atomic_inc_unchecked(&vcc->stats->rx);
32348
32349 return;
32350 }
32351 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32352 __net_timestamp(skb);
32353
32354 vcc->push(vcc, skb);
32355 - atomic_inc(&vcc->stats->rx);
32356 + atomic_inc_unchecked(&vcc->stats->rx);
32357
32358 if (skb->truesize > SAR_FB_SIZE_3)
32359 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32360 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32361 if (vcc->qos.aal != ATM_AAL0) {
32362 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32363 card->name, vpi, vci);
32364 - atomic_inc(&vcc->stats->rx_drop);
32365 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32366 goto drop;
32367 }
32368
32369 if ((sb = dev_alloc_skb(64)) == NULL) {
32370 printk("%s: Can't allocate buffers for AAL0.\n",
32371 card->name);
32372 - atomic_inc(&vcc->stats->rx_err);
32373 + atomic_inc_unchecked(&vcc->stats->rx_err);
32374 goto drop;
32375 }
32376
32377 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32378 ATM_SKB(sb)->vcc = vcc;
32379 __net_timestamp(sb);
32380 vcc->push(vcc, sb);
32381 - atomic_inc(&vcc->stats->rx);
32382 + atomic_inc_unchecked(&vcc->stats->rx);
32383
32384 drop:
32385 skb_pull(queue, 64);
32386 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32387
32388 if (vc == NULL) {
32389 printk("%s: NULL connection in send().\n", card->name);
32390 - atomic_inc(&vcc->stats->tx_err);
32391 + atomic_inc_unchecked(&vcc->stats->tx_err);
32392 dev_kfree_skb(skb);
32393 return -EINVAL;
32394 }
32395 if (!test_bit(VCF_TX, &vc->flags)) {
32396 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32397 - atomic_inc(&vcc->stats->tx_err);
32398 + atomic_inc_unchecked(&vcc->stats->tx_err);
32399 dev_kfree_skb(skb);
32400 return -EINVAL;
32401 }
32402 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32403 break;
32404 default:
32405 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32406 - atomic_inc(&vcc->stats->tx_err);
32407 + atomic_inc_unchecked(&vcc->stats->tx_err);
32408 dev_kfree_skb(skb);
32409 return -EINVAL;
32410 }
32411
32412 if (skb_shinfo(skb)->nr_frags != 0) {
32413 printk("%s: No scatter-gather yet.\n", card->name);
32414 - atomic_inc(&vcc->stats->tx_err);
32415 + atomic_inc_unchecked(&vcc->stats->tx_err);
32416 dev_kfree_skb(skb);
32417 return -EINVAL;
32418 }
32419 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32420
32421 err = queue_skb(card, vc, skb, oam);
32422 if (err) {
32423 - atomic_inc(&vcc->stats->tx_err);
32424 + atomic_inc_unchecked(&vcc->stats->tx_err);
32425 dev_kfree_skb(skb);
32426 return err;
32427 }
32428 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32429 skb = dev_alloc_skb(64);
32430 if (!skb) {
32431 printk("%s: Out of memory in send_oam().\n", card->name);
32432 - atomic_inc(&vcc->stats->tx_err);
32433 + atomic_inc_unchecked(&vcc->stats->tx_err);
32434 return -ENOMEM;
32435 }
32436 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32437 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32438 index 4217f29..88f547a 100644
32439 --- a/drivers/atm/iphase.c
32440 +++ b/drivers/atm/iphase.c
32441 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32442 status = (u_short) (buf_desc_ptr->desc_mode);
32443 if (status & (RX_CER | RX_PTE | RX_OFL))
32444 {
32445 - atomic_inc(&vcc->stats->rx_err);
32446 + atomic_inc_unchecked(&vcc->stats->rx_err);
32447 IF_ERR(printk("IA: bad packet, dropping it");)
32448 if (status & RX_CER) {
32449 IF_ERR(printk(" cause: packet CRC error\n");)
32450 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32451 len = dma_addr - buf_addr;
32452 if (len > iadev->rx_buf_sz) {
32453 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32454 - atomic_inc(&vcc->stats->rx_err);
32455 + atomic_inc_unchecked(&vcc->stats->rx_err);
32456 goto out_free_desc;
32457 }
32458
32459 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32460 ia_vcc = INPH_IA_VCC(vcc);
32461 if (ia_vcc == NULL)
32462 {
32463 - atomic_inc(&vcc->stats->rx_err);
32464 + atomic_inc_unchecked(&vcc->stats->rx_err);
32465 atm_return(vcc, skb->truesize);
32466 dev_kfree_skb_any(skb);
32467 goto INCR_DLE;
32468 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32469 if ((length > iadev->rx_buf_sz) || (length >
32470 (skb->len - sizeof(struct cpcs_trailer))))
32471 {
32472 - atomic_inc(&vcc->stats->rx_err);
32473 + atomic_inc_unchecked(&vcc->stats->rx_err);
32474 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32475 length, skb->len);)
32476 atm_return(vcc, skb->truesize);
32477 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32478
32479 IF_RX(printk("rx_dle_intr: skb push");)
32480 vcc->push(vcc,skb);
32481 - atomic_inc(&vcc->stats->rx);
32482 + atomic_inc_unchecked(&vcc->stats->rx);
32483 iadev->rx_pkt_cnt++;
32484 }
32485 INCR_DLE:
32486 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32487 {
32488 struct k_sonet_stats *stats;
32489 stats = &PRIV(_ia_dev[board])->sonet_stats;
32490 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32491 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32492 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32493 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32494 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32495 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32496 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32497 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32498 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32499 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32500 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32501 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32502 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32503 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32504 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32505 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32506 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32507 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32508 }
32509 ia_cmds.status = 0;
32510 break;
32511 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32512 if ((desc == 0) || (desc > iadev->num_tx_desc))
32513 {
32514 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32515 - atomic_inc(&vcc->stats->tx);
32516 + atomic_inc_unchecked(&vcc->stats->tx);
32517 if (vcc->pop)
32518 vcc->pop(vcc, skb);
32519 else
32520 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32521 ATM_DESC(skb) = vcc->vci;
32522 skb_queue_tail(&iadev->tx_dma_q, skb);
32523
32524 - atomic_inc(&vcc->stats->tx);
32525 + atomic_inc_unchecked(&vcc->stats->tx);
32526 iadev->tx_pkt_cnt++;
32527 /* Increment transaction counter */
32528 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32529
32530 #if 0
32531 /* add flow control logic */
32532 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32533 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32534 if (iavcc->vc_desc_cnt > 10) {
32535 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32536 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32537 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32538 index fa7d701..1e404c7 100644
32539 --- a/drivers/atm/lanai.c
32540 +++ b/drivers/atm/lanai.c
32541 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32542 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32543 lanai_endtx(lanai, lvcc);
32544 lanai_free_skb(lvcc->tx.atmvcc, skb);
32545 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32546 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32547 }
32548
32549 /* Try to fill the buffer - don't call unless there is backlog */
32550 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32551 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32552 __net_timestamp(skb);
32553 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32554 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32555 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32556 out:
32557 lvcc->rx.buf.ptr = end;
32558 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32559 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32560 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32561 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32562 lanai->stats.service_rxnotaal5++;
32563 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32564 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32565 return 0;
32566 }
32567 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32568 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32569 int bytes;
32570 read_unlock(&vcc_sklist_lock);
32571 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32572 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32573 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32574 lvcc->stats.x.aal5.service_trash++;
32575 bytes = (SERVICE_GET_END(s) * 16) -
32576 (((unsigned long) lvcc->rx.buf.ptr) -
32577 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32578 }
32579 if (s & SERVICE_STREAM) {
32580 read_unlock(&vcc_sklist_lock);
32581 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32582 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32583 lvcc->stats.x.aal5.service_stream++;
32584 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32585 "PDU on VCI %d!\n", lanai->number, vci);
32586 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32587 return 0;
32588 }
32589 DPRINTK("got rx crc error on vci %d\n", vci);
32590 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32591 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32592 lvcc->stats.x.aal5.service_rxcrc++;
32593 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32594 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32595 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32596 index 6587dc2..149833d 100644
32597 --- a/drivers/atm/nicstar.c
32598 +++ b/drivers/atm/nicstar.c
32599 @@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32600 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32601 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32602 card->index);
32603 - atomic_inc(&vcc->stats->tx_err);
32604 + atomic_inc_unchecked(&vcc->stats->tx_err);
32605 dev_kfree_skb_any(skb);
32606 return -EINVAL;
32607 }
32608 @@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32609 if (!vc->tx) {
32610 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32611 card->index);
32612 - atomic_inc(&vcc->stats->tx_err);
32613 + atomic_inc_unchecked(&vcc->stats->tx_err);
32614 dev_kfree_skb_any(skb);
32615 return -EINVAL;
32616 }
32617 @@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32618 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32619 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32620 card->index);
32621 - atomic_inc(&vcc->stats->tx_err);
32622 + atomic_inc_unchecked(&vcc->stats->tx_err);
32623 dev_kfree_skb_any(skb);
32624 return -EINVAL;
32625 }
32626
32627 if (skb_shinfo(skb)->nr_frags != 0) {
32628 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32629 - atomic_inc(&vcc->stats->tx_err);
32630 + atomic_inc_unchecked(&vcc->stats->tx_err);
32631 dev_kfree_skb_any(skb);
32632 return -EINVAL;
32633 }
32634 @@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32635 }
32636
32637 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32638 - atomic_inc(&vcc->stats->tx_err);
32639 + atomic_inc_unchecked(&vcc->stats->tx_err);
32640 dev_kfree_skb_any(skb);
32641 return -EIO;
32642 }
32643 - atomic_inc(&vcc->stats->tx);
32644 + atomic_inc_unchecked(&vcc->stats->tx);
32645
32646 return 0;
32647 }
32648 @@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32649 printk
32650 ("nicstar%d: Can't allocate buffers for aal0.\n",
32651 card->index);
32652 - atomic_add(i, &vcc->stats->rx_drop);
32653 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
32654 break;
32655 }
32656 if (!atm_charge(vcc, sb->truesize)) {
32657 RXPRINTK
32658 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32659 card->index);
32660 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32661 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32662 dev_kfree_skb_any(sb);
32663 break;
32664 }
32665 @@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32666 ATM_SKB(sb)->vcc = vcc;
32667 __net_timestamp(sb);
32668 vcc->push(vcc, sb);
32669 - atomic_inc(&vcc->stats->rx);
32670 + atomic_inc_unchecked(&vcc->stats->rx);
32671 cell += ATM_CELL_PAYLOAD;
32672 }
32673
32674 @@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32675 if (iovb == NULL) {
32676 printk("nicstar%d: Out of iovec buffers.\n",
32677 card->index);
32678 - atomic_inc(&vcc->stats->rx_drop);
32679 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32680 recycle_rx_buf(card, skb);
32681 return;
32682 }
32683 @@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32684 small or large buffer itself. */
32685 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32686 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32687 - atomic_inc(&vcc->stats->rx_err);
32688 + atomic_inc_unchecked(&vcc->stats->rx_err);
32689 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32690 NS_MAX_IOVECS);
32691 NS_PRV_IOVCNT(iovb) = 0;
32692 @@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32693 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32694 card->index);
32695 which_list(card, skb);
32696 - atomic_inc(&vcc->stats->rx_err);
32697 + atomic_inc_unchecked(&vcc->stats->rx_err);
32698 recycle_rx_buf(card, skb);
32699 vc->rx_iov = NULL;
32700 recycle_iov_buf(card, iovb);
32701 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32702 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32703 card->index);
32704 which_list(card, skb);
32705 - atomic_inc(&vcc->stats->rx_err);
32706 + atomic_inc_unchecked(&vcc->stats->rx_err);
32707 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32708 NS_PRV_IOVCNT(iovb));
32709 vc->rx_iov = NULL;
32710 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32711 printk(" - PDU size mismatch.\n");
32712 else
32713 printk(".\n");
32714 - atomic_inc(&vcc->stats->rx_err);
32715 + atomic_inc_unchecked(&vcc->stats->rx_err);
32716 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32717 NS_PRV_IOVCNT(iovb));
32718 vc->rx_iov = NULL;
32719 @@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32720 /* skb points to a small buffer */
32721 if (!atm_charge(vcc, skb->truesize)) {
32722 push_rxbufs(card, skb);
32723 - atomic_inc(&vcc->stats->rx_drop);
32724 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32725 } else {
32726 skb_put(skb, len);
32727 dequeue_sm_buf(card, skb);
32728 @@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32729 ATM_SKB(skb)->vcc = vcc;
32730 __net_timestamp(skb);
32731 vcc->push(vcc, skb);
32732 - atomic_inc(&vcc->stats->rx);
32733 + atomic_inc_unchecked(&vcc->stats->rx);
32734 }
32735 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32736 struct sk_buff *sb;
32737 @@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32738 if (len <= NS_SMBUFSIZE) {
32739 if (!atm_charge(vcc, sb->truesize)) {
32740 push_rxbufs(card, sb);
32741 - atomic_inc(&vcc->stats->rx_drop);
32742 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32743 } else {
32744 skb_put(sb, len);
32745 dequeue_sm_buf(card, sb);
32746 @@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32747 ATM_SKB(sb)->vcc = vcc;
32748 __net_timestamp(sb);
32749 vcc->push(vcc, sb);
32750 - atomic_inc(&vcc->stats->rx);
32751 + atomic_inc_unchecked(&vcc->stats->rx);
32752 }
32753
32754 push_rxbufs(card, skb);
32755 @@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32756
32757 if (!atm_charge(vcc, skb->truesize)) {
32758 push_rxbufs(card, skb);
32759 - atomic_inc(&vcc->stats->rx_drop);
32760 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32761 } else {
32762 dequeue_lg_buf(card, skb);
32763 #ifdef NS_USE_DESTRUCTORS
32764 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32765 ATM_SKB(skb)->vcc = vcc;
32766 __net_timestamp(skb);
32767 vcc->push(vcc, skb);
32768 - atomic_inc(&vcc->stats->rx);
32769 + atomic_inc_unchecked(&vcc->stats->rx);
32770 }
32771
32772 push_rxbufs(card, sb);
32773 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32774 printk
32775 ("nicstar%d: Out of huge buffers.\n",
32776 card->index);
32777 - atomic_inc(&vcc->stats->rx_drop);
32778 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32779 recycle_iovec_rx_bufs(card,
32780 (struct iovec *)
32781 iovb->data,
32782 @@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32783 card->hbpool.count++;
32784 } else
32785 dev_kfree_skb_any(hb);
32786 - atomic_inc(&vcc->stats->rx_drop);
32787 + atomic_inc_unchecked(&vcc->stats->rx_drop);
32788 } else {
32789 /* Copy the small buffer to the huge buffer */
32790 sb = (struct sk_buff *)iov->iov_base;
32791 @@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32792 #endif /* NS_USE_DESTRUCTORS */
32793 __net_timestamp(hb);
32794 vcc->push(vcc, hb);
32795 - atomic_inc(&vcc->stats->rx);
32796 + atomic_inc_unchecked(&vcc->stats->rx);
32797 }
32798 }
32799
32800 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
32801 index 32784d1..4a8434a 100644
32802 --- a/drivers/atm/solos-pci.c
32803 +++ b/drivers/atm/solos-pci.c
32804 @@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
32805 }
32806 atm_charge(vcc, skb->truesize);
32807 vcc->push(vcc, skb);
32808 - atomic_inc(&vcc->stats->rx);
32809 + atomic_inc_unchecked(&vcc->stats->rx);
32810 break;
32811
32812 case PKT_STATUS:
32813 @@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
32814 vcc = SKB_CB(oldskb)->vcc;
32815
32816 if (vcc) {
32817 - atomic_inc(&vcc->stats->tx);
32818 + atomic_inc_unchecked(&vcc->stats->tx);
32819 solos_pop(vcc, oldskb);
32820 } else {
32821 dev_kfree_skb_irq(oldskb);
32822 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
32823 index 0215934..ce9f5b1 100644
32824 --- a/drivers/atm/suni.c
32825 +++ b/drivers/atm/suni.c
32826 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
32827
32828
32829 #define ADD_LIMITED(s,v) \
32830 - atomic_add((v),&stats->s); \
32831 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
32832 + atomic_add_unchecked((v),&stats->s); \
32833 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
32834
32835
32836 static void suni_hz(unsigned long from_timer)
32837 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
32838 index 5120a96..e2572bd 100644
32839 --- a/drivers/atm/uPD98402.c
32840 +++ b/drivers/atm/uPD98402.c
32841 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
32842 struct sonet_stats tmp;
32843 int error = 0;
32844
32845 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32846 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32847 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
32848 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
32849 if (zero && !error) {
32850 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
32851
32852
32853 #define ADD_LIMITED(s,v) \
32854 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
32855 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
32856 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32857 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
32858 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
32859 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32860
32861
32862 static void stat_event(struct atm_dev *dev)
32863 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
32864 if (reason & uPD98402_INT_PFM) stat_event(dev);
32865 if (reason & uPD98402_INT_PCO) {
32866 (void) GET(PCOCR); /* clear interrupt cause */
32867 - atomic_add(GET(HECCT),
32868 + atomic_add_unchecked(GET(HECCT),
32869 &PRIV(dev)->sonet_stats.uncorr_hcs);
32870 }
32871 if ((reason & uPD98402_INT_RFO) &&
32872 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
32873 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
32874 uPD98402_INT_LOS),PIMR); /* enable them */
32875 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
32876 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32877 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
32878 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
32879 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32880 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
32881 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
32882 return 0;
32883 }
32884
32885 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32886 index 969c3c2..9b72956 100644
32887 --- a/drivers/atm/zatm.c
32888 +++ b/drivers/atm/zatm.c
32889 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32890 }
32891 if (!size) {
32892 dev_kfree_skb_irq(skb);
32893 - if (vcc) atomic_inc(&vcc->stats->rx_err);
32894 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32895 continue;
32896 }
32897 if (!atm_charge(vcc,skb->truesize)) {
32898 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32899 skb->len = size;
32900 ATM_SKB(skb)->vcc = vcc;
32901 vcc->push(vcc,skb);
32902 - atomic_inc(&vcc->stats->rx);
32903 + atomic_inc_unchecked(&vcc->stats->rx);
32904 }
32905 zout(pos & 0xffff,MTA(mbx));
32906 #if 0 /* probably a stupid idea */
32907 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32908 skb_queue_head(&zatm_vcc->backlog,skb);
32909 break;
32910 }
32911 - atomic_inc(&vcc->stats->tx);
32912 + atomic_inc_unchecked(&vcc->stats->tx);
32913 wake_up(&zatm_vcc->tx_wait);
32914 }
32915
32916 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32917 index 519865b..e540db3 100644
32918 --- a/drivers/base/bus.c
32919 +++ b/drivers/base/bus.c
32920 @@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
32921 return -EINVAL;
32922
32923 mutex_lock(&subsys->p->mutex);
32924 - list_add_tail(&sif->node, &subsys->p->interfaces);
32925 + pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
32926 if (sif->add_dev) {
32927 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32928 while ((dev = subsys_dev_iter_next(&iter)))
32929 @@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
32930 subsys = sif->subsys;
32931
32932 mutex_lock(&subsys->p->mutex);
32933 - list_del_init(&sif->node);
32934 + pax_list_del_init((struct list_head *)&sif->node);
32935 if (sif->remove_dev) {
32936 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32937 while ((dev = subsys_dev_iter_next(&iter)))
32938 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
32939 index 01fc5b0..d0ed716 100644
32940 --- a/drivers/base/devtmpfs.c
32941 +++ b/drivers/base/devtmpfs.c
32942 @@ -348,7 +348,7 @@ int devtmpfs_mount(const char *mntdir)
32943 if (!thread)
32944 return 0;
32945
32946 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
32947 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
32948 if (err)
32949 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
32950 else
32951 diff --git a/drivers/base/node.c b/drivers/base/node.c
32952 index fac124a..66bd4ab 100644
32953 --- a/drivers/base/node.c
32954 +++ b/drivers/base/node.c
32955 @@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
32956 struct node_attr {
32957 struct device_attribute attr;
32958 enum node_states state;
32959 -};
32960 +} __do_const;
32961
32962 static ssize_t show_node_state(struct device *dev,
32963 struct device_attribute *attr, char *buf)
32964 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
32965 index 9a6b05a..2fc8fb9 100644
32966 --- a/drivers/base/power/domain.c
32967 +++ b/drivers/base/power/domain.c
32968 @@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
32969 {
32970 struct cpuidle_driver *cpuidle_drv;
32971 struct gpd_cpu_data *cpu_data;
32972 - struct cpuidle_state *idle_state;
32973 + cpuidle_state_no_const *idle_state;
32974 int ret = 0;
32975
32976 if (IS_ERR_OR_NULL(genpd) || state < 0)
32977 @@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
32978 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
32979 {
32980 struct gpd_cpu_data *cpu_data;
32981 - struct cpuidle_state *idle_state;
32982 + cpuidle_state_no_const *idle_state;
32983 int ret = 0;
32984
32985 if (IS_ERR_OR_NULL(genpd))
32986 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
32987 index 79715e7..df06b3b 100644
32988 --- a/drivers/base/power/wakeup.c
32989 +++ b/drivers/base/power/wakeup.c
32990 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
32991 * They need to be modified together atomically, so it's better to use one
32992 * atomic variable to hold them both.
32993 */
32994 -static atomic_t combined_event_count = ATOMIC_INIT(0);
32995 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
32996
32997 #define IN_PROGRESS_BITS (sizeof(int) * 4)
32998 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
32999
33000 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33001 {
33002 - unsigned int comb = atomic_read(&combined_event_count);
33003 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
33004
33005 *cnt = (comb >> IN_PROGRESS_BITS);
33006 *inpr = comb & MAX_IN_PROGRESS;
33007 @@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33008 ws->start_prevent_time = ws->last_time;
33009
33010 /* Increment the counter of events in progress. */
33011 - cec = atomic_inc_return(&combined_event_count);
33012 + cec = atomic_inc_return_unchecked(&combined_event_count);
33013
33014 trace_wakeup_source_activate(ws->name, cec);
33015 }
33016 @@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33017 * Increment the counter of registered wakeup events and decrement the
33018 * couter of wakeup events in progress simultaneously.
33019 */
33020 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33021 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33022 trace_wakeup_source_deactivate(ws->name, cec);
33023
33024 split_counters(&cnt, &inpr);
33025 diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33026 index e8d11b6..7b1b36f 100644
33027 --- a/drivers/base/syscore.c
33028 +++ b/drivers/base/syscore.c
33029 @@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33030 void register_syscore_ops(struct syscore_ops *ops)
33031 {
33032 mutex_lock(&syscore_ops_lock);
33033 - list_add_tail(&ops->node, &syscore_ops_list);
33034 + pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33035 mutex_unlock(&syscore_ops_lock);
33036 }
33037 EXPORT_SYMBOL_GPL(register_syscore_ops);
33038 @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33039 void unregister_syscore_ops(struct syscore_ops *ops)
33040 {
33041 mutex_lock(&syscore_ops_lock);
33042 - list_del(&ops->node);
33043 + pax_list_del((struct list_head *)&ops->node);
33044 mutex_unlock(&syscore_ops_lock);
33045 }
33046 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33047 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33048 index 1c1b8e5..b7fc681 100644
33049 --- a/drivers/block/cciss.c
33050 +++ b/drivers/block/cciss.c
33051 @@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33052 int err;
33053 u32 cp;
33054
33055 + memset(&arg64, 0, sizeof(arg64));
33056 +
33057 err = 0;
33058 err |=
33059 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33060 @@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
33061 while (!list_empty(&h->reqQ)) {
33062 c = list_entry(h->reqQ.next, CommandList_struct, list);
33063 /* can't do anything if fifo is full */
33064 - if ((h->access.fifo_full(h))) {
33065 + if ((h->access->fifo_full(h))) {
33066 dev_warn(&h->pdev->dev, "fifo full\n");
33067 break;
33068 }
33069 @@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
33070 h->Qdepth--;
33071
33072 /* Tell the controller execute command */
33073 - h->access.submit_command(h, c);
33074 + h->access->submit_command(h, c);
33075
33076 /* Put job onto the completed Q */
33077 addQ(&h->cmpQ, c);
33078 @@ -3441,17 +3443,17 @@ startio:
33079
33080 static inline unsigned long get_next_completion(ctlr_info_t *h)
33081 {
33082 - return h->access.command_completed(h);
33083 + return h->access->command_completed(h);
33084 }
33085
33086 static inline int interrupt_pending(ctlr_info_t *h)
33087 {
33088 - return h->access.intr_pending(h);
33089 + return h->access->intr_pending(h);
33090 }
33091
33092 static inline long interrupt_not_for_us(ctlr_info_t *h)
33093 {
33094 - return ((h->access.intr_pending(h) == 0) ||
33095 + return ((h->access->intr_pending(h) == 0) ||
33096 (h->interrupts_enabled == 0));
33097 }
33098
33099 @@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
33100 u32 a;
33101
33102 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33103 - return h->access.command_completed(h);
33104 + return h->access->command_completed(h);
33105
33106 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33107 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33108 @@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33109 trans_support & CFGTBL_Trans_use_short_tags);
33110
33111 /* Change the access methods to the performant access methods */
33112 - h->access = SA5_performant_access;
33113 + h->access = &SA5_performant_access;
33114 h->transMethod = CFGTBL_Trans_Performant;
33115
33116 return;
33117 @@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33118 if (prod_index < 0)
33119 return -ENODEV;
33120 h->product_name = products[prod_index].product_name;
33121 - h->access = *(products[prod_index].access);
33122 + h->access = products[prod_index].access;
33123
33124 if (cciss_board_disabled(h)) {
33125 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33126 @@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
33127 }
33128
33129 /* make sure the board interrupts are off */
33130 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
33131 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
33132 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33133 if (rc)
33134 goto clean2;
33135 @@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
33136 * fake ones to scoop up any residual completions.
33137 */
33138 spin_lock_irqsave(&h->lock, flags);
33139 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
33140 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
33141 spin_unlock_irqrestore(&h->lock, flags);
33142 free_irq(h->intr[h->intr_mode], h);
33143 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33144 @@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
33145 dev_info(&h->pdev->dev, "Board READY.\n");
33146 dev_info(&h->pdev->dev,
33147 "Waiting for stale completions to drain.\n");
33148 - h->access.set_intr_mask(h, CCISS_INTR_ON);
33149 + h->access->set_intr_mask(h, CCISS_INTR_ON);
33150 msleep(10000);
33151 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
33152 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
33153
33154 rc = controller_reset_failed(h->cfgtable);
33155 if (rc)
33156 @@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
33157 cciss_scsi_setup(h);
33158
33159 /* Turn the interrupts on so we can service requests */
33160 - h->access.set_intr_mask(h, CCISS_INTR_ON);
33161 + h->access->set_intr_mask(h, CCISS_INTR_ON);
33162
33163 /* Get the firmware version */
33164 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33165 @@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33166 kfree(flush_buf);
33167 if (return_code != IO_OK)
33168 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33169 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
33170 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
33171 free_irq(h->intr[h->intr_mode], h);
33172 }
33173
33174 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33175 index 7fda30e..eb5dfe0 100644
33176 --- a/drivers/block/cciss.h
33177 +++ b/drivers/block/cciss.h
33178 @@ -101,7 +101,7 @@ struct ctlr_info
33179 /* information about each logical volume */
33180 drive_info_struct *drv[CISS_MAX_LUN];
33181
33182 - struct access_method access;
33183 + struct access_method *access;
33184
33185 /* queue and queue Info */
33186 struct list_head reqQ;
33187 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33188 index 3f08713..56a586a 100644
33189 --- a/drivers/block/cpqarray.c
33190 +++ b/drivers/block/cpqarray.c
33191 @@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33192 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33193 goto Enomem4;
33194 }
33195 - hba[i]->access.set_intr_mask(hba[i], 0);
33196 + hba[i]->access->set_intr_mask(hba[i], 0);
33197 if (request_irq(hba[i]->intr, do_ida_intr,
33198 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33199 {
33200 @@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33201 add_timer(&hba[i]->timer);
33202
33203 /* Enable IRQ now that spinlock and rate limit timer are set up */
33204 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33205 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33206
33207 for(j=0; j<NWD; j++) {
33208 struct gendisk *disk = ida_gendisk[i][j];
33209 @@ -694,7 +694,7 @@ DBGINFO(
33210 for(i=0; i<NR_PRODUCTS; i++) {
33211 if (board_id == products[i].board_id) {
33212 c->product_name = products[i].product_name;
33213 - c->access = *(products[i].access);
33214 + c->access = products[i].access;
33215 break;
33216 }
33217 }
33218 @@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33219 hba[ctlr]->intr = intr;
33220 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33221 hba[ctlr]->product_name = products[j].product_name;
33222 - hba[ctlr]->access = *(products[j].access);
33223 + hba[ctlr]->access = products[j].access;
33224 hba[ctlr]->ctlr = ctlr;
33225 hba[ctlr]->board_id = board_id;
33226 hba[ctlr]->pci_dev = NULL; /* not PCI */
33227 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
33228
33229 while((c = h->reqQ) != NULL) {
33230 /* Can't do anything if we're busy */
33231 - if (h->access.fifo_full(h) == 0)
33232 + if (h->access->fifo_full(h) == 0)
33233 return;
33234
33235 /* Get the first entry from the request Q */
33236 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
33237 h->Qdepth--;
33238
33239 /* Tell the controller to do our bidding */
33240 - h->access.submit_command(h, c);
33241 + h->access->submit_command(h, c);
33242
33243 /* Get onto the completion Q */
33244 addQ(&h->cmpQ, c);
33245 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33246 unsigned long flags;
33247 __u32 a,a1;
33248
33249 - istat = h->access.intr_pending(h);
33250 + istat = h->access->intr_pending(h);
33251 /* Is this interrupt for us? */
33252 if (istat == 0)
33253 return IRQ_NONE;
33254 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33255 */
33256 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33257 if (istat & FIFO_NOT_EMPTY) {
33258 - while((a = h->access.command_completed(h))) {
33259 + while((a = h->access->command_completed(h))) {
33260 a1 = a; a &= ~3;
33261 if ((c = h->cmpQ) == NULL)
33262 {
33263 @@ -1449,11 +1449,11 @@ static int sendcmd(
33264 /*
33265 * Disable interrupt
33266 */
33267 - info_p->access.set_intr_mask(info_p, 0);
33268 + info_p->access->set_intr_mask(info_p, 0);
33269 /* Make sure there is room in the command FIFO */
33270 /* Actually it should be completely empty at this time. */
33271 for (i = 200000; i > 0; i--) {
33272 - temp = info_p->access.fifo_full(info_p);
33273 + temp = info_p->access->fifo_full(info_p);
33274 if (temp != 0) {
33275 break;
33276 }
33277 @@ -1466,7 +1466,7 @@ DBG(
33278 /*
33279 * Send the cmd
33280 */
33281 - info_p->access.submit_command(info_p, c);
33282 + info_p->access->submit_command(info_p, c);
33283 complete = pollcomplete(ctlr);
33284
33285 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33286 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33287 * we check the new geometry. Then turn interrupts back on when
33288 * we're done.
33289 */
33290 - host->access.set_intr_mask(host, 0);
33291 + host->access->set_intr_mask(host, 0);
33292 getgeometry(ctlr);
33293 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33294 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33295
33296 for(i=0; i<NWD; i++) {
33297 struct gendisk *disk = ida_gendisk[ctlr][i];
33298 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
33299 /* Wait (up to 2 seconds) for a command to complete */
33300
33301 for (i = 200000; i > 0; i--) {
33302 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
33303 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
33304 if (done == 0) {
33305 udelay(10); /* a short fixed delay */
33306 } else
33307 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33308 index be73e9d..7fbf140 100644
33309 --- a/drivers/block/cpqarray.h
33310 +++ b/drivers/block/cpqarray.h
33311 @@ -99,7 +99,7 @@ struct ctlr_info {
33312 drv_info_t drv[NWD];
33313 struct proc_dir_entry *proc;
33314
33315 - struct access_method access;
33316 + struct access_method *access;
33317
33318 cmdlist_t *reqQ;
33319 cmdlist_t *cmpQ;
33320 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33321 index 6b51afa..17e1191 100644
33322 --- a/drivers/block/drbd/drbd_int.h
33323 +++ b/drivers/block/drbd/drbd_int.h
33324 @@ -582,7 +582,7 @@ struct drbd_epoch {
33325 struct drbd_tconn *tconn;
33326 struct list_head list;
33327 unsigned int barrier_nr;
33328 - atomic_t epoch_size; /* increased on every request added. */
33329 + atomic_unchecked_t epoch_size; /* increased on every request added. */
33330 atomic_t active; /* increased on every req. added, and dec on every finished. */
33331 unsigned long flags;
33332 };
33333 @@ -1011,7 +1011,7 @@ struct drbd_conf {
33334 int al_tr_cycle;
33335 int al_tr_pos; /* position of the next transaction in the journal */
33336 wait_queue_head_t seq_wait;
33337 - atomic_t packet_seq;
33338 + atomic_unchecked_t packet_seq;
33339 unsigned int peer_seq;
33340 spinlock_t peer_seq_lock;
33341 unsigned int minor;
33342 @@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33343 char __user *uoptval;
33344 int err;
33345
33346 - uoptval = (char __user __force *)optval;
33347 + uoptval = (char __force_user *)optval;
33348
33349 set_fs(KERNEL_DS);
33350 if (level == SOL_SOCKET)
33351 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33352 index e98da67..1181716b 100644
33353 --- a/drivers/block/drbd/drbd_main.c
33354 +++ b/drivers/block/drbd/drbd_main.c
33355 @@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33356 p->sector = sector;
33357 p->block_id = block_id;
33358 p->blksize = blksize;
33359 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33360 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33361 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33362 }
33363
33364 @@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33365 return -EIO;
33366 p->sector = cpu_to_be64(req->i.sector);
33367 p->block_id = (unsigned long)req;
33368 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33369 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33370 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33371 if (mdev->state.conn >= C_SYNC_SOURCE &&
33372 mdev->state.conn <= C_PAUSED_SYNC_T)
33373 @@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33374 {
33375 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33376
33377 - if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33378 - conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33379 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33380 + conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33381 kfree(tconn->current_epoch);
33382
33383 idr_destroy(&tconn->volumes);
33384 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33385 index a9eccfc..f5efe87 100644
33386 --- a/drivers/block/drbd/drbd_receiver.c
33387 +++ b/drivers/block/drbd/drbd_receiver.c
33388 @@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33389 {
33390 int err;
33391
33392 - atomic_set(&mdev->packet_seq, 0);
33393 + atomic_set_unchecked(&mdev->packet_seq, 0);
33394 mdev->peer_seq = 0;
33395
33396 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33397 @@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33398 do {
33399 next_epoch = NULL;
33400
33401 - epoch_size = atomic_read(&epoch->epoch_size);
33402 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33403
33404 switch (ev & ~EV_CLEANUP) {
33405 case EV_PUT:
33406 @@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33407 rv = FE_DESTROYED;
33408 } else {
33409 epoch->flags = 0;
33410 - atomic_set(&epoch->epoch_size, 0);
33411 + atomic_set_unchecked(&epoch->epoch_size, 0);
33412 /* atomic_set(&epoch->active, 0); is already zero */
33413 if (rv == FE_STILL_LIVE)
33414 rv = FE_RECYCLED;
33415 @@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33416 conn_wait_active_ee_empty(tconn);
33417 drbd_flush(tconn);
33418
33419 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
33420 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33421 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33422 if (epoch)
33423 break;
33424 @@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33425 }
33426
33427 epoch->flags = 0;
33428 - atomic_set(&epoch->epoch_size, 0);
33429 + atomic_set_unchecked(&epoch->epoch_size, 0);
33430 atomic_set(&epoch->active, 0);
33431
33432 spin_lock(&tconn->epoch_lock);
33433 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
33434 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33435 list_add(&epoch->list, &tconn->current_epoch->list);
33436 tconn->current_epoch = epoch;
33437 tconn->epochs++;
33438 @@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33439
33440 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33441 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33442 - atomic_inc(&tconn->current_epoch->epoch_size);
33443 + atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33444 err2 = drbd_drain_block(mdev, pi->size);
33445 if (!err)
33446 err = err2;
33447 @@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33448
33449 spin_lock(&tconn->epoch_lock);
33450 peer_req->epoch = tconn->current_epoch;
33451 - atomic_inc(&peer_req->epoch->epoch_size);
33452 + atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33453 atomic_inc(&peer_req->epoch->active);
33454 spin_unlock(&tconn->epoch_lock);
33455
33456 @@ -4346,7 +4346,7 @@ struct data_cmd {
33457 int expect_payload;
33458 size_t pkt_size;
33459 int (*fn)(struct drbd_tconn *, struct packet_info *);
33460 -};
33461 +} __do_const;
33462
33463 static struct data_cmd drbd_cmd_handler[] = {
33464 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33465 @@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33466 if (!list_empty(&tconn->current_epoch->list))
33467 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33468 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33469 - atomic_set(&tconn->current_epoch->epoch_size, 0);
33470 + atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33471 tconn->send.seen_any_write_yet = false;
33472
33473 conn_info(tconn, "Connection closed\n");
33474 @@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33475 struct asender_cmd {
33476 size_t pkt_size;
33477 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33478 -};
33479 +} __do_const;
33480
33481 static struct asender_cmd asender_tbl[] = {
33482 [P_PING] = { 0, got_Ping },
33483 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33484 index dfe7583..83768bb 100644
33485 --- a/drivers/block/loop.c
33486 +++ b/drivers/block/loop.c
33487 @@ -231,7 +231,7 @@ static int __do_lo_send_write(struct file *file,
33488 mm_segment_t old_fs = get_fs();
33489
33490 set_fs(get_ds());
33491 - bw = file->f_op->write(file, buf, len, &pos);
33492 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33493 set_fs(old_fs);
33494 if (likely(bw == len))
33495 return 0;
33496 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
33497 index 2e7de7a..ed86dc0 100644
33498 --- a/drivers/block/pktcdvd.c
33499 +++ b/drivers/block/pktcdvd.c
33500 @@ -83,7 +83,7 @@
33501
33502 #define MAX_SPEED 0xffff
33503
33504 -#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
33505 +#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1UL))
33506
33507 static DEFINE_MUTEX(pktcdvd_mutex);
33508 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
33509 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33510 index d620b44..587561e 100644
33511 --- a/drivers/cdrom/cdrom.c
33512 +++ b/drivers/cdrom/cdrom.c
33513 @@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33514 ENSURE(reset, CDC_RESET);
33515 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33516 cdi->mc_flags = 0;
33517 - cdo->n_minors = 0;
33518 cdi->options = CDO_USE_FFLAGS;
33519
33520 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33521 @@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33522 else
33523 cdi->cdda_method = CDDA_OLD;
33524
33525 - if (!cdo->generic_packet)
33526 - cdo->generic_packet = cdrom_dummy_generic_packet;
33527 + if (!cdo->generic_packet) {
33528 + pax_open_kernel();
33529 + *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33530 + pax_close_kernel();
33531 + }
33532
33533 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33534 mutex_lock(&cdrom_mutex);
33535 @@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33536 if (cdi->exit)
33537 cdi->exit(cdi);
33538
33539 - cdi->ops->n_minors--;
33540 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33541 }
33542
33543 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33544 index d59cdcb..11afddf 100644
33545 --- a/drivers/cdrom/gdrom.c
33546 +++ b/drivers/cdrom/gdrom.c
33547 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33548 .audio_ioctl = gdrom_audio_ioctl,
33549 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33550 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33551 - .n_minors = 1,
33552 };
33553
33554 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33555 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33556 index 3bb6fa3..34013fb 100644
33557 --- a/drivers/char/Kconfig
33558 +++ b/drivers/char/Kconfig
33559 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33560
33561 config DEVKMEM
33562 bool "/dev/kmem virtual device support"
33563 - default y
33564 + default n
33565 + depends on !GRKERNSEC_KMEM
33566 help
33567 Say Y here if you want to support the /dev/kmem device. The
33568 /dev/kmem device is rarely used, but can be used for certain
33569 @@ -582,6 +583,7 @@ config DEVPORT
33570 bool
33571 depends on !M68K
33572 depends on ISA || PCI
33573 + depends on !GRKERNSEC_KMEM
33574 default y
33575
33576 source "drivers/s390/char/Kconfig"
33577 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33578 index 2e04433..22afc64 100644
33579 --- a/drivers/char/agp/frontend.c
33580 +++ b/drivers/char/agp/frontend.c
33581 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33582 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33583 return -EFAULT;
33584
33585 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33586 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33587 return -EFAULT;
33588
33589 client = agp_find_client_by_pid(reserve.pid);
33590 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33591 index 21cb980..f15107c 100644
33592 --- a/drivers/char/genrtc.c
33593 +++ b/drivers/char/genrtc.c
33594 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33595 switch (cmd) {
33596
33597 case RTC_PLL_GET:
33598 + memset(&pll, 0, sizeof(pll));
33599 if (get_rtc_pll(&pll))
33600 return -EINVAL;
33601 else
33602 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33603 index d784650..e8bfd69 100644
33604 --- a/drivers/char/hpet.c
33605 +++ b/drivers/char/hpet.c
33606 @@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33607 }
33608
33609 static int
33610 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33611 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33612 struct hpet_info *info)
33613 {
33614 struct hpet_timer __iomem *timer;
33615 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33616 index 053201b0..8335cce 100644
33617 --- a/drivers/char/ipmi/ipmi_msghandler.c
33618 +++ b/drivers/char/ipmi/ipmi_msghandler.c
33619 @@ -420,7 +420,7 @@ struct ipmi_smi {
33620 struct proc_dir_entry *proc_dir;
33621 char proc_dir_name[10];
33622
33623 - atomic_t stats[IPMI_NUM_STATS];
33624 + atomic_unchecked_t stats[IPMI_NUM_STATS];
33625
33626 /*
33627 * run_to_completion duplicate of smb_info, smi_info
33628 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33629
33630
33631 #define ipmi_inc_stat(intf, stat) \
33632 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33633 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33634 #define ipmi_get_stat(intf, stat) \
33635 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33636 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33637
33638 static int is_lan_addr(struct ipmi_addr *addr)
33639 {
33640 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33641 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33642 init_waitqueue_head(&intf->waitq);
33643 for (i = 0; i < IPMI_NUM_STATS; i++)
33644 - atomic_set(&intf->stats[i], 0);
33645 + atomic_set_unchecked(&intf->stats[i], 0);
33646
33647 intf->proc_dir = NULL;
33648
33649 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33650 index 0ac9b45..6179fb5 100644
33651 --- a/drivers/char/ipmi/ipmi_si_intf.c
33652 +++ b/drivers/char/ipmi/ipmi_si_intf.c
33653 @@ -275,7 +275,7 @@ struct smi_info {
33654 unsigned char slave_addr;
33655
33656 /* Counters and things for the proc filesystem. */
33657 - atomic_t stats[SI_NUM_STATS];
33658 + atomic_unchecked_t stats[SI_NUM_STATS];
33659
33660 struct task_struct *thread;
33661
33662 @@ -284,9 +284,9 @@ struct smi_info {
33663 };
33664
33665 #define smi_inc_stat(smi, stat) \
33666 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33667 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33668 #define smi_get_stat(smi, stat) \
33669 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33670 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33671
33672 #define SI_MAX_PARMS 4
33673
33674 @@ -3254,7 +3254,7 @@ static int try_smi_init(struct smi_info *new_smi)
33675 atomic_set(&new_smi->req_events, 0);
33676 new_smi->run_to_completion = 0;
33677 for (i = 0; i < SI_NUM_STATS; i++)
33678 - atomic_set(&new_smi->stats[i], 0);
33679 + atomic_set_unchecked(&new_smi->stats[i], 0);
33680
33681 new_smi->interrupt_disabled = 1;
33682 atomic_set(&new_smi->stop_operation, 0);
33683 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33684 index 2c644af..b867b3e 100644
33685 --- a/drivers/char/mem.c
33686 +++ b/drivers/char/mem.c
33687 @@ -18,6 +18,7 @@
33688 #include <linux/raw.h>
33689 #include <linux/tty.h>
33690 #include <linux/capability.h>
33691 +#include <linux/security.h>
33692 #include <linux/ptrace.h>
33693 #include <linux/device.h>
33694 #include <linux/highmem.h>
33695 @@ -37,6 +38,10 @@
33696
33697 #define DEVPORT_MINOR 4
33698
33699 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33700 +extern const struct file_operations grsec_fops;
33701 +#endif
33702 +
33703 static inline unsigned long size_inside_page(unsigned long start,
33704 unsigned long size)
33705 {
33706 @@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33707
33708 while (cursor < to) {
33709 if (!devmem_is_allowed(pfn)) {
33710 +#ifdef CONFIG_GRKERNSEC_KMEM
33711 + gr_handle_mem_readwrite(from, to);
33712 +#else
33713 printk(KERN_INFO
33714 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33715 current->comm, from, to);
33716 +#endif
33717 return 0;
33718 }
33719 cursor += PAGE_SIZE;
33720 @@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33721 }
33722 return 1;
33723 }
33724 +#elif defined(CONFIG_GRKERNSEC_KMEM)
33725 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33726 +{
33727 + return 0;
33728 +}
33729 #else
33730 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33731 {
33732 @@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33733
33734 while (count > 0) {
33735 unsigned long remaining;
33736 + char *temp;
33737
33738 sz = size_inside_page(p, count);
33739
33740 @@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33741 if (!ptr)
33742 return -EFAULT;
33743
33744 - remaining = copy_to_user(buf, ptr, sz);
33745 +#ifdef CONFIG_PAX_USERCOPY
33746 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33747 + if (!temp) {
33748 + unxlate_dev_mem_ptr(p, ptr);
33749 + return -ENOMEM;
33750 + }
33751 + memcpy(temp, ptr, sz);
33752 +#else
33753 + temp = ptr;
33754 +#endif
33755 +
33756 + remaining = copy_to_user(buf, temp, sz);
33757 +
33758 +#ifdef CONFIG_PAX_USERCOPY
33759 + kfree(temp);
33760 +#endif
33761 +
33762 unxlate_dev_mem_ptr(p, ptr);
33763 if (remaining)
33764 return -EFAULT;
33765 @@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33766 size_t count, loff_t *ppos)
33767 {
33768 unsigned long p = *ppos;
33769 - ssize_t low_count, read, sz;
33770 + ssize_t low_count, read, sz, err = 0;
33771 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33772 - int err = 0;
33773
33774 read = 0;
33775 if (p < (unsigned long) high_memory) {
33776 @@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33777 }
33778 #endif
33779 while (low_count > 0) {
33780 + char *temp;
33781 +
33782 sz = size_inside_page(p, low_count);
33783
33784 /*
33785 @@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33786 */
33787 kbuf = xlate_dev_kmem_ptr((char *)p);
33788
33789 - if (copy_to_user(buf, kbuf, sz))
33790 +#ifdef CONFIG_PAX_USERCOPY
33791 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33792 + if (!temp)
33793 + return -ENOMEM;
33794 + memcpy(temp, kbuf, sz);
33795 +#else
33796 + temp = kbuf;
33797 +#endif
33798 +
33799 + err = copy_to_user(buf, temp, sz);
33800 +
33801 +#ifdef CONFIG_PAX_USERCOPY
33802 + kfree(temp);
33803 +#endif
33804 +
33805 + if (err)
33806 return -EFAULT;
33807 buf += sz;
33808 p += sz;
33809 @@ -833,6 +880,9 @@ static const struct memdev {
33810 #ifdef CONFIG_CRASH_DUMP
33811 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33812 #endif
33813 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33814 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33815 +#endif
33816 };
33817
33818 static int memory_open(struct inode *inode, struct file *filp)
33819 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
33820 index 9df78e2..01ba9ae 100644
33821 --- a/drivers/char/nvram.c
33822 +++ b/drivers/char/nvram.c
33823 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
33824
33825 spin_unlock_irq(&rtc_lock);
33826
33827 - if (copy_to_user(buf, contents, tmp - contents))
33828 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
33829 return -EFAULT;
33830
33831 *ppos = i;
33832 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
33833 index 5c5cc00..ac9edb7 100644
33834 --- a/drivers/char/pcmcia/synclink_cs.c
33835 +++ b/drivers/char/pcmcia/synclink_cs.c
33836 @@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33837
33838 if (debug_level >= DEBUG_LEVEL_INFO)
33839 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
33840 - __FILE__, __LINE__, info->device_name, port->count);
33841 + __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
33842
33843 - WARN_ON(!port->count);
33844 + WARN_ON(!atomic_read(&port->count));
33845
33846 if (tty_port_close_start(port, tty, filp) == 0)
33847 goto cleanup;
33848 @@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33849 cleanup:
33850 if (debug_level >= DEBUG_LEVEL_INFO)
33851 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
33852 - tty->driver->name, port->count);
33853 + tty->driver->name, atomic_read(&port->count));
33854 }
33855
33856 /* Wait until the transmitter is empty.
33857 @@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33858
33859 if (debug_level >= DEBUG_LEVEL_INFO)
33860 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
33861 - __FILE__, __LINE__, tty->driver->name, port->count);
33862 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
33863
33864 /* If port is closing, signal caller to try again */
33865 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
33866 @@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33867 goto cleanup;
33868 }
33869 spin_lock(&port->lock);
33870 - port->count++;
33871 + atomic_inc(&port->count);
33872 spin_unlock(&port->lock);
33873 spin_unlock_irqrestore(&info->netlock, flags);
33874
33875 - if (port->count == 1) {
33876 + if (atomic_read(&port->count) == 1) {
33877 /* 1st open on this device, init hardware */
33878 retval = startup(info, tty);
33879 if (retval < 0)
33880 @@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
33881 unsigned short new_crctype;
33882
33883 /* return error if TTY interface open */
33884 - if (info->port.count)
33885 + if (atomic_read(&info->port.count))
33886 return -EBUSY;
33887
33888 switch (encoding)
33889 @@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
33890
33891 /* arbitrate between network and tty opens */
33892 spin_lock_irqsave(&info->netlock, flags);
33893 - if (info->port.count != 0 || info->netcount != 0) {
33894 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
33895 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
33896 spin_unlock_irqrestore(&info->netlock, flags);
33897 return -EBUSY;
33898 @@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33899 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
33900
33901 /* return error if TTY interface open */
33902 - if (info->port.count)
33903 + if (atomic_read(&info->port.count))
33904 return -EBUSY;
33905
33906 if (cmd != SIOCWANDEV)
33907 diff --git a/drivers/char/random.c b/drivers/char/random.c
33908 index 32a6c57..e7f0f7b 100644
33909 --- a/drivers/char/random.c
33910 +++ b/drivers/char/random.c
33911 @@ -272,8 +272,13 @@
33912 /*
33913 * Configuration information
33914 */
33915 +#ifdef CONFIG_GRKERNSEC_RANDNET
33916 +#define INPUT_POOL_WORDS 512
33917 +#define OUTPUT_POOL_WORDS 128
33918 +#else
33919 #define INPUT_POOL_WORDS 128
33920 #define OUTPUT_POOL_WORDS 32
33921 +#endif
33922 #define SEC_XFER_SIZE 512
33923 #define EXTRACT_SIZE 10
33924
33925 @@ -313,10 +318,17 @@ static struct poolinfo {
33926 int poolwords;
33927 int tap1, tap2, tap3, tap4, tap5;
33928 } poolinfo_table[] = {
33929 +#ifdef CONFIG_GRKERNSEC_RANDNET
33930 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33931 + { 512, 411, 308, 208, 104, 1 },
33932 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33933 + { 128, 103, 76, 51, 25, 1 },
33934 +#else
33935 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33936 { 128, 103, 76, 51, 25, 1 },
33937 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33938 { 32, 26, 20, 14, 7, 1 },
33939 +#endif
33940 #if 0
33941 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33942 { 2048, 1638, 1231, 819, 411, 1 },
33943 @@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
33944 input_rotate += i ? 7 : 14;
33945 }
33946
33947 - ACCESS_ONCE(r->input_rotate) = input_rotate;
33948 - ACCESS_ONCE(r->add_ptr) = i;
33949 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
33950 + ACCESS_ONCE_RW(r->add_ptr) = i;
33951 smp_wmb();
33952
33953 if (out)
33954 @@ -1024,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
33955
33956 extract_buf(r, tmp);
33957 i = min_t(int, nbytes, EXTRACT_SIZE);
33958 - if (copy_to_user(buf, tmp, i)) {
33959 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
33960 ret = -EFAULT;
33961 break;
33962 }
33963 @@ -1360,7 +1372,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33964 #include <linux/sysctl.h>
33965
33966 static int min_read_thresh = 8, min_write_thresh;
33967 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
33968 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33969 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33970 static char sysctl_bootid[16];
33971
33972 @@ -1376,7 +1388,7 @@ static char sysctl_bootid[16];
33973 static int proc_do_uuid(ctl_table *table, int write,
33974 void __user *buffer, size_t *lenp, loff_t *ppos)
33975 {
33976 - ctl_table fake_table;
33977 + ctl_table_no_const fake_table;
33978 unsigned char buf[64], tmp_uuid[16], *uuid;
33979
33980 uuid = table->data;
33981 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33982 index bf2349db..5456d53 100644
33983 --- a/drivers/char/sonypi.c
33984 +++ b/drivers/char/sonypi.c
33985 @@ -54,6 +54,7 @@
33986
33987 #include <asm/uaccess.h>
33988 #include <asm/io.h>
33989 +#include <asm/local.h>
33990
33991 #include <linux/sonypi.h>
33992
33993 @@ -490,7 +491,7 @@ static struct sonypi_device {
33994 spinlock_t fifo_lock;
33995 wait_queue_head_t fifo_proc_list;
33996 struct fasync_struct *fifo_async;
33997 - int open_count;
33998 + local_t open_count;
33999 int model;
34000 struct input_dev *input_jog_dev;
34001 struct input_dev *input_key_dev;
34002 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34003 static int sonypi_misc_release(struct inode *inode, struct file *file)
34004 {
34005 mutex_lock(&sonypi_device.lock);
34006 - sonypi_device.open_count--;
34007 + local_dec(&sonypi_device.open_count);
34008 mutex_unlock(&sonypi_device.lock);
34009 return 0;
34010 }
34011 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34012 {
34013 mutex_lock(&sonypi_device.lock);
34014 /* Flush input queue on first open */
34015 - if (!sonypi_device.open_count)
34016 + if (!local_read(&sonypi_device.open_count))
34017 kfifo_reset(&sonypi_device.fifo);
34018 - sonypi_device.open_count++;
34019 + local_inc(&sonypi_device.open_count);
34020 mutex_unlock(&sonypi_device.lock);
34021
34022 return 0;
34023 diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34024 index 64420b3..5c40b56 100644
34025 --- a/drivers/char/tpm/tpm_acpi.c
34026 +++ b/drivers/char/tpm/tpm_acpi.c
34027 @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34028 virt = acpi_os_map_memory(start, len);
34029 if (!virt) {
34030 kfree(log->bios_event_log);
34031 + log->bios_event_log = NULL;
34032 printk("%s: ERROR - Unable to map memory\n", __func__);
34033 return -EIO;
34034 }
34035
34036 - memcpy_fromio(log->bios_event_log, virt, len);
34037 + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34038
34039 acpi_os_unmap_memory(virt, len);
34040 return 0;
34041 diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34042 index 84ddc55..1d32f1e 100644
34043 --- a/drivers/char/tpm/tpm_eventlog.c
34044 +++ b/drivers/char/tpm/tpm_eventlog.c
34045 @@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34046 event = addr;
34047
34048 if ((event->event_type == 0 && event->event_size == 0) ||
34049 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34050 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34051 return NULL;
34052
34053 return addr;
34054 @@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34055 return NULL;
34056
34057 if ((event->event_type == 0 && event->event_size == 0) ||
34058 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34059 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34060 return NULL;
34061
34062 (*pos)++;
34063 @@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34064 int i;
34065
34066 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34067 - seq_putc(m, data[i]);
34068 + if (!seq_putc(m, data[i]))
34069 + return -EFAULT;
34070
34071 return 0;
34072 }
34073 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34074 index ce5f3fc..e2d3e55 100644
34075 --- a/drivers/char/virtio_console.c
34076 +++ b/drivers/char/virtio_console.c
34077 @@ -679,7 +679,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34078 if (to_user) {
34079 ssize_t ret;
34080
34081 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34082 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34083 if (ret)
34084 return -EFAULT;
34085 } else {
34086 @@ -778,7 +778,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34087 if (!port_has_data(port) && !port->host_connected)
34088 return 0;
34089
34090 - return fill_readbuf(port, ubuf, count, true);
34091 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34092 }
34093
34094 static int wait_port_writable(struct port *port, bool nonblock)
34095 diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
34096 index d7ad425..3e3f81f 100644
34097 --- a/drivers/clocksource/arm_arch_timer.c
34098 +++ b/drivers/clocksource/arm_arch_timer.c
34099 @@ -262,7 +262,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34100 return NOTIFY_OK;
34101 }
34102
34103 -static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
34104 +static struct notifier_block arch_timer_cpu_nb = {
34105 .notifier_call = arch_timer_cpu_notify,
34106 };
34107
34108 diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
34109 index ade7513..069445f 100644
34110 --- a/drivers/clocksource/metag_generic.c
34111 +++ b/drivers/clocksource/metag_generic.c
34112 @@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34113 return NOTIFY_OK;
34114 }
34115
34116 -static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34117 +static struct notifier_block arch_timer_cpu_nb = {
34118 .notifier_call = arch_timer_cpu_notify,
34119 };
34120
34121 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34122 index 57a8774..545e993 100644
34123 --- a/drivers/cpufreq/acpi-cpufreq.c
34124 +++ b/drivers/cpufreq/acpi-cpufreq.c
34125 @@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34126 return sprintf(buf, "%u\n", boost_enabled);
34127 }
34128
34129 -static struct global_attr global_boost = __ATTR(boost, 0644,
34130 +static global_attr_no_const global_boost = __ATTR(boost, 0644,
34131 show_global_boost,
34132 store_global_boost);
34133
34134 @@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34135 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34136 per_cpu(acfreq_data, cpu) = data;
34137
34138 - if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34139 - acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34140 + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34141 + pax_open_kernel();
34142 + *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34143 + pax_close_kernel();
34144 + }
34145
34146 result = acpi_processor_register_performance(data->acpi_data, cpu);
34147 if (result)
34148 @@ -839,7 +842,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34149 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34150 break;
34151 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34152 - acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34153 + pax_open_kernel();
34154 + *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34155 + pax_close_kernel();
34156 policy->cur = get_cur_freq_on_cpu(cpu);
34157 break;
34158 default:
34159 @@ -850,8 +855,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34160 acpi_processor_notify_smm(THIS_MODULE);
34161
34162 /* Check for APERF/MPERF support in hardware */
34163 - if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34164 - acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34165 + if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34166 + pax_open_kernel();
34167 + *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34168 + pax_close_kernel();
34169 + }
34170
34171 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34172 for (i = 0; i < perf->state_count; i++)
34173 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34174 index b02824d..51e44aa 100644
34175 --- a/drivers/cpufreq/cpufreq.c
34176 +++ b/drivers/cpufreq/cpufreq.c
34177 @@ -1813,7 +1813,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34178 return NOTIFY_OK;
34179 }
34180
34181 -static struct notifier_block __refdata cpufreq_cpu_notifier = {
34182 +static struct notifier_block cpufreq_cpu_notifier = {
34183 .notifier_call = cpufreq_cpu_callback,
34184 };
34185
34186 @@ -1845,8 +1845,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34187
34188 pr_debug("trying to register driver %s\n", driver_data->name);
34189
34190 - if (driver_data->setpolicy)
34191 - driver_data->flags |= CPUFREQ_CONST_LOOPS;
34192 + if (driver_data->setpolicy) {
34193 + pax_open_kernel();
34194 + *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34195 + pax_close_kernel();
34196 + }
34197
34198 spin_lock_irqsave(&cpufreq_driver_lock, flags);
34199 if (cpufreq_driver) {
34200 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34201 index 5a76086..0f4d394 100644
34202 --- a/drivers/cpufreq/cpufreq_governor.c
34203 +++ b/drivers/cpufreq/cpufreq_governor.c
34204 @@ -201,8 +201,8 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34205 {
34206 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
34207 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
34208 - struct cs_ops *cs_ops = NULL;
34209 - struct od_ops *od_ops = NULL;
34210 + const struct cs_ops *cs_ops = NULL;
34211 + const struct od_ops *od_ops = NULL;
34212 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
34213 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
34214 struct cpu_dbs_common_info *cpu_cdbs;
34215 diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34216 index cc4bd2f..ad142bc 100644
34217 --- a/drivers/cpufreq/cpufreq_governor.h
34218 +++ b/drivers/cpufreq/cpufreq_governor.h
34219 @@ -142,7 +142,7 @@ struct dbs_data {
34220 void (*gov_check_cpu)(int cpu, unsigned int load);
34221
34222 /* Governor specific ops, see below */
34223 - void *gov_ops;
34224 + const void *gov_ops;
34225 };
34226
34227 /* Governor specific ops, will be passed to dbs_data->gov_ops */
34228 diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34229 index bfd6273..e39dd63 100644
34230 --- a/drivers/cpufreq/cpufreq_stats.c
34231 +++ b/drivers/cpufreq/cpufreq_stats.c
34232 @@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34233 }
34234
34235 /* priority=1 so this will get called before cpufreq_remove_dev */
34236 -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34237 +static struct notifier_block cpufreq_stat_cpu_notifier = {
34238 .notifier_call = cpufreq_stat_cpu_callback,
34239 .priority = 1,
34240 };
34241 diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34242 index 827629c9..0bc6a03 100644
34243 --- a/drivers/cpufreq/p4-clockmod.c
34244 +++ b/drivers/cpufreq/p4-clockmod.c
34245 @@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34246 case 0x0F: /* Core Duo */
34247 case 0x16: /* Celeron Core */
34248 case 0x1C: /* Atom */
34249 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34250 + pax_open_kernel();
34251 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34252 + pax_close_kernel();
34253 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34254 case 0x0D: /* Pentium M (Dothan) */
34255 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34256 + pax_open_kernel();
34257 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34258 + pax_close_kernel();
34259 /* fall through */
34260 case 0x09: /* Pentium M (Banias) */
34261 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34262 @@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34263
34264 /* on P-4s, the TSC runs with constant frequency independent whether
34265 * throttling is active or not. */
34266 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34267 + pax_open_kernel();
34268 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34269 + pax_close_kernel();
34270
34271 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34272 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34273 diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34274 index 3a953d5..f5993f6 100644
34275 --- a/drivers/cpufreq/speedstep-centrino.c
34276 +++ b/drivers/cpufreq/speedstep-centrino.c
34277 @@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34278 !cpu_has(cpu, X86_FEATURE_EST))
34279 return -ENODEV;
34280
34281 - if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34282 - centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34283 + if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34284 + pax_open_kernel();
34285 + *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34286 + pax_close_kernel();
34287 + }
34288
34289 if (policy->cpu != 0)
34290 return -ENODEV;
34291 diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34292 index eba6929..0f53baf 100644
34293 --- a/drivers/cpuidle/cpuidle.c
34294 +++ b/drivers/cpuidle/cpuidle.c
34295 @@ -277,7 +277,7 @@ static int poll_idle(struct cpuidle_device *dev,
34296
34297 static void poll_idle_init(struct cpuidle_driver *drv)
34298 {
34299 - struct cpuidle_state *state = &drv->states[0];
34300 + cpuidle_state_no_const *state = &drv->states[0];
34301
34302 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34303 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34304 diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34305 index ea2f8e7..70ac501 100644
34306 --- a/drivers/cpuidle/governor.c
34307 +++ b/drivers/cpuidle/governor.c
34308 @@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34309 mutex_lock(&cpuidle_lock);
34310 if (__cpuidle_find_governor(gov->name) == NULL) {
34311 ret = 0;
34312 - list_add_tail(&gov->governor_list, &cpuidle_governors);
34313 + pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34314 if (!cpuidle_curr_governor ||
34315 cpuidle_curr_governor->rating < gov->rating)
34316 cpuidle_switch_governor(gov);
34317 @@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34318 new_gov = cpuidle_replace_governor(gov->rating);
34319 cpuidle_switch_governor(new_gov);
34320 }
34321 - list_del(&gov->governor_list);
34322 + pax_list_del((struct list_head *)&gov->governor_list);
34323 mutex_unlock(&cpuidle_lock);
34324 }
34325
34326 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34327 index 428754a..8bdf9cc 100644
34328 --- a/drivers/cpuidle/sysfs.c
34329 +++ b/drivers/cpuidle/sysfs.c
34330 @@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34331 NULL
34332 };
34333
34334 -static struct attribute_group cpuidle_attr_group = {
34335 +static attribute_group_no_const cpuidle_attr_group = {
34336 .attrs = cpuidle_default_attrs,
34337 .name = "cpuidle",
34338 };
34339 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34340 index 3b36797..289c16a 100644
34341 --- a/drivers/devfreq/devfreq.c
34342 +++ b/drivers/devfreq/devfreq.c
34343 @@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34344 goto err_out;
34345 }
34346
34347 - list_add(&governor->node, &devfreq_governor_list);
34348 + pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34349
34350 list_for_each_entry(devfreq, &devfreq_list, node) {
34351 int ret = 0;
34352 @@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34353 }
34354 }
34355
34356 - list_del(&governor->node);
34357 + pax_list_del((struct list_head *)&governor->node);
34358 err_out:
34359 mutex_unlock(&devfreq_list_lock);
34360
34361 diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34362 index b70709b..1d8d02a 100644
34363 --- a/drivers/dma/sh/shdma.c
34364 +++ b/drivers/dma/sh/shdma.c
34365 @@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34366 return ret;
34367 }
34368
34369 -static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34370 +static struct notifier_block sh_dmae_nmi_notifier = {
34371 .notifier_call = sh_dmae_nmi_handler,
34372
34373 /* Run before NMI debug handler and KGDB */
34374 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34375 index 769d92e..a3dcc1e 100644
34376 --- a/drivers/edac/edac_mc_sysfs.c
34377 +++ b/drivers/edac/edac_mc_sysfs.c
34378 @@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34379 struct dev_ch_attribute {
34380 struct device_attribute attr;
34381 int channel;
34382 -};
34383 +} __do_const;
34384
34385 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34386 struct dev_ch_attribute dev_attr_legacy_##_name = \
34387 @@ -1003,14 +1003,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
34388 }
34389
34390 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
34391 + pax_open_kernel();
34392 if (mci->get_sdram_scrub_rate) {
34393 - dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34394 - dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34395 + *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34396 + *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34397 }
34398 if (mci->set_sdram_scrub_rate) {
34399 - dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34400 - dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34401 + *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34402 + *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34403 }
34404 + pax_close_kernel();
34405 err = device_create_file(&mci->dev,
34406 &dev_attr_sdram_scrub_rate);
34407 if (err) {
34408 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34409 index e8658e4..22746d6 100644
34410 --- a/drivers/edac/edac_pci_sysfs.c
34411 +++ b/drivers/edac/edac_pci_sysfs.c
34412 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34413 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34414 static int edac_pci_poll_msec = 1000; /* one second workq period */
34415
34416 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
34417 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34418 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34419 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34420
34421 static struct kobject *edac_pci_top_main_kobj;
34422 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34423 @@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34424 void *value;
34425 ssize_t(*show) (void *, char *);
34426 ssize_t(*store) (void *, const char *, size_t);
34427 -};
34428 +} __do_const;
34429
34430 /* Set of show/store abstract level functions for PCI Parity object */
34431 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34432 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34433 edac_printk(KERN_CRIT, EDAC_PCI,
34434 "Signaled System Error on %s\n",
34435 pci_name(dev));
34436 - atomic_inc(&pci_nonparity_count);
34437 + atomic_inc_unchecked(&pci_nonparity_count);
34438 }
34439
34440 if (status & (PCI_STATUS_PARITY)) {
34441 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34442 "Master Data Parity Error on %s\n",
34443 pci_name(dev));
34444
34445 - atomic_inc(&pci_parity_count);
34446 + atomic_inc_unchecked(&pci_parity_count);
34447 }
34448
34449 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34450 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34451 "Detected Parity Error on %s\n",
34452 pci_name(dev));
34453
34454 - atomic_inc(&pci_parity_count);
34455 + atomic_inc_unchecked(&pci_parity_count);
34456 }
34457 }
34458
34459 @@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34460 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34461 "Signaled System Error on %s\n",
34462 pci_name(dev));
34463 - atomic_inc(&pci_nonparity_count);
34464 + atomic_inc_unchecked(&pci_nonparity_count);
34465 }
34466
34467 if (status & (PCI_STATUS_PARITY)) {
34468 @@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34469 "Master Data Parity Error on "
34470 "%s\n", pci_name(dev));
34471
34472 - atomic_inc(&pci_parity_count);
34473 + atomic_inc_unchecked(&pci_parity_count);
34474 }
34475
34476 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34477 @@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34478 "Detected Parity Error on %s\n",
34479 pci_name(dev));
34480
34481 - atomic_inc(&pci_parity_count);
34482 + atomic_inc_unchecked(&pci_parity_count);
34483 }
34484 }
34485 }
34486 @@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34487 if (!check_pci_errors)
34488 return;
34489
34490 - before_count = atomic_read(&pci_parity_count);
34491 + before_count = atomic_read_unchecked(&pci_parity_count);
34492
34493 /* scan all PCI devices looking for a Parity Error on devices and
34494 * bridges.
34495 @@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34496 /* Only if operator has selected panic on PCI Error */
34497 if (edac_pci_get_panic_on_pe()) {
34498 /* If the count is different 'after' from 'before' */
34499 - if (before_count != atomic_read(&pci_parity_count))
34500 + if (before_count != atomic_read_unchecked(&pci_parity_count))
34501 panic("EDAC: PCI Parity Error");
34502 }
34503 }
34504 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34505 index 51b7e3a..aa8a3e8 100644
34506 --- a/drivers/edac/mce_amd.h
34507 +++ b/drivers/edac/mce_amd.h
34508 @@ -77,7 +77,7 @@ struct amd_decoder_ops {
34509 bool (*mc0_mce)(u16, u8);
34510 bool (*mc1_mce)(u16, u8);
34511 bool (*mc2_mce)(u16, u8);
34512 -};
34513 +} __no_const;
34514
34515 void amd_report_gart_errors(bool);
34516 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34517 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34518 index 57ea7f4..789e3c3 100644
34519 --- a/drivers/firewire/core-card.c
34520 +++ b/drivers/firewire/core-card.c
34521 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34522
34523 void fw_core_remove_card(struct fw_card *card)
34524 {
34525 - struct fw_card_driver dummy_driver = dummy_driver_template;
34526 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
34527
34528 card->driver->update_phy_reg(card, 4,
34529 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34530 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34531 index 27ac423..13573e8 100644
34532 --- a/drivers/firewire/core-cdev.c
34533 +++ b/drivers/firewire/core-cdev.c
34534 @@ -1366,8 +1366,7 @@ static int init_iso_resource(struct client *client,
34535 int ret;
34536
34537 if ((request->channels == 0 && request->bandwidth == 0) ||
34538 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34539 - request->bandwidth < 0)
34540 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34541 return -EINVAL;
34542
34543 r = kmalloc(sizeof(*r), GFP_KERNEL);
34544 diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34545 index 03ce7d9..b70f5da 100644
34546 --- a/drivers/firewire/core-device.c
34547 +++ b/drivers/firewire/core-device.c
34548 @@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34549 struct config_rom_attribute {
34550 struct device_attribute attr;
34551 u32 key;
34552 -};
34553 +} __do_const;
34554
34555 static ssize_t show_immediate(struct device *dev,
34556 struct device_attribute *dattr, char *buf)
34557 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34558 index 28a94c7..58da63a 100644
34559 --- a/drivers/firewire/core-transaction.c
34560 +++ b/drivers/firewire/core-transaction.c
34561 @@ -38,6 +38,7 @@
34562 #include <linux/timer.h>
34563 #include <linux/types.h>
34564 #include <linux/workqueue.h>
34565 +#include <linux/sched.h>
34566
34567 #include <asm/byteorder.h>
34568
34569 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34570 index 515a42c..5ecf3ba 100644
34571 --- a/drivers/firewire/core.h
34572 +++ b/drivers/firewire/core.h
34573 @@ -111,6 +111,7 @@ struct fw_card_driver {
34574
34575 int (*stop_iso)(struct fw_iso_context *ctx);
34576 };
34577 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34578
34579 void fw_card_initialize(struct fw_card *card,
34580 const struct fw_card_driver *driver, struct device *device);
34581 diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34582 index 94a58a0..f5eba42 100644
34583 --- a/drivers/firmware/dmi-id.c
34584 +++ b/drivers/firmware/dmi-id.c
34585 @@ -16,7 +16,7 @@
34586 struct dmi_device_attribute{
34587 struct device_attribute dev_attr;
34588 int field;
34589 -};
34590 +} __do_const;
34591 #define to_dmi_dev_attr(_dev_attr) \
34592 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34593
34594 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34595 index 4cd392d..4b629e1 100644
34596 --- a/drivers/firmware/dmi_scan.c
34597 +++ b/drivers/firmware/dmi_scan.c
34598 @@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
34599 }
34600 }
34601 else {
34602 - /*
34603 - * no iounmap() for that ioremap(); it would be a no-op, but
34604 - * it's so early in setup that sucker gets confused into doing
34605 - * what it shouldn't if we actually call it.
34606 - */
34607 p = dmi_ioremap(0xF0000, 0x10000);
34608 if (p == NULL)
34609 goto error;
34610 @@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34611 if (buf == NULL)
34612 return -1;
34613
34614 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34615 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34616
34617 iounmap(buf);
34618 return 0;
34619 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34620 index f4baa11..7970c3a 100644
34621 --- a/drivers/firmware/efivars.c
34622 +++ b/drivers/firmware/efivars.c
34623 @@ -139,7 +139,7 @@ struct efivar_attribute {
34624 };
34625
34626 static struct efivars __efivars;
34627 -static struct efivar_operations ops;
34628 +static efivar_operations_no_const ops __read_only;
34629
34630 #define PSTORE_EFI_ATTRIBUTES \
34631 (EFI_VARIABLE_NON_VOLATILE | \
34632 @@ -1844,7 +1844,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34633 static int
34634 create_efivars_bin_attributes(struct efivars *efivars)
34635 {
34636 - struct bin_attribute *attr;
34637 + bin_attribute_no_const *attr;
34638 int error;
34639
34640 /* new_var */
34641 diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34642 index 2a90ba6..07f3733 100644
34643 --- a/drivers/firmware/google/memconsole.c
34644 +++ b/drivers/firmware/google/memconsole.c
34645 @@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34646 if (!found_memconsole())
34647 return -ENODEV;
34648
34649 - memconsole_bin_attr.size = memconsole_length;
34650 + pax_open_kernel();
34651 + *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34652 + pax_close_kernel();
34653
34654 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
34655
34656 diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
34657 index de3c317..b7cd029 100644
34658 --- a/drivers/gpio/gpio-ich.c
34659 +++ b/drivers/gpio/gpio-ich.c
34660 @@ -69,7 +69,7 @@ struct ichx_desc {
34661 /* Some chipsets have quirks, let these use their own request/get */
34662 int (*request)(struct gpio_chip *chip, unsigned offset);
34663 int (*get)(struct gpio_chip *chip, unsigned offset);
34664 -};
34665 +} __do_const;
34666
34667 static struct {
34668 spinlock_t lock;
34669 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
34670 index 9902732..64b62dd 100644
34671 --- a/drivers/gpio/gpio-vr41xx.c
34672 +++ b/drivers/gpio/gpio-vr41xx.c
34673 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34674 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34675 maskl, pendl, maskh, pendh);
34676
34677 - atomic_inc(&irq_err_count);
34678 + atomic_inc_unchecked(&irq_err_count);
34679
34680 return -EINVAL;
34681 }
34682 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34683 index 7b2d378..cc947ea 100644
34684 --- a/drivers/gpu/drm/drm_crtc_helper.c
34685 +++ b/drivers/gpu/drm/drm_crtc_helper.c
34686 @@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34687 struct drm_crtc *tmp;
34688 int crtc_mask = 1;
34689
34690 - WARN(!crtc, "checking null crtc?\n");
34691 + BUG_ON(!crtc);
34692
34693 dev = crtc->dev;
34694
34695 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34696 index 25f91cd..a376f55 100644
34697 --- a/drivers/gpu/drm/drm_drv.c
34698 +++ b/drivers/gpu/drm/drm_drv.c
34699 @@ -306,7 +306,7 @@ module_exit(drm_core_exit);
34700 /**
34701 * Copy and IOCTL return string to user space
34702 */
34703 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
34704 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
34705 {
34706 int len;
34707
34708 @@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
34709 struct drm_file *file_priv = filp->private_data;
34710 struct drm_device *dev;
34711 struct drm_ioctl_desc *ioctl;
34712 - drm_ioctl_t *func;
34713 + drm_ioctl_no_const_t func;
34714 unsigned int nr = DRM_IOCTL_NR(cmd);
34715 int retcode = -EINVAL;
34716 char stack_kdata[128];
34717 @@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
34718 return -ENODEV;
34719
34720 atomic_inc(&dev->ioctl_count);
34721 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34722 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34723 ++file_priv->ioctl_count;
34724
34725 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34726 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34727 index 429e07d..e681a2c 100644
34728 --- a/drivers/gpu/drm/drm_fops.c
34729 +++ b/drivers/gpu/drm/drm_fops.c
34730 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
34731 }
34732
34733 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34734 - atomic_set(&dev->counts[i], 0);
34735 + atomic_set_unchecked(&dev->counts[i], 0);
34736
34737 dev->sigdata.lock = NULL;
34738
34739 @@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
34740 if (drm_device_is_unplugged(dev))
34741 return -ENODEV;
34742
34743 - if (!dev->open_count++)
34744 + if (local_inc_return(&dev->open_count) == 1)
34745 need_setup = 1;
34746 mutex_lock(&dev->struct_mutex);
34747 old_imapping = inode->i_mapping;
34748 @@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
34749 retcode = drm_open_helper(inode, filp, dev);
34750 if (retcode)
34751 goto err_undo;
34752 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34753 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34754 if (need_setup) {
34755 retcode = drm_setup(dev);
34756 if (retcode)
34757 @@ -166,7 +166,7 @@ err_undo:
34758 iput(container_of(dev->dev_mapping, struct inode, i_data));
34759 dev->dev_mapping = old_mapping;
34760 mutex_unlock(&dev->struct_mutex);
34761 - dev->open_count--;
34762 + local_dec(&dev->open_count);
34763 return retcode;
34764 }
34765 EXPORT_SYMBOL(drm_open);
34766 @@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
34767
34768 mutex_lock(&drm_global_mutex);
34769
34770 - DRM_DEBUG("open_count = %d\n", dev->open_count);
34771 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
34772
34773 if (dev->driver->preclose)
34774 dev->driver->preclose(dev, file_priv);
34775 @@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
34776 * Begin inline drm_release
34777 */
34778
34779 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34780 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
34781 task_pid_nr(current),
34782 (long)old_encode_dev(file_priv->minor->device),
34783 - dev->open_count);
34784 + local_read(&dev->open_count));
34785
34786 /* Release any auth tokens that might point to this file_priv,
34787 (do that under the drm_global_mutex) */
34788 @@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
34789 * End inline drm_release
34790 */
34791
34792 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34793 - if (!--dev->open_count) {
34794 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34795 + if (local_dec_and_test(&dev->open_count)) {
34796 if (atomic_read(&dev->ioctl_count)) {
34797 DRM_ERROR("Device busy: %d\n",
34798 atomic_read(&dev->ioctl_count));
34799 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
34800 index f731116..629842c 100644
34801 --- a/drivers/gpu/drm/drm_global.c
34802 +++ b/drivers/gpu/drm/drm_global.c
34803 @@ -36,7 +36,7 @@
34804 struct drm_global_item {
34805 struct mutex mutex;
34806 void *object;
34807 - int refcount;
34808 + atomic_t refcount;
34809 };
34810
34811 static struct drm_global_item glob[DRM_GLOBAL_NUM];
34812 @@ -49,7 +49,7 @@ void drm_global_init(void)
34813 struct drm_global_item *item = &glob[i];
34814 mutex_init(&item->mutex);
34815 item->object = NULL;
34816 - item->refcount = 0;
34817 + atomic_set(&item->refcount, 0);
34818 }
34819 }
34820
34821 @@ -59,7 +59,7 @@ void drm_global_release(void)
34822 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
34823 struct drm_global_item *item = &glob[i];
34824 BUG_ON(item->object != NULL);
34825 - BUG_ON(item->refcount != 0);
34826 + BUG_ON(atomic_read(&item->refcount) != 0);
34827 }
34828 }
34829
34830 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34831 void *object;
34832
34833 mutex_lock(&item->mutex);
34834 - if (item->refcount == 0) {
34835 + if (atomic_read(&item->refcount) == 0) {
34836 item->object = kzalloc(ref->size, GFP_KERNEL);
34837 if (unlikely(item->object == NULL)) {
34838 ret = -ENOMEM;
34839 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34840 goto out_err;
34841
34842 }
34843 - ++item->refcount;
34844 + atomic_inc(&item->refcount);
34845 ref->object = item->object;
34846 object = item->object;
34847 mutex_unlock(&item->mutex);
34848 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
34849 struct drm_global_item *item = &glob[ref->global_type];
34850
34851 mutex_lock(&item->mutex);
34852 - BUG_ON(item->refcount == 0);
34853 + BUG_ON(atomic_read(&item->refcount) == 0);
34854 BUG_ON(ref->object != item->object);
34855 - if (--item->refcount == 0) {
34856 + if (atomic_dec_and_test(&item->refcount)) {
34857 ref->release(ref);
34858 item->object = NULL;
34859 }
34860 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34861 index d4b20ce..77a8d41 100644
34862 --- a/drivers/gpu/drm/drm_info.c
34863 +++ b/drivers/gpu/drm/drm_info.c
34864 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34865 struct drm_local_map *map;
34866 struct drm_map_list *r_list;
34867
34868 - /* Hardcoded from _DRM_FRAME_BUFFER,
34869 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34870 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34871 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34872 + static const char * const types[] = {
34873 + [_DRM_FRAME_BUFFER] = "FB",
34874 + [_DRM_REGISTERS] = "REG",
34875 + [_DRM_SHM] = "SHM",
34876 + [_DRM_AGP] = "AGP",
34877 + [_DRM_SCATTER_GATHER] = "SG",
34878 + [_DRM_CONSISTENT] = "PCI",
34879 + [_DRM_GEM] = "GEM" };
34880 const char *type;
34881 int i;
34882
34883 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34884 map = r_list->map;
34885 if (!map)
34886 continue;
34887 - if (map->type < 0 || map->type > 5)
34888 + if (map->type >= ARRAY_SIZE(types))
34889 type = "??";
34890 else
34891 type = types[map->type];
34892 @@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34893 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34894 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34895 vma->vm_flags & VM_IO ? 'i' : '-',
34896 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34897 + 0);
34898 +#else
34899 vma->vm_pgoff);
34900 +#endif
34901
34902 #if defined(__i386__)
34903 pgprot = pgprot_val(vma->vm_page_prot);
34904 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34905 index 2f4c434..dd12cd2 100644
34906 --- a/drivers/gpu/drm/drm_ioc32.c
34907 +++ b/drivers/gpu/drm/drm_ioc32.c
34908 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34909 request = compat_alloc_user_space(nbytes);
34910 if (!access_ok(VERIFY_WRITE, request, nbytes))
34911 return -EFAULT;
34912 - list = (struct drm_buf_desc *) (request + 1);
34913 + list = (struct drm_buf_desc __user *) (request + 1);
34914
34915 if (__put_user(count, &request->count)
34916 || __put_user(list, &request->list))
34917 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34918 request = compat_alloc_user_space(nbytes);
34919 if (!access_ok(VERIFY_WRITE, request, nbytes))
34920 return -EFAULT;
34921 - list = (struct drm_buf_pub *) (request + 1);
34922 + list = (struct drm_buf_pub __user *) (request + 1);
34923
34924 if (__put_user(count, &request->count)
34925 || __put_user(list, &request->list))
34926 @@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
34927 return 0;
34928 }
34929
34930 -drm_ioctl_compat_t *drm_compat_ioctls[] = {
34931 +drm_ioctl_compat_t drm_compat_ioctls[] = {
34932 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
34933 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
34934 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
34935 @@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
34936 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34937 {
34938 unsigned int nr = DRM_IOCTL_NR(cmd);
34939 - drm_ioctl_compat_t *fn;
34940 int ret;
34941
34942 /* Assume that ioctls without an explicit compat routine will just
34943 @@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34944 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
34945 return drm_ioctl(filp, cmd, arg);
34946
34947 - fn = drm_compat_ioctls[nr];
34948 -
34949 - if (fn != NULL)
34950 - ret = (*fn) (filp, cmd, arg);
34951 + if (drm_compat_ioctls[nr] != NULL)
34952 + ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
34953 else
34954 ret = drm_ioctl(filp, cmd, arg);
34955
34956 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34957 index e77bd8b..1571b85 100644
34958 --- a/drivers/gpu/drm/drm_ioctl.c
34959 +++ b/drivers/gpu/drm/drm_ioctl.c
34960 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34961 stats->data[i].value =
34962 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34963 else
34964 - stats->data[i].value = atomic_read(&dev->counts[i]);
34965 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34966 stats->data[i].type = dev->types[i];
34967 }
34968
34969 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34970 index d752c96..fe08455 100644
34971 --- a/drivers/gpu/drm/drm_lock.c
34972 +++ b/drivers/gpu/drm/drm_lock.c
34973 @@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34974 if (drm_lock_take(&master->lock, lock->context)) {
34975 master->lock.file_priv = file_priv;
34976 master->lock.lock_time = jiffies;
34977 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34978 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34979 break; /* Got lock */
34980 }
34981
34982 @@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34983 return -EINVAL;
34984 }
34985
34986 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34987 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34988
34989 if (drm_lock_free(&master->lock, lock->context)) {
34990 /* FIXME: Should really bail out here. */
34991 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
34992 index 7d30802..42c6cbb 100644
34993 --- a/drivers/gpu/drm/drm_stub.c
34994 +++ b/drivers/gpu/drm/drm_stub.c
34995 @@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
34996
34997 drm_device_set_unplugged(dev);
34998
34999 - if (dev->open_count == 0) {
35000 + if (local_read(&dev->open_count) == 0) {
35001 drm_put_dev(dev);
35002 }
35003 mutex_unlock(&drm_global_mutex);
35004 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35005 index 004ecdf..db1f6e0 100644
35006 --- a/drivers/gpu/drm/i810/i810_dma.c
35007 +++ b/drivers/gpu/drm/i810/i810_dma.c
35008 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35009 dma->buflist[vertex->idx],
35010 vertex->discard, vertex->used);
35011
35012 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35013 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35014 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35015 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35016 sarea_priv->last_enqueue = dev_priv->counter - 1;
35017 sarea_priv->last_dispatch = (int)hw_status[5];
35018
35019 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35020 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35021 mc->last_render);
35022
35023 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35024 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35025 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35026 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35027 sarea_priv->last_enqueue = dev_priv->counter - 1;
35028 sarea_priv->last_dispatch = (int)hw_status[5];
35029
35030 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35031 index 6e0acad..93c8289 100644
35032 --- a/drivers/gpu/drm/i810/i810_drv.h
35033 +++ b/drivers/gpu/drm/i810/i810_drv.h
35034 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35035 int page_flipping;
35036
35037 wait_queue_head_t irq_queue;
35038 - atomic_t irq_received;
35039 - atomic_t irq_emitted;
35040 + atomic_unchecked_t irq_received;
35041 + atomic_unchecked_t irq_emitted;
35042
35043 int front_offset;
35044 } drm_i810_private_t;
35045 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35046 index 7299ea4..5314487 100644
35047 --- a/drivers/gpu/drm/i915/i915_debugfs.c
35048 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
35049 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35050 I915_READ(GTIMR));
35051 }
35052 seq_printf(m, "Interrupts received: %d\n",
35053 - atomic_read(&dev_priv->irq_received));
35054 + atomic_read_unchecked(&dev_priv->irq_received));
35055 for_each_ring(ring, dev_priv, i) {
35056 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35057 seq_printf(m,
35058 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35059 index 4fa6beb..f930fec 100644
35060 --- a/drivers/gpu/drm/i915/i915_dma.c
35061 +++ b/drivers/gpu/drm/i915/i915_dma.c
35062 @@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35063 bool can_switch;
35064
35065 spin_lock(&dev->count_lock);
35066 - can_switch = (dev->open_count == 0);
35067 + can_switch = (local_read(&dev->open_count) == 0);
35068 spin_unlock(&dev->count_lock);
35069 return can_switch;
35070 }
35071 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35072 index ef99b1c..09ce7fb 100644
35073 --- a/drivers/gpu/drm/i915/i915_drv.h
35074 +++ b/drivers/gpu/drm/i915/i915_drv.h
35075 @@ -893,7 +893,7 @@ typedef struct drm_i915_private {
35076 drm_dma_handle_t *status_page_dmah;
35077 struct resource mch_res;
35078
35079 - atomic_t irq_received;
35080 + atomic_unchecked_t irq_received;
35081
35082 /* protects the irq masks */
35083 spinlock_t irq_lock;
35084 @@ -1775,7 +1775,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35085 struct drm_i915_private *dev_priv, unsigned port);
35086 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35087 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35088 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35089 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35090 {
35091 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35092 }
35093 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35094 index 9a48e1a..f0cbc3e 100644
35095 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35096 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35097 @@ -729,9 +729,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35098
35099 static int
35100 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35101 - int count)
35102 + unsigned int count)
35103 {
35104 - int i;
35105 + unsigned int i;
35106 int relocs_total = 0;
35107 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35108
35109 @@ -1195,7 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
35110 return -ENOMEM;
35111 }
35112 ret = copy_from_user(exec2_list,
35113 - (struct drm_i915_relocation_entry __user *)
35114 + (struct drm_i915_gem_exec_object2 __user *)
35115 (uintptr_t) args->buffers_ptr,
35116 sizeof(*exec2_list) * args->buffer_count);
35117 if (ret != 0) {
35118 diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35119 index 3c59584..500f2e9 100644
35120 --- a/drivers/gpu/drm/i915/i915_ioc32.c
35121 +++ b/drivers/gpu/drm/i915/i915_ioc32.c
35122 @@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35123 (unsigned long)request);
35124 }
35125
35126 -static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35127 +static drm_ioctl_compat_t i915_compat_ioctls[] = {
35128 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35129 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35130 [DRM_I915_GETPARAM] = compat_i915_getparam,
35131 @@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35132 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35133 {
35134 unsigned int nr = DRM_IOCTL_NR(cmd);
35135 - drm_ioctl_compat_t *fn = NULL;
35136 int ret;
35137
35138 if (nr < DRM_COMMAND_BASE)
35139 return drm_compat_ioctl(filp, cmd, arg);
35140
35141 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35142 - fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35143 -
35144 - if (fn != NULL)
35145 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35146 + drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35147 ret = (*fn) (filp, cmd, arg);
35148 - else
35149 + } else
35150 ret = drm_ioctl(filp, cmd, arg);
35151
35152 return ret;
35153 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35154 index 3c7bb04..182e049 100644
35155 --- a/drivers/gpu/drm/i915/i915_irq.c
35156 +++ b/drivers/gpu/drm/i915/i915_irq.c
35157 @@ -549,7 +549,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35158 int pipe;
35159 u32 pipe_stats[I915_MAX_PIPES];
35160
35161 - atomic_inc(&dev_priv->irq_received);
35162 + atomic_inc_unchecked(&dev_priv->irq_received);
35163
35164 while (true) {
35165 iir = I915_READ(VLV_IIR);
35166 @@ -705,7 +705,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35167 irqreturn_t ret = IRQ_NONE;
35168 int i;
35169
35170 - atomic_inc(&dev_priv->irq_received);
35171 + atomic_inc_unchecked(&dev_priv->irq_received);
35172
35173 /* disable master interrupt before clearing iir */
35174 de_ier = I915_READ(DEIER);
35175 @@ -791,7 +791,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35176 int ret = IRQ_NONE;
35177 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
35178
35179 - atomic_inc(&dev_priv->irq_received);
35180 + atomic_inc_unchecked(&dev_priv->irq_received);
35181
35182 /* disable master interrupt before clearing iir */
35183 de_ier = I915_READ(DEIER);
35184 @@ -1886,7 +1886,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35185 {
35186 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35187
35188 - atomic_set(&dev_priv->irq_received, 0);
35189 + atomic_set_unchecked(&dev_priv->irq_received, 0);
35190
35191 I915_WRITE(HWSTAM, 0xeffe);
35192
35193 @@ -1912,7 +1912,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35194 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35195 int pipe;
35196
35197 - atomic_set(&dev_priv->irq_received, 0);
35198 + atomic_set_unchecked(&dev_priv->irq_received, 0);
35199
35200 /* VLV magic */
35201 I915_WRITE(VLV_IMR, 0);
35202 @@ -2208,7 +2208,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35204 int pipe;
35205
35206 - atomic_set(&dev_priv->irq_received, 0);
35207 + atomic_set_unchecked(&dev_priv->irq_received, 0);
35208
35209 for_each_pipe(pipe)
35210 I915_WRITE(PIPESTAT(pipe), 0);
35211 @@ -2259,7 +2259,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35212 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35213 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35214
35215 - atomic_inc(&dev_priv->irq_received);
35216 + atomic_inc_unchecked(&dev_priv->irq_received);
35217
35218 iir = I915_READ16(IIR);
35219 if (iir == 0)
35220 @@ -2344,7 +2344,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35221 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35222 int pipe;
35223
35224 - atomic_set(&dev_priv->irq_received, 0);
35225 + atomic_set_unchecked(&dev_priv->irq_received, 0);
35226
35227 if (I915_HAS_HOTPLUG(dev)) {
35228 I915_WRITE(PORT_HOTPLUG_EN, 0);
35229 @@ -2448,7 +2448,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35230 };
35231 int pipe, ret = IRQ_NONE;
35232
35233 - atomic_inc(&dev_priv->irq_received);
35234 + atomic_inc_unchecked(&dev_priv->irq_received);
35235
35236 iir = I915_READ(IIR);
35237 do {
35238 @@ -2574,7 +2574,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35239 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35240 int pipe;
35241
35242 - atomic_set(&dev_priv->irq_received, 0);
35243 + atomic_set_unchecked(&dev_priv->irq_received, 0);
35244
35245 I915_WRITE(PORT_HOTPLUG_EN, 0);
35246 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35247 @@ -2690,7 +2690,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35248 int irq_received;
35249 int ret = IRQ_NONE, pipe;
35250
35251 - atomic_inc(&dev_priv->irq_received);
35252 + atomic_inc_unchecked(&dev_priv->irq_received);
35253
35254 iir = I915_READ(IIR);
35255
35256 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35257 index c2d173a..f4357cc 100644
35258 --- a/drivers/gpu/drm/i915/intel_display.c
35259 +++ b/drivers/gpu/drm/i915/intel_display.c
35260 @@ -8722,13 +8722,13 @@ struct intel_quirk {
35261 int subsystem_vendor;
35262 int subsystem_device;
35263 void (*hook)(struct drm_device *dev);
35264 -};
35265 +} __do_const;
35266
35267 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35268 struct intel_dmi_quirk {
35269 void (*hook)(struct drm_device *dev);
35270 const struct dmi_system_id (*dmi_id_list)[];
35271 -};
35272 +} __do_const;
35273
35274 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35275 {
35276 @@ -8736,18 +8736,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35277 return 1;
35278 }
35279
35280 -static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35281 +static const struct dmi_system_id intel_dmi_quirks_table[] = {
35282 {
35283 - .dmi_id_list = &(const struct dmi_system_id[]) {
35284 - {
35285 - .callback = intel_dmi_reverse_brightness,
35286 - .ident = "NCR Corporation",
35287 - .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35288 - DMI_MATCH(DMI_PRODUCT_NAME, ""),
35289 - },
35290 - },
35291 - { } /* terminating entry */
35292 + .callback = intel_dmi_reverse_brightness,
35293 + .ident = "NCR Corporation",
35294 + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35295 + DMI_MATCH(DMI_PRODUCT_NAME, ""),
35296 },
35297 + },
35298 + { } /* terminating entry */
35299 +};
35300 +
35301 +static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35302 + {
35303 + .dmi_id_list = &intel_dmi_quirks_table,
35304 .hook = quirk_invert_brightness,
35305 },
35306 };
35307 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35308 index 54558a0..2d97005 100644
35309 --- a/drivers/gpu/drm/mga/mga_drv.h
35310 +++ b/drivers/gpu/drm/mga/mga_drv.h
35311 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35312 u32 clear_cmd;
35313 u32 maccess;
35314
35315 - atomic_t vbl_received; /**< Number of vblanks received. */
35316 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35317 wait_queue_head_t fence_queue;
35318 - atomic_t last_fence_retired;
35319 + atomic_unchecked_t last_fence_retired;
35320 u32 next_fence_to_post;
35321
35322 unsigned int fb_cpp;
35323 diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35324 index 709e90d..89a1c0d 100644
35325 --- a/drivers/gpu/drm/mga/mga_ioc32.c
35326 +++ b/drivers/gpu/drm/mga/mga_ioc32.c
35327 @@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35328 return 0;
35329 }
35330
35331 -drm_ioctl_compat_t *mga_compat_ioctls[] = {
35332 +drm_ioctl_compat_t mga_compat_ioctls[] = {
35333 [DRM_MGA_INIT] = compat_mga_init,
35334 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35335 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35336 @@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35337 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35338 {
35339 unsigned int nr = DRM_IOCTL_NR(cmd);
35340 - drm_ioctl_compat_t *fn = NULL;
35341 int ret;
35342
35343 if (nr < DRM_COMMAND_BASE)
35344 return drm_compat_ioctl(filp, cmd, arg);
35345
35346 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35347 - fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35348 -
35349 - if (fn != NULL)
35350 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35351 + drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35352 ret = (*fn) (filp, cmd, arg);
35353 - else
35354 + } else
35355 ret = drm_ioctl(filp, cmd, arg);
35356
35357 return ret;
35358 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35359 index 598c281..60d590e 100644
35360 --- a/drivers/gpu/drm/mga/mga_irq.c
35361 +++ b/drivers/gpu/drm/mga/mga_irq.c
35362 @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35363 if (crtc != 0)
35364 return 0;
35365
35366 - return atomic_read(&dev_priv->vbl_received);
35367 + return atomic_read_unchecked(&dev_priv->vbl_received);
35368 }
35369
35370
35371 @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35372 /* VBLANK interrupt */
35373 if (status & MGA_VLINEPEN) {
35374 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35375 - atomic_inc(&dev_priv->vbl_received);
35376 + atomic_inc_unchecked(&dev_priv->vbl_received);
35377 drm_handle_vblank(dev, 0);
35378 handled = 1;
35379 }
35380 @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35381 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35382 MGA_WRITE(MGA_PRIMEND, prim_end);
35383
35384 - atomic_inc(&dev_priv->last_fence_retired);
35385 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
35386 DRM_WAKEUP(&dev_priv->fence_queue);
35387 handled = 1;
35388 }
35389 @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35390 * using fences.
35391 */
35392 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35393 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35394 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35395 - *sequence) <= (1 << 23)));
35396
35397 *sequence = cur_fence;
35398 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35399 index 50a6dd0..ea66ed8 100644
35400 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35401 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35402 @@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35403 struct bit_table {
35404 const char id;
35405 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35406 -};
35407 +} __no_const;
35408
35409 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35410
35411 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35412 index 9c39baf..30a22be 100644
35413 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35414 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35415 @@ -81,7 +81,7 @@ struct nouveau_drm {
35416 struct drm_global_reference mem_global_ref;
35417 struct ttm_bo_global_ref bo_global_ref;
35418 struct ttm_bo_device bdev;
35419 - atomic_t validate_sequence;
35420 + atomic_unchecked_t validate_sequence;
35421 int (*move)(struct nouveau_channel *,
35422 struct ttm_buffer_object *,
35423 struct ttm_mem_reg *, struct ttm_mem_reg *);
35424 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35425 index b4b4d0c..b7edc15 100644
35426 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35427 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35428 @@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35429 int ret, i;
35430 struct nouveau_bo *res_bo = NULL;
35431
35432 - sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35433 + sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35434 retry:
35435 if (++trycnt > 100000) {
35436 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
35437 @@ -359,7 +359,7 @@ retry:
35438 if (ret) {
35439 validate_fini(op, NULL);
35440 if (unlikely(ret == -EAGAIN)) {
35441 - sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35442 + sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35443 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
35444 sequence);
35445 if (!ret)
35446 diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35447 index 08214bc..9208577 100644
35448 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35449 +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35450 @@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35451 unsigned long arg)
35452 {
35453 unsigned int nr = DRM_IOCTL_NR(cmd);
35454 - drm_ioctl_compat_t *fn = NULL;
35455 + drm_ioctl_compat_t fn = NULL;
35456 int ret;
35457
35458 if (nr < DRM_COMMAND_BASE)
35459 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35460 index 25d3495..d81aaf6 100644
35461 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35462 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35463 @@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35464 bool can_switch;
35465
35466 spin_lock(&dev->count_lock);
35467 - can_switch = (dev->open_count == 0);
35468 + can_switch = (local_read(&dev->open_count) == 0);
35469 spin_unlock(&dev->count_lock);
35470 return can_switch;
35471 }
35472 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35473 index d4660cf..70dbe65 100644
35474 --- a/drivers/gpu/drm/r128/r128_cce.c
35475 +++ b/drivers/gpu/drm/r128/r128_cce.c
35476 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35477
35478 /* GH: Simple idle check.
35479 */
35480 - atomic_set(&dev_priv->idle_count, 0);
35481 + atomic_set_unchecked(&dev_priv->idle_count, 0);
35482
35483 /* We don't support anything other than bus-mastering ring mode,
35484 * but the ring can be in either AGP or PCI space for the ring
35485 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35486 index 930c71b..499aded 100644
35487 --- a/drivers/gpu/drm/r128/r128_drv.h
35488 +++ b/drivers/gpu/drm/r128/r128_drv.h
35489 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35490 int is_pci;
35491 unsigned long cce_buffers_offset;
35492
35493 - atomic_t idle_count;
35494 + atomic_unchecked_t idle_count;
35495
35496 int page_flipping;
35497 int current_page;
35498 u32 crtc_offset;
35499 u32 crtc_offset_cntl;
35500
35501 - atomic_t vbl_received;
35502 + atomic_unchecked_t vbl_received;
35503
35504 u32 color_fmt;
35505 unsigned int front_offset;
35506 diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35507 index a954c54..9cc595c 100644
35508 --- a/drivers/gpu/drm/r128/r128_ioc32.c
35509 +++ b/drivers/gpu/drm/r128/r128_ioc32.c
35510 @@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35511 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35512 }
35513
35514 -drm_ioctl_compat_t *r128_compat_ioctls[] = {
35515 +drm_ioctl_compat_t r128_compat_ioctls[] = {
35516 [DRM_R128_INIT] = compat_r128_init,
35517 [DRM_R128_DEPTH] = compat_r128_depth,
35518 [DRM_R128_STIPPLE] = compat_r128_stipple,
35519 @@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35520 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35521 {
35522 unsigned int nr = DRM_IOCTL_NR(cmd);
35523 - drm_ioctl_compat_t *fn = NULL;
35524 int ret;
35525
35526 if (nr < DRM_COMMAND_BASE)
35527 return drm_compat_ioctl(filp, cmd, arg);
35528
35529 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35530 - fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35531 -
35532 - if (fn != NULL)
35533 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35534 + drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35535 ret = (*fn) (filp, cmd, arg);
35536 - else
35537 + } else
35538 ret = drm_ioctl(filp, cmd, arg);
35539
35540 return ret;
35541 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35542 index 2ea4f09..d391371 100644
35543 --- a/drivers/gpu/drm/r128/r128_irq.c
35544 +++ b/drivers/gpu/drm/r128/r128_irq.c
35545 @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35546 if (crtc != 0)
35547 return 0;
35548
35549 - return atomic_read(&dev_priv->vbl_received);
35550 + return atomic_read_unchecked(&dev_priv->vbl_received);
35551 }
35552
35553 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35554 @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35555 /* VBLANK interrupt */
35556 if (status & R128_CRTC_VBLANK_INT) {
35557 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35558 - atomic_inc(&dev_priv->vbl_received);
35559 + atomic_inc_unchecked(&dev_priv->vbl_received);
35560 drm_handle_vblank(dev, 0);
35561 return IRQ_HANDLED;
35562 }
35563 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35564 index 19bb7e6..de7e2a2 100644
35565 --- a/drivers/gpu/drm/r128/r128_state.c
35566 +++ b/drivers/gpu/drm/r128/r128_state.c
35567 @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
35568
35569 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
35570 {
35571 - if (atomic_read(&dev_priv->idle_count) == 0)
35572 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
35573 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35574 else
35575 - atomic_set(&dev_priv->idle_count, 0);
35576 + atomic_set_unchecked(&dev_priv->idle_count, 0);
35577 }
35578
35579 #endif
35580 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35581 index 5a82b6b..9e69c73 100644
35582 --- a/drivers/gpu/drm/radeon/mkregtable.c
35583 +++ b/drivers/gpu/drm/radeon/mkregtable.c
35584 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35585 regex_t mask_rex;
35586 regmatch_t match[4];
35587 char buf[1024];
35588 - size_t end;
35589 + long end;
35590 int len;
35591 int done = 0;
35592 int r;
35593 unsigned o;
35594 struct offset *offset;
35595 char last_reg_s[10];
35596 - int last_reg;
35597 + unsigned long last_reg;
35598
35599 if (regcomp
35600 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35601 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
35602 index 44b8034..cc722fd 100644
35603 --- a/drivers/gpu/drm/radeon/radeon_device.c
35604 +++ b/drivers/gpu/drm/radeon/radeon_device.c
35605 @@ -977,7 +977,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
35606 bool can_switch;
35607
35608 spin_lock(&dev->count_lock);
35609 - can_switch = (dev->open_count == 0);
35610 + can_switch = (local_read(&dev->open_count) == 0);
35611 spin_unlock(&dev->count_lock);
35612 return can_switch;
35613 }
35614 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35615 index b369d42..8dd04eb 100644
35616 --- a/drivers/gpu/drm/radeon/radeon_drv.h
35617 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
35618 @@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
35619
35620 /* SW interrupt */
35621 wait_queue_head_t swi_queue;
35622 - atomic_t swi_emitted;
35623 + atomic_unchecked_t swi_emitted;
35624 int vblank_crtc;
35625 uint32_t irq_enable_reg;
35626 uint32_t r500_disp_irq_reg;
35627 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35628 index c180df8..5fd8186 100644
35629 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35630 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35631 @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35632 request = compat_alloc_user_space(sizeof(*request));
35633 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35634 || __put_user(req32.param, &request->param)
35635 - || __put_user((void __user *)(unsigned long)req32.value,
35636 + || __put_user((unsigned long)req32.value,
35637 &request->value))
35638 return -EFAULT;
35639
35640 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35641 #define compat_radeon_cp_setparam NULL
35642 #endif /* X86_64 || IA64 */
35643
35644 -static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35645 +static drm_ioctl_compat_t radeon_compat_ioctls[] = {
35646 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
35647 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
35648 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
35649 @@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35650 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35651 {
35652 unsigned int nr = DRM_IOCTL_NR(cmd);
35653 - drm_ioctl_compat_t *fn = NULL;
35654 int ret;
35655
35656 if (nr < DRM_COMMAND_BASE)
35657 return drm_compat_ioctl(filp, cmd, arg);
35658
35659 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
35660 - fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35661 -
35662 - if (fn != NULL)
35663 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
35664 + drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35665 ret = (*fn) (filp, cmd, arg);
35666 - else
35667 + } else
35668 ret = drm_ioctl(filp, cmd, arg);
35669
35670 return ret;
35671 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35672 index 8d68e97..9dcfed8 100644
35673 --- a/drivers/gpu/drm/radeon/radeon_irq.c
35674 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
35675 @@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35676 unsigned int ret;
35677 RING_LOCALS;
35678
35679 - atomic_inc(&dev_priv->swi_emitted);
35680 - ret = atomic_read(&dev_priv->swi_emitted);
35681 + atomic_inc_unchecked(&dev_priv->swi_emitted);
35682 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35683
35684 BEGIN_RING(4);
35685 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35686 @@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35687 drm_radeon_private_t *dev_priv =
35688 (drm_radeon_private_t *) dev->dev_private;
35689
35690 - atomic_set(&dev_priv->swi_emitted, 0);
35691 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35692 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35693
35694 dev->max_vblank_count = 0x001fffff;
35695 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35696 index 4d20910..6726b6d 100644
35697 --- a/drivers/gpu/drm/radeon/radeon_state.c
35698 +++ b/drivers/gpu/drm/radeon/radeon_state.c
35699 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
35700 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
35701 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
35702
35703 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35704 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35705 sarea_priv->nbox * sizeof(depth_boxes[0])))
35706 return -EFAULT;
35707
35708 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35709 {
35710 drm_radeon_private_t *dev_priv = dev->dev_private;
35711 drm_radeon_getparam_t *param = data;
35712 - int value;
35713 + int value = 0;
35714
35715 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35716
35717 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35718 index 93f760e..8088227 100644
35719 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
35720 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35721 @@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
35722 man->size = size >> PAGE_SHIFT;
35723 }
35724
35725 -static struct vm_operations_struct radeon_ttm_vm_ops;
35726 +static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
35727 static const struct vm_operations_struct *ttm_vm_ops = NULL;
35728
35729 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35730 @@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35731 }
35732 if (unlikely(ttm_vm_ops == NULL)) {
35733 ttm_vm_ops = vma->vm_ops;
35734 + pax_open_kernel();
35735 radeon_ttm_vm_ops = *ttm_vm_ops;
35736 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35737 + pax_close_kernel();
35738 }
35739 vma->vm_ops = &radeon_ttm_vm_ops;
35740 return 0;
35741 @@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
35742 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
35743 else
35744 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
35745 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35746 - radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35747 - radeon_mem_types_list[i].driver_features = 0;
35748 + pax_open_kernel();
35749 + *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35750 + *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35751 + *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35752 if (i == 0)
35753 - radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35754 + *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35755 else
35756 - radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35757 -
35758 + *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35759 + pax_close_kernel();
35760 }
35761 /* Add ttm page pool to debugfs */
35762 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
35763 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35764 - radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35765 - radeon_mem_types_list[i].driver_features = 0;
35766 - radeon_mem_types_list[i++].data = NULL;
35767 + pax_open_kernel();
35768 + *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35769 + *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35770 + *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35771 + *(void **)&radeon_mem_types_list[i++].data = NULL;
35772 + pax_close_kernel();
35773 #ifdef CONFIG_SWIOTLB
35774 if (swiotlb_nr_tbl()) {
35775 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
35776 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35777 - radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35778 - radeon_mem_types_list[i].driver_features = 0;
35779 - radeon_mem_types_list[i++].data = NULL;
35780 + pax_open_kernel();
35781 + *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35782 + *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35783 + *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35784 + *(void **)&radeon_mem_types_list[i++].data = NULL;
35785 + pax_close_kernel();
35786 }
35787 #endif
35788 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
35789 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35790 index 5706d2a..17aedaa 100644
35791 --- a/drivers/gpu/drm/radeon/rs690.c
35792 +++ b/drivers/gpu/drm/radeon/rs690.c
35793 @@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35794 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35795 rdev->pm.sideport_bandwidth.full)
35796 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35797 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
35798 + read_delay_latency.full = dfixed_const(800 * 1000);
35799 read_delay_latency.full = dfixed_div(read_delay_latency,
35800 rdev->pm.igp_sideport_mclk);
35801 + a.full = dfixed_const(370);
35802 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
35803 } else {
35804 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35805 rdev->pm.k8_bandwidth.full)
35806 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35807 index bd2a3b4..122d9ad 100644
35808 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
35809 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35810 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
35811 static int ttm_pool_mm_shrink(struct shrinker *shrink,
35812 struct shrink_control *sc)
35813 {
35814 - static atomic_t start_pool = ATOMIC_INIT(0);
35815 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
35816 unsigned i;
35817 - unsigned pool_offset = atomic_add_return(1, &start_pool);
35818 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
35819 struct ttm_page_pool *pool;
35820 int shrink_pages = sc->nr_to_scan;
35821
35822 diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
35823 index 9f4be3d..cbc9fcc 100644
35824 --- a/drivers/gpu/drm/udl/udl_fb.c
35825 +++ b/drivers/gpu/drm/udl/udl_fb.c
35826 @@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
35827 fb_deferred_io_cleanup(info);
35828 kfree(info->fbdefio);
35829 info->fbdefio = NULL;
35830 - info->fbops->fb_mmap = udl_fb_mmap;
35831 }
35832
35833 pr_warn("released /dev/fb%d user=%d count=%d\n",
35834 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35835 index 893a650..6190d3b 100644
35836 --- a/drivers/gpu/drm/via/via_drv.h
35837 +++ b/drivers/gpu/drm/via/via_drv.h
35838 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35839 typedef uint32_t maskarray_t[5];
35840
35841 typedef struct drm_via_irq {
35842 - atomic_t irq_received;
35843 + atomic_unchecked_t irq_received;
35844 uint32_t pending_mask;
35845 uint32_t enable_mask;
35846 wait_queue_head_t irq_queue;
35847 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
35848 struct timeval last_vblank;
35849 int last_vblank_valid;
35850 unsigned usec_per_vblank;
35851 - atomic_t vbl_received;
35852 + atomic_unchecked_t vbl_received;
35853 drm_via_state_t hc_state;
35854 char pci_buf[VIA_PCI_BUF_SIZE];
35855 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35856 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35857 index ac98964..5dbf512 100644
35858 --- a/drivers/gpu/drm/via/via_irq.c
35859 +++ b/drivers/gpu/drm/via/via_irq.c
35860 @@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35861 if (crtc != 0)
35862 return 0;
35863
35864 - return atomic_read(&dev_priv->vbl_received);
35865 + return atomic_read_unchecked(&dev_priv->vbl_received);
35866 }
35867
35868 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35869 @@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35870
35871 status = VIA_READ(VIA_REG_INTERRUPT);
35872 if (status & VIA_IRQ_VBLANK_PENDING) {
35873 - atomic_inc(&dev_priv->vbl_received);
35874 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35875 + atomic_inc_unchecked(&dev_priv->vbl_received);
35876 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35877 do_gettimeofday(&cur_vblank);
35878 if (dev_priv->last_vblank_valid) {
35879 dev_priv->usec_per_vblank =
35880 @@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35881 dev_priv->last_vblank = cur_vblank;
35882 dev_priv->last_vblank_valid = 1;
35883 }
35884 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35885 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35886 DRM_DEBUG("US per vblank is: %u\n",
35887 dev_priv->usec_per_vblank);
35888 }
35889 @@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35890
35891 for (i = 0; i < dev_priv->num_irqs; ++i) {
35892 if (status & cur_irq->pending_mask) {
35893 - atomic_inc(&cur_irq->irq_received);
35894 + atomic_inc_unchecked(&cur_irq->irq_received);
35895 DRM_WAKEUP(&cur_irq->irq_queue);
35896 handled = 1;
35897 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
35898 @@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
35899 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35900 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35901 masks[irq][4]));
35902 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35903 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35904 } else {
35905 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35906 (((cur_irq_sequence =
35907 - atomic_read(&cur_irq->irq_received)) -
35908 + atomic_read_unchecked(&cur_irq->irq_received)) -
35909 *sequence) <= (1 << 23)));
35910 }
35911 *sequence = cur_irq_sequence;
35912 @@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
35913 }
35914
35915 for (i = 0; i < dev_priv->num_irqs; ++i) {
35916 - atomic_set(&cur_irq->irq_received, 0);
35917 + atomic_set_unchecked(&cur_irq->irq_received, 0);
35918 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35919 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35920 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35921 @@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35922 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35923 case VIA_IRQ_RELATIVE:
35924 irqwait->request.sequence +=
35925 - atomic_read(&cur_irq->irq_received);
35926 + atomic_read_unchecked(&cur_irq->irq_received);
35927 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35928 case VIA_IRQ_ABSOLUTE:
35929 break;
35930 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35931 index 13aeda7..4a952d1 100644
35932 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35933 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35934 @@ -290,7 +290,7 @@ struct vmw_private {
35935 * Fencing and IRQs.
35936 */
35937
35938 - atomic_t marker_seq;
35939 + atomic_unchecked_t marker_seq;
35940 wait_queue_head_t fence_queue;
35941 wait_queue_head_t fifo_queue;
35942 int fence_queue_waiters; /* Protected by hw_mutex */
35943 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35944 index 3eb1486..0a47ee9 100644
35945 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35946 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35947 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
35948 (unsigned int) min,
35949 (unsigned int) fifo->capabilities);
35950
35951 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35952 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35953 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
35954 vmw_marker_queue_init(&fifo->marker_queue);
35955 return vmw_fifo_send_fence(dev_priv, &dummy);
35956 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
35957 if (reserveable)
35958 iowrite32(bytes, fifo_mem +
35959 SVGA_FIFO_RESERVED);
35960 - return fifo_mem + (next_cmd >> 2);
35961 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
35962 } else {
35963 need_bounce = true;
35964 }
35965 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
35966
35967 fm = vmw_fifo_reserve(dev_priv, bytes);
35968 if (unlikely(fm == NULL)) {
35969 - *seqno = atomic_read(&dev_priv->marker_seq);
35970 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
35971 ret = -ENOMEM;
35972 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
35973 false, 3*HZ);
35974 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
35975 }
35976
35977 do {
35978 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
35979 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
35980 } while (*seqno == 0);
35981
35982 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
35983 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35984 index 4640adb..e1384ed 100644
35985 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35986 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35987 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
35988 * emitted. Then the fence is stale and signaled.
35989 */
35990
35991 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
35992 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
35993 > VMW_FENCE_WRAP);
35994
35995 return ret;
35996 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
35997
35998 if (fifo_idle)
35999 down_read(&fifo_state->rwsem);
36000 - signal_seq = atomic_read(&dev_priv->marker_seq);
36001 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36002 ret = 0;
36003
36004 for (;;) {
36005 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36006 index 8a8725c2..afed796 100644
36007 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36008 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36009 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36010 while (!vmw_lag_lt(queue, us)) {
36011 spin_lock(&queue->lock);
36012 if (list_empty(&queue->head))
36013 - seqno = atomic_read(&dev_priv->marker_seq);
36014 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36015 else {
36016 marker = list_first_entry(&queue->head,
36017 struct vmw_marker, head);
36018 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36019 index aa341d1..ef07090 100644
36020 --- a/drivers/hid/hid-core.c
36021 +++ b/drivers/hid/hid-core.c
36022 @@ -2267,7 +2267,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36023
36024 int hid_add_device(struct hid_device *hdev)
36025 {
36026 - static atomic_t id = ATOMIC_INIT(0);
36027 + static atomic_unchecked_t id = ATOMIC_INIT(0);
36028 int ret;
36029
36030 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36031 @@ -2301,7 +2301,7 @@ int hid_add_device(struct hid_device *hdev)
36032 /* XXX hack, any other cleaner solution after the driver core
36033 * is converted to allow more than 20 bytes as the device name? */
36034 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36035 - hdev->vendor, hdev->product, atomic_inc_return(&id));
36036 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36037
36038 hid_debug_register(hdev, dev_name(&hdev->dev));
36039 ret = device_add(&hdev->dev);
36040 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36041 index 90124ff..3761764 100644
36042 --- a/drivers/hid/hid-wiimote-debug.c
36043 +++ b/drivers/hid/hid-wiimote-debug.c
36044 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36045 else if (size == 0)
36046 return -EIO;
36047
36048 - if (copy_to_user(u, buf, size))
36049 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
36050 return -EFAULT;
36051
36052 *off += size;
36053 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36054 index 0b122f8..b1d8160 100644
36055 --- a/drivers/hv/channel.c
36056 +++ b/drivers/hv/channel.c
36057 @@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36058 int ret = 0;
36059 int t;
36060
36061 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36062 - atomic_inc(&vmbus_connection.next_gpadl_handle);
36063 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36064 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36065
36066 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36067 if (ret)
36068 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36069 index 7311589..861e9ef 100644
36070 --- a/drivers/hv/hv.c
36071 +++ b/drivers/hv/hv.c
36072 @@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36073 u64 output_address = (output) ? virt_to_phys(output) : 0;
36074 u32 output_address_hi = output_address >> 32;
36075 u32 output_address_lo = output_address & 0xFFFFFFFF;
36076 - void *hypercall_page = hv_context.hypercall_page;
36077 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36078
36079 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36080 "=a"(hv_status_lo) : "d" (control_hi),
36081 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36082 index 12f2f9e..679603c 100644
36083 --- a/drivers/hv/hyperv_vmbus.h
36084 +++ b/drivers/hv/hyperv_vmbus.h
36085 @@ -591,7 +591,7 @@ enum vmbus_connect_state {
36086 struct vmbus_connection {
36087 enum vmbus_connect_state conn_state;
36088
36089 - atomic_t next_gpadl_handle;
36090 + atomic_unchecked_t next_gpadl_handle;
36091
36092 /*
36093 * Represents channel interrupts. Each bit position represents a
36094 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36095 index bf421e0..ce2c897 100644
36096 --- a/drivers/hv/vmbus_drv.c
36097 +++ b/drivers/hv/vmbus_drv.c
36098 @@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36099 {
36100 int ret = 0;
36101
36102 - static atomic_t device_num = ATOMIC_INIT(0);
36103 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36104
36105 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36106 - atomic_inc_return(&device_num));
36107 + atomic_inc_return_unchecked(&device_num));
36108
36109 child_device_obj->device.bus = &hv_bus;
36110 child_device_obj->device.parent = &hv_acpi_dev->dev;
36111 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36112 index 6351aba..dc4aaf4 100644
36113 --- a/drivers/hwmon/acpi_power_meter.c
36114 +++ b/drivers/hwmon/acpi_power_meter.c
36115 @@ -117,7 +117,7 @@ struct sensor_template {
36116 struct device_attribute *devattr,
36117 const char *buf, size_t count);
36118 int index;
36119 -};
36120 +} __do_const;
36121
36122 /* Averaging interval */
36123 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36124 @@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36125 struct sensor_template *attrs)
36126 {
36127 struct device *dev = &resource->acpi_dev->dev;
36128 - struct sensor_device_attribute *sensors =
36129 + sensor_device_attribute_no_const *sensors =
36130 &resource->sensors[resource->num_sensors];
36131 int res = 0;
36132
36133 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36134 index b41baff..4953e4d 100644
36135 --- a/drivers/hwmon/applesmc.c
36136 +++ b/drivers/hwmon/applesmc.c
36137 @@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36138 {
36139 struct applesmc_node_group *grp;
36140 struct applesmc_dev_attr *node;
36141 - struct attribute *attr;
36142 + attribute_no_const *attr;
36143 int ret, i;
36144
36145 for (grp = groups; grp->format; grp++) {
36146 diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36147 index b25c643..a13460d 100644
36148 --- a/drivers/hwmon/asus_atk0110.c
36149 +++ b/drivers/hwmon/asus_atk0110.c
36150 @@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36151 struct atk_sensor_data {
36152 struct list_head list;
36153 struct atk_data *data;
36154 - struct device_attribute label_attr;
36155 - struct device_attribute input_attr;
36156 - struct device_attribute limit1_attr;
36157 - struct device_attribute limit2_attr;
36158 + device_attribute_no_const label_attr;
36159 + device_attribute_no_const input_attr;
36160 + device_attribute_no_const limit1_attr;
36161 + device_attribute_no_const limit2_attr;
36162 char label_attr_name[ATTR_NAME_SIZE];
36163 char input_attr_name[ATTR_NAME_SIZE];
36164 char limit1_attr_name[ATTR_NAME_SIZE];
36165 @@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36166 static struct device_attribute atk_name_attr =
36167 __ATTR(name, 0444, atk_name_show, NULL);
36168
36169 -static void atk_init_attribute(struct device_attribute *attr, char *name,
36170 +static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36171 sysfs_show_func show)
36172 {
36173 sysfs_attr_init(&attr->attr);
36174 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36175 index 3f1e297..a6cafb5 100644
36176 --- a/drivers/hwmon/coretemp.c
36177 +++ b/drivers/hwmon/coretemp.c
36178 @@ -791,7 +791,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36179 return NOTIFY_OK;
36180 }
36181
36182 -static struct notifier_block coretemp_cpu_notifier __refdata = {
36183 +static struct notifier_block coretemp_cpu_notifier = {
36184 .notifier_call = coretemp_cpu_callback,
36185 };
36186
36187 diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36188 index a14f634..2916ee2 100644
36189 --- a/drivers/hwmon/ibmaem.c
36190 +++ b/drivers/hwmon/ibmaem.c
36191 @@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
36192 struct aem_rw_sensor_template *rw)
36193 {
36194 struct device *dev = &data->pdev->dev;
36195 - struct sensor_device_attribute *sensors = data->sensors;
36196 + sensor_device_attribute_no_const *sensors = data->sensors;
36197 int err;
36198
36199 /* Set up read-only sensors */
36200 diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36201 index 9add6092..ee7ba3f 100644
36202 --- a/drivers/hwmon/pmbus/pmbus_core.c
36203 +++ b/drivers/hwmon/pmbus/pmbus_core.c
36204 @@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
36205 return 0;
36206 }
36207
36208 -static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36209 +static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
36210 const char *name,
36211 umode_t mode,
36212 ssize_t (*show)(struct device *dev,
36213 @@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36214 dev_attr->store = store;
36215 }
36216
36217 -static void pmbus_attr_init(struct sensor_device_attribute *a,
36218 +static void pmbus_attr_init(sensor_device_attribute_no_const *a,
36219 const char *name,
36220 umode_t mode,
36221 ssize_t (*show)(struct device *dev,
36222 @@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
36223 u16 reg, u8 mask)
36224 {
36225 struct pmbus_boolean *boolean;
36226 - struct sensor_device_attribute *a;
36227 + sensor_device_attribute_no_const *a;
36228
36229 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
36230 if (!boolean)
36231 @@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
36232 bool update, bool readonly)
36233 {
36234 struct pmbus_sensor *sensor;
36235 - struct device_attribute *a;
36236 + device_attribute_no_const *a;
36237
36238 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
36239 if (!sensor)
36240 @@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
36241 const char *lstring, int index)
36242 {
36243 struct pmbus_label *label;
36244 - struct device_attribute *a;
36245 + device_attribute_no_const *a;
36246
36247 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
36248 if (!label)
36249 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36250 index 2507f90..1645765 100644
36251 --- a/drivers/hwmon/sht15.c
36252 +++ b/drivers/hwmon/sht15.c
36253 @@ -169,7 +169,7 @@ struct sht15_data {
36254 int supply_uv;
36255 bool supply_uv_valid;
36256 struct work_struct update_supply_work;
36257 - atomic_t interrupt_handled;
36258 + atomic_unchecked_t interrupt_handled;
36259 };
36260
36261 /**
36262 @@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
36263 ret = gpio_direction_input(data->pdata->gpio_data);
36264 if (ret)
36265 return ret;
36266 - atomic_set(&data->interrupt_handled, 0);
36267 + atomic_set_unchecked(&data->interrupt_handled, 0);
36268
36269 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36270 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36271 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36272 /* Only relevant if the interrupt hasn't occurred. */
36273 - if (!atomic_read(&data->interrupt_handled))
36274 + if (!atomic_read_unchecked(&data->interrupt_handled))
36275 schedule_work(&data->read_work);
36276 }
36277 ret = wait_event_timeout(data->wait_queue,
36278 @@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36279
36280 /* First disable the interrupt */
36281 disable_irq_nosync(irq);
36282 - atomic_inc(&data->interrupt_handled);
36283 + atomic_inc_unchecked(&data->interrupt_handled);
36284 /* Then schedule a reading work struct */
36285 if (data->state != SHT15_READING_NOTHING)
36286 schedule_work(&data->read_work);
36287 @@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36288 * If not, then start the interrupt again - care here as could
36289 * have gone low in meantime so verify it hasn't!
36290 */
36291 - atomic_set(&data->interrupt_handled, 0);
36292 + atomic_set_unchecked(&data->interrupt_handled, 0);
36293 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36294 /* If still not occurred or another handler was scheduled */
36295 if (gpio_get_value(data->pdata->gpio_data)
36296 - || atomic_read(&data->interrupt_handled))
36297 + || atomic_read_unchecked(&data->interrupt_handled))
36298 return;
36299 }
36300
36301 diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36302 index 76f157b..9c0db1b 100644
36303 --- a/drivers/hwmon/via-cputemp.c
36304 +++ b/drivers/hwmon/via-cputemp.c
36305 @@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36306 return NOTIFY_OK;
36307 }
36308
36309 -static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36310 +static struct notifier_block via_cputemp_cpu_notifier = {
36311 .notifier_call = via_cputemp_cpu_callback,
36312 };
36313
36314 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36315 index 378fcb5..5e91fa8 100644
36316 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
36317 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36318 @@ -43,7 +43,7 @@
36319 extern struct i2c_adapter amd756_smbus;
36320
36321 static struct i2c_adapter *s4882_adapter;
36322 -static struct i2c_algorithm *s4882_algo;
36323 +static i2c_algorithm_no_const *s4882_algo;
36324
36325 /* Wrapper access functions for multiplexed SMBus */
36326 static DEFINE_MUTEX(amd756_lock);
36327 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36328 index 29015eb..af2d8e9 100644
36329 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36330 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36331 @@ -41,7 +41,7 @@
36332 extern struct i2c_adapter *nforce2_smbus;
36333
36334 static struct i2c_adapter *s4985_adapter;
36335 -static struct i2c_algorithm *s4985_algo;
36336 +static i2c_algorithm_no_const *s4985_algo;
36337
36338 /* Wrapper access functions for multiplexed SMBus */
36339 static DEFINE_MUTEX(nforce2_lock);
36340 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36341 index 8126824..55a2798 100644
36342 --- a/drivers/ide/ide-cd.c
36343 +++ b/drivers/ide/ide-cd.c
36344 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36345 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36346 if ((unsigned long)buf & alignment
36347 || blk_rq_bytes(rq) & q->dma_pad_mask
36348 - || object_is_on_stack(buf))
36349 + || object_starts_on_stack(buf))
36350 drive->dma = 0;
36351 }
36352 }
36353 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36354 index 8848f16..f8e6dd8 100644
36355 --- a/drivers/iio/industrialio-core.c
36356 +++ b/drivers/iio/industrialio-core.c
36357 @@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36358 }
36359
36360 static
36361 -int __iio_device_attr_init(struct device_attribute *dev_attr,
36362 +int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36363 const char *postfix,
36364 struct iio_chan_spec const *chan,
36365 ssize_t (*readfunc)(struct device *dev,
36366 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36367 index 784b97c..c9ceadf 100644
36368 --- a/drivers/infiniband/core/cm.c
36369 +++ b/drivers/infiniband/core/cm.c
36370 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36371
36372 struct cm_counter_group {
36373 struct kobject obj;
36374 - atomic_long_t counter[CM_ATTR_COUNT];
36375 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36376 };
36377
36378 struct cm_counter_attribute {
36379 @@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36380 struct ib_mad_send_buf *msg = NULL;
36381 int ret;
36382
36383 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36384 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36385 counter[CM_REQ_COUNTER]);
36386
36387 /* Quick state check to discard duplicate REQs. */
36388 @@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36389 if (!cm_id_priv)
36390 return;
36391
36392 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36393 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36394 counter[CM_REP_COUNTER]);
36395 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36396 if (ret)
36397 @@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
36398 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36399 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36400 spin_unlock_irq(&cm_id_priv->lock);
36401 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36402 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36403 counter[CM_RTU_COUNTER]);
36404 goto out;
36405 }
36406 @@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
36407 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36408 dreq_msg->local_comm_id);
36409 if (!cm_id_priv) {
36410 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36411 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36412 counter[CM_DREQ_COUNTER]);
36413 cm_issue_drep(work->port, work->mad_recv_wc);
36414 return -EINVAL;
36415 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
36416 case IB_CM_MRA_REP_RCVD:
36417 break;
36418 case IB_CM_TIMEWAIT:
36419 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36420 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36421 counter[CM_DREQ_COUNTER]);
36422 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36423 goto unlock;
36424 @@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
36425 cm_free_msg(msg);
36426 goto deref;
36427 case IB_CM_DREQ_RCVD:
36428 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36429 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36430 counter[CM_DREQ_COUNTER]);
36431 goto unlock;
36432 default:
36433 @@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
36434 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36435 cm_id_priv->msg, timeout)) {
36436 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36437 - atomic_long_inc(&work->port->
36438 + atomic_long_inc_unchecked(&work->port->
36439 counter_group[CM_RECV_DUPLICATES].
36440 counter[CM_MRA_COUNTER]);
36441 goto out;
36442 @@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
36443 break;
36444 case IB_CM_MRA_REQ_RCVD:
36445 case IB_CM_MRA_REP_RCVD:
36446 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36447 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36448 counter[CM_MRA_COUNTER]);
36449 /* fall through */
36450 default:
36451 @@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
36452 case IB_CM_LAP_IDLE:
36453 break;
36454 case IB_CM_MRA_LAP_SENT:
36455 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36456 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36457 counter[CM_LAP_COUNTER]);
36458 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36459 goto unlock;
36460 @@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
36461 cm_free_msg(msg);
36462 goto deref;
36463 case IB_CM_LAP_RCVD:
36464 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36465 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36466 counter[CM_LAP_COUNTER]);
36467 goto unlock;
36468 default:
36469 @@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36470 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36471 if (cur_cm_id_priv) {
36472 spin_unlock_irq(&cm.lock);
36473 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36474 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36475 counter[CM_SIDR_REQ_COUNTER]);
36476 goto out; /* Duplicate message. */
36477 }
36478 @@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36479 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36480 msg->retries = 1;
36481
36482 - atomic_long_add(1 + msg->retries,
36483 + atomic_long_add_unchecked(1 + msg->retries,
36484 &port->counter_group[CM_XMIT].counter[attr_index]);
36485 if (msg->retries)
36486 - atomic_long_add(msg->retries,
36487 + atomic_long_add_unchecked(msg->retries,
36488 &port->counter_group[CM_XMIT_RETRIES].
36489 counter[attr_index]);
36490
36491 @@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36492 }
36493
36494 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36495 - atomic_long_inc(&port->counter_group[CM_RECV].
36496 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36497 counter[attr_id - CM_ATTR_ID_OFFSET]);
36498
36499 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36500 @@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36501 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36502
36503 return sprintf(buf, "%ld\n",
36504 - atomic_long_read(&group->counter[cm_attr->index]));
36505 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36506 }
36507
36508 static const struct sysfs_ops cm_counter_ops = {
36509 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36510 index 9f5ad7c..588cd84 100644
36511 --- a/drivers/infiniband/core/fmr_pool.c
36512 +++ b/drivers/infiniband/core/fmr_pool.c
36513 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
36514
36515 struct task_struct *thread;
36516
36517 - atomic_t req_ser;
36518 - atomic_t flush_ser;
36519 + atomic_unchecked_t req_ser;
36520 + atomic_unchecked_t flush_ser;
36521
36522 wait_queue_head_t force_wait;
36523 };
36524 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36525 struct ib_fmr_pool *pool = pool_ptr;
36526
36527 do {
36528 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36529 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36530 ib_fmr_batch_release(pool);
36531
36532 - atomic_inc(&pool->flush_ser);
36533 + atomic_inc_unchecked(&pool->flush_ser);
36534 wake_up_interruptible(&pool->force_wait);
36535
36536 if (pool->flush_function)
36537 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36538 }
36539
36540 set_current_state(TASK_INTERRUPTIBLE);
36541 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36542 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36543 !kthread_should_stop())
36544 schedule();
36545 __set_current_state(TASK_RUNNING);
36546 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36547 pool->dirty_watermark = params->dirty_watermark;
36548 pool->dirty_len = 0;
36549 spin_lock_init(&pool->pool_lock);
36550 - atomic_set(&pool->req_ser, 0);
36551 - atomic_set(&pool->flush_ser, 0);
36552 + atomic_set_unchecked(&pool->req_ser, 0);
36553 + atomic_set_unchecked(&pool->flush_ser, 0);
36554 init_waitqueue_head(&pool->force_wait);
36555
36556 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36557 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36558 }
36559 spin_unlock_irq(&pool->pool_lock);
36560
36561 - serial = atomic_inc_return(&pool->req_ser);
36562 + serial = atomic_inc_return_unchecked(&pool->req_ser);
36563 wake_up_process(pool->thread);
36564
36565 if (wait_event_interruptible(pool->force_wait,
36566 - atomic_read(&pool->flush_ser) - serial >= 0))
36567 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36568 return -EINTR;
36569
36570 return 0;
36571 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36572 } else {
36573 list_add_tail(&fmr->list, &pool->dirty_list);
36574 if (++pool->dirty_len >= pool->dirty_watermark) {
36575 - atomic_inc(&pool->req_ser);
36576 + atomic_inc_unchecked(&pool->req_ser);
36577 wake_up_process(pool->thread);
36578 }
36579 }
36580 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
36581 index 903a92d..9262548 100644
36582 --- a/drivers/infiniband/hw/cxgb4/mem.c
36583 +++ b/drivers/infiniband/hw/cxgb4/mem.c
36584 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36585 int err;
36586 struct fw_ri_tpte tpt;
36587 u32 stag_idx;
36588 - static atomic_t key;
36589 + static atomic_unchecked_t key;
36590
36591 if (c4iw_fatal_error(rdev))
36592 return -EIO;
36593 @@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36594 if (rdev->stats.stag.cur > rdev->stats.stag.max)
36595 rdev->stats.stag.max = rdev->stats.stag.cur;
36596 mutex_unlock(&rdev->stats.lock);
36597 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
36598 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
36599 }
36600 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
36601 __func__, stag_state, type, pdid, stag_idx);
36602 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
36603 index 79b3dbc..96e5fcc 100644
36604 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
36605 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
36606 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36607 struct ib_atomic_eth *ateth;
36608 struct ipath_ack_entry *e;
36609 u64 vaddr;
36610 - atomic64_t *maddr;
36611 + atomic64_unchecked_t *maddr;
36612 u64 sdata;
36613 u32 rkey;
36614 u8 next;
36615 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36616 IB_ACCESS_REMOTE_ATOMIC)))
36617 goto nack_acc_unlck;
36618 /* Perform atomic OP and save result. */
36619 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36620 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36621 sdata = be64_to_cpu(ateth->swap_data);
36622 e = &qp->s_ack_queue[qp->r_head_ack_queue];
36623 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
36624 - (u64) atomic64_add_return(sdata, maddr) - sdata :
36625 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36626 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36627 be64_to_cpu(ateth->compare_data),
36628 sdata);
36629 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
36630 index 1f95bba..9530f87 100644
36631 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
36632 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
36633 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
36634 unsigned long flags;
36635 struct ib_wc wc;
36636 u64 sdata;
36637 - atomic64_t *maddr;
36638 + atomic64_unchecked_t *maddr;
36639 enum ib_wc_status send_status;
36640
36641 /*
36642 @@ -382,11 +382,11 @@ again:
36643 IB_ACCESS_REMOTE_ATOMIC)))
36644 goto acc_err;
36645 /* Perform atomic OP and save result. */
36646 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36647 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36648 sdata = wqe->wr.wr.atomic.compare_add;
36649 *(u64 *) sqp->s_sge.sge.vaddr =
36650 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
36651 - (u64) atomic64_add_return(sdata, maddr) - sdata :
36652 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36653 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36654 sdata, wqe->wr.wr.atomic.swap);
36655 goto send_comp;
36656 diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
36657 index 9d3e5c1..d9afe4a 100644
36658 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c
36659 +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
36660 @@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
36661 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
36662 }
36663
36664 -int mthca_QUERY_FW(struct mthca_dev *dev)
36665 +int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
36666 {
36667 struct mthca_mailbox *mailbox;
36668 u32 *outbox;
36669 diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
36670 index ed9a989..e0c5871 100644
36671 --- a/drivers/infiniband/hw/mthca/mthca_mr.c
36672 +++ b/drivers/infiniband/hw/mthca/mthca_mr.c
36673 @@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
36674 return key;
36675 }
36676
36677 -int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
36678 +int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
36679 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
36680 {
36681 struct mthca_mailbox *mailbox;
36682 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36683 index 4291410..d2ab1fb 100644
36684 --- a/drivers/infiniband/hw/nes/nes.c
36685 +++ b/drivers/infiniband/hw/nes/nes.c
36686 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36687 LIST_HEAD(nes_adapter_list);
36688 static LIST_HEAD(nes_dev_list);
36689
36690 -atomic_t qps_destroyed;
36691 +atomic_unchecked_t qps_destroyed;
36692
36693 static unsigned int ee_flsh_adapter;
36694 static unsigned int sysfs_nonidx_addr;
36695 @@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36696 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
36697 struct nes_adapter *nesadapter = nesdev->nesadapter;
36698
36699 - atomic_inc(&qps_destroyed);
36700 + atomic_inc_unchecked(&qps_destroyed);
36701
36702 /* Free the control structures */
36703
36704 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36705 index 33cc589..3bd6538 100644
36706 --- a/drivers/infiniband/hw/nes/nes.h
36707 +++ b/drivers/infiniband/hw/nes/nes.h
36708 @@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
36709 extern unsigned int wqm_quanta;
36710 extern struct list_head nes_adapter_list;
36711
36712 -extern atomic_t cm_connects;
36713 -extern atomic_t cm_accepts;
36714 -extern atomic_t cm_disconnects;
36715 -extern atomic_t cm_closes;
36716 -extern atomic_t cm_connecteds;
36717 -extern atomic_t cm_connect_reqs;
36718 -extern atomic_t cm_rejects;
36719 -extern atomic_t mod_qp_timouts;
36720 -extern atomic_t qps_created;
36721 -extern atomic_t qps_destroyed;
36722 -extern atomic_t sw_qps_destroyed;
36723 +extern atomic_unchecked_t cm_connects;
36724 +extern atomic_unchecked_t cm_accepts;
36725 +extern atomic_unchecked_t cm_disconnects;
36726 +extern atomic_unchecked_t cm_closes;
36727 +extern atomic_unchecked_t cm_connecteds;
36728 +extern atomic_unchecked_t cm_connect_reqs;
36729 +extern atomic_unchecked_t cm_rejects;
36730 +extern atomic_unchecked_t mod_qp_timouts;
36731 +extern atomic_unchecked_t qps_created;
36732 +extern atomic_unchecked_t qps_destroyed;
36733 +extern atomic_unchecked_t sw_qps_destroyed;
36734 extern u32 mh_detected;
36735 extern u32 mh_pauses_sent;
36736 extern u32 cm_packets_sent;
36737 @@ -196,16 +196,16 @@ extern u32 cm_packets_created;
36738 extern u32 cm_packets_received;
36739 extern u32 cm_packets_dropped;
36740 extern u32 cm_packets_retrans;
36741 -extern atomic_t cm_listens_created;
36742 -extern atomic_t cm_listens_destroyed;
36743 +extern atomic_unchecked_t cm_listens_created;
36744 +extern atomic_unchecked_t cm_listens_destroyed;
36745 extern u32 cm_backlog_drops;
36746 -extern atomic_t cm_loopbacks;
36747 -extern atomic_t cm_nodes_created;
36748 -extern atomic_t cm_nodes_destroyed;
36749 -extern atomic_t cm_accel_dropped_pkts;
36750 -extern atomic_t cm_resets_recvd;
36751 -extern atomic_t pau_qps_created;
36752 -extern atomic_t pau_qps_destroyed;
36753 +extern atomic_unchecked_t cm_loopbacks;
36754 +extern atomic_unchecked_t cm_nodes_created;
36755 +extern atomic_unchecked_t cm_nodes_destroyed;
36756 +extern atomic_unchecked_t cm_accel_dropped_pkts;
36757 +extern atomic_unchecked_t cm_resets_recvd;
36758 +extern atomic_unchecked_t pau_qps_created;
36759 +extern atomic_unchecked_t pau_qps_destroyed;
36760
36761 extern u32 int_mod_timer_init;
36762 extern u32 int_mod_cq_depth_256;
36763 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36764 index 24b9f1a..00fd004 100644
36765 --- a/drivers/infiniband/hw/nes/nes_cm.c
36766 +++ b/drivers/infiniband/hw/nes/nes_cm.c
36767 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
36768 u32 cm_packets_retrans;
36769 u32 cm_packets_created;
36770 u32 cm_packets_received;
36771 -atomic_t cm_listens_created;
36772 -atomic_t cm_listens_destroyed;
36773 +atomic_unchecked_t cm_listens_created;
36774 +atomic_unchecked_t cm_listens_destroyed;
36775 u32 cm_backlog_drops;
36776 -atomic_t cm_loopbacks;
36777 -atomic_t cm_nodes_created;
36778 -atomic_t cm_nodes_destroyed;
36779 -atomic_t cm_accel_dropped_pkts;
36780 -atomic_t cm_resets_recvd;
36781 +atomic_unchecked_t cm_loopbacks;
36782 +atomic_unchecked_t cm_nodes_created;
36783 +atomic_unchecked_t cm_nodes_destroyed;
36784 +atomic_unchecked_t cm_accel_dropped_pkts;
36785 +atomic_unchecked_t cm_resets_recvd;
36786
36787 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
36788 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
36789 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
36790
36791 static struct nes_cm_core *g_cm_core;
36792
36793 -atomic_t cm_connects;
36794 -atomic_t cm_accepts;
36795 -atomic_t cm_disconnects;
36796 -atomic_t cm_closes;
36797 -atomic_t cm_connecteds;
36798 -atomic_t cm_connect_reqs;
36799 -atomic_t cm_rejects;
36800 +atomic_unchecked_t cm_connects;
36801 +atomic_unchecked_t cm_accepts;
36802 +atomic_unchecked_t cm_disconnects;
36803 +atomic_unchecked_t cm_closes;
36804 +atomic_unchecked_t cm_connecteds;
36805 +atomic_unchecked_t cm_connect_reqs;
36806 +atomic_unchecked_t cm_rejects;
36807
36808 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
36809 {
36810 @@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
36811 kfree(listener);
36812 listener = NULL;
36813 ret = 0;
36814 - atomic_inc(&cm_listens_destroyed);
36815 + atomic_inc_unchecked(&cm_listens_destroyed);
36816 } else {
36817 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
36818 }
36819 @@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36820 cm_node->rem_mac);
36821
36822 add_hte_node(cm_core, cm_node);
36823 - atomic_inc(&cm_nodes_created);
36824 + atomic_inc_unchecked(&cm_nodes_created);
36825
36826 return cm_node;
36827 }
36828 @@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36829 }
36830
36831 atomic_dec(&cm_core->node_cnt);
36832 - atomic_inc(&cm_nodes_destroyed);
36833 + atomic_inc_unchecked(&cm_nodes_destroyed);
36834 nesqp = cm_node->nesqp;
36835 if (nesqp) {
36836 nesqp->cm_node = NULL;
36837 @@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36838
36839 static void drop_packet(struct sk_buff *skb)
36840 {
36841 - atomic_inc(&cm_accel_dropped_pkts);
36842 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
36843 dev_kfree_skb_any(skb);
36844 }
36845
36846 @@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36847 {
36848
36849 int reset = 0; /* whether to send reset in case of err.. */
36850 - atomic_inc(&cm_resets_recvd);
36851 + atomic_inc_unchecked(&cm_resets_recvd);
36852 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36853 " refcnt=%d\n", cm_node, cm_node->state,
36854 atomic_read(&cm_node->ref_count));
36855 @@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36856 rem_ref_cm_node(cm_node->cm_core, cm_node);
36857 return NULL;
36858 }
36859 - atomic_inc(&cm_loopbacks);
36860 + atomic_inc_unchecked(&cm_loopbacks);
36861 loopbackremotenode->loopbackpartner = cm_node;
36862 loopbackremotenode->tcp_cntxt.rcv_wscale =
36863 NES_CM_DEFAULT_RCV_WND_SCALE;
36864 @@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36865 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
36866 else {
36867 rem_ref_cm_node(cm_core, cm_node);
36868 - atomic_inc(&cm_accel_dropped_pkts);
36869 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
36870 dev_kfree_skb_any(skb);
36871 }
36872 break;
36873 @@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36874
36875 if ((cm_id) && (cm_id->event_handler)) {
36876 if (issue_disconn) {
36877 - atomic_inc(&cm_disconnects);
36878 + atomic_inc_unchecked(&cm_disconnects);
36879 cm_event.event = IW_CM_EVENT_DISCONNECT;
36880 cm_event.status = disconn_status;
36881 cm_event.local_addr = cm_id->local_addr;
36882 @@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36883 }
36884
36885 if (issue_close) {
36886 - atomic_inc(&cm_closes);
36887 + atomic_inc_unchecked(&cm_closes);
36888 nes_disconnect(nesqp, 1);
36889
36890 cm_id->provider_data = nesqp;
36891 @@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36892
36893 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36894 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36895 - atomic_inc(&cm_accepts);
36896 + atomic_inc_unchecked(&cm_accepts);
36897
36898 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36899 netdev_refcnt_read(nesvnic->netdev));
36900 @@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36901 struct nes_cm_core *cm_core;
36902 u8 *start_buff;
36903
36904 - atomic_inc(&cm_rejects);
36905 + atomic_inc_unchecked(&cm_rejects);
36906 cm_node = (struct nes_cm_node *)cm_id->provider_data;
36907 loopback = cm_node->loopbackpartner;
36908 cm_core = cm_node->cm_core;
36909 @@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36910 ntohl(cm_id->local_addr.sin_addr.s_addr),
36911 ntohs(cm_id->local_addr.sin_port));
36912
36913 - atomic_inc(&cm_connects);
36914 + atomic_inc_unchecked(&cm_connects);
36915 nesqp->active_conn = 1;
36916
36917 /* cache the cm_id in the qp */
36918 @@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
36919 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
36920 return err;
36921 }
36922 - atomic_inc(&cm_listens_created);
36923 + atomic_inc_unchecked(&cm_listens_created);
36924 }
36925
36926 cm_id->add_ref(cm_id);
36927 @@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36928
36929 if (nesqp->destroyed)
36930 return;
36931 - atomic_inc(&cm_connecteds);
36932 + atomic_inc_unchecked(&cm_connecteds);
36933 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36934 " local port 0x%04X. jiffies = %lu.\n",
36935 nesqp->hwqp.qp_id,
36936 @@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36937
36938 cm_id->add_ref(cm_id);
36939 ret = cm_id->event_handler(cm_id, &cm_event);
36940 - atomic_inc(&cm_closes);
36941 + atomic_inc_unchecked(&cm_closes);
36942 cm_event.event = IW_CM_EVENT_CLOSE;
36943 cm_event.status = 0;
36944 cm_event.provider_data = cm_id->provider_data;
36945 @@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36946 return;
36947 cm_id = cm_node->cm_id;
36948
36949 - atomic_inc(&cm_connect_reqs);
36950 + atomic_inc_unchecked(&cm_connect_reqs);
36951 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36952 cm_node, cm_id, jiffies);
36953
36954 @@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36955 return;
36956 cm_id = cm_node->cm_id;
36957
36958 - atomic_inc(&cm_connect_reqs);
36959 + atomic_inc_unchecked(&cm_connect_reqs);
36960 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36961 cm_node, cm_id, jiffies);
36962
36963 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
36964 index 4166452..fc952c3 100644
36965 --- a/drivers/infiniband/hw/nes/nes_mgt.c
36966 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
36967 @@ -40,8 +40,8 @@
36968 #include "nes.h"
36969 #include "nes_mgt.h"
36970
36971 -atomic_t pau_qps_created;
36972 -atomic_t pau_qps_destroyed;
36973 +atomic_unchecked_t pau_qps_created;
36974 +atomic_unchecked_t pau_qps_destroyed;
36975
36976 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
36977 {
36978 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
36979 {
36980 struct sk_buff *skb;
36981 unsigned long flags;
36982 - atomic_inc(&pau_qps_destroyed);
36983 + atomic_inc_unchecked(&pau_qps_destroyed);
36984
36985 /* Free packets that have not yet been forwarded */
36986 /* Lock is acquired by skb_dequeue when removing the skb */
36987 @@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
36988 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
36989 skb_queue_head_init(&nesqp->pau_list);
36990 spin_lock_init(&nesqp->pau_lock);
36991 - atomic_inc(&pau_qps_created);
36992 + atomic_inc_unchecked(&pau_qps_created);
36993 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
36994 }
36995
36996 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36997 index 85cf4d1..05d8e71 100644
36998 --- a/drivers/infiniband/hw/nes/nes_nic.c
36999 +++ b/drivers/infiniband/hw/nes/nes_nic.c
37000 @@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37001 target_stat_values[++index] = mh_detected;
37002 target_stat_values[++index] = mh_pauses_sent;
37003 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37004 - target_stat_values[++index] = atomic_read(&cm_connects);
37005 - target_stat_values[++index] = atomic_read(&cm_accepts);
37006 - target_stat_values[++index] = atomic_read(&cm_disconnects);
37007 - target_stat_values[++index] = atomic_read(&cm_connecteds);
37008 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37009 - target_stat_values[++index] = atomic_read(&cm_rejects);
37010 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37011 - target_stat_values[++index] = atomic_read(&qps_created);
37012 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37013 - target_stat_values[++index] = atomic_read(&qps_destroyed);
37014 - target_stat_values[++index] = atomic_read(&cm_closes);
37015 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37016 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37017 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37018 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37019 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37020 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37021 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37022 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37023 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37024 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37025 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37026 target_stat_values[++index] = cm_packets_sent;
37027 target_stat_values[++index] = cm_packets_bounced;
37028 target_stat_values[++index] = cm_packets_created;
37029 target_stat_values[++index] = cm_packets_received;
37030 target_stat_values[++index] = cm_packets_dropped;
37031 target_stat_values[++index] = cm_packets_retrans;
37032 - target_stat_values[++index] = atomic_read(&cm_listens_created);
37033 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37034 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37035 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37036 target_stat_values[++index] = cm_backlog_drops;
37037 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
37038 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
37039 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37040 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37041 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37042 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37043 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37044 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37045 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37046 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37047 target_stat_values[++index] = nesadapter->free_4kpbl;
37048 target_stat_values[++index] = nesadapter->free_256pbl;
37049 target_stat_values[++index] = int_mod_timer_init;
37050 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37051 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37052 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37053 - target_stat_values[++index] = atomic_read(&pau_qps_created);
37054 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37055 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37056 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37057 }
37058
37059 /**
37060 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37061 index 8f67fe2..8960859 100644
37062 --- a/drivers/infiniband/hw/nes/nes_verbs.c
37063 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
37064 @@ -46,9 +46,9 @@
37065
37066 #include <rdma/ib_umem.h>
37067
37068 -atomic_t mod_qp_timouts;
37069 -atomic_t qps_created;
37070 -atomic_t sw_qps_destroyed;
37071 +atomic_unchecked_t mod_qp_timouts;
37072 +atomic_unchecked_t qps_created;
37073 +atomic_unchecked_t sw_qps_destroyed;
37074
37075 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37076
37077 @@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37078 if (init_attr->create_flags)
37079 return ERR_PTR(-EINVAL);
37080
37081 - atomic_inc(&qps_created);
37082 + atomic_inc_unchecked(&qps_created);
37083 switch (init_attr->qp_type) {
37084 case IB_QPT_RC:
37085 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37086 @@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37087 struct iw_cm_event cm_event;
37088 int ret = 0;
37089
37090 - atomic_inc(&sw_qps_destroyed);
37091 + atomic_inc_unchecked(&sw_qps_destroyed);
37092 nesqp->destroyed = 1;
37093
37094 /* Blow away the connection if it exists. */
37095 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37096 index 4d11575..3e890e5 100644
37097 --- a/drivers/infiniband/hw/qib/qib.h
37098 +++ b/drivers/infiniband/hw/qib/qib.h
37099 @@ -51,6 +51,7 @@
37100 #include <linux/completion.h>
37101 #include <linux/kref.h>
37102 #include <linux/sched.h>
37103 +#include <linux/slab.h>
37104
37105 #include "qib_common.h"
37106 #include "qib_verbs.h"
37107 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37108 index da739d9..da1c7f4 100644
37109 --- a/drivers/input/gameport/gameport.c
37110 +++ b/drivers/input/gameport/gameport.c
37111 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37112 */
37113 static void gameport_init_port(struct gameport *gameport)
37114 {
37115 - static atomic_t gameport_no = ATOMIC_INIT(0);
37116 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37117
37118 __module_get(THIS_MODULE);
37119
37120 mutex_init(&gameport->drv_mutex);
37121 device_initialize(&gameport->dev);
37122 dev_set_name(&gameport->dev, "gameport%lu",
37123 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
37124 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37125 gameport->dev.bus = &gameport_bus;
37126 gameport->dev.release = gameport_release_port;
37127 if (gameport->parent)
37128 diff --git a/drivers/input/input.c b/drivers/input/input.c
37129 index c044699..174d71a 100644
37130 --- a/drivers/input/input.c
37131 +++ b/drivers/input/input.c
37132 @@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37133 */
37134 int input_register_device(struct input_dev *dev)
37135 {
37136 - static atomic_t input_no = ATOMIC_INIT(0);
37137 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37138 struct input_devres *devres = NULL;
37139 struct input_handler *handler;
37140 unsigned int packet_size;
37141 @@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37142 dev->setkeycode = input_default_setkeycode;
37143
37144 dev_set_name(&dev->dev, "input%ld",
37145 - (unsigned long) atomic_inc_return(&input_no) - 1);
37146 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37147
37148 error = device_add(&dev->dev);
37149 if (error)
37150 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37151 index 04c69af..5f92d00 100644
37152 --- a/drivers/input/joystick/sidewinder.c
37153 +++ b/drivers/input/joystick/sidewinder.c
37154 @@ -30,6 +30,7 @@
37155 #include <linux/kernel.h>
37156 #include <linux/module.h>
37157 #include <linux/slab.h>
37158 +#include <linux/sched.h>
37159 #include <linux/init.h>
37160 #include <linux/input.h>
37161 #include <linux/gameport.h>
37162 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37163 index d6cbfe9..6225402 100644
37164 --- a/drivers/input/joystick/xpad.c
37165 +++ b/drivers/input/joystick/xpad.c
37166 @@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37167
37168 static int xpad_led_probe(struct usb_xpad *xpad)
37169 {
37170 - static atomic_t led_seq = ATOMIC_INIT(0);
37171 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37172 long led_no;
37173 struct xpad_led *led;
37174 struct led_classdev *led_cdev;
37175 @@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37176 if (!led)
37177 return -ENOMEM;
37178
37179 - led_no = (long)atomic_inc_return(&led_seq) - 1;
37180 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37181
37182 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37183 led->xpad = xpad;
37184 diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37185 index 2f0b39d..7370f13 100644
37186 --- a/drivers/input/mouse/psmouse.h
37187 +++ b/drivers/input/mouse/psmouse.h
37188 @@ -116,7 +116,7 @@ struct psmouse_attribute {
37189 ssize_t (*set)(struct psmouse *psmouse, void *data,
37190 const char *buf, size_t count);
37191 bool protect;
37192 -};
37193 +} __do_const;
37194 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37195
37196 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37197 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37198 index 4c842c3..590b0bf 100644
37199 --- a/drivers/input/mousedev.c
37200 +++ b/drivers/input/mousedev.c
37201 @@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37202
37203 spin_unlock_irq(&client->packet_lock);
37204
37205 - if (copy_to_user(buffer, data, count))
37206 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
37207 return -EFAULT;
37208
37209 return count;
37210 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37211 index 25fc597..558bf3b3 100644
37212 --- a/drivers/input/serio/serio.c
37213 +++ b/drivers/input/serio/serio.c
37214 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37215 */
37216 static void serio_init_port(struct serio *serio)
37217 {
37218 - static atomic_t serio_no = ATOMIC_INIT(0);
37219 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37220
37221 __module_get(THIS_MODULE);
37222
37223 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37224 mutex_init(&serio->drv_mutex);
37225 device_initialize(&serio->dev);
37226 dev_set_name(&serio->dev, "serio%ld",
37227 - (long)atomic_inc_return(&serio_no) - 1);
37228 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
37229 serio->dev.bus = &serio_bus;
37230 serio->dev.release = serio_release_port;
37231 serio->dev.groups = serio_device_attr_groups;
37232 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37233 index b972d43..8943713 100644
37234 --- a/drivers/iommu/iommu.c
37235 +++ b/drivers/iommu/iommu.c
37236 @@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
37237 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37238 {
37239 bus_register_notifier(bus, &iommu_bus_nb);
37240 - bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37241 + bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37242 }
37243
37244 /**
37245 diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
37246 index 7c11ff3..5b2d7a7 100644
37247 --- a/drivers/iommu/irq_remapping.c
37248 +++ b/drivers/iommu/irq_remapping.c
37249 @@ -369,10 +369,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
37250
37251 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
37252 {
37253 - chip->irq_print_chip = ir_print_prefix;
37254 - chip->irq_ack = ir_ack_apic_edge;
37255 - chip->irq_eoi = ir_ack_apic_level;
37256 - chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37257 + pax_open_kernel();
37258 + *(void **)&chip->irq_print_chip = ir_print_prefix;
37259 + *(void **)&chip->irq_ack = ir_ack_apic_edge;
37260 + *(void **)&chip->irq_eoi = ir_ack_apic_level;
37261 + *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37262 + pax_close_kernel();
37263 }
37264
37265 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
37266 diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
37267 index fc6aebf..762c5f5 100644
37268 --- a/drivers/irqchip/irq-gic.c
37269 +++ b/drivers/irqchip/irq-gic.c
37270 @@ -83,7 +83,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
37271 * Supported arch specific GIC irq extension.
37272 * Default make them NULL.
37273 */
37274 -struct irq_chip gic_arch_extn = {
37275 +irq_chip_no_const gic_arch_extn = {
37276 .irq_eoi = NULL,
37277 .irq_mask = NULL,
37278 .irq_unmask = NULL,
37279 @@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
37280 chained_irq_exit(chip, desc);
37281 }
37282
37283 -static struct irq_chip gic_chip = {
37284 +static irq_chip_no_const gic_chip __read_only = {
37285 .name = "GIC",
37286 .irq_mask = gic_mask_irq,
37287 .irq_unmask = gic_unmask_irq,
37288 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37289 index 89562a8..218999b 100644
37290 --- a/drivers/isdn/capi/capi.c
37291 +++ b/drivers/isdn/capi/capi.c
37292 @@ -81,8 +81,8 @@ struct capiminor {
37293
37294 struct capi20_appl *ap;
37295 u32 ncci;
37296 - atomic_t datahandle;
37297 - atomic_t msgid;
37298 + atomic_unchecked_t datahandle;
37299 + atomic_unchecked_t msgid;
37300
37301 struct tty_port port;
37302 int ttyinstop;
37303 @@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37304 capimsg_setu16(s, 2, mp->ap->applid);
37305 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37306 capimsg_setu8 (s, 5, CAPI_RESP);
37307 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37308 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37309 capimsg_setu32(s, 8, mp->ncci);
37310 capimsg_setu16(s, 12, datahandle);
37311 }
37312 @@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37313 mp->outbytes -= len;
37314 spin_unlock_bh(&mp->outlock);
37315
37316 - datahandle = atomic_inc_return(&mp->datahandle);
37317 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37318 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37319 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37320 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37321 capimsg_setu16(skb->data, 2, mp->ap->applid);
37322 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37323 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37324 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37325 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37326 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37327 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37328 capimsg_setu16(skb->data, 16, len); /* Data length */
37329 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37330 index e2b5396..c5486dc 100644
37331 --- a/drivers/isdn/gigaset/interface.c
37332 +++ b/drivers/isdn/gigaset/interface.c
37333 @@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37334 }
37335 tty->driver_data = cs;
37336
37337 - ++cs->port.count;
37338 + atomic_inc(&cs->port.count);
37339
37340 - if (cs->port.count == 1) {
37341 + if (atomic_read(&cs->port.count) == 1) {
37342 tty_port_tty_set(&cs->port, tty);
37343 cs->port.low_latency = 1;
37344 }
37345 @@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37346
37347 if (!cs->connected)
37348 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37349 - else if (!cs->port.count)
37350 + else if (!atomic_read(&cs->port.count))
37351 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37352 - else if (!--cs->port.count)
37353 + else if (!atomic_dec_return(&cs->port.count))
37354 tty_port_tty_set(&cs->port, NULL);
37355
37356 mutex_unlock(&cs->mutex);
37357 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37358 index 821f7ac..28d4030 100644
37359 --- a/drivers/isdn/hardware/avm/b1.c
37360 +++ b/drivers/isdn/hardware/avm/b1.c
37361 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
37362 }
37363 if (left) {
37364 if (t4file->user) {
37365 - if (copy_from_user(buf, dp, left))
37366 + if (left > sizeof buf || copy_from_user(buf, dp, left))
37367 return -EFAULT;
37368 } else {
37369 memcpy(buf, dp, left);
37370 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
37371 }
37372 if (left) {
37373 if (config->user) {
37374 - if (copy_from_user(buf, dp, left))
37375 + if (left > sizeof buf || copy_from_user(buf, dp, left))
37376 return -EFAULT;
37377 } else {
37378 memcpy(buf, dp, left);
37379 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
37380 index ebaebdf..acd4405 100644
37381 --- a/drivers/isdn/i4l/isdn_tty.c
37382 +++ b/drivers/isdn/i4l/isdn_tty.c
37383 @@ -1511,9 +1511,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
37384
37385 #ifdef ISDN_DEBUG_MODEM_OPEN
37386 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
37387 - port->count);
37388 + atomic_read(&port->count));
37389 #endif
37390 - port->count++;
37391 + atomic_inc(&port->count);
37392 port->tty = tty;
37393 /*
37394 * Start up serial port
37395 @@ -1557,7 +1557,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37396 #endif
37397 return;
37398 }
37399 - if ((tty->count == 1) && (port->count != 1)) {
37400 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37401 /*
37402 * Uh, oh. tty->count is 1, which means that the tty
37403 * structure will be freed. Info->count should always
37404 @@ -1566,15 +1566,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37405 * serial port won't be shutdown.
37406 */
37407 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37408 - "info->count is %d\n", port->count);
37409 - port->count = 1;
37410 + "info->count is %d\n", atomic_read(&port->count));
37411 + atomic_set(&port->count, 1);
37412 }
37413 - if (--port->count < 0) {
37414 + if (atomic_dec_return(&port->count) < 0) {
37415 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37416 - info->line, port->count);
37417 - port->count = 0;
37418 + info->line, atomic_read(&port->count));
37419 + atomic_set(&port->count, 0);
37420 }
37421 - if (port->count) {
37422 + if (atomic_read(&port->count)) {
37423 #ifdef ISDN_DEBUG_MODEM_OPEN
37424 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37425 #endif
37426 @@ -1628,7 +1628,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37427 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37428 return;
37429 isdn_tty_shutdown(info);
37430 - port->count = 0;
37431 + atomic_set(&port->count, 0);
37432 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37433 port->tty = NULL;
37434 wake_up_interruptible(&port->open_wait);
37435 @@ -1973,7 +1973,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37436 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37437 modem_info *info = &dev->mdm.info[i];
37438
37439 - if (info->port.count == 0)
37440 + if (atomic_read(&info->port.count) == 0)
37441 continue;
37442 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37443 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37444 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37445 index e74df7c..03a03ba 100644
37446 --- a/drivers/isdn/icn/icn.c
37447 +++ b/drivers/isdn/icn/icn.c
37448 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37449 if (count > len)
37450 count = len;
37451 if (user) {
37452 - if (copy_from_user(msg, buf, count))
37453 + if (count > sizeof msg || copy_from_user(msg, buf, count))
37454 return -EFAULT;
37455 } else
37456 memcpy(msg, buf, count);
37457 diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37458 index 6a8405d..0bd1c7e 100644
37459 --- a/drivers/leds/leds-clevo-mail.c
37460 +++ b/drivers/leds/leds-clevo-mail.c
37461 @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37462 * detected as working, but in reality it is not) as low as
37463 * possible.
37464 */
37465 -static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37466 +static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37467 {
37468 .callback = clevo_mail_led_dmi_callback,
37469 .ident = "Clevo D410J",
37470 diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37471 index 64e204e..c6bf189 100644
37472 --- a/drivers/leds/leds-ss4200.c
37473 +++ b/drivers/leds/leds-ss4200.c
37474 @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37475 * detected as working, but in reality it is not) as low as
37476 * possible.
37477 */
37478 -static struct dmi_system_id __initdata nas_led_whitelist[] = {
37479 +static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37480 {
37481 .callback = ss4200_led_dmi_callback,
37482 .ident = "Intel SS4200-E",
37483 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37484 index a5ebc00..982886f 100644
37485 --- a/drivers/lguest/core.c
37486 +++ b/drivers/lguest/core.c
37487 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
37488 * it's worked so far. The end address needs +1 because __get_vm_area
37489 * allocates an extra guard page, so we need space for that.
37490 */
37491 +
37492 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37493 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37494 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37495 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37496 +#else
37497 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37498 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37499 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37500 +#endif
37501 +
37502 if (!switcher_vma) {
37503 err = -ENOMEM;
37504 printk("lguest: could not map switcher pages high\n");
37505 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
37506 * Now the Switcher is mapped at the right address, we can't fail!
37507 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37508 */
37509 - memcpy(switcher_vma->addr, start_switcher_text,
37510 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37511 end_switcher_text - start_switcher_text);
37512
37513 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37514 diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
37515 index 3b62be16..e33134a 100644
37516 --- a/drivers/lguest/page_tables.c
37517 +++ b/drivers/lguest/page_tables.c
37518 @@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
37519 /*:*/
37520
37521 #ifdef CONFIG_X86_PAE
37522 -static void release_pmd(pmd_t *spmd)
37523 +static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
37524 {
37525 /* If the entry's not present, there's nothing to release. */
37526 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
37527 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37528 index 4af12e1..0e89afe 100644
37529 --- a/drivers/lguest/x86/core.c
37530 +++ b/drivers/lguest/x86/core.c
37531 @@ -59,7 +59,7 @@ static struct {
37532 /* Offset from where switcher.S was compiled to where we've copied it */
37533 static unsigned long switcher_offset(void)
37534 {
37535 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37536 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37537 }
37538
37539 /* This cpu's struct lguest_pages. */
37540 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37541 * These copies are pretty cheap, so we do them unconditionally: */
37542 /* Save the current Host top-level page directory.
37543 */
37544 +
37545 +#ifdef CONFIG_PAX_PER_CPU_PGD
37546 + pages->state.host_cr3 = read_cr3();
37547 +#else
37548 pages->state.host_cr3 = __pa(current->mm->pgd);
37549 +#endif
37550 +
37551 /*
37552 * Set up the Guest's page tables to see this CPU's pages (and no
37553 * other CPU's pages).
37554 @@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37555 * compiled-in switcher code and the high-mapped copy we just made.
37556 */
37557 for (i = 0; i < IDT_ENTRIES; i++)
37558 - default_idt_entries[i] += switcher_offset();
37559 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37560
37561 /*
37562 * Set up the Switcher's per-cpu areas.
37563 @@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37564 * it will be undisturbed when we switch. To change %cs and jump we
37565 * need this structure to feed to Intel's "lcall" instruction.
37566 */
37567 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37568 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37569 lguest_entry.segment = LGUEST_CS;
37570
37571 /*
37572 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37573 index 40634b0..4f5855e 100644
37574 --- a/drivers/lguest/x86/switcher_32.S
37575 +++ b/drivers/lguest/x86/switcher_32.S
37576 @@ -87,6 +87,7 @@
37577 #include <asm/page.h>
37578 #include <asm/segment.h>
37579 #include <asm/lguest.h>
37580 +#include <asm/processor-flags.h>
37581
37582 // We mark the start of the code to copy
37583 // It's placed in .text tho it's never run here
37584 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37585 // Changes type when we load it: damn Intel!
37586 // For after we switch over our page tables
37587 // That entry will be read-only: we'd crash.
37588 +
37589 +#ifdef CONFIG_PAX_KERNEXEC
37590 + mov %cr0, %edx
37591 + xor $X86_CR0_WP, %edx
37592 + mov %edx, %cr0
37593 +#endif
37594 +
37595 movl $(GDT_ENTRY_TSS*8), %edx
37596 ltr %dx
37597
37598 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37599 // Let's clear it again for our return.
37600 // The GDT descriptor of the Host
37601 // Points to the table after two "size" bytes
37602 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37603 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37604 // Clear "used" from type field (byte 5, bit 2)
37605 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37606 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37607 +
37608 +#ifdef CONFIG_PAX_KERNEXEC
37609 + mov %cr0, %eax
37610 + xor $X86_CR0_WP, %eax
37611 + mov %eax, %cr0
37612 +#endif
37613
37614 // Once our page table's switched, the Guest is live!
37615 // The Host fades as we run this final step.
37616 @@ -295,13 +309,12 @@ deliver_to_host:
37617 // I consulted gcc, and it gave
37618 // These instructions, which I gladly credit:
37619 leal (%edx,%ebx,8), %eax
37620 - movzwl (%eax),%edx
37621 - movl 4(%eax), %eax
37622 - xorw %ax, %ax
37623 - orl %eax, %edx
37624 + movl 4(%eax), %edx
37625 + movw (%eax), %dx
37626 // Now the address of the handler's in %edx
37627 // We call it now: its "iret" drops us home.
37628 - jmp *%edx
37629 + ljmp $__KERNEL_CS, $1f
37630 +1: jmp *%edx
37631
37632 // Every interrupt can come to us here
37633 // But we must truly tell each apart.
37634 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
37635 index 4fd9d6a..834fa03 100644
37636 --- a/drivers/md/bitmap.c
37637 +++ b/drivers/md/bitmap.c
37638 @@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
37639 chunk_kb ? "KB" : "B");
37640 if (bitmap->storage.file) {
37641 seq_printf(seq, ", file: ");
37642 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
37643 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
37644 }
37645
37646 seq_printf(seq, "\n");
37647 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37648 index aa04f02..2a1309e 100644
37649 --- a/drivers/md/dm-ioctl.c
37650 +++ b/drivers/md/dm-ioctl.c
37651 @@ -1694,7 +1694,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37652 cmd == DM_LIST_VERSIONS_CMD)
37653 return 0;
37654
37655 - if ((cmd == DM_DEV_CREATE_CMD)) {
37656 + if (cmd == DM_DEV_CREATE_CMD) {
37657 if (!*param->name) {
37658 DMWARN("name not supplied when creating device");
37659 return -EINVAL;
37660 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37661 index d053098..05cc375 100644
37662 --- a/drivers/md/dm-raid1.c
37663 +++ b/drivers/md/dm-raid1.c
37664 @@ -40,7 +40,7 @@ enum dm_raid1_error {
37665
37666 struct mirror {
37667 struct mirror_set *ms;
37668 - atomic_t error_count;
37669 + atomic_unchecked_t error_count;
37670 unsigned long error_type;
37671 struct dm_dev *dev;
37672 sector_t offset;
37673 @@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
37674 struct mirror *m;
37675
37676 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
37677 - if (!atomic_read(&m->error_count))
37678 + if (!atomic_read_unchecked(&m->error_count))
37679 return m;
37680
37681 return NULL;
37682 @@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37683 * simple way to tell if a device has encountered
37684 * errors.
37685 */
37686 - atomic_inc(&m->error_count);
37687 + atomic_inc_unchecked(&m->error_count);
37688
37689 if (test_and_set_bit(error_type, &m->error_type))
37690 return;
37691 @@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37692 struct mirror *m = get_default_mirror(ms);
37693
37694 do {
37695 - if (likely(!atomic_read(&m->error_count)))
37696 + if (likely(!atomic_read_unchecked(&m->error_count)))
37697 return m;
37698
37699 if (m-- == ms->mirror)
37700 @@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
37701 {
37702 struct mirror *default_mirror = get_default_mirror(m->ms);
37703
37704 - return !atomic_read(&default_mirror->error_count);
37705 + return !atomic_read_unchecked(&default_mirror->error_count);
37706 }
37707
37708 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37709 @@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37710 */
37711 if (likely(region_in_sync(ms, region, 1)))
37712 m = choose_mirror(ms, bio->bi_sector);
37713 - else if (m && atomic_read(&m->error_count))
37714 + else if (m && atomic_read_unchecked(&m->error_count))
37715 m = NULL;
37716
37717 if (likely(m))
37718 @@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37719 }
37720
37721 ms->mirror[mirror].ms = ms;
37722 - atomic_set(&(ms->mirror[mirror].error_count), 0);
37723 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37724 ms->mirror[mirror].error_type = 0;
37725 ms->mirror[mirror].offset = offset;
37726
37727 @@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
37728 */
37729 static char device_status_char(struct mirror *m)
37730 {
37731 - if (!atomic_read(&(m->error_count)))
37732 + if (!atomic_read_unchecked(&(m->error_count)))
37733 return 'A';
37734
37735 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
37736 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37737 index d8837d3..1f7c341 100644
37738 --- a/drivers/md/dm-stripe.c
37739 +++ b/drivers/md/dm-stripe.c
37740 @@ -20,7 +20,7 @@ struct stripe {
37741 struct dm_dev *dev;
37742 sector_t physical_start;
37743
37744 - atomic_t error_count;
37745 + atomic_unchecked_t error_count;
37746 };
37747
37748 struct stripe_c {
37749 @@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37750 kfree(sc);
37751 return r;
37752 }
37753 - atomic_set(&(sc->stripe[i].error_count), 0);
37754 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37755 }
37756
37757 ti->private = sc;
37758 @@ -325,7 +325,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
37759 DMEMIT("%d ", sc->stripes);
37760 for (i = 0; i < sc->stripes; i++) {
37761 DMEMIT("%s ", sc->stripe[i].dev->name);
37762 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37763 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37764 'D' : 'A';
37765 }
37766 buffer[i] = '\0';
37767 @@ -370,8 +370,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
37768 */
37769 for (i = 0; i < sc->stripes; i++)
37770 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37771 - atomic_inc(&(sc->stripe[i].error_count));
37772 - if (atomic_read(&(sc->stripe[i].error_count)) <
37773 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
37774 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37775 DM_IO_ERROR_THRESHOLD)
37776 schedule_work(&sc->trigger_event);
37777 }
37778 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37779 index e50dad0..d9575e2 100644
37780 --- a/drivers/md/dm-table.c
37781 +++ b/drivers/md/dm-table.c
37782 @@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37783 if (!dev_size)
37784 return 0;
37785
37786 - if ((start >= dev_size) || (start + len > dev_size)) {
37787 + if ((start >= dev_size) || (len > dev_size - start)) {
37788 DMWARN("%s: %s too small for target: "
37789 "start=%llu, len=%llu, dev_size=%llu",
37790 dm_device_name(ti->table->md), bdevname(bdev, b),
37791 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
37792 index 00cee02..b89a29d 100644
37793 --- a/drivers/md/dm-thin-metadata.c
37794 +++ b/drivers/md/dm-thin-metadata.c
37795 @@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37796 {
37797 pmd->info.tm = pmd->tm;
37798 pmd->info.levels = 2;
37799 - pmd->info.value_type.context = pmd->data_sm;
37800 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37801 pmd->info.value_type.size = sizeof(__le64);
37802 pmd->info.value_type.inc = data_block_inc;
37803 pmd->info.value_type.dec = data_block_dec;
37804 @@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37805
37806 pmd->bl_info.tm = pmd->tm;
37807 pmd->bl_info.levels = 1;
37808 - pmd->bl_info.value_type.context = pmd->data_sm;
37809 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37810 pmd->bl_info.value_type.size = sizeof(__le64);
37811 pmd->bl_info.value_type.inc = data_block_inc;
37812 pmd->bl_info.value_type.dec = data_block_dec;
37813 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37814 index 9a0bdad..4df9543 100644
37815 --- a/drivers/md/dm.c
37816 +++ b/drivers/md/dm.c
37817 @@ -169,9 +169,9 @@ struct mapped_device {
37818 /*
37819 * Event handling.
37820 */
37821 - atomic_t event_nr;
37822 + atomic_unchecked_t event_nr;
37823 wait_queue_head_t eventq;
37824 - atomic_t uevent_seq;
37825 + atomic_unchecked_t uevent_seq;
37826 struct list_head uevent_list;
37827 spinlock_t uevent_lock; /* Protect access to uevent_list */
37828
37829 @@ -1879,8 +1879,8 @@ static struct mapped_device *alloc_dev(int minor)
37830 rwlock_init(&md->map_lock);
37831 atomic_set(&md->holders, 1);
37832 atomic_set(&md->open_count, 0);
37833 - atomic_set(&md->event_nr, 0);
37834 - atomic_set(&md->uevent_seq, 0);
37835 + atomic_set_unchecked(&md->event_nr, 0);
37836 + atomic_set_unchecked(&md->uevent_seq, 0);
37837 INIT_LIST_HEAD(&md->uevent_list);
37838 spin_lock_init(&md->uevent_lock);
37839
37840 @@ -2028,7 +2028,7 @@ static void event_callback(void *context)
37841
37842 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37843
37844 - atomic_inc(&md->event_nr);
37845 + atomic_inc_unchecked(&md->event_nr);
37846 wake_up(&md->eventq);
37847 }
37848
37849 @@ -2685,18 +2685,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37850
37851 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37852 {
37853 - return atomic_add_return(1, &md->uevent_seq);
37854 + return atomic_add_return_unchecked(1, &md->uevent_seq);
37855 }
37856
37857 uint32_t dm_get_event_nr(struct mapped_device *md)
37858 {
37859 - return atomic_read(&md->event_nr);
37860 + return atomic_read_unchecked(&md->event_nr);
37861 }
37862
37863 int dm_wait_event(struct mapped_device *md, int event_nr)
37864 {
37865 return wait_event_interruptible(md->eventq,
37866 - (event_nr != atomic_read(&md->event_nr)));
37867 + (event_nr != atomic_read_unchecked(&md->event_nr)));
37868 }
37869
37870 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37871 diff --git a/drivers/md/md.c b/drivers/md/md.c
37872 index a4a93b9..4747b63 100644
37873 --- a/drivers/md/md.c
37874 +++ b/drivers/md/md.c
37875 @@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
37876 * start build, activate spare
37877 */
37878 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37879 -static atomic_t md_event_count;
37880 +static atomic_unchecked_t md_event_count;
37881 void md_new_event(struct mddev *mddev)
37882 {
37883 - atomic_inc(&md_event_count);
37884 + atomic_inc_unchecked(&md_event_count);
37885 wake_up(&md_event_waiters);
37886 }
37887 EXPORT_SYMBOL_GPL(md_new_event);
37888 @@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37889 */
37890 static void md_new_event_inintr(struct mddev *mddev)
37891 {
37892 - atomic_inc(&md_event_count);
37893 + atomic_inc_unchecked(&md_event_count);
37894 wake_up(&md_event_waiters);
37895 }
37896
37897 @@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
37898 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
37899 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
37900 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
37901 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37902 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37903
37904 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37905 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37906 @@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
37907 else
37908 sb->resync_offset = cpu_to_le64(0);
37909
37910 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37911 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37912
37913 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37914 sb->size = cpu_to_le64(mddev->dev_sectors);
37915 @@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37916 static ssize_t
37917 errors_show(struct md_rdev *rdev, char *page)
37918 {
37919 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37920 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37921 }
37922
37923 static ssize_t
37924 @@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
37925 char *e;
37926 unsigned long n = simple_strtoul(buf, &e, 10);
37927 if (*buf && (*e == 0 || *e == '\n')) {
37928 - atomic_set(&rdev->corrected_errors, n);
37929 + atomic_set_unchecked(&rdev->corrected_errors, n);
37930 return len;
37931 }
37932 return -EINVAL;
37933 @@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
37934 rdev->sb_loaded = 0;
37935 rdev->bb_page = NULL;
37936 atomic_set(&rdev->nr_pending, 0);
37937 - atomic_set(&rdev->read_errors, 0);
37938 - atomic_set(&rdev->corrected_errors, 0);
37939 + atomic_set_unchecked(&rdev->read_errors, 0);
37940 + atomic_set_unchecked(&rdev->corrected_errors, 0);
37941
37942 INIT_LIST_HEAD(&rdev->same_set);
37943 init_waitqueue_head(&rdev->blocked_wait);
37944 @@ -6994,7 +6994,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37945
37946 spin_unlock(&pers_lock);
37947 seq_printf(seq, "\n");
37948 - seq->poll_event = atomic_read(&md_event_count);
37949 + seq->poll_event = atomic_read_unchecked(&md_event_count);
37950 return 0;
37951 }
37952 if (v == (void*)2) {
37953 @@ -7097,7 +7097,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
37954 return error;
37955
37956 seq = file->private_data;
37957 - seq->poll_event = atomic_read(&md_event_count);
37958 + seq->poll_event = atomic_read_unchecked(&md_event_count);
37959 return error;
37960 }
37961
37962 @@ -7111,7 +7111,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
37963 /* always allow read */
37964 mask = POLLIN | POLLRDNORM;
37965
37966 - if (seq->poll_event != atomic_read(&md_event_count))
37967 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
37968 mask |= POLLERR | POLLPRI;
37969 return mask;
37970 }
37971 @@ -7155,7 +7155,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
37972 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
37973 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
37974 (int)part_stat_read(&disk->part0, sectors[1]) -
37975 - atomic_read(&disk->sync_io);
37976 + atomic_read_unchecked(&disk->sync_io);
37977 /* sync IO will cause sync_io to increase before the disk_stats
37978 * as sync_io is counted when a request starts, and
37979 * disk_stats is counted when it completes.
37980 diff --git a/drivers/md/md.h b/drivers/md/md.h
37981 index d90fb1a..4174a2b 100644
37982 --- a/drivers/md/md.h
37983 +++ b/drivers/md/md.h
37984 @@ -94,13 +94,13 @@ struct md_rdev {
37985 * only maintained for arrays that
37986 * support hot removal
37987 */
37988 - atomic_t read_errors; /* number of consecutive read errors that
37989 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
37990 * we have tried to ignore.
37991 */
37992 struct timespec last_read_error; /* monotonic time since our
37993 * last read error
37994 */
37995 - atomic_t corrected_errors; /* number of corrected read errors,
37996 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
37997 * for reporting to userspace and storing
37998 * in superblock.
37999 */
38000 @@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38001
38002 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38003 {
38004 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38005 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38006 }
38007
38008 struct md_personality
38009 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38010 index 1cbfc6b..56e1dbb 100644
38011 --- a/drivers/md/persistent-data/dm-space-map.h
38012 +++ b/drivers/md/persistent-data/dm-space-map.h
38013 @@ -60,6 +60,7 @@ struct dm_space_map {
38014 int (*root_size)(struct dm_space_map *sm, size_t *result);
38015 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
38016 };
38017 +typedef struct dm_space_map __no_const dm_space_map_no_const;
38018
38019 /*----------------------------------------------------------------*/
38020
38021 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38022 index 6af167f..40c25a1 100644
38023 --- a/drivers/md/raid1.c
38024 +++ b/drivers/md/raid1.c
38025 @@ -1826,7 +1826,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38026 if (r1_sync_page_io(rdev, sect, s,
38027 bio->bi_io_vec[idx].bv_page,
38028 READ) != 0)
38029 - atomic_add(s, &rdev->corrected_errors);
38030 + atomic_add_unchecked(s, &rdev->corrected_errors);
38031 }
38032 sectors -= s;
38033 sect += s;
38034 @@ -2048,7 +2048,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38035 test_bit(In_sync, &rdev->flags)) {
38036 if (r1_sync_page_io(rdev, sect, s,
38037 conf->tmppage, READ)) {
38038 - atomic_add(s, &rdev->corrected_errors);
38039 + atomic_add_unchecked(s, &rdev->corrected_errors);
38040 printk(KERN_INFO
38041 "md/raid1:%s: read error corrected "
38042 "(%d sectors at %llu on %s)\n",
38043 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38044 index 46c14e5..4db5966 100644
38045 --- a/drivers/md/raid10.c
38046 +++ b/drivers/md/raid10.c
38047 @@ -1932,7 +1932,7 @@ static void end_sync_read(struct bio *bio, int error)
38048 /* The write handler will notice the lack of
38049 * R10BIO_Uptodate and record any errors etc
38050 */
38051 - atomic_add(r10_bio->sectors,
38052 + atomic_add_unchecked(r10_bio->sectors,
38053 &conf->mirrors[d].rdev->corrected_errors);
38054
38055 /* for reconstruct, we always reschedule after a read.
38056 @@ -2281,7 +2281,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38057 {
38058 struct timespec cur_time_mon;
38059 unsigned long hours_since_last;
38060 - unsigned int read_errors = atomic_read(&rdev->read_errors);
38061 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38062
38063 ktime_get_ts(&cur_time_mon);
38064
38065 @@ -2303,9 +2303,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38066 * overflowing the shift of read_errors by hours_since_last.
38067 */
38068 if (hours_since_last >= 8 * sizeof(read_errors))
38069 - atomic_set(&rdev->read_errors, 0);
38070 + atomic_set_unchecked(&rdev->read_errors, 0);
38071 else
38072 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38073 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38074 }
38075
38076 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38077 @@ -2359,8 +2359,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38078 return;
38079
38080 check_decay_read_errors(mddev, rdev);
38081 - atomic_inc(&rdev->read_errors);
38082 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
38083 + atomic_inc_unchecked(&rdev->read_errors);
38084 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38085 char b[BDEVNAME_SIZE];
38086 bdevname(rdev->bdev, b);
38087
38088 @@ -2368,7 +2368,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38089 "md/raid10:%s: %s: Raid device exceeded "
38090 "read_error threshold [cur %d:max %d]\n",
38091 mdname(mddev), b,
38092 - atomic_read(&rdev->read_errors), max_read_errors);
38093 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38094 printk(KERN_NOTICE
38095 "md/raid10:%s: %s: Failing raid device\n",
38096 mdname(mddev), b);
38097 @@ -2523,7 +2523,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38098 sect +
38099 choose_data_offset(r10_bio, rdev)),
38100 bdevname(rdev->bdev, b));
38101 - atomic_add(s, &rdev->corrected_errors);
38102 + atomic_add_unchecked(s, &rdev->corrected_errors);
38103 }
38104
38105 rdev_dec_pending(rdev, mddev);
38106 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38107 index f4e87bf..0d4ad3f 100644
38108 --- a/drivers/md/raid5.c
38109 +++ b/drivers/md/raid5.c
38110 @@ -1763,21 +1763,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38111 mdname(conf->mddev), STRIPE_SECTORS,
38112 (unsigned long long)s,
38113 bdevname(rdev->bdev, b));
38114 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38115 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38116 clear_bit(R5_ReadError, &sh->dev[i].flags);
38117 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38118 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38119 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38120
38121 - if (atomic_read(&rdev->read_errors))
38122 - atomic_set(&rdev->read_errors, 0);
38123 + if (atomic_read_unchecked(&rdev->read_errors))
38124 + atomic_set_unchecked(&rdev->read_errors, 0);
38125 } else {
38126 const char *bdn = bdevname(rdev->bdev, b);
38127 int retry = 0;
38128 int set_bad = 0;
38129
38130 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38131 - atomic_inc(&rdev->read_errors);
38132 + atomic_inc_unchecked(&rdev->read_errors);
38133 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38134 printk_ratelimited(
38135 KERN_WARNING
38136 @@ -1805,7 +1805,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38137 mdname(conf->mddev),
38138 (unsigned long long)s,
38139 bdn);
38140 - } else if (atomic_read(&rdev->read_errors)
38141 + } else if (atomic_read_unchecked(&rdev->read_errors)
38142 > conf->max_nr_stripes)
38143 printk(KERN_WARNING
38144 "md/raid:%s: Too many read errors, failing device %s.\n",
38145 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38146 index 401ef64..836e563 100644
38147 --- a/drivers/media/dvb-core/dvbdev.c
38148 +++ b/drivers/media/dvb-core/dvbdev.c
38149 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38150 const struct dvb_device *template, void *priv, int type)
38151 {
38152 struct dvb_device *dvbdev;
38153 - struct file_operations *dvbdevfops;
38154 + file_operations_no_const *dvbdevfops;
38155 struct device *clsdev;
38156 int minor;
38157 int id;
38158 diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38159 index 9b6c3bb..baeb5c7 100644
38160 --- a/drivers/media/dvb-frontends/dib3000.h
38161 +++ b/drivers/media/dvb-frontends/dib3000.h
38162 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38163 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38164 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38165 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38166 -};
38167 +} __no_const;
38168
38169 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
38170 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38171 diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38172 index bc78354..42c9459 100644
38173 --- a/drivers/media/pci/cx88/cx88-video.c
38174 +++ b/drivers/media/pci/cx88/cx88-video.c
38175 @@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38176
38177 /* ------------------------------------------------------------------ */
38178
38179 -static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38180 -static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38181 -static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38182 +static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38183 +static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38184 +static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38185
38186 module_param_array(video_nr, int, NULL, 0444);
38187 module_param_array(vbi_nr, int, NULL, 0444);
38188 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38189 index 96c4a17..1305a79 100644
38190 --- a/drivers/media/platform/omap/omap_vout.c
38191 +++ b/drivers/media/platform/omap/omap_vout.c
38192 @@ -63,7 +63,6 @@ enum omap_vout_channels {
38193 OMAP_VIDEO2,
38194 };
38195
38196 -static struct videobuf_queue_ops video_vbq_ops;
38197 /* Variables configurable through module params*/
38198 static u32 video1_numbuffers = 3;
38199 static u32 video2_numbuffers = 3;
38200 @@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
38201 {
38202 struct videobuf_queue *q;
38203 struct omap_vout_device *vout = NULL;
38204 + static struct videobuf_queue_ops video_vbq_ops = {
38205 + .buf_setup = omap_vout_buffer_setup,
38206 + .buf_prepare = omap_vout_buffer_prepare,
38207 + .buf_release = omap_vout_buffer_release,
38208 + .buf_queue = omap_vout_buffer_queue,
38209 + };
38210
38211 vout = video_drvdata(file);
38212 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38213 @@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
38214 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38215
38216 q = &vout->vbq;
38217 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38218 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38219 - video_vbq_ops.buf_release = omap_vout_buffer_release;
38220 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38221 spin_lock_init(&vout->vbq_lock);
38222
38223 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
38224 diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38225 index 04e6490..2df65bf 100644
38226 --- a/drivers/media/platform/s5p-tv/mixer.h
38227 +++ b/drivers/media/platform/s5p-tv/mixer.h
38228 @@ -156,7 +156,7 @@ struct mxr_layer {
38229 /** layer index (unique identifier) */
38230 int idx;
38231 /** callbacks for layer methods */
38232 - struct mxr_layer_ops ops;
38233 + struct mxr_layer_ops *ops;
38234 /** format array */
38235 const struct mxr_format **fmt_array;
38236 /** size of format array */
38237 diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38238 index b93a21f..2535195 100644
38239 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38240 +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38241 @@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38242 {
38243 struct mxr_layer *layer;
38244 int ret;
38245 - struct mxr_layer_ops ops = {
38246 + static struct mxr_layer_ops ops = {
38247 .release = mxr_graph_layer_release,
38248 .buffer_set = mxr_graph_buffer_set,
38249 .stream_set = mxr_graph_stream_set,
38250 diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38251 index b713403..53cb5ad 100644
38252 --- a/drivers/media/platform/s5p-tv/mixer_reg.c
38253 +++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38254 @@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38255 layer->update_buf = next;
38256 }
38257
38258 - layer->ops.buffer_set(layer, layer->update_buf);
38259 + layer->ops->buffer_set(layer, layer->update_buf);
38260
38261 if (done && done != layer->shadow_buf)
38262 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38263 diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38264 index 82142a2..6de47e8 100644
38265 --- a/drivers/media/platform/s5p-tv/mixer_video.c
38266 +++ b/drivers/media/platform/s5p-tv/mixer_video.c
38267 @@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38268 layer->geo.src.height = layer->geo.src.full_height;
38269
38270 mxr_geometry_dump(mdev, &layer->geo);
38271 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38272 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38273 mxr_geometry_dump(mdev, &layer->geo);
38274 }
38275
38276 @@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38277 layer->geo.dst.full_width = mbus_fmt.width;
38278 layer->geo.dst.full_height = mbus_fmt.height;
38279 layer->geo.dst.field = mbus_fmt.field;
38280 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38281 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38282
38283 mxr_geometry_dump(mdev, &layer->geo);
38284 }
38285 @@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38286 /* set source size to highest accepted value */
38287 geo->src.full_width = max(geo->dst.full_width, pix->width);
38288 geo->src.full_height = max(geo->dst.full_height, pix->height);
38289 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38290 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38291 mxr_geometry_dump(mdev, &layer->geo);
38292 /* set cropping to total visible screen */
38293 geo->src.width = pix->width;
38294 @@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38295 geo->src.x_offset = 0;
38296 geo->src.y_offset = 0;
38297 /* assure consistency of geometry */
38298 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38299 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38300 mxr_geometry_dump(mdev, &layer->geo);
38301 /* set full size to lowest possible value */
38302 geo->src.full_width = 0;
38303 geo->src.full_height = 0;
38304 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38305 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38306 mxr_geometry_dump(mdev, &layer->geo);
38307
38308 /* returning results */
38309 @@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38310 target->width = s->r.width;
38311 target->height = s->r.height;
38312
38313 - layer->ops.fix_geometry(layer, stage, s->flags);
38314 + layer->ops->fix_geometry(layer, stage, s->flags);
38315
38316 /* retrieve update selection rectangle */
38317 res.left = target->x_offset;
38318 @@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38319 mxr_output_get(mdev);
38320
38321 mxr_layer_update_output(layer);
38322 - layer->ops.format_set(layer);
38323 + layer->ops->format_set(layer);
38324 /* enabling layer in hardware */
38325 spin_lock_irqsave(&layer->enq_slock, flags);
38326 layer->state = MXR_LAYER_STREAMING;
38327 spin_unlock_irqrestore(&layer->enq_slock, flags);
38328
38329 - layer->ops.stream_set(layer, MXR_ENABLE);
38330 + layer->ops->stream_set(layer, MXR_ENABLE);
38331 mxr_streamer_get(mdev);
38332
38333 return 0;
38334 @@ -1014,7 +1014,7 @@ static int stop_streaming(struct vb2_queue *vq)
38335 spin_unlock_irqrestore(&layer->enq_slock, flags);
38336
38337 /* disabling layer in hardware */
38338 - layer->ops.stream_set(layer, MXR_DISABLE);
38339 + layer->ops->stream_set(layer, MXR_DISABLE);
38340 /* remove one streamer */
38341 mxr_streamer_put(mdev);
38342 /* allow changes in output configuration */
38343 @@ -1053,8 +1053,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
38344
38345 void mxr_layer_release(struct mxr_layer *layer)
38346 {
38347 - if (layer->ops.release)
38348 - layer->ops.release(layer);
38349 + if (layer->ops->release)
38350 + layer->ops->release(layer);
38351 }
38352
38353 void mxr_base_layer_release(struct mxr_layer *layer)
38354 @@ -1080,7 +1080,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
38355
38356 layer->mdev = mdev;
38357 layer->idx = idx;
38358 - layer->ops = *ops;
38359 + layer->ops = ops;
38360
38361 spin_lock_init(&layer->enq_slock);
38362 INIT_LIST_HEAD(&layer->enq_list);
38363 diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38364 index 3d13a63..da31bf1 100644
38365 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38366 +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38367 @@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
38368 {
38369 struct mxr_layer *layer;
38370 int ret;
38371 - struct mxr_layer_ops ops = {
38372 + static struct mxr_layer_ops ops = {
38373 .release = mxr_vp_layer_release,
38374 .buffer_set = mxr_vp_buffer_set,
38375 .stream_set = mxr_vp_stream_set,
38376 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38377 index 643d80a..56bb96b 100644
38378 --- a/drivers/media/radio/radio-cadet.c
38379 +++ b/drivers/media/radio/radio-cadet.c
38380 @@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38381 unsigned char readbuf[RDS_BUFFER];
38382 int i = 0;
38383
38384 + if (count > RDS_BUFFER)
38385 + return -EFAULT;
38386 mutex_lock(&dev->lock);
38387 if (dev->rdsstat == 0)
38388 cadet_start_rds(dev);
38389 @@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38390 while (i < count && dev->rdsin != dev->rdsout)
38391 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38392
38393 - if (i && copy_to_user(data, readbuf, i))
38394 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
38395 i = -EFAULT;
38396 unlock:
38397 mutex_unlock(&dev->lock);
38398 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
38399 index 3940bb0..fb3952a 100644
38400 --- a/drivers/media/usb/dvb-usb/cxusb.c
38401 +++ b/drivers/media/usb/dvb-usb/cxusb.c
38402 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38403
38404 struct dib0700_adapter_state {
38405 int (*set_param_save) (struct dvb_frontend *);
38406 -};
38407 +} __no_const;
38408
38409 static int dib7070_set_param_override(struct dvb_frontend *fe)
38410 {
38411 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
38412 index 9578a67..31aa652 100644
38413 --- a/drivers/media/usb/dvb-usb/dw2102.c
38414 +++ b/drivers/media/usb/dvb-usb/dw2102.c
38415 @@ -115,7 +115,7 @@ struct su3000_state {
38416
38417 struct s6x0_state {
38418 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
38419 -};
38420 +} __no_const;
38421
38422 /* debug */
38423 static int dvb_usb_dw2102_debug;
38424 diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
38425 index aa6e7c7..4cd8061 100644
38426 --- a/drivers/media/v4l2-core/v4l2-ioctl.c
38427 +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
38428 @@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
38429 struct file *file, void *fh, void *p);
38430 } u;
38431 void (*debug)(const void *arg, bool write_only);
38432 -};
38433 +} __do_const;
38434 +typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
38435
38436 /* This control needs a priority check */
38437 #define INFO_FL_PRIO (1 << 0)
38438 @@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
38439 struct video_device *vfd = video_devdata(file);
38440 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
38441 bool write_only = false;
38442 - struct v4l2_ioctl_info default_info;
38443 + v4l2_ioctl_info_no_const default_info;
38444 const struct v4l2_ioctl_info *info;
38445 void *fh = file->private_data;
38446 struct v4l2_fh *vfh = NULL;
38447 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38448 index fb69baa..3aeea2e 100644
38449 --- a/drivers/message/fusion/mptbase.c
38450 +++ b/drivers/message/fusion/mptbase.c
38451 @@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38452 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38453 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38454
38455 +#ifdef CONFIG_GRKERNSEC_HIDESYM
38456 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38457 +#else
38458 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38459 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38460 +#endif
38461 +
38462 /*
38463 * Rounding UP to nearest 4-kB boundary here...
38464 */
38465 @@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38466 ioc->facts.GlobalCredits);
38467
38468 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
38469 +#ifdef CONFIG_GRKERNSEC_HIDESYM
38470 + NULL, NULL);
38471 +#else
38472 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
38473 +#endif
38474 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
38475 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
38476 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
38477 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38478 index fa43c39..daeb158 100644
38479 --- a/drivers/message/fusion/mptsas.c
38480 +++ b/drivers/message/fusion/mptsas.c
38481 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38482 return 0;
38483 }
38484
38485 +static inline void
38486 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38487 +{
38488 + if (phy_info->port_details) {
38489 + phy_info->port_details->rphy = rphy;
38490 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38491 + ioc->name, rphy));
38492 + }
38493 +
38494 + if (rphy) {
38495 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38496 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38497 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38498 + ioc->name, rphy, rphy->dev.release));
38499 + }
38500 +}
38501 +
38502 /* no mutex */
38503 static void
38504 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38505 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38506 return NULL;
38507 }
38508
38509 -static inline void
38510 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38511 -{
38512 - if (phy_info->port_details) {
38513 - phy_info->port_details->rphy = rphy;
38514 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38515 - ioc->name, rphy));
38516 - }
38517 -
38518 - if (rphy) {
38519 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38520 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38521 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38522 - ioc->name, rphy, rphy->dev.release));
38523 - }
38524 -}
38525 -
38526 static inline struct sas_port *
38527 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38528 {
38529 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38530 index 164afa7..b6b2e74 100644
38531 --- a/drivers/message/fusion/mptscsih.c
38532 +++ b/drivers/message/fusion/mptscsih.c
38533 @@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38534
38535 h = shost_priv(SChost);
38536
38537 - if (h) {
38538 - if (h->info_kbuf == NULL)
38539 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38540 - return h->info_kbuf;
38541 - h->info_kbuf[0] = '\0';
38542 + if (!h)
38543 + return NULL;
38544
38545 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38546 - h->info_kbuf[size-1] = '\0';
38547 - }
38548 + if (h->info_kbuf == NULL)
38549 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38550 + return h->info_kbuf;
38551 + h->info_kbuf[0] = '\0';
38552 +
38553 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38554 + h->info_kbuf[size-1] = '\0';
38555
38556 return h->info_kbuf;
38557 }
38558 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38559 index 8001aa6..b137580 100644
38560 --- a/drivers/message/i2o/i2o_proc.c
38561 +++ b/drivers/message/i2o/i2o_proc.c
38562 @@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38563 "Array Controller Device"
38564 };
38565
38566 -static char *chtostr(char *tmp, u8 *chars, int n)
38567 -{
38568 - tmp[0] = 0;
38569 - return strncat(tmp, (char *)chars, n);
38570 -}
38571 -
38572 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38573 char *group)
38574 {
38575 @@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38576 } *result;
38577
38578 i2o_exec_execute_ddm_table ddm_table;
38579 - char tmp[28 + 1];
38580
38581 result = kmalloc(sizeof(*result), GFP_KERNEL);
38582 if (!result)
38583 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38584
38585 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38586 seq_printf(seq, "%-#8x", ddm_table.module_id);
38587 - seq_printf(seq, "%-29s",
38588 - chtostr(tmp, ddm_table.module_name_version, 28));
38589 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38590 seq_printf(seq, "%9d ", ddm_table.data_size);
38591 seq_printf(seq, "%8d", ddm_table.code_size);
38592
38593 @@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38594
38595 i2o_driver_result_table *result;
38596 i2o_driver_store_table *dst;
38597 - char tmp[28 + 1];
38598
38599 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38600 if (result == NULL)
38601 @@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38602
38603 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38604 seq_printf(seq, "%-#8x", dst->module_id);
38605 - seq_printf(seq, "%-29s",
38606 - chtostr(tmp, dst->module_name_version, 28));
38607 - seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
38608 + seq_printf(seq, "%-.28s", dst->module_name_version);
38609 + seq_printf(seq, "%-.8s", dst->date);
38610 seq_printf(seq, "%8d ", dst->module_size);
38611 seq_printf(seq, "%8d ", dst->mpb_size);
38612 seq_printf(seq, "0x%04x", dst->module_flags);
38613 @@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38614 // == (allow) 512d bytes (max)
38615 static u16 *work16 = (u16 *) work32;
38616 int token;
38617 - char tmp[16 + 1];
38618
38619 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
38620
38621 @@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38622 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38623 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38624 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38625 - seq_printf(seq, "Vendor info : %s\n",
38626 - chtostr(tmp, (u8 *) (work32 + 2), 16));
38627 - seq_printf(seq, "Product info : %s\n",
38628 - chtostr(tmp, (u8 *) (work32 + 6), 16));
38629 - seq_printf(seq, "Description : %s\n",
38630 - chtostr(tmp, (u8 *) (work32 + 10), 16));
38631 - seq_printf(seq, "Product rev. : %s\n",
38632 - chtostr(tmp, (u8 *) (work32 + 14), 8));
38633 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38634 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38635 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38636 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38637
38638 seq_printf(seq, "Serial number : ");
38639 print_serial_number(seq, (u8 *) (work32 + 16),
38640 @@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38641 u8 pad[256]; // allow up to 256 byte (max) serial number
38642 } result;
38643
38644 - char tmp[24 + 1];
38645 -
38646 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
38647
38648 if (token < 0) {
38649 @@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38650 }
38651
38652 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38653 - seq_printf(seq, "Module name : %s\n",
38654 - chtostr(tmp, result.module_name, 24));
38655 - seq_printf(seq, "Module revision : %s\n",
38656 - chtostr(tmp, result.module_rev, 8));
38657 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
38658 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38659
38660 seq_printf(seq, "Serial number : ");
38661 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38662 @@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38663 u8 instance_number[4];
38664 } result;
38665
38666 - char tmp[64 + 1];
38667 -
38668 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
38669
38670 if (token < 0) {
38671 @@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38672 return 0;
38673 }
38674
38675 - seq_printf(seq, "Device name : %s\n",
38676 - chtostr(tmp, result.device_name, 64));
38677 - seq_printf(seq, "Service name : %s\n",
38678 - chtostr(tmp, result.service_name, 64));
38679 - seq_printf(seq, "Physical name : %s\n",
38680 - chtostr(tmp, result.physical_location, 64));
38681 - seq_printf(seq, "Instance number : %s\n",
38682 - chtostr(tmp, result.instance_number, 4));
38683 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
38684 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
38685 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38686 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38687
38688 return 0;
38689 }
38690 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38691 index a8c08f3..155fe3d 100644
38692 --- a/drivers/message/i2o/iop.c
38693 +++ b/drivers/message/i2o/iop.c
38694 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38695
38696 spin_lock_irqsave(&c->context_list_lock, flags);
38697
38698 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38699 - atomic_inc(&c->context_list_counter);
38700 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38701 + atomic_inc_unchecked(&c->context_list_counter);
38702
38703 - entry->context = atomic_read(&c->context_list_counter);
38704 + entry->context = atomic_read_unchecked(&c->context_list_counter);
38705
38706 list_add(&entry->list, &c->context_list);
38707
38708 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38709
38710 #if BITS_PER_LONG == 64
38711 spin_lock_init(&c->context_list_lock);
38712 - atomic_set(&c->context_list_counter, 0);
38713 + atomic_set_unchecked(&c->context_list_counter, 0);
38714 INIT_LIST_HEAD(&c->context_list);
38715 #endif
38716
38717 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
38718 index 45ece11..8efa218 100644
38719 --- a/drivers/mfd/janz-cmodio.c
38720 +++ b/drivers/mfd/janz-cmodio.c
38721 @@ -13,6 +13,7 @@
38722
38723 #include <linux/kernel.h>
38724 #include <linux/module.h>
38725 +#include <linux/slab.h>
38726 #include <linux/init.h>
38727 #include <linux/pci.h>
38728 #include <linux/interrupt.h>
38729 diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
38730 index a5f9888..1c0ed56 100644
38731 --- a/drivers/mfd/twl4030-irq.c
38732 +++ b/drivers/mfd/twl4030-irq.c
38733 @@ -35,6 +35,7 @@
38734 #include <linux/of.h>
38735 #include <linux/irqdomain.h>
38736 #include <linux/i2c/twl.h>
38737 +#include <asm/pgtable.h>
38738
38739 #include "twl-core.h"
38740
38741 @@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
38742 * Install an irq handler for each of the SIH modules;
38743 * clone dummy irq_chip since PIH can't *do* anything
38744 */
38745 - twl4030_irq_chip = dummy_irq_chip;
38746 - twl4030_irq_chip.name = "twl4030";
38747 + pax_open_kernel();
38748 + memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
38749 + *(const char **)&twl4030_irq_chip.name = "twl4030";
38750
38751 - twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38752 + *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38753 + pax_close_kernel();
38754
38755 for (i = irq_base; i < irq_end; i++) {
38756 irq_set_chip_and_handler(i, &twl4030_irq_chip,
38757 diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
38758 index 277a8db..0e0b754 100644
38759 --- a/drivers/mfd/twl6030-irq.c
38760 +++ b/drivers/mfd/twl6030-irq.c
38761 @@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
38762 * install an irq handler for each of the modules;
38763 * clone dummy irq_chip since PIH can't *do* anything
38764 */
38765 - twl6030_irq_chip = dummy_irq_chip;
38766 - twl6030_irq_chip.name = "twl6030";
38767 - twl6030_irq_chip.irq_set_type = NULL;
38768 - twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38769 + pax_open_kernel();
38770 + memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
38771 + *(const char **)&twl6030_irq_chip.name = "twl6030";
38772 + *(void **)&twl6030_irq_chip.irq_set_type = NULL;
38773 + *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38774 + pax_close_kernel();
38775
38776 for (i = irq_base; i < irq_end; i++) {
38777 irq_set_chip_and_handler(i, &twl6030_irq_chip,
38778 diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
38779 index f32550a..e3e52a2 100644
38780 --- a/drivers/misc/c2port/core.c
38781 +++ b/drivers/misc/c2port/core.c
38782 @@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
38783 mutex_init(&c2dev->mutex);
38784
38785 /* Create binary file */
38786 - c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38787 + pax_open_kernel();
38788 + *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38789 + pax_close_kernel();
38790 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
38791 if (unlikely(ret))
38792 goto error_device_create_bin_file;
38793 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38794 index 36f5d52..32311c3 100644
38795 --- a/drivers/misc/kgdbts.c
38796 +++ b/drivers/misc/kgdbts.c
38797 @@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
38798 char before[BREAK_INSTR_SIZE];
38799 char after[BREAK_INSTR_SIZE];
38800
38801 - probe_kernel_read(before, (char *)kgdbts_break_test,
38802 + probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
38803 BREAK_INSTR_SIZE);
38804 init_simple_test();
38805 ts.tst = plant_and_detach_test;
38806 @@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
38807 /* Activate test with initial breakpoint */
38808 if (!is_early)
38809 kgdb_breakpoint();
38810 - probe_kernel_read(after, (char *)kgdbts_break_test,
38811 + probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
38812 BREAK_INSTR_SIZE);
38813 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
38814 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
38815 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
38816 index 4a87e5c..76bdf5c 100644
38817 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
38818 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
38819 @@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
38820 * the lid is closed. This leads to interrupts as soon as a little move
38821 * is done.
38822 */
38823 - atomic_inc(&lis3->count);
38824 + atomic_inc_unchecked(&lis3->count);
38825
38826 wake_up_interruptible(&lis3->misc_wait);
38827 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
38828 @@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
38829 if (lis3->pm_dev)
38830 pm_runtime_get_sync(lis3->pm_dev);
38831
38832 - atomic_set(&lis3->count, 0);
38833 + atomic_set_unchecked(&lis3->count, 0);
38834 return 0;
38835 }
38836
38837 @@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
38838 add_wait_queue(&lis3->misc_wait, &wait);
38839 while (true) {
38840 set_current_state(TASK_INTERRUPTIBLE);
38841 - data = atomic_xchg(&lis3->count, 0);
38842 + data = atomic_xchg_unchecked(&lis3->count, 0);
38843 if (data)
38844 break;
38845
38846 @@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
38847 struct lis3lv02d, miscdev);
38848
38849 poll_wait(file, &lis3->misc_wait, wait);
38850 - if (atomic_read(&lis3->count))
38851 + if (atomic_read_unchecked(&lis3->count))
38852 return POLLIN | POLLRDNORM;
38853 return 0;
38854 }
38855 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
38856 index c439c82..1f20f57 100644
38857 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
38858 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
38859 @@ -297,7 +297,7 @@ struct lis3lv02d {
38860 struct input_polled_dev *idev; /* input device */
38861 struct platform_device *pdev; /* platform device */
38862 struct regulator_bulk_data regulators[2];
38863 - atomic_t count; /* interrupt count after last read */
38864 + atomic_unchecked_t count; /* interrupt count after last read */
38865 union axis_conversion ac; /* hw -> logical axis */
38866 int mapped_btns[3];
38867
38868 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38869 index 2f30bad..c4c13d0 100644
38870 --- a/drivers/misc/sgi-gru/gruhandles.c
38871 +++ b/drivers/misc/sgi-gru/gruhandles.c
38872 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38873 unsigned long nsec;
38874
38875 nsec = CLKS2NSEC(clks);
38876 - atomic_long_inc(&mcs_op_statistics[op].count);
38877 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
38878 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38879 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
38880 if (mcs_op_statistics[op].max < nsec)
38881 mcs_op_statistics[op].max = nsec;
38882 }
38883 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38884 index 950dbe9..eeef0f8 100644
38885 --- a/drivers/misc/sgi-gru/gruprocfs.c
38886 +++ b/drivers/misc/sgi-gru/gruprocfs.c
38887 @@ -32,9 +32,9 @@
38888
38889 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38890
38891 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38892 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38893 {
38894 - unsigned long val = atomic_long_read(v);
38895 + unsigned long val = atomic_long_read_unchecked(v);
38896
38897 seq_printf(s, "%16lu %s\n", val, id);
38898 }
38899 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38900
38901 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
38902 for (op = 0; op < mcsop_last; op++) {
38903 - count = atomic_long_read(&mcs_op_statistics[op].count);
38904 - total = atomic_long_read(&mcs_op_statistics[op].total);
38905 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38906 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38907 max = mcs_op_statistics[op].max;
38908 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38909 count ? total / count : 0, max);
38910 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38911 index 5c3ce24..4915ccb 100644
38912 --- a/drivers/misc/sgi-gru/grutables.h
38913 +++ b/drivers/misc/sgi-gru/grutables.h
38914 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
38915 * GRU statistics.
38916 */
38917 struct gru_stats_s {
38918 - atomic_long_t vdata_alloc;
38919 - atomic_long_t vdata_free;
38920 - atomic_long_t gts_alloc;
38921 - atomic_long_t gts_free;
38922 - atomic_long_t gms_alloc;
38923 - atomic_long_t gms_free;
38924 - atomic_long_t gts_double_allocate;
38925 - atomic_long_t assign_context;
38926 - atomic_long_t assign_context_failed;
38927 - atomic_long_t free_context;
38928 - atomic_long_t load_user_context;
38929 - atomic_long_t load_kernel_context;
38930 - atomic_long_t lock_kernel_context;
38931 - atomic_long_t unlock_kernel_context;
38932 - atomic_long_t steal_user_context;
38933 - atomic_long_t steal_kernel_context;
38934 - atomic_long_t steal_context_failed;
38935 - atomic_long_t nopfn;
38936 - atomic_long_t asid_new;
38937 - atomic_long_t asid_next;
38938 - atomic_long_t asid_wrap;
38939 - atomic_long_t asid_reuse;
38940 - atomic_long_t intr;
38941 - atomic_long_t intr_cbr;
38942 - atomic_long_t intr_tfh;
38943 - atomic_long_t intr_spurious;
38944 - atomic_long_t intr_mm_lock_failed;
38945 - atomic_long_t call_os;
38946 - atomic_long_t call_os_wait_queue;
38947 - atomic_long_t user_flush_tlb;
38948 - atomic_long_t user_unload_context;
38949 - atomic_long_t user_exception;
38950 - atomic_long_t set_context_option;
38951 - atomic_long_t check_context_retarget_intr;
38952 - atomic_long_t check_context_unload;
38953 - atomic_long_t tlb_dropin;
38954 - atomic_long_t tlb_preload_page;
38955 - atomic_long_t tlb_dropin_fail_no_asid;
38956 - atomic_long_t tlb_dropin_fail_upm;
38957 - atomic_long_t tlb_dropin_fail_invalid;
38958 - atomic_long_t tlb_dropin_fail_range_active;
38959 - atomic_long_t tlb_dropin_fail_idle;
38960 - atomic_long_t tlb_dropin_fail_fmm;
38961 - atomic_long_t tlb_dropin_fail_no_exception;
38962 - atomic_long_t tfh_stale_on_fault;
38963 - atomic_long_t mmu_invalidate_range;
38964 - atomic_long_t mmu_invalidate_page;
38965 - atomic_long_t flush_tlb;
38966 - atomic_long_t flush_tlb_gru;
38967 - atomic_long_t flush_tlb_gru_tgh;
38968 - atomic_long_t flush_tlb_gru_zero_asid;
38969 + atomic_long_unchecked_t vdata_alloc;
38970 + atomic_long_unchecked_t vdata_free;
38971 + atomic_long_unchecked_t gts_alloc;
38972 + atomic_long_unchecked_t gts_free;
38973 + atomic_long_unchecked_t gms_alloc;
38974 + atomic_long_unchecked_t gms_free;
38975 + atomic_long_unchecked_t gts_double_allocate;
38976 + atomic_long_unchecked_t assign_context;
38977 + atomic_long_unchecked_t assign_context_failed;
38978 + atomic_long_unchecked_t free_context;
38979 + atomic_long_unchecked_t load_user_context;
38980 + atomic_long_unchecked_t load_kernel_context;
38981 + atomic_long_unchecked_t lock_kernel_context;
38982 + atomic_long_unchecked_t unlock_kernel_context;
38983 + atomic_long_unchecked_t steal_user_context;
38984 + atomic_long_unchecked_t steal_kernel_context;
38985 + atomic_long_unchecked_t steal_context_failed;
38986 + atomic_long_unchecked_t nopfn;
38987 + atomic_long_unchecked_t asid_new;
38988 + atomic_long_unchecked_t asid_next;
38989 + atomic_long_unchecked_t asid_wrap;
38990 + atomic_long_unchecked_t asid_reuse;
38991 + atomic_long_unchecked_t intr;
38992 + atomic_long_unchecked_t intr_cbr;
38993 + atomic_long_unchecked_t intr_tfh;
38994 + atomic_long_unchecked_t intr_spurious;
38995 + atomic_long_unchecked_t intr_mm_lock_failed;
38996 + atomic_long_unchecked_t call_os;
38997 + atomic_long_unchecked_t call_os_wait_queue;
38998 + atomic_long_unchecked_t user_flush_tlb;
38999 + atomic_long_unchecked_t user_unload_context;
39000 + atomic_long_unchecked_t user_exception;
39001 + atomic_long_unchecked_t set_context_option;
39002 + atomic_long_unchecked_t check_context_retarget_intr;
39003 + atomic_long_unchecked_t check_context_unload;
39004 + atomic_long_unchecked_t tlb_dropin;
39005 + atomic_long_unchecked_t tlb_preload_page;
39006 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39007 + atomic_long_unchecked_t tlb_dropin_fail_upm;
39008 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
39009 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
39010 + atomic_long_unchecked_t tlb_dropin_fail_idle;
39011 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
39012 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39013 + atomic_long_unchecked_t tfh_stale_on_fault;
39014 + atomic_long_unchecked_t mmu_invalidate_range;
39015 + atomic_long_unchecked_t mmu_invalidate_page;
39016 + atomic_long_unchecked_t flush_tlb;
39017 + atomic_long_unchecked_t flush_tlb_gru;
39018 + atomic_long_unchecked_t flush_tlb_gru_tgh;
39019 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39020
39021 - atomic_long_t copy_gpa;
39022 - atomic_long_t read_gpa;
39023 + atomic_long_unchecked_t copy_gpa;
39024 + atomic_long_unchecked_t read_gpa;
39025
39026 - atomic_long_t mesq_receive;
39027 - atomic_long_t mesq_receive_none;
39028 - atomic_long_t mesq_send;
39029 - atomic_long_t mesq_send_failed;
39030 - atomic_long_t mesq_noop;
39031 - atomic_long_t mesq_send_unexpected_error;
39032 - atomic_long_t mesq_send_lb_overflow;
39033 - atomic_long_t mesq_send_qlimit_reached;
39034 - atomic_long_t mesq_send_amo_nacked;
39035 - atomic_long_t mesq_send_put_nacked;
39036 - atomic_long_t mesq_page_overflow;
39037 - atomic_long_t mesq_qf_locked;
39038 - atomic_long_t mesq_qf_noop_not_full;
39039 - atomic_long_t mesq_qf_switch_head_failed;
39040 - atomic_long_t mesq_qf_unexpected_error;
39041 - atomic_long_t mesq_noop_unexpected_error;
39042 - atomic_long_t mesq_noop_lb_overflow;
39043 - atomic_long_t mesq_noop_qlimit_reached;
39044 - atomic_long_t mesq_noop_amo_nacked;
39045 - atomic_long_t mesq_noop_put_nacked;
39046 - atomic_long_t mesq_noop_page_overflow;
39047 + atomic_long_unchecked_t mesq_receive;
39048 + atomic_long_unchecked_t mesq_receive_none;
39049 + atomic_long_unchecked_t mesq_send;
39050 + atomic_long_unchecked_t mesq_send_failed;
39051 + atomic_long_unchecked_t mesq_noop;
39052 + atomic_long_unchecked_t mesq_send_unexpected_error;
39053 + atomic_long_unchecked_t mesq_send_lb_overflow;
39054 + atomic_long_unchecked_t mesq_send_qlimit_reached;
39055 + atomic_long_unchecked_t mesq_send_amo_nacked;
39056 + atomic_long_unchecked_t mesq_send_put_nacked;
39057 + atomic_long_unchecked_t mesq_page_overflow;
39058 + atomic_long_unchecked_t mesq_qf_locked;
39059 + atomic_long_unchecked_t mesq_qf_noop_not_full;
39060 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
39061 + atomic_long_unchecked_t mesq_qf_unexpected_error;
39062 + atomic_long_unchecked_t mesq_noop_unexpected_error;
39063 + atomic_long_unchecked_t mesq_noop_lb_overflow;
39064 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
39065 + atomic_long_unchecked_t mesq_noop_amo_nacked;
39066 + atomic_long_unchecked_t mesq_noop_put_nacked;
39067 + atomic_long_unchecked_t mesq_noop_page_overflow;
39068
39069 };
39070
39071 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39072 tghop_invalidate, mcsop_last};
39073
39074 struct mcs_op_statistic {
39075 - atomic_long_t count;
39076 - atomic_long_t total;
39077 + atomic_long_unchecked_t count;
39078 + atomic_long_unchecked_t total;
39079 unsigned long max;
39080 };
39081
39082 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39083
39084 #define STAT(id) do { \
39085 if (gru_options & OPT_STATS) \
39086 - atomic_long_inc(&gru_stats.id); \
39087 + atomic_long_inc_unchecked(&gru_stats.id); \
39088 } while (0)
39089
39090 #ifdef CONFIG_SGI_GRU_DEBUG
39091 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39092 index c862cd4..0d176fe 100644
39093 --- a/drivers/misc/sgi-xp/xp.h
39094 +++ b/drivers/misc/sgi-xp/xp.h
39095 @@ -288,7 +288,7 @@ struct xpc_interface {
39096 xpc_notify_func, void *);
39097 void (*received) (short, int, void *);
39098 enum xp_retval (*partid_to_nasids) (short, void *);
39099 -};
39100 +} __no_const;
39101
39102 extern struct xpc_interface xpc_interface;
39103
39104 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39105 index b94d5f7..7f494c5 100644
39106 --- a/drivers/misc/sgi-xp/xpc.h
39107 +++ b/drivers/misc/sgi-xp/xpc.h
39108 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
39109 void (*received_payload) (struct xpc_channel *, void *);
39110 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39111 };
39112 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39113
39114 /* struct xpc_partition act_state values (for XPC HB) */
39115
39116 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39117 /* found in xpc_main.c */
39118 extern struct device *xpc_part;
39119 extern struct device *xpc_chan;
39120 -extern struct xpc_arch_operations xpc_arch_ops;
39121 +extern xpc_arch_operations_no_const xpc_arch_ops;
39122 extern int xpc_disengage_timelimit;
39123 extern int xpc_disengage_timedout;
39124 extern int xpc_activate_IRQ_rcvd;
39125 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39126 index d971817..33bdca5 100644
39127 --- a/drivers/misc/sgi-xp/xpc_main.c
39128 +++ b/drivers/misc/sgi-xp/xpc_main.c
39129 @@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39130 .notifier_call = xpc_system_die,
39131 };
39132
39133 -struct xpc_arch_operations xpc_arch_ops;
39134 +xpc_arch_operations_no_const xpc_arch_ops;
39135
39136 /*
39137 * Timer function to enforce the timelimit on the partition disengage.
39138 @@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39139
39140 if (((die_args->trapnr == X86_TRAP_MF) ||
39141 (die_args->trapnr == X86_TRAP_XF)) &&
39142 - !user_mode_vm(die_args->regs))
39143 + !user_mode(die_args->regs))
39144 xpc_die_deactivate();
39145
39146 break;
39147 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39148 index 49f04bc..65660c2 100644
39149 --- a/drivers/mmc/core/mmc_ops.c
39150 +++ b/drivers/mmc/core/mmc_ops.c
39151 @@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39152 void *data_buf;
39153 int is_on_stack;
39154
39155 - is_on_stack = object_is_on_stack(buf);
39156 + is_on_stack = object_starts_on_stack(buf);
39157 if (is_on_stack) {
39158 /*
39159 * dma onto stack is unsafe/nonportable, but callers to this
39160 diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39161 index 53b8fd9..615b462 100644
39162 --- a/drivers/mmc/host/dw_mmc.h
39163 +++ b/drivers/mmc/host/dw_mmc.h
39164 @@ -205,5 +205,5 @@ struct dw_mci_drv_data {
39165 int (*parse_dt)(struct dw_mci *host);
39166 int (*setup_bus)(struct dw_mci *host,
39167 struct device_node *slot_np, u8 bus_width);
39168 -};
39169 +} __do_const;
39170 #endif /* _DW_MMC_H_ */
39171 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39172 index 7363efe..681558e 100644
39173 --- a/drivers/mmc/host/sdhci-s3c.c
39174 +++ b/drivers/mmc/host/sdhci-s3c.c
39175 @@ -720,9 +720,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39176 * we can use overriding functions instead of default.
39177 */
39178 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39179 - sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39180 - sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39181 - sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39182 + pax_open_kernel();
39183 + *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39184 + *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39185 + *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39186 + pax_close_kernel();
39187 }
39188
39189 /* It supports additional host capabilities if needed */
39190 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39191 index a4eb8b5..8c0628f 100644
39192 --- a/drivers/mtd/devices/doc2000.c
39193 +++ b/drivers/mtd/devices/doc2000.c
39194 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39195
39196 /* The ECC will not be calculated correctly if less than 512 is written */
39197 /* DBB-
39198 - if (len != 0x200 && eccbuf)
39199 + if (len != 0x200)
39200 printk(KERN_WARNING
39201 "ECC needs a full sector write (adr: %lx size %lx)\n",
39202 (long) to, (long) len);
39203 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39204 index 0c8bb6b..6f35deb 100644
39205 --- a/drivers/mtd/nand/denali.c
39206 +++ b/drivers/mtd/nand/denali.c
39207 @@ -24,6 +24,7 @@
39208 #include <linux/slab.h>
39209 #include <linux/mtd/mtd.h>
39210 #include <linux/module.h>
39211 +#include <linux/slab.h>
39212
39213 #include "denali.h"
39214
39215 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39216 index 51b9d6a..52af9a7 100644
39217 --- a/drivers/mtd/nftlmount.c
39218 +++ b/drivers/mtd/nftlmount.c
39219 @@ -24,6 +24,7 @@
39220 #include <asm/errno.h>
39221 #include <linux/delay.h>
39222 #include <linux/slab.h>
39223 +#include <linux/sched.h>
39224 #include <linux/mtd/mtd.h>
39225 #include <linux/mtd/nand.h>
39226 #include <linux/mtd/nftl.h>
39227 diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39228 index 8dd6ba5..419cc1d 100644
39229 --- a/drivers/mtd/sm_ftl.c
39230 +++ b/drivers/mtd/sm_ftl.c
39231 @@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39232 #define SM_CIS_VENDOR_OFFSET 0x59
39233 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39234 {
39235 - struct attribute_group *attr_group;
39236 + attribute_group_no_const *attr_group;
39237 struct attribute **attributes;
39238 struct sm_sysfs_attribute *vendor_attribute;
39239
39240 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39241 index dbbea0e..3f4a0b1 100644
39242 --- a/drivers/net/bonding/bond_main.c
39243 +++ b/drivers/net/bonding/bond_main.c
39244 @@ -4822,7 +4822,7 @@ static unsigned int bond_get_num_tx_queues(void)
39245 return tx_queues;
39246 }
39247
39248 -static struct rtnl_link_ops bond_link_ops __read_mostly = {
39249 +static struct rtnl_link_ops bond_link_ops = {
39250 .kind = "bond",
39251 .priv_size = sizeof(struct bonding),
39252 .setup = bond_setup,
39253 @@ -4947,8 +4947,8 @@ static void __exit bonding_exit(void)
39254
39255 bond_destroy_debugfs();
39256
39257 - rtnl_link_unregister(&bond_link_ops);
39258 unregister_pernet_subsys(&bond_net_ops);
39259 + rtnl_link_unregister(&bond_link_ops);
39260
39261 #ifdef CONFIG_NET_POLL_CONTROLLER
39262 /*
39263 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39264 index e1d2643..7f4133b 100644
39265 --- a/drivers/net/ethernet/8390/ax88796.c
39266 +++ b/drivers/net/ethernet/8390/ax88796.c
39267 @@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39268 if (ax->plat->reg_offsets)
39269 ei_local->reg_offset = ax->plat->reg_offsets;
39270 else {
39271 + resource_size_t _mem_size = mem_size;
39272 + do_div(_mem_size, 0x18);
39273 ei_local->reg_offset = ax->reg_offsets;
39274 for (ret = 0; ret < 0x18; ret++)
39275 - ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
39276 + ax->reg_offsets[ret] = _mem_size * ret;
39277 }
39278
39279 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
39280 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39281 index aee7671..3ca2651 100644
39282 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39283 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39284 @@ -1093,7 +1093,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
39285 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
39286 {
39287 /* RX_MODE controlling object */
39288 - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
39289 + bnx2x_init_rx_mode_obj(bp);
39290
39291 /* multicast configuration controlling object */
39292 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
39293 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39294 index 7306416..5fb7fb5 100644
39295 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39296 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39297 @@ -2381,15 +2381,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
39298 return rc;
39299 }
39300
39301 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39302 - struct bnx2x_rx_mode_obj *o)
39303 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
39304 {
39305 if (CHIP_IS_E1x(bp)) {
39306 - o->wait_comp = bnx2x_empty_rx_mode_wait;
39307 - o->config_rx_mode = bnx2x_set_rx_mode_e1x;
39308 + bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
39309 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
39310 } else {
39311 - o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
39312 - o->config_rx_mode = bnx2x_set_rx_mode_e2;
39313 + bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
39314 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
39315 }
39316 }
39317
39318 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39319 index ff90760..08d8aed 100644
39320 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39321 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39322 @@ -1306,8 +1306,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
39323
39324 /********************* RX MODE ****************/
39325
39326 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39327 - struct bnx2x_rx_mode_obj *o);
39328 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
39329
39330 /**
39331 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
39332 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
39333 index 8d7d4c2..95f7681 100644
39334 --- a/drivers/net/ethernet/broadcom/tg3.h
39335 +++ b/drivers/net/ethernet/broadcom/tg3.h
39336 @@ -147,6 +147,7 @@
39337 #define CHIPREV_ID_5750_A0 0x4000
39338 #define CHIPREV_ID_5750_A1 0x4001
39339 #define CHIPREV_ID_5750_A3 0x4003
39340 +#define CHIPREV_ID_5750_C1 0x4201
39341 #define CHIPREV_ID_5750_C2 0x4202
39342 #define CHIPREV_ID_5752_A0_HW 0x5000
39343 #define CHIPREV_ID_5752_A0 0x6000
39344 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39345 index 8cffcdf..aadf043 100644
39346 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39347 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39348 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
39349 */
39350 struct l2t_skb_cb {
39351 arp_failure_handler_func arp_failure_handler;
39352 -};
39353 +} __no_const;
39354
39355 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
39356
39357 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
39358 index 4c83003..2a2a5b9 100644
39359 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
39360 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
39361 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39362 for (i=0; i<ETH_ALEN; i++) {
39363 tmp.addr[i] = dev->dev_addr[i];
39364 }
39365 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39366 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39367 break;
39368
39369 case DE4X5_SET_HWADDR: /* Set the hardware address */
39370 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39371 spin_lock_irqsave(&lp->lock, flags);
39372 memcpy(&statbuf, &lp->pktStats, ioc->len);
39373 spin_unlock_irqrestore(&lp->lock, flags);
39374 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39375 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39376 return -EFAULT;
39377 break;
39378 }
39379 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
39380 index 2886c9b..db71673 100644
39381 --- a/drivers/net/ethernet/emulex/benet/be_main.c
39382 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
39383 @@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
39384
39385 if (wrapped)
39386 newacc += 65536;
39387 - ACCESS_ONCE(*acc) = newacc;
39388 + ACCESS_ONCE_RW(*acc) = newacc;
39389 }
39390
39391 void be_parse_stats(struct be_adapter *adapter)
39392 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
39393 index 7c361d1..57e3ff1 100644
39394 --- a/drivers/net/ethernet/faraday/ftgmac100.c
39395 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
39396 @@ -31,6 +31,8 @@
39397 #include <linux/netdevice.h>
39398 #include <linux/phy.h>
39399 #include <linux/platform_device.h>
39400 +#include <linux/interrupt.h>
39401 +#include <linux/irqreturn.h>
39402 #include <net/ip.h>
39403
39404 #include "ftgmac100.h"
39405 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
39406 index b5ea8fb..bd25e9a 100644
39407 --- a/drivers/net/ethernet/faraday/ftmac100.c
39408 +++ b/drivers/net/ethernet/faraday/ftmac100.c
39409 @@ -31,6 +31,8 @@
39410 #include <linux/module.h>
39411 #include <linux/netdevice.h>
39412 #include <linux/platform_device.h>
39413 +#include <linux/interrupt.h>
39414 +#include <linux/irqreturn.h>
39415
39416 #include "ftmac100.h"
39417
39418 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39419 index 331987d..3be1135 100644
39420 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39421 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39422 @@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
39423 }
39424
39425 /* update the base incval used to calculate frequency adjustment */
39426 - ACCESS_ONCE(adapter->base_incval) = incval;
39427 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
39428 smp_mb();
39429
39430 /* need lock to prevent incorrect read while modifying cyclecounter */
39431 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39432 index fbe5363..266b4e3 100644
39433 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
39434 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39435 @@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39436 struct __vxge_hw_fifo *fifo;
39437 struct vxge_hw_fifo_config *config;
39438 u32 txdl_size, txdl_per_memblock;
39439 - struct vxge_hw_mempool_cbs fifo_mp_callback;
39440 + static struct vxge_hw_mempool_cbs fifo_mp_callback = {
39441 + .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
39442 + };
39443 +
39444 struct __vxge_hw_virtualpath *vpath;
39445
39446 if ((vp == NULL) || (attr == NULL)) {
39447 @@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39448 goto exit;
39449 }
39450
39451 - fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39452 -
39453 fifo->mempool =
39454 __vxge_hw_mempool_create(vpath->hldev,
39455 fifo->config->memblock_size,
39456 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
39457 index 5c033f2..7bbb0d8 100644
39458 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
39459 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
39460 @@ -1894,7 +1894,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
39461 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
39462
39463 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
39464 - adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
39465 + pax_open_kernel();
39466 + *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
39467 + pax_close_kernel();
39468 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
39469 } else {
39470 return -EIO;
39471 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
39472 index b0c3de9..fc5857e 100644
39473 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
39474 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
39475 @@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
39476 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
39477 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
39478 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
39479 - nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
39480 + pax_open_kernel();
39481 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
39482 + pax_close_kernel();
39483 } else if (priv_level == QLCNIC_PRIV_FUNC) {
39484 ahw->op_mode = QLCNIC_PRIV_FUNC;
39485 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
39486 - nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
39487 + pax_open_kernel();
39488 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
39489 + pax_close_kernel();
39490 } else if (priv_level == QLCNIC_MGMT_FUNC) {
39491 ahw->op_mode = QLCNIC_MGMT_FUNC;
39492 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
39493 - nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
39494 + pax_open_kernel();
39495 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
39496 + pax_close_kernel();
39497 } else {
39498 return -EIO;
39499 }
39500 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39501 index 15ba8c4..3f56838 100644
39502 --- a/drivers/net/ethernet/realtek/r8169.c
39503 +++ b/drivers/net/ethernet/realtek/r8169.c
39504 @@ -740,22 +740,22 @@ struct rtl8169_private {
39505 struct mdio_ops {
39506 void (*write)(struct rtl8169_private *, int, int);
39507 int (*read)(struct rtl8169_private *, int);
39508 - } mdio_ops;
39509 + } __no_const mdio_ops;
39510
39511 struct pll_power_ops {
39512 void (*down)(struct rtl8169_private *);
39513 void (*up)(struct rtl8169_private *);
39514 - } pll_power_ops;
39515 + } __no_const pll_power_ops;
39516
39517 struct jumbo_ops {
39518 void (*enable)(struct rtl8169_private *);
39519 void (*disable)(struct rtl8169_private *);
39520 - } jumbo_ops;
39521 + } __no_const jumbo_ops;
39522
39523 struct csi_ops {
39524 void (*write)(struct rtl8169_private *, int, int);
39525 u32 (*read)(struct rtl8169_private *, int);
39526 - } csi_ops;
39527 + } __no_const csi_ops;
39528
39529 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39530 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39531 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39532 index 3f93624..cf01144 100644
39533 --- a/drivers/net/ethernet/sfc/ptp.c
39534 +++ b/drivers/net/ethernet/sfc/ptp.c
39535 @@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39536 (u32)((u64)ptp->start.dma_addr >> 32));
39537
39538 /* Clear flag that signals MC ready */
39539 - ACCESS_ONCE(*start) = 0;
39540 + ACCESS_ONCE_RW(*start) = 0;
39541 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39542 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39543
39544 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39545 index 50617c5..b13724c 100644
39546 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39547 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39548 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39549
39550 writel(value, ioaddr + MMC_CNTRL);
39551
39552 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39553 - MMC_CNTRL, value);
39554 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39555 +// MMC_CNTRL, value);
39556 }
39557
39558 /* To mask all all interrupts.*/
39559 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
39560 index e6fe0d8..2b7d752 100644
39561 --- a/drivers/net/hyperv/hyperv_net.h
39562 +++ b/drivers/net/hyperv/hyperv_net.h
39563 @@ -101,7 +101,7 @@ struct rndis_device {
39564
39565 enum rndis_device_state state;
39566 bool link_state;
39567 - atomic_t new_req_id;
39568 + atomic_unchecked_t new_req_id;
39569
39570 spinlock_t request_lock;
39571 struct list_head req_list;
39572 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
39573 index 0775f0a..d4fb316 100644
39574 --- a/drivers/net/hyperv/rndis_filter.c
39575 +++ b/drivers/net/hyperv/rndis_filter.c
39576 @@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
39577 * template
39578 */
39579 set = &rndis_msg->msg.set_req;
39580 - set->req_id = atomic_inc_return(&dev->new_req_id);
39581 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39582
39583 /* Add to the request list */
39584 spin_lock_irqsave(&dev->request_lock, flags);
39585 @@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
39586
39587 /* Setup the rndis set */
39588 halt = &request->request_msg.msg.halt_req;
39589 - halt->req_id = atomic_inc_return(&dev->new_req_id);
39590 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39591
39592 /* Ignore return since this msg is optional. */
39593 rndis_filter_send_request(dev, request);
39594 diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
39595 index 8f1c256..a2991d1 100644
39596 --- a/drivers/net/ieee802154/fakehard.c
39597 +++ b/drivers/net/ieee802154/fakehard.c
39598 @@ -385,7 +385,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
39599 phy->transmit_power = 0xbf;
39600
39601 dev->netdev_ops = &fake_ops;
39602 - dev->ml_priv = &fake_mlme;
39603 + dev->ml_priv = (void *)&fake_mlme;
39604
39605 priv = netdev_priv(dev);
39606 priv->phy = phy;
39607 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
39608 index 73abbc1..f25db7c 100644
39609 --- a/drivers/net/macvlan.c
39610 +++ b/drivers/net/macvlan.c
39611 @@ -891,13 +891,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
39612 int macvlan_link_register(struct rtnl_link_ops *ops)
39613 {
39614 /* common fields */
39615 - ops->priv_size = sizeof(struct macvlan_dev);
39616 - ops->validate = macvlan_validate;
39617 - ops->maxtype = IFLA_MACVLAN_MAX;
39618 - ops->policy = macvlan_policy;
39619 - ops->changelink = macvlan_changelink;
39620 - ops->get_size = macvlan_get_size;
39621 - ops->fill_info = macvlan_fill_info;
39622 + pax_open_kernel();
39623 + *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
39624 + *(void **)&ops->validate = macvlan_validate;
39625 + *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
39626 + *(const void **)&ops->policy = macvlan_policy;
39627 + *(void **)&ops->changelink = macvlan_changelink;
39628 + *(void **)&ops->get_size = macvlan_get_size;
39629 + *(void **)&ops->fill_info = macvlan_fill_info;
39630 + pax_close_kernel();
39631
39632 return rtnl_link_register(ops);
39633 };
39634 @@ -953,7 +955,7 @@ static int macvlan_device_event(struct notifier_block *unused,
39635 return NOTIFY_DONE;
39636 }
39637
39638 -static struct notifier_block macvlan_notifier_block __read_mostly = {
39639 +static struct notifier_block macvlan_notifier_block = {
39640 .notifier_call = macvlan_device_event,
39641 };
39642
39643 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
39644 index a449439..1e468fe 100644
39645 --- a/drivers/net/macvtap.c
39646 +++ b/drivers/net/macvtap.c
39647 @@ -1090,7 +1090,7 @@ static int macvtap_device_event(struct notifier_block *unused,
39648 return NOTIFY_DONE;
39649 }
39650
39651 -static struct notifier_block macvtap_notifier_block __read_mostly = {
39652 +static struct notifier_block macvtap_notifier_block = {
39653 .notifier_call = macvtap_device_event,
39654 };
39655
39656 diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
39657 index daec9b0..6428fcb 100644
39658 --- a/drivers/net/phy/mdio-bitbang.c
39659 +++ b/drivers/net/phy/mdio-bitbang.c
39660 @@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
39661 struct mdiobb_ctrl *ctrl = bus->priv;
39662
39663 module_put(ctrl->ops->owner);
39664 + mdiobus_unregister(bus);
39665 mdiobus_free(bus);
39666 }
39667 EXPORT_SYMBOL(free_mdio_bitbang);
39668 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
39669 index 72ff14b..11d442d 100644
39670 --- a/drivers/net/ppp/ppp_generic.c
39671 +++ b/drivers/net/ppp/ppp_generic.c
39672 @@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39673 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
39674 struct ppp_stats stats;
39675 struct ppp_comp_stats cstats;
39676 - char *vers;
39677
39678 switch (cmd) {
39679 case SIOCGPPPSTATS:
39680 @@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39681 break;
39682
39683 case SIOCGPPPVER:
39684 - vers = PPP_VERSION;
39685 - if (copy_to_user(addr, vers, strlen(vers) + 1))
39686 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
39687 break;
39688 err = 0;
39689 break;
39690 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
39691 index 1252d9c..80e660b 100644
39692 --- a/drivers/net/slip/slhc.c
39693 +++ b/drivers/net/slip/slhc.c
39694 @@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
39695 register struct tcphdr *thp;
39696 register struct iphdr *ip;
39697 register struct cstate *cs;
39698 - int len, hdrlen;
39699 + long len, hdrlen;
39700 unsigned char *cp = icp;
39701
39702 /* We've got a compressed packet; read the change byte */
39703 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
39704 index bf34192..fba3500 100644
39705 --- a/drivers/net/team/team.c
39706 +++ b/drivers/net/team/team.c
39707 @@ -2668,7 +2668,7 @@ static int team_device_event(struct notifier_block *unused,
39708 return NOTIFY_DONE;
39709 }
39710
39711 -static struct notifier_block team_notifier_block __read_mostly = {
39712 +static struct notifier_block team_notifier_block = {
39713 .notifier_call = team_device_event,
39714 };
39715
39716 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
39717 index 729ed53..9453f99 100644
39718 --- a/drivers/net/tun.c
39719 +++ b/drivers/net/tun.c
39720 @@ -1838,7 +1838,7 @@ unlock:
39721 }
39722
39723 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39724 - unsigned long arg, int ifreq_len)
39725 + unsigned long arg, size_t ifreq_len)
39726 {
39727 struct tun_file *tfile = file->private_data;
39728 struct tun_struct *tun;
39729 @@ -1850,6 +1850,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39730 int vnet_hdr_sz;
39731 int ret;
39732
39733 + if (ifreq_len > sizeof ifr)
39734 + return -EFAULT;
39735 +
39736 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
39737 if (copy_from_user(&ifr, argp, ifreq_len))
39738 return -EFAULT;
39739 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39740 index e2dd324..be92fcf 100644
39741 --- a/drivers/net/usb/hso.c
39742 +++ b/drivers/net/usb/hso.c
39743 @@ -71,7 +71,7 @@
39744 #include <asm/byteorder.h>
39745 #include <linux/serial_core.h>
39746 #include <linux/serial.h>
39747 -
39748 +#include <asm/local.h>
39749
39750 #define MOD_AUTHOR "Option Wireless"
39751 #define MOD_DESCRIPTION "USB High Speed Option driver"
39752 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39753 struct urb *urb;
39754
39755 urb = serial->rx_urb[0];
39756 - if (serial->port.count > 0) {
39757 + if (atomic_read(&serial->port.count) > 0) {
39758 count = put_rxbuf_data(urb, serial);
39759 if (count == -1)
39760 return;
39761 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39762 DUMP1(urb->transfer_buffer, urb->actual_length);
39763
39764 /* Anyone listening? */
39765 - if (serial->port.count == 0)
39766 + if (atomic_read(&serial->port.count) == 0)
39767 return;
39768
39769 if (status == 0) {
39770 @@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39771 tty_port_tty_set(&serial->port, tty);
39772
39773 /* check for port already opened, if not set the termios */
39774 - serial->port.count++;
39775 - if (serial->port.count == 1) {
39776 + if (atomic_inc_return(&serial->port.count) == 1) {
39777 serial->rx_state = RX_IDLE;
39778 /* Force default termio settings */
39779 _hso_serial_set_termios(tty, NULL);
39780 @@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39781 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39782 if (result) {
39783 hso_stop_serial_device(serial->parent);
39784 - serial->port.count--;
39785 + atomic_dec(&serial->port.count);
39786 kref_put(&serial->parent->ref, hso_serial_ref_free);
39787 }
39788 } else {
39789 @@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39790
39791 /* reset the rts and dtr */
39792 /* do the actual close */
39793 - serial->port.count--;
39794 + atomic_dec(&serial->port.count);
39795
39796 - if (serial->port.count <= 0) {
39797 - serial->port.count = 0;
39798 + if (atomic_read(&serial->port.count) <= 0) {
39799 + atomic_set(&serial->port.count, 0);
39800 tty_port_tty_set(&serial->port, NULL);
39801 if (!usb_gone)
39802 hso_stop_serial_device(serial->parent);
39803 @@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39804
39805 /* the actual setup */
39806 spin_lock_irqsave(&serial->serial_lock, flags);
39807 - if (serial->port.count)
39808 + if (atomic_read(&serial->port.count))
39809 _hso_serial_set_termios(tty, old);
39810 else
39811 tty->termios = *old;
39812 @@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
39813 D1("Pending read interrupt on port %d\n", i);
39814 spin_lock(&serial->serial_lock);
39815 if (serial->rx_state == RX_IDLE &&
39816 - serial->port.count > 0) {
39817 + atomic_read(&serial->port.count) > 0) {
39818 /* Setup and send a ctrl req read on
39819 * port i */
39820 if (!serial->rx_urb_filled[0]) {
39821 @@ -3066,7 +3065,7 @@ static int hso_resume(struct usb_interface *iface)
39822 /* Start all serial ports */
39823 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39824 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39825 - if (dev2ser(serial_table[i])->port.count) {
39826 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
39827 result =
39828 hso_start_serial_device(serial_table[i], GFP_NOIO);
39829 hso_kick_transmit(dev2ser(serial_table[i]));
39830 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
39831 index 7cee7a3..1eb9f3b 100644
39832 --- a/drivers/net/vxlan.c
39833 +++ b/drivers/net/vxlan.c
39834 @@ -1443,7 +1443,7 @@ nla_put_failure:
39835 return -EMSGSIZE;
39836 }
39837
39838 -static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
39839 +static struct rtnl_link_ops vxlan_link_ops = {
39840 .kind = "vxlan",
39841 .maxtype = IFLA_VXLAN_MAX,
39842 .policy = vxlan_policy,
39843 diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
39844 index 5ac5f7a..5f82012 100644
39845 --- a/drivers/net/wireless/at76c50x-usb.c
39846 +++ b/drivers/net/wireless/at76c50x-usb.c
39847 @@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
39848 }
39849
39850 /* Convert timeout from the DFU status to jiffies */
39851 -static inline unsigned long at76_get_timeout(struct dfu_status *s)
39852 +static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
39853 {
39854 return msecs_to_jiffies((s->poll_timeout[2] << 16)
39855 | (s->poll_timeout[1] << 8)
39856 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39857 index 8d78253..bebbb68 100644
39858 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39859 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39860 @@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39861 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
39862 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
39863
39864 - ACCESS_ONCE(ads->ds_link) = i->link;
39865 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
39866 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
39867 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
39868
39869 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
39870 ctl6 = SM(i->keytype, AR_EncrType);
39871 @@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39872
39873 if ((i->is_first || i->is_last) &&
39874 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
39875 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
39876 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
39877 | set11nTries(i->rates, 1)
39878 | set11nTries(i->rates, 2)
39879 | set11nTries(i->rates, 3)
39880 | (i->dur_update ? AR_DurUpdateEna : 0)
39881 | SM(0, AR_BurstDur);
39882
39883 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
39884 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
39885 | set11nRate(i->rates, 1)
39886 | set11nRate(i->rates, 2)
39887 | set11nRate(i->rates, 3);
39888 } else {
39889 - ACCESS_ONCE(ads->ds_ctl2) = 0;
39890 - ACCESS_ONCE(ads->ds_ctl3) = 0;
39891 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
39892 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
39893 }
39894
39895 if (!i->is_first) {
39896 - ACCESS_ONCE(ads->ds_ctl0) = 0;
39897 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39898 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39899 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
39900 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39901 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39902 return;
39903 }
39904
39905 @@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39906 break;
39907 }
39908
39909 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39910 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39911 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39912 | SM(i->txpower, AR_XmitPower)
39913 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39914 @@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39915 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
39916 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
39917
39918 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39919 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39920 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39921 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39922
39923 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
39924 return;
39925
39926 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39927 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39928 | set11nPktDurRTSCTS(i->rates, 1);
39929
39930 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39931 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39932 | set11nPktDurRTSCTS(i->rates, 3);
39933
39934 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39935 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39936 | set11nRateFlags(i->rates, 1)
39937 | set11nRateFlags(i->rates, 2)
39938 | set11nRateFlags(i->rates, 3)
39939 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39940 index 301bf72..3f5654f 100644
39941 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39942 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39943 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39944 (i->qcu << AR_TxQcuNum_S) | desc_len;
39945
39946 checksum += val;
39947 - ACCESS_ONCE(ads->info) = val;
39948 + ACCESS_ONCE_RW(ads->info) = val;
39949
39950 checksum += i->link;
39951 - ACCESS_ONCE(ads->link) = i->link;
39952 + ACCESS_ONCE_RW(ads->link) = i->link;
39953
39954 checksum += i->buf_addr[0];
39955 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
39956 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
39957 checksum += i->buf_addr[1];
39958 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
39959 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
39960 checksum += i->buf_addr[2];
39961 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
39962 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
39963 checksum += i->buf_addr[3];
39964 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
39965 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
39966
39967 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
39968 - ACCESS_ONCE(ads->ctl3) = val;
39969 + ACCESS_ONCE_RW(ads->ctl3) = val;
39970 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
39971 - ACCESS_ONCE(ads->ctl5) = val;
39972 + ACCESS_ONCE_RW(ads->ctl5) = val;
39973 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
39974 - ACCESS_ONCE(ads->ctl7) = val;
39975 + ACCESS_ONCE_RW(ads->ctl7) = val;
39976 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
39977 - ACCESS_ONCE(ads->ctl9) = val;
39978 + ACCESS_ONCE_RW(ads->ctl9) = val;
39979
39980 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
39981 - ACCESS_ONCE(ads->ctl10) = checksum;
39982 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
39983
39984 if (i->is_first || i->is_last) {
39985 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
39986 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
39987 | set11nTries(i->rates, 1)
39988 | set11nTries(i->rates, 2)
39989 | set11nTries(i->rates, 3)
39990 | (i->dur_update ? AR_DurUpdateEna : 0)
39991 | SM(0, AR_BurstDur);
39992
39993 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
39994 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
39995 | set11nRate(i->rates, 1)
39996 | set11nRate(i->rates, 2)
39997 | set11nRate(i->rates, 3);
39998 } else {
39999 - ACCESS_ONCE(ads->ctl13) = 0;
40000 - ACCESS_ONCE(ads->ctl14) = 0;
40001 + ACCESS_ONCE_RW(ads->ctl13) = 0;
40002 + ACCESS_ONCE_RW(ads->ctl14) = 0;
40003 }
40004
40005 ads->ctl20 = 0;
40006 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40007
40008 ctl17 = SM(i->keytype, AR_EncrType);
40009 if (!i->is_first) {
40010 - ACCESS_ONCE(ads->ctl11) = 0;
40011 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40012 - ACCESS_ONCE(ads->ctl15) = 0;
40013 - ACCESS_ONCE(ads->ctl16) = 0;
40014 - ACCESS_ONCE(ads->ctl17) = ctl17;
40015 - ACCESS_ONCE(ads->ctl18) = 0;
40016 - ACCESS_ONCE(ads->ctl19) = 0;
40017 + ACCESS_ONCE_RW(ads->ctl11) = 0;
40018 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40019 + ACCESS_ONCE_RW(ads->ctl15) = 0;
40020 + ACCESS_ONCE_RW(ads->ctl16) = 0;
40021 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40022 + ACCESS_ONCE_RW(ads->ctl18) = 0;
40023 + ACCESS_ONCE_RW(ads->ctl19) = 0;
40024 return;
40025 }
40026
40027 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40028 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40029 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40030 | SM(i->txpower, AR_XmitPower)
40031 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40032 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40033 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40034 ctl12 |= SM(val, AR_PAPRDChainMask);
40035
40036 - ACCESS_ONCE(ads->ctl12) = ctl12;
40037 - ACCESS_ONCE(ads->ctl17) = ctl17;
40038 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40039 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40040
40041 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40042 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40043 | set11nPktDurRTSCTS(i->rates, 1);
40044
40045 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40046 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40047 | set11nPktDurRTSCTS(i->rates, 3);
40048
40049 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40050 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40051 | set11nRateFlags(i->rates, 1)
40052 | set11nRateFlags(i->rates, 2)
40053 | set11nRateFlags(i->rates, 3)
40054 | SM(i->rtscts_rate, AR_RTSCTSRate);
40055
40056 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40057 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40058 }
40059
40060 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40061 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40062 index 784e81c..349e01e 100644
40063 --- a/drivers/net/wireless/ath/ath9k/hw.h
40064 +++ b/drivers/net/wireless/ath/ath9k/hw.h
40065 @@ -653,7 +653,7 @@ struct ath_hw_private_ops {
40066
40067 /* ANI */
40068 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40069 -};
40070 +} __no_const;
40071
40072 /**
40073 * struct ath_spec_scan - parameters for Atheros spectral scan
40074 @@ -722,7 +722,7 @@ struct ath_hw_ops {
40075 struct ath_spec_scan *param);
40076 void (*spectral_scan_trigger)(struct ath_hw *ah);
40077 void (*spectral_scan_wait)(struct ath_hw *ah);
40078 -};
40079 +} __no_const;
40080
40081 struct ath_nf_limits {
40082 s16 max;
40083 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40084 index c353b5f..62aaca2 100644
40085 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
40086 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40087 @@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40088 */
40089 if (il3945_mod_params.disable_hw_scan) {
40090 D_INFO("Disabling hw_scan\n");
40091 - il3945_mac_ops.hw_scan = NULL;
40092 + pax_open_kernel();
40093 + *(void **)&il3945_mac_ops.hw_scan = NULL;
40094 + pax_close_kernel();
40095 }
40096
40097 D_INFO("*** LOAD DRIVER ***\n");
40098 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40099 index 81d4071..f2071ea 100644
40100 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40101 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40102 @@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40103 {
40104 struct iwl_priv *priv = file->private_data;
40105 char buf[64];
40106 - int buf_size;
40107 + size_t buf_size;
40108 u32 offset, len;
40109
40110 memset(buf, 0, sizeof(buf));
40111 @@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40112 struct iwl_priv *priv = file->private_data;
40113
40114 char buf[8];
40115 - int buf_size;
40116 + size_t buf_size;
40117 u32 reset_flag;
40118
40119 memset(buf, 0, sizeof(buf));
40120 @@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40121 {
40122 struct iwl_priv *priv = file->private_data;
40123 char buf[8];
40124 - int buf_size;
40125 + size_t buf_size;
40126 int ht40;
40127
40128 memset(buf, 0, sizeof(buf));
40129 @@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40130 {
40131 struct iwl_priv *priv = file->private_data;
40132 char buf[8];
40133 - int buf_size;
40134 + size_t buf_size;
40135 int value;
40136
40137 memset(buf, 0, sizeof(buf));
40138 @@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40139 {
40140 struct iwl_priv *priv = file->private_data;
40141 char buf[8];
40142 - int buf_size;
40143 + size_t buf_size;
40144 int clear;
40145
40146 memset(buf, 0, sizeof(buf));
40147 @@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40148 {
40149 struct iwl_priv *priv = file->private_data;
40150 char buf[8];
40151 - int buf_size;
40152 + size_t buf_size;
40153 int trace;
40154
40155 memset(buf, 0, sizeof(buf));
40156 @@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40157 {
40158 struct iwl_priv *priv = file->private_data;
40159 char buf[8];
40160 - int buf_size;
40161 + size_t buf_size;
40162 int missed;
40163
40164 memset(buf, 0, sizeof(buf));
40165 @@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40166
40167 struct iwl_priv *priv = file->private_data;
40168 char buf[8];
40169 - int buf_size;
40170 + size_t buf_size;
40171 int plcp;
40172
40173 memset(buf, 0, sizeof(buf));
40174 @@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40175
40176 struct iwl_priv *priv = file->private_data;
40177 char buf[8];
40178 - int buf_size;
40179 + size_t buf_size;
40180 int flush;
40181
40182 memset(buf, 0, sizeof(buf));
40183 @@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40184
40185 struct iwl_priv *priv = file->private_data;
40186 char buf[8];
40187 - int buf_size;
40188 + size_t buf_size;
40189 int rts;
40190
40191 if (!priv->cfg->ht_params)
40192 @@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40193 {
40194 struct iwl_priv *priv = file->private_data;
40195 char buf[8];
40196 - int buf_size;
40197 + size_t buf_size;
40198
40199 memset(buf, 0, sizeof(buf));
40200 buf_size = min(count, sizeof(buf) - 1);
40201 @@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40202 struct iwl_priv *priv = file->private_data;
40203 u32 event_log_flag;
40204 char buf[8];
40205 - int buf_size;
40206 + size_t buf_size;
40207
40208 /* check that the interface is up */
40209 if (!iwl_is_ready(priv))
40210 @@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40211 struct iwl_priv *priv = file->private_data;
40212 char buf[8];
40213 u32 calib_disabled;
40214 - int buf_size;
40215 + size_t buf_size;
40216
40217 memset(buf, 0, sizeof(buf));
40218 buf_size = min(count, sizeof(buf) - 1);
40219 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40220 index 12c4f31..484d948 100644
40221 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40222 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40223 @@ -1328,7 +1328,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40224 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40225
40226 char buf[8];
40227 - int buf_size;
40228 + size_t buf_size;
40229 u32 reset_flag;
40230
40231 memset(buf, 0, sizeof(buf));
40232 @@ -1349,7 +1349,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
40233 {
40234 struct iwl_trans *trans = file->private_data;
40235 char buf[8];
40236 - int buf_size;
40237 + size_t buf_size;
40238 int csr;
40239
40240 memset(buf, 0, sizeof(buf));
40241 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
40242 index cffdf4f..7cefb69 100644
40243 --- a/drivers/net/wireless/mac80211_hwsim.c
40244 +++ b/drivers/net/wireless/mac80211_hwsim.c
40245 @@ -2144,25 +2144,19 @@ static int __init init_mac80211_hwsim(void)
40246
40247 if (channels > 1) {
40248 hwsim_if_comb.num_different_channels = channels;
40249 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40250 - mac80211_hwsim_ops.cancel_hw_scan =
40251 - mac80211_hwsim_cancel_hw_scan;
40252 - mac80211_hwsim_ops.sw_scan_start = NULL;
40253 - mac80211_hwsim_ops.sw_scan_complete = NULL;
40254 - mac80211_hwsim_ops.remain_on_channel =
40255 - mac80211_hwsim_roc;
40256 - mac80211_hwsim_ops.cancel_remain_on_channel =
40257 - mac80211_hwsim_croc;
40258 - mac80211_hwsim_ops.add_chanctx =
40259 - mac80211_hwsim_add_chanctx;
40260 - mac80211_hwsim_ops.remove_chanctx =
40261 - mac80211_hwsim_remove_chanctx;
40262 - mac80211_hwsim_ops.change_chanctx =
40263 - mac80211_hwsim_change_chanctx;
40264 - mac80211_hwsim_ops.assign_vif_chanctx =
40265 - mac80211_hwsim_assign_vif_chanctx;
40266 - mac80211_hwsim_ops.unassign_vif_chanctx =
40267 - mac80211_hwsim_unassign_vif_chanctx;
40268 + pax_open_kernel();
40269 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40270 + *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
40271 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
40272 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
40273 + *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
40274 + *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
40275 + *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
40276 + *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
40277 + *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
40278 + *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
40279 + *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
40280 + pax_close_kernel();
40281 }
40282
40283 spin_lock_init(&hwsim_radio_lock);
40284 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40285 index 525fd75..6c9f791 100644
40286 --- a/drivers/net/wireless/rndis_wlan.c
40287 +++ b/drivers/net/wireless/rndis_wlan.c
40288 @@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40289
40290 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
40291
40292 - if (rts_threshold < 0 || rts_threshold > 2347)
40293 + if (rts_threshold > 2347)
40294 rts_threshold = 2347;
40295
40296 tmp = cpu_to_le32(rts_threshold);
40297 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
40298 index 086abb4..8279c30 100644
40299 --- a/drivers/net/wireless/rt2x00/rt2x00.h
40300 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
40301 @@ -396,7 +396,7 @@ struct rt2x00_intf {
40302 * for hardware which doesn't support hardware
40303 * sequence counting.
40304 */
40305 - atomic_t seqno;
40306 + atomic_unchecked_t seqno;
40307 };
40308
40309 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
40310 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
40311 index 4d91795..62fccff 100644
40312 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
40313 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
40314 @@ -251,9 +251,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
40315 * sequence counter given by mac80211.
40316 */
40317 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
40318 - seqno = atomic_add_return(0x10, &intf->seqno);
40319 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
40320 else
40321 - seqno = atomic_read(&intf->seqno);
40322 + seqno = atomic_read_unchecked(&intf->seqno);
40323
40324 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
40325 hdr->seq_ctrl |= cpu_to_le16(seqno);
40326 diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
40327 index e57ee48..541cf6c 100644
40328 --- a/drivers/net/wireless/ti/wl1251/sdio.c
40329 +++ b/drivers/net/wireless/ti/wl1251/sdio.c
40330 @@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
40331
40332 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
40333
40334 - wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40335 - wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40336 + pax_open_kernel();
40337 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40338 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40339 + pax_close_kernel();
40340
40341 wl1251_info("using dedicated interrupt line");
40342 } else {
40343 - wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40344 - wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40345 + pax_open_kernel();
40346 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40347 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40348 + pax_close_kernel();
40349
40350 wl1251_info("using SDIO interrupt");
40351 }
40352 diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
40353 index 09694e3..24ccec7 100644
40354 --- a/drivers/net/wireless/ti/wl12xx/main.c
40355 +++ b/drivers/net/wireless/ti/wl12xx/main.c
40356 @@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40357 sizeof(wl->conf.mem));
40358
40359 /* read data preparation is only needed by wl127x */
40360 - wl->ops->prepare_read = wl127x_prepare_read;
40361 + pax_open_kernel();
40362 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40363 + pax_close_kernel();
40364
40365 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
40366 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
40367 @@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40368 sizeof(wl->conf.mem));
40369
40370 /* read data preparation is only needed by wl127x */
40371 - wl->ops->prepare_read = wl127x_prepare_read;
40372 + pax_open_kernel();
40373 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40374 + pax_close_kernel();
40375
40376 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
40377 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
40378 diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
40379 index da3ef1b..4790b95 100644
40380 --- a/drivers/net/wireless/ti/wl18xx/main.c
40381 +++ b/drivers/net/wireless/ti/wl18xx/main.c
40382 @@ -1664,8 +1664,10 @@ static int wl18xx_setup(struct wl1271 *wl)
40383 }
40384
40385 if (!checksum_param) {
40386 - wl18xx_ops.set_rx_csum = NULL;
40387 - wl18xx_ops.init_vif = NULL;
40388 + pax_open_kernel();
40389 + *(void **)&wl18xx_ops.set_rx_csum = NULL;
40390 + *(void **)&wl18xx_ops.init_vif = NULL;
40391 + pax_close_kernel();
40392 }
40393
40394 /* Enable 11a Band only if we have 5G antennas */
40395 diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
40396 index 7ef0b4a..ff65c28 100644
40397 --- a/drivers/net/wireless/zd1211rw/zd_usb.c
40398 +++ b/drivers/net/wireless/zd1211rw/zd_usb.c
40399 @@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
40400 {
40401 struct zd_usb *usb = urb->context;
40402 struct zd_usb_interrupt *intr = &usb->intr;
40403 - int len;
40404 + unsigned int len;
40405 u16 int_num;
40406
40407 ZD_ASSERT(in_interrupt());
40408 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40409 index d93b2b6..ae50401 100644
40410 --- a/drivers/oprofile/buffer_sync.c
40411 +++ b/drivers/oprofile/buffer_sync.c
40412 @@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40413 if (cookie == NO_COOKIE)
40414 offset = pc;
40415 if (cookie == INVALID_COOKIE) {
40416 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40417 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40418 offset = pc;
40419 }
40420 if (cookie != last_cookie) {
40421 @@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40422 /* add userspace sample */
40423
40424 if (!mm) {
40425 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
40426 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40427 return 0;
40428 }
40429
40430 cookie = lookup_dcookie(mm, s->eip, &offset);
40431
40432 if (cookie == INVALID_COOKIE) {
40433 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40434 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40435 return 0;
40436 }
40437
40438 @@ -552,7 +552,7 @@ void sync_buffer(int cpu)
40439 /* ignore backtraces if failed to add a sample */
40440 if (state == sb_bt_start) {
40441 state = sb_bt_ignore;
40442 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40443 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40444 }
40445 }
40446 release_mm(mm);
40447 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40448 index c0cc4e7..44d4e54 100644
40449 --- a/drivers/oprofile/event_buffer.c
40450 +++ b/drivers/oprofile/event_buffer.c
40451 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40452 }
40453
40454 if (buffer_pos == buffer_size) {
40455 - atomic_inc(&oprofile_stats.event_lost_overflow);
40456 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40457 return;
40458 }
40459
40460 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40461 index ed2c3ec..deda85a 100644
40462 --- a/drivers/oprofile/oprof.c
40463 +++ b/drivers/oprofile/oprof.c
40464 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40465 if (oprofile_ops.switch_events())
40466 return;
40467
40468 - atomic_inc(&oprofile_stats.multiplex_counter);
40469 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40470 start_switch_worker();
40471 }
40472
40473 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
40474 index 84a208d..d61b0a1 100644
40475 --- a/drivers/oprofile/oprofile_files.c
40476 +++ b/drivers/oprofile/oprofile_files.c
40477 @@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
40478
40479 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
40480
40481 -static ssize_t timeout_read(struct file *file, char __user *buf,
40482 +static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
40483 size_t count, loff_t *offset)
40484 {
40485 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
40486 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40487 index 917d28e..d62d981 100644
40488 --- a/drivers/oprofile/oprofile_stats.c
40489 +++ b/drivers/oprofile/oprofile_stats.c
40490 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40491 cpu_buf->sample_invalid_eip = 0;
40492 }
40493
40494 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40495 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40496 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
40497 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40498 - atomic_set(&oprofile_stats.multiplex_counter, 0);
40499 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40500 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40501 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40502 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40503 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40504 }
40505
40506
40507 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40508 index 38b6fc0..b5cbfce 100644
40509 --- a/drivers/oprofile/oprofile_stats.h
40510 +++ b/drivers/oprofile/oprofile_stats.h
40511 @@ -13,11 +13,11 @@
40512 #include <linux/atomic.h>
40513
40514 struct oprofile_stat_struct {
40515 - atomic_t sample_lost_no_mm;
40516 - atomic_t sample_lost_no_mapping;
40517 - atomic_t bt_lost_no_mapping;
40518 - atomic_t event_lost_overflow;
40519 - atomic_t multiplex_counter;
40520 + atomic_unchecked_t sample_lost_no_mm;
40521 + atomic_unchecked_t sample_lost_no_mapping;
40522 + atomic_unchecked_t bt_lost_no_mapping;
40523 + atomic_unchecked_t event_lost_overflow;
40524 + atomic_unchecked_t multiplex_counter;
40525 };
40526
40527 extern struct oprofile_stat_struct oprofile_stats;
40528 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40529 index 7c12d9c..558bf3bb 100644
40530 --- a/drivers/oprofile/oprofilefs.c
40531 +++ b/drivers/oprofile/oprofilefs.c
40532 @@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
40533
40534
40535 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40536 - char const *name, atomic_t *val)
40537 + char const *name, atomic_unchecked_t *val)
40538 {
40539 return __oprofilefs_create_file(sb, root, name,
40540 &atomic_ro_fops, 0444, val);
40541 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
40542 index 93404f7..4a313d8 100644
40543 --- a/drivers/oprofile/timer_int.c
40544 +++ b/drivers/oprofile/timer_int.c
40545 @@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
40546 return NOTIFY_OK;
40547 }
40548
40549 -static struct notifier_block __refdata oprofile_cpu_notifier = {
40550 +static struct notifier_block oprofile_cpu_notifier = {
40551 .notifier_call = oprofile_cpu_notify,
40552 };
40553
40554 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40555 index 3f56bc0..707d642 100644
40556 --- a/drivers/parport/procfs.c
40557 +++ b/drivers/parport/procfs.c
40558 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40559
40560 *ppos += len;
40561
40562 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40563 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40564 }
40565
40566 #ifdef CONFIG_PARPORT_1284
40567 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40568
40569 *ppos += len;
40570
40571 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40572 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40573 }
40574 #endif /* IEEE1284.3 support. */
40575
40576 diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
40577 index c35e8ad..fc33beb 100644
40578 --- a/drivers/pci/hotplug/acpiphp_ibm.c
40579 +++ b/drivers/pci/hotplug/acpiphp_ibm.c
40580 @@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
40581 goto init_cleanup;
40582 }
40583
40584 - ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40585 + pax_open_kernel();
40586 + *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40587 + pax_close_kernel();
40588 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
40589
40590 return retval;
40591 diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
40592 index a6a71c4..c91097b 100644
40593 --- a/drivers/pci/hotplug/cpcihp_generic.c
40594 +++ b/drivers/pci/hotplug/cpcihp_generic.c
40595 @@ -73,7 +73,6 @@ static u16 port;
40596 static unsigned int enum_bit;
40597 static u8 enum_mask;
40598
40599 -static struct cpci_hp_controller_ops generic_hpc_ops;
40600 static struct cpci_hp_controller generic_hpc;
40601
40602 static int __init validate_parameters(void)
40603 @@ -139,6 +138,10 @@ static int query_enum(void)
40604 return ((value & enum_mask) == enum_mask);
40605 }
40606
40607 +static struct cpci_hp_controller_ops generic_hpc_ops = {
40608 + .query_enum = query_enum,
40609 +};
40610 +
40611 static int __init cpcihp_generic_init(void)
40612 {
40613 int status;
40614 @@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
40615 pci_dev_put(dev);
40616
40617 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
40618 - generic_hpc_ops.query_enum = query_enum;
40619 generic_hpc.ops = &generic_hpc_ops;
40620
40621 status = cpci_hp_register_controller(&generic_hpc);
40622 diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
40623 index 449b4bb..257e2e8 100644
40624 --- a/drivers/pci/hotplug/cpcihp_zt5550.c
40625 +++ b/drivers/pci/hotplug/cpcihp_zt5550.c
40626 @@ -59,7 +59,6 @@
40627 /* local variables */
40628 static bool debug;
40629 static bool poll;
40630 -static struct cpci_hp_controller_ops zt5550_hpc_ops;
40631 static struct cpci_hp_controller zt5550_hpc;
40632
40633 /* Primary cPCI bus bridge device */
40634 @@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
40635 return 0;
40636 }
40637
40638 +static struct cpci_hp_controller_ops zt5550_hpc_ops = {
40639 + .query_enum = zt5550_hc_query_enum,
40640 +};
40641 +
40642 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
40643 {
40644 int status;
40645 @@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
40646 dbg("returned from zt5550_hc_config");
40647
40648 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
40649 - zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
40650 zt5550_hpc.ops = &zt5550_hpc_ops;
40651 if(!poll) {
40652 zt5550_hpc.irq = hc_dev->irq;
40653 zt5550_hpc.irq_flags = IRQF_SHARED;
40654 zt5550_hpc.dev_id = hc_dev;
40655
40656 - zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40657 - zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40658 - zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40659 + pax_open_kernel();
40660 + *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40661 + *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40662 + *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40663 + pax_open_kernel();
40664 } else {
40665 info("using ENUM# polling mode");
40666 }
40667 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40668 index 76ba8a1..20ca857 100644
40669 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40670 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40671 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40672
40673 void compaq_nvram_init (void __iomem *rom_start)
40674 {
40675 +
40676 +#ifndef CONFIG_PAX_KERNEXEC
40677 if (rom_start) {
40678 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40679 }
40680 +#endif
40681 +
40682 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40683
40684 /* initialize our int15 lock */
40685 diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
40686 index 202f4a9..8ee47d0 100644
40687 --- a/drivers/pci/hotplug/pci_hotplug_core.c
40688 +++ b/drivers/pci/hotplug/pci_hotplug_core.c
40689 @@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
40690 return -EINVAL;
40691 }
40692
40693 - slot->ops->owner = owner;
40694 - slot->ops->mod_name = mod_name;
40695 + pax_open_kernel();
40696 + *(struct module **)&slot->ops->owner = owner;
40697 + *(const char **)&slot->ops->mod_name = mod_name;
40698 + pax_close_kernel();
40699
40700 mutex_lock(&pci_hp_mutex);
40701 /*
40702 diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
40703 index 7d72c5e..edce02c 100644
40704 --- a/drivers/pci/hotplug/pciehp_core.c
40705 +++ b/drivers/pci/hotplug/pciehp_core.c
40706 @@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
40707 struct slot *slot = ctrl->slot;
40708 struct hotplug_slot *hotplug = NULL;
40709 struct hotplug_slot_info *info = NULL;
40710 - struct hotplug_slot_ops *ops = NULL;
40711 + hotplug_slot_ops_no_const *ops = NULL;
40712 char name[SLOT_NAME_SIZE];
40713 int retval = -ENOMEM;
40714
40715 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
40716 index 9c6e9bb..2916736 100644
40717 --- a/drivers/pci/pci-sysfs.c
40718 +++ b/drivers/pci/pci-sysfs.c
40719 @@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
40720 {
40721 /* allocate attribute structure, piggyback attribute name */
40722 int name_len = write_combine ? 13 : 10;
40723 - struct bin_attribute *res_attr;
40724 + bin_attribute_no_const *res_attr;
40725 int retval;
40726
40727 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
40728 @@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
40729 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
40730 {
40731 int retval;
40732 - struct bin_attribute *attr;
40733 + bin_attribute_no_const *attr;
40734
40735 /* If the device has VPD, try to expose it in sysfs. */
40736 if (dev->vpd) {
40737 @@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
40738 {
40739 int retval;
40740 int rom_size = 0;
40741 - struct bin_attribute *attr;
40742 + bin_attribute_no_const *attr;
40743
40744 if (!sysfs_initialized)
40745 return -EACCES;
40746 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
40747 index 7346ee6..41520eb 100644
40748 --- a/drivers/pci/pci.h
40749 +++ b/drivers/pci/pci.h
40750 @@ -93,7 +93,7 @@ struct pci_vpd_ops {
40751 struct pci_vpd {
40752 unsigned int len;
40753 const struct pci_vpd_ops *ops;
40754 - struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
40755 + bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
40756 };
40757
40758 extern int pci_vpd_pci22_init(struct pci_dev *dev);
40759 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40760 index d320df6..ca9a8f6 100644
40761 --- a/drivers/pci/pcie/aspm.c
40762 +++ b/drivers/pci/pcie/aspm.c
40763 @@ -27,9 +27,9 @@
40764 #define MODULE_PARAM_PREFIX "pcie_aspm."
40765
40766 /* Note: those are not register definitions */
40767 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40768 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40769 -#define ASPM_STATE_L1 (4) /* L1 state */
40770 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40771 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40772 +#define ASPM_STATE_L1 (4U) /* L1 state */
40773 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40774 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40775
40776 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40777 index 5427787..8df273b 100644
40778 --- a/drivers/pci/probe.c
40779 +++ b/drivers/pci/probe.c
40780 @@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
40781 struct pci_bus_region region;
40782 bool bar_too_big = false, bar_disabled = false;
40783
40784 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
40785 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
40786
40787 /* No printks while decoding is disabled! */
40788 if (!dev->mmio_always_on) {
40789 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40790 index 0b00947..64f7c0a 100644
40791 --- a/drivers/pci/proc.c
40792 +++ b/drivers/pci/proc.c
40793 @@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40794 static int __init pci_proc_init(void)
40795 {
40796 struct pci_dev *dev = NULL;
40797 +
40798 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40799 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40800 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40801 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40802 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40803 +#endif
40804 +#else
40805 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40806 +#endif
40807 proc_create("devices", 0, proc_bus_pci_dir,
40808 &proc_bus_pci_dev_operations);
40809 proc_initialized = 1;
40810 diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
40811 index 3e5b4497..dcdfb70 100644
40812 --- a/drivers/platform/x86/chromeos_laptop.c
40813 +++ b/drivers/platform/x86/chromeos_laptop.c
40814 @@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
40815 return 0;
40816 }
40817
40818 -static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
40819 +static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
40820 {
40821 .ident = "Samsung Series 5 550 - Touchpad",
40822 .matches = {
40823 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40824 index 6b22938..bc9700e 100644
40825 --- a/drivers/platform/x86/msi-laptop.c
40826 +++ b/drivers/platform/x86/msi-laptop.c
40827 @@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
40828
40829 if (!quirks->ec_read_only) {
40830 /* allow userland write sysfs file */
40831 - dev_attr_bluetooth.store = store_bluetooth;
40832 - dev_attr_wlan.store = store_wlan;
40833 - dev_attr_threeg.store = store_threeg;
40834 - dev_attr_bluetooth.attr.mode |= S_IWUSR;
40835 - dev_attr_wlan.attr.mode |= S_IWUSR;
40836 - dev_attr_threeg.attr.mode |= S_IWUSR;
40837 + pax_open_kernel();
40838 + *(void **)&dev_attr_bluetooth.store = store_bluetooth;
40839 + *(void **)&dev_attr_wlan.store = store_wlan;
40840 + *(void **)&dev_attr_threeg.store = store_threeg;
40841 + *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
40842 + *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
40843 + *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
40844 + pax_close_kernel();
40845 }
40846
40847 /* disable hardware control by fn key */
40848 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40849 index 14d4dce..b129917 100644
40850 --- a/drivers/platform/x86/sony-laptop.c
40851 +++ b/drivers/platform/x86/sony-laptop.c
40852 @@ -2465,7 +2465,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
40853 }
40854
40855 /* High speed charging function */
40856 -static struct device_attribute *hsc_handle;
40857 +static device_attribute_no_const *hsc_handle;
40858
40859 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
40860 struct device_attribute *attr,
40861 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40862 index edec135..59a24a3 100644
40863 --- a/drivers/platform/x86/thinkpad_acpi.c
40864 +++ b/drivers/platform/x86/thinkpad_acpi.c
40865 @@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
40866 return 0;
40867 }
40868
40869 -void static hotkey_mask_warn_incomplete_mask(void)
40870 +static void hotkey_mask_warn_incomplete_mask(void)
40871 {
40872 /* log only what the user can fix... */
40873 const u32 wantedmask = hotkey_driver_mask &
40874 @@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
40875 }
40876 }
40877
40878 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40879 - struct tp_nvram_state *newn,
40880 - const u32 event_mask)
40881 -{
40882 -
40883 #define TPACPI_COMPARE_KEY(__scancode, __member) \
40884 do { \
40885 if ((event_mask & (1 << __scancode)) && \
40886 @@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40887 tpacpi_hotkey_send_key(__scancode); \
40888 } while (0)
40889
40890 - void issue_volchange(const unsigned int oldvol,
40891 - const unsigned int newvol)
40892 - {
40893 - unsigned int i = oldvol;
40894 +static void issue_volchange(const unsigned int oldvol,
40895 + const unsigned int newvol,
40896 + const u32 event_mask)
40897 +{
40898 + unsigned int i = oldvol;
40899
40900 - while (i > newvol) {
40901 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40902 - i--;
40903 - }
40904 - while (i < newvol) {
40905 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40906 - i++;
40907 - }
40908 + while (i > newvol) {
40909 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40910 + i--;
40911 }
40912 + while (i < newvol) {
40913 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40914 + i++;
40915 + }
40916 +}
40917
40918 - void issue_brightnesschange(const unsigned int oldbrt,
40919 - const unsigned int newbrt)
40920 - {
40921 - unsigned int i = oldbrt;
40922 +static void issue_brightnesschange(const unsigned int oldbrt,
40923 + const unsigned int newbrt,
40924 + const u32 event_mask)
40925 +{
40926 + unsigned int i = oldbrt;
40927
40928 - while (i > newbrt) {
40929 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40930 - i--;
40931 - }
40932 - while (i < newbrt) {
40933 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40934 - i++;
40935 - }
40936 + while (i > newbrt) {
40937 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40938 + i--;
40939 + }
40940 + while (i < newbrt) {
40941 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40942 + i++;
40943 }
40944 +}
40945
40946 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40947 + struct tp_nvram_state *newn,
40948 + const u32 event_mask)
40949 +{
40950 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
40951 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
40952 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
40953 @@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40954 oldn->volume_level != newn->volume_level) {
40955 /* recently muted, or repeated mute keypress, or
40956 * multiple presses ending in mute */
40957 - issue_volchange(oldn->volume_level, newn->volume_level);
40958 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40959 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
40960 }
40961 } else {
40962 @@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40963 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40964 }
40965 if (oldn->volume_level != newn->volume_level) {
40966 - issue_volchange(oldn->volume_level, newn->volume_level);
40967 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40968 } else if (oldn->volume_toggle != newn->volume_toggle) {
40969 /* repeated vol up/down keypress at end of scale ? */
40970 if (newn->volume_level == 0)
40971 @@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40972 /* handle brightness */
40973 if (oldn->brightness_level != newn->brightness_level) {
40974 issue_brightnesschange(oldn->brightness_level,
40975 - newn->brightness_level);
40976 + newn->brightness_level,
40977 + event_mask);
40978 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
40979 /* repeated key presses that didn't change state */
40980 if (newn->brightness_level == 0)
40981 @@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40982 && !tp_features.bright_unkfw)
40983 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40984 }
40985 +}
40986
40987 #undef TPACPI_COMPARE_KEY
40988 #undef TPACPI_MAY_SEND_KEY
40989 -}
40990
40991 /*
40992 * Polling driver
40993 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40994 index 769d265..a3a05ca 100644
40995 --- a/drivers/pnp/pnpbios/bioscalls.c
40996 +++ b/drivers/pnp/pnpbios/bioscalls.c
40997 @@ -58,7 +58,7 @@ do { \
40998 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40999 } while(0)
41000
41001 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41002 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41003 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41004
41005 /*
41006 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41007
41008 cpu = get_cpu();
41009 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41010 +
41011 + pax_open_kernel();
41012 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41013 + pax_close_kernel();
41014
41015 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41016 spin_lock_irqsave(&pnp_bios_lock, flags);
41017 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41018 :"memory");
41019 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41020
41021 + pax_open_kernel();
41022 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41023 + pax_close_kernel();
41024 +
41025 put_cpu();
41026
41027 /* If we get here and this is set then the PnP BIOS faulted on us. */
41028 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41029 return status;
41030 }
41031
41032 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
41033 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41034 {
41035 int i;
41036
41037 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41038 pnp_bios_callpoint.offset = header->fields.pm16offset;
41039 pnp_bios_callpoint.segment = PNP_CS16;
41040
41041 + pax_open_kernel();
41042 +
41043 for_each_possible_cpu(i) {
41044 struct desc_struct *gdt = get_cpu_gdt_table(i);
41045 if (!gdt)
41046 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41047 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41048 (unsigned long)__va(header->fields.pm16dseg));
41049 }
41050 +
41051 + pax_close_kernel();
41052 }
41053 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41054 index 3e6db1c..1fbbdae 100644
41055 --- a/drivers/pnp/resource.c
41056 +++ b/drivers/pnp/resource.c
41057 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41058 return 1;
41059
41060 /* check if the resource is valid */
41061 - if (*irq < 0 || *irq > 15)
41062 + if (*irq > 15)
41063 return 0;
41064
41065 /* check if the resource is reserved */
41066 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41067 return 1;
41068
41069 /* check if the resource is valid */
41070 - if (*dma < 0 || *dma == 4 || *dma > 7)
41071 + if (*dma == 4 || *dma > 7)
41072 return 0;
41073
41074 /* check if the resource is reserved */
41075 diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41076 index 7df7c5f..bd48c47 100644
41077 --- a/drivers/power/pda_power.c
41078 +++ b/drivers/power/pda_power.c
41079 @@ -37,7 +37,11 @@ static int polling;
41080
41081 #ifdef CONFIG_USB_OTG_UTILS
41082 static struct usb_phy *transceiver;
41083 -static struct notifier_block otg_nb;
41084 +static int otg_handle_notification(struct notifier_block *nb,
41085 + unsigned long event, void *unused);
41086 +static struct notifier_block otg_nb = {
41087 + .notifier_call = otg_handle_notification
41088 +};
41089 #endif
41090
41091 static struct regulator *ac_draw;
41092 @@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41093
41094 #ifdef CONFIG_USB_OTG_UTILS
41095 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41096 - otg_nb.notifier_call = otg_handle_notification;
41097 ret = usb_register_notifier(transceiver, &otg_nb);
41098 if (ret) {
41099 dev_err(dev, "failure to register otg notifier\n");
41100 diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41101 index cc439fd..8fa30df 100644
41102 --- a/drivers/power/power_supply.h
41103 +++ b/drivers/power/power_supply.h
41104 @@ -16,12 +16,12 @@ struct power_supply;
41105
41106 #ifdef CONFIG_SYSFS
41107
41108 -extern void power_supply_init_attrs(struct device_type *dev_type);
41109 +extern void power_supply_init_attrs(void);
41110 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41111
41112 #else
41113
41114 -static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41115 +static inline void power_supply_init_attrs(void) {}
41116 #define power_supply_uevent NULL
41117
41118 #endif /* CONFIG_SYSFS */
41119 diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41120 index 5deac43..608c5ff 100644
41121 --- a/drivers/power/power_supply_core.c
41122 +++ b/drivers/power/power_supply_core.c
41123 @@ -24,7 +24,10 @@
41124 struct class *power_supply_class;
41125 EXPORT_SYMBOL_GPL(power_supply_class);
41126
41127 -static struct device_type power_supply_dev_type;
41128 +extern const struct attribute_group *power_supply_attr_groups[];
41129 +static struct device_type power_supply_dev_type = {
41130 + .groups = power_supply_attr_groups,
41131 +};
41132
41133 static int __power_supply_changed_work(struct device *dev, void *data)
41134 {
41135 @@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
41136 return PTR_ERR(power_supply_class);
41137
41138 power_supply_class->dev_uevent = power_supply_uevent;
41139 - power_supply_init_attrs(&power_supply_dev_type);
41140 + power_supply_init_attrs();
41141
41142 return 0;
41143 }
41144 diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41145 index 29178f7..c65f324 100644
41146 --- a/drivers/power/power_supply_sysfs.c
41147 +++ b/drivers/power/power_supply_sysfs.c
41148 @@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
41149 .is_visible = power_supply_attr_is_visible,
41150 };
41151
41152 -static const struct attribute_group *power_supply_attr_groups[] = {
41153 +const struct attribute_group *power_supply_attr_groups[] = {
41154 &power_supply_attr_group,
41155 NULL,
41156 };
41157
41158 -void power_supply_init_attrs(struct device_type *dev_type)
41159 +void power_supply_init_attrs(void)
41160 {
41161 int i;
41162
41163 - dev_type->groups = power_supply_attr_groups;
41164 -
41165 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41166 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41167 }
41168 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41169 index 4d7c635..9860196 100644
41170 --- a/drivers/regulator/max8660.c
41171 +++ b/drivers/regulator/max8660.c
41172 @@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41173 max8660->shadow_regs[MAX8660_OVER1] = 5;
41174 } else {
41175 /* Otherwise devices can be toggled via software */
41176 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
41177 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
41178 + pax_open_kernel();
41179 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41180 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41181 + pax_close_kernel();
41182 }
41183
41184 /*
41185 diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41186 index 9a8ea91..c483dd9 100644
41187 --- a/drivers/regulator/max8973-regulator.c
41188 +++ b/drivers/regulator/max8973-regulator.c
41189 @@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41190 if (!pdata->enable_ext_control) {
41191 max->desc.enable_reg = MAX8973_VOUT;
41192 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41193 - max8973_dcdc_ops.enable = regulator_enable_regmap;
41194 - max8973_dcdc_ops.disable = regulator_disable_regmap;
41195 - max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41196 + pax_open_kernel();
41197 + *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41198 + *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41199 + *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41200 + pax_close_kernel();
41201 }
41202
41203 max->enable_external_control = pdata->enable_ext_control;
41204 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41205 index 9891aec..beb3083 100644
41206 --- a/drivers/regulator/mc13892-regulator.c
41207 +++ b/drivers/regulator/mc13892-regulator.c
41208 @@ -583,10 +583,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41209 }
41210 mc13xxx_unlock(mc13892);
41211
41212 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41213 + pax_open_kernel();
41214 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41215 = mc13892_vcam_set_mode;
41216 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41217 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41218 = mc13892_vcam_get_mode;
41219 + pax_close_kernel();
41220
41221 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41222 ARRAY_SIZE(mc13892_regulators),
41223 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41224 index cc5bea9..689f7d9 100644
41225 --- a/drivers/rtc/rtc-cmos.c
41226 +++ b/drivers/rtc/rtc-cmos.c
41227 @@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
41228 hpet_rtc_timer_init();
41229
41230 /* export at least the first block of NVRAM */
41231 - nvram.size = address_space - NVRAM_OFFSET;
41232 + pax_open_kernel();
41233 + *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
41234 + pax_close_kernel();
41235 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
41236 if (retval < 0) {
41237 dev_dbg(dev, "can't create nvram file? %d\n", retval);
41238 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41239 index d049393..bb20be0 100644
41240 --- a/drivers/rtc/rtc-dev.c
41241 +++ b/drivers/rtc/rtc-dev.c
41242 @@ -16,6 +16,7 @@
41243 #include <linux/module.h>
41244 #include <linux/rtc.h>
41245 #include <linux/sched.h>
41246 +#include <linux/grsecurity.h>
41247 #include "rtc-core.h"
41248
41249 static dev_t rtc_devt;
41250 @@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
41251 if (copy_from_user(&tm, uarg, sizeof(tm)))
41252 return -EFAULT;
41253
41254 + gr_log_timechange();
41255 +
41256 return rtc_set_time(rtc, &tm);
41257
41258 case RTC_PIE_ON:
41259 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
41260 index 970a236..3613169 100644
41261 --- a/drivers/rtc/rtc-ds1307.c
41262 +++ b/drivers/rtc/rtc-ds1307.c
41263 @@ -106,7 +106,7 @@ struct ds1307 {
41264 u8 offset; /* register's offset */
41265 u8 regs[11];
41266 u16 nvram_offset;
41267 - struct bin_attribute *nvram;
41268 + bin_attribute_no_const *nvram;
41269 enum ds_type type;
41270 unsigned long flags;
41271 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
41272 diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
41273 index 130f29a..6179d03 100644
41274 --- a/drivers/rtc/rtc-m48t59.c
41275 +++ b/drivers/rtc/rtc-m48t59.c
41276 @@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
41277 goto out;
41278 }
41279
41280 - m48t59_nvram_attr.size = pdata->offset;
41281 + pax_open_kernel();
41282 + *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
41283 + pax_close_kernel();
41284
41285 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
41286 if (ret) {
41287 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
41288 index e693af6..2e525b6 100644
41289 --- a/drivers/scsi/bfa/bfa_fcpim.h
41290 +++ b/drivers/scsi/bfa/bfa_fcpim.h
41291 @@ -36,7 +36,7 @@ struct bfa_iotag_s {
41292
41293 struct bfa_itn_s {
41294 bfa_isr_func_t isr;
41295 -};
41296 +} __no_const;
41297
41298 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
41299 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
41300 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41301 index 23a90e7..9cf04ee 100644
41302 --- a/drivers/scsi/bfa/bfa_ioc.h
41303 +++ b/drivers/scsi/bfa/bfa_ioc.h
41304 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
41305 bfa_ioc_disable_cbfn_t disable_cbfn;
41306 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41307 bfa_ioc_reset_cbfn_t reset_cbfn;
41308 -};
41309 +} __no_const;
41310
41311 /*
41312 * IOC event notification mechanism.
41313 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
41314 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
41315 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
41316 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
41317 -};
41318 +} __no_const;
41319
41320 /*
41321 * Queue element to wait for room in request queue. FIFO order is
41322 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41323 index df0c3c7..b00e1d0 100644
41324 --- a/drivers/scsi/hosts.c
41325 +++ b/drivers/scsi/hosts.c
41326 @@ -42,7 +42,7 @@
41327 #include "scsi_logging.h"
41328
41329
41330 -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41331 +static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41332
41333
41334 static void scsi_host_cls_release(struct device *dev)
41335 @@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41336 * subtract one because we increment first then return, but we need to
41337 * know what the next host number was before increment
41338 */
41339 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41340 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41341 shost->dma_channel = 0xff;
41342
41343 /* These three are default values which can be overridden */
41344 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
41345 index 7f4f790..b75b92a 100644
41346 --- a/drivers/scsi/hpsa.c
41347 +++ b/drivers/scsi/hpsa.c
41348 @@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
41349 unsigned long flags;
41350
41351 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
41352 - return h->access.command_completed(h, q);
41353 + return h->access->command_completed(h, q);
41354
41355 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
41356 a = rq->head[rq->current_entry];
41357 @@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
41358 while (!list_empty(&h->reqQ)) {
41359 c = list_entry(h->reqQ.next, struct CommandList, list);
41360 /* can't do anything if fifo is full */
41361 - if ((h->access.fifo_full(h))) {
41362 + if ((h->access->fifo_full(h))) {
41363 dev_warn(&h->pdev->dev, "fifo full\n");
41364 break;
41365 }
41366 @@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
41367
41368 /* Tell the controller execute command */
41369 spin_unlock_irqrestore(&h->lock, flags);
41370 - h->access.submit_command(h, c);
41371 + h->access->submit_command(h, c);
41372 spin_lock_irqsave(&h->lock, flags);
41373 }
41374 spin_unlock_irqrestore(&h->lock, flags);
41375 @@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
41376
41377 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
41378 {
41379 - return h->access.command_completed(h, q);
41380 + return h->access->command_completed(h, q);
41381 }
41382
41383 static inline bool interrupt_pending(struct ctlr_info *h)
41384 {
41385 - return h->access.intr_pending(h);
41386 + return h->access->intr_pending(h);
41387 }
41388
41389 static inline long interrupt_not_for_us(struct ctlr_info *h)
41390 {
41391 - return (h->access.intr_pending(h) == 0) ||
41392 + return (h->access->intr_pending(h) == 0) ||
41393 (h->interrupts_enabled == 0);
41394 }
41395
41396 @@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
41397 if (prod_index < 0)
41398 return -ENODEV;
41399 h->product_name = products[prod_index].product_name;
41400 - h->access = *(products[prod_index].access);
41401 + h->access = products[prod_index].access;
41402
41403 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
41404 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
41405 @@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
41406
41407 assert_spin_locked(&lockup_detector_lock);
41408 remove_ctlr_from_lockup_detector_list(h);
41409 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
41410 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
41411 spin_lock_irqsave(&h->lock, flags);
41412 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
41413 spin_unlock_irqrestore(&h->lock, flags);
41414 @@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
41415 }
41416
41417 /* make sure the board interrupts are off */
41418 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
41419 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
41420
41421 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
41422 goto clean2;
41423 @@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
41424 * fake ones to scoop up any residual completions.
41425 */
41426 spin_lock_irqsave(&h->lock, flags);
41427 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
41428 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
41429 spin_unlock_irqrestore(&h->lock, flags);
41430 free_irqs(h);
41431 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
41432 @@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
41433 dev_info(&h->pdev->dev, "Board READY.\n");
41434 dev_info(&h->pdev->dev,
41435 "Waiting for stale completions to drain.\n");
41436 - h->access.set_intr_mask(h, HPSA_INTR_ON);
41437 + h->access->set_intr_mask(h, HPSA_INTR_ON);
41438 msleep(10000);
41439 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
41440 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
41441
41442 rc = controller_reset_failed(h->cfgtable);
41443 if (rc)
41444 @@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
41445 }
41446
41447 /* Turn the interrupts on so we can service requests */
41448 - h->access.set_intr_mask(h, HPSA_INTR_ON);
41449 + h->access->set_intr_mask(h, HPSA_INTR_ON);
41450
41451 hpsa_hba_inquiry(h);
41452 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
41453 @@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
41454 * To write all data in the battery backed cache to disks
41455 */
41456 hpsa_flush_cache(h);
41457 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
41458 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
41459 hpsa_free_irqs_and_disable_msix(h);
41460 }
41461
41462 @@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
41463 return;
41464 }
41465 /* Change the access methods to the performant access methods */
41466 - h->access = SA5_performant_access;
41467 + h->access = &SA5_performant_access;
41468 h->transMethod = CFGTBL_Trans_Performant;
41469 }
41470
41471 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
41472 index 9816479..c5d4e97 100644
41473 --- a/drivers/scsi/hpsa.h
41474 +++ b/drivers/scsi/hpsa.h
41475 @@ -79,7 +79,7 @@ struct ctlr_info {
41476 unsigned int msix_vector;
41477 unsigned int msi_vector;
41478 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
41479 - struct access_method access;
41480 + struct access_method *access;
41481
41482 /* queue and queue Info */
41483 struct list_head reqQ;
41484 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41485 index c772d8d..35c362c 100644
41486 --- a/drivers/scsi/libfc/fc_exch.c
41487 +++ b/drivers/scsi/libfc/fc_exch.c
41488 @@ -100,12 +100,12 @@ struct fc_exch_mgr {
41489 u16 pool_max_index;
41490
41491 struct {
41492 - atomic_t no_free_exch;
41493 - atomic_t no_free_exch_xid;
41494 - atomic_t xid_not_found;
41495 - atomic_t xid_busy;
41496 - atomic_t seq_not_found;
41497 - atomic_t non_bls_resp;
41498 + atomic_unchecked_t no_free_exch;
41499 + atomic_unchecked_t no_free_exch_xid;
41500 + atomic_unchecked_t xid_not_found;
41501 + atomic_unchecked_t xid_busy;
41502 + atomic_unchecked_t seq_not_found;
41503 + atomic_unchecked_t non_bls_resp;
41504 } stats;
41505 };
41506
41507 @@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41508 /* allocate memory for exchange */
41509 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41510 if (!ep) {
41511 - atomic_inc(&mp->stats.no_free_exch);
41512 + atomic_inc_unchecked(&mp->stats.no_free_exch);
41513 goto out;
41514 }
41515 memset(ep, 0, sizeof(*ep));
41516 @@ -786,7 +786,7 @@ out:
41517 return ep;
41518 err:
41519 spin_unlock_bh(&pool->lock);
41520 - atomic_inc(&mp->stats.no_free_exch_xid);
41521 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41522 mempool_free(ep, mp->ep_pool);
41523 return NULL;
41524 }
41525 @@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41526 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41527 ep = fc_exch_find(mp, xid);
41528 if (!ep) {
41529 - atomic_inc(&mp->stats.xid_not_found);
41530 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41531 reject = FC_RJT_OX_ID;
41532 goto out;
41533 }
41534 @@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41535 ep = fc_exch_find(mp, xid);
41536 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41537 if (ep) {
41538 - atomic_inc(&mp->stats.xid_busy);
41539 + atomic_inc_unchecked(&mp->stats.xid_busy);
41540 reject = FC_RJT_RX_ID;
41541 goto rel;
41542 }
41543 @@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41544 }
41545 xid = ep->xid; /* get our XID */
41546 } else if (!ep) {
41547 - atomic_inc(&mp->stats.xid_not_found);
41548 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41549 reject = FC_RJT_RX_ID; /* XID not found */
41550 goto out;
41551 }
41552 @@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41553 } else {
41554 sp = &ep->seq;
41555 if (sp->id != fh->fh_seq_id) {
41556 - atomic_inc(&mp->stats.seq_not_found);
41557 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41558 if (f_ctl & FC_FC_END_SEQ) {
41559 /*
41560 * Update sequence_id based on incoming last
41561 @@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41562
41563 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41564 if (!ep) {
41565 - atomic_inc(&mp->stats.xid_not_found);
41566 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41567 goto out;
41568 }
41569 if (ep->esb_stat & ESB_ST_COMPLETE) {
41570 - atomic_inc(&mp->stats.xid_not_found);
41571 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41572 goto rel;
41573 }
41574 if (ep->rxid == FC_XID_UNKNOWN)
41575 ep->rxid = ntohs(fh->fh_rx_id);
41576 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41577 - atomic_inc(&mp->stats.xid_not_found);
41578 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41579 goto rel;
41580 }
41581 if (ep->did != ntoh24(fh->fh_s_id) &&
41582 ep->did != FC_FID_FLOGI) {
41583 - atomic_inc(&mp->stats.xid_not_found);
41584 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41585 goto rel;
41586 }
41587 sof = fr_sof(fp);
41588 @@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41589 sp->ssb_stat |= SSB_ST_RESP;
41590 sp->id = fh->fh_seq_id;
41591 } else if (sp->id != fh->fh_seq_id) {
41592 - atomic_inc(&mp->stats.seq_not_found);
41593 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41594 goto rel;
41595 }
41596
41597 @@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41598 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41599
41600 if (!sp)
41601 - atomic_inc(&mp->stats.xid_not_found);
41602 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41603 else
41604 - atomic_inc(&mp->stats.non_bls_resp);
41605 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41606
41607 fc_frame_free(fp);
41608 }
41609 @@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
41610
41611 list_for_each_entry(ema, &lport->ema_list, ema_list) {
41612 mp = ema->mp;
41613 - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
41614 + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
41615 st->fc_no_free_exch_xid +=
41616 - atomic_read(&mp->stats.no_free_exch_xid);
41617 - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
41618 - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
41619 - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
41620 - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
41621 + atomic_read_unchecked(&mp->stats.no_free_exch_xid);
41622 + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
41623 + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
41624 + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
41625 + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
41626 }
41627 }
41628 EXPORT_SYMBOL(fc_exch_update_stats);
41629 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41630 index bdb81cd..d3c7c2c 100644
41631 --- a/drivers/scsi/libsas/sas_ata.c
41632 +++ b/drivers/scsi/libsas/sas_ata.c
41633 @@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
41634 .postreset = ata_std_postreset,
41635 .error_handler = ata_std_error_handler,
41636 .post_internal_cmd = sas_ata_post_internal,
41637 - .qc_defer = ata_std_qc_defer,
41638 + .qc_defer = ata_std_qc_defer,
41639 .qc_prep = ata_noop_qc_prep,
41640 .qc_issue = sas_ata_qc_issue,
41641 .qc_fill_rtf = sas_ata_qc_fill_rtf,
41642 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41643 index 7706c99..3b4fc0c 100644
41644 --- a/drivers/scsi/lpfc/lpfc.h
41645 +++ b/drivers/scsi/lpfc/lpfc.h
41646 @@ -424,7 +424,7 @@ struct lpfc_vport {
41647 struct dentry *debug_nodelist;
41648 struct dentry *vport_debugfs_root;
41649 struct lpfc_debugfs_trc *disc_trc;
41650 - atomic_t disc_trc_cnt;
41651 + atomic_unchecked_t disc_trc_cnt;
41652 #endif
41653 uint8_t stat_data_enabled;
41654 uint8_t stat_data_blocked;
41655 @@ -853,8 +853,8 @@ struct lpfc_hba {
41656 struct timer_list fabric_block_timer;
41657 unsigned long bit_flags;
41658 #define FABRIC_COMANDS_BLOCKED 0
41659 - atomic_t num_rsrc_err;
41660 - atomic_t num_cmd_success;
41661 + atomic_unchecked_t num_rsrc_err;
41662 + atomic_unchecked_t num_cmd_success;
41663 unsigned long last_rsrc_error_time;
41664 unsigned long last_ramp_down_time;
41665 unsigned long last_ramp_up_time;
41666 @@ -890,7 +890,7 @@ struct lpfc_hba {
41667
41668 struct dentry *debug_slow_ring_trc;
41669 struct lpfc_debugfs_trc *slow_ring_trc;
41670 - atomic_t slow_ring_trc_cnt;
41671 + atomic_unchecked_t slow_ring_trc_cnt;
41672 /* iDiag debugfs sub-directory */
41673 struct dentry *idiag_root;
41674 struct dentry *idiag_pci_cfg;
41675 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41676 index f63f5ff..de29189 100644
41677 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41678 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41679 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
41680
41681 #include <linux/debugfs.h>
41682
41683 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41684 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41685 static unsigned long lpfc_debugfs_start_time = 0L;
41686
41687 /* iDiag */
41688 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41689 lpfc_debugfs_enable = 0;
41690
41691 len = 0;
41692 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41693 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41694 (lpfc_debugfs_max_disc_trc - 1);
41695 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41696 dtp = vport->disc_trc + i;
41697 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41698 lpfc_debugfs_enable = 0;
41699
41700 len = 0;
41701 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41702 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41703 (lpfc_debugfs_max_slow_ring_trc - 1);
41704 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41705 dtp = phba->slow_ring_trc + i;
41706 @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41707 !vport || !vport->disc_trc)
41708 return;
41709
41710 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41711 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41712 (lpfc_debugfs_max_disc_trc - 1);
41713 dtp = vport->disc_trc + index;
41714 dtp->fmt = fmt;
41715 dtp->data1 = data1;
41716 dtp->data2 = data2;
41717 dtp->data3 = data3;
41718 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41719 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41720 dtp->jif = jiffies;
41721 #endif
41722 return;
41723 @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41724 !phba || !phba->slow_ring_trc)
41725 return;
41726
41727 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41728 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41729 (lpfc_debugfs_max_slow_ring_trc - 1);
41730 dtp = phba->slow_ring_trc + index;
41731 dtp->fmt = fmt;
41732 dtp->data1 = data1;
41733 dtp->data2 = data2;
41734 dtp->data3 = data3;
41735 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41736 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41737 dtp->jif = jiffies;
41738 #endif
41739 return;
41740 @@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41741 "slow_ring buffer\n");
41742 goto debug_failed;
41743 }
41744 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41745 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41746 memset(phba->slow_ring_trc, 0,
41747 (sizeof(struct lpfc_debugfs_trc) *
41748 lpfc_debugfs_max_slow_ring_trc));
41749 @@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41750 "buffer\n");
41751 goto debug_failed;
41752 }
41753 - atomic_set(&vport->disc_trc_cnt, 0);
41754 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41755
41756 snprintf(name, sizeof(name), "discovery_trace");
41757 vport->debug_disc_trc =
41758 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41759 index 314b4f6..7005d10 100644
41760 --- a/drivers/scsi/lpfc/lpfc_init.c
41761 +++ b/drivers/scsi/lpfc/lpfc_init.c
41762 @@ -10551,8 +10551,10 @@ lpfc_init(void)
41763 "misc_register returned with status %d", error);
41764
41765 if (lpfc_enable_npiv) {
41766 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41767 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41768 + pax_open_kernel();
41769 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41770 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41771 + pax_close_kernel();
41772 }
41773 lpfc_transport_template =
41774 fc_attach_transport(&lpfc_transport_functions);
41775 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41776 index 98af07c..7625fb5 100644
41777 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41778 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41779 @@ -325,7 +325,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41780 uint32_t evt_posted;
41781
41782 spin_lock_irqsave(&phba->hbalock, flags);
41783 - atomic_inc(&phba->num_rsrc_err);
41784 + atomic_inc_unchecked(&phba->num_rsrc_err);
41785 phba->last_rsrc_error_time = jiffies;
41786
41787 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41788 @@ -366,7 +366,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41789 unsigned long flags;
41790 struct lpfc_hba *phba = vport->phba;
41791 uint32_t evt_posted;
41792 - atomic_inc(&phba->num_cmd_success);
41793 + atomic_inc_unchecked(&phba->num_cmd_success);
41794
41795 if (vport->cfg_lun_queue_depth <= queue_depth)
41796 return;
41797 @@ -410,8 +410,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41798 unsigned long num_rsrc_err, num_cmd_success;
41799 int i;
41800
41801 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41802 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41803 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41804 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41805
41806 /*
41807 * The error and success command counters are global per
41808 @@ -439,8 +439,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41809 }
41810 }
41811 lpfc_destroy_vport_work_array(phba, vports);
41812 - atomic_set(&phba->num_rsrc_err, 0);
41813 - atomic_set(&phba->num_cmd_success, 0);
41814 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41815 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41816 }
41817
41818 /**
41819 @@ -474,8 +474,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41820 }
41821 }
41822 lpfc_destroy_vport_work_array(phba, vports);
41823 - atomic_set(&phba->num_rsrc_err, 0);
41824 - atomic_set(&phba->num_cmd_success, 0);
41825 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41826 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41827 }
41828
41829 /**
41830 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41831 index b46f5e9..c4c4ccb 100644
41832 --- a/drivers/scsi/pmcraid.c
41833 +++ b/drivers/scsi/pmcraid.c
41834 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41835 res->scsi_dev = scsi_dev;
41836 scsi_dev->hostdata = res;
41837 res->change_detected = 0;
41838 - atomic_set(&res->read_failures, 0);
41839 - atomic_set(&res->write_failures, 0);
41840 + atomic_set_unchecked(&res->read_failures, 0);
41841 + atomic_set_unchecked(&res->write_failures, 0);
41842 rc = 0;
41843 }
41844 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41845 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41846
41847 /* If this was a SCSI read/write command keep count of errors */
41848 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41849 - atomic_inc(&res->read_failures);
41850 + atomic_inc_unchecked(&res->read_failures);
41851 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41852 - atomic_inc(&res->write_failures);
41853 + atomic_inc_unchecked(&res->write_failures);
41854
41855 if (!RES_IS_GSCSI(res->cfg_entry) &&
41856 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41857 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
41858 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41859 * hrrq_id assigned here in queuecommand
41860 */
41861 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41862 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41863 pinstance->num_hrrq;
41864 cmd->cmd_done = pmcraid_io_done;
41865
41866 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
41867 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41868 * hrrq_id assigned here in queuecommand
41869 */
41870 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41871 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41872 pinstance->num_hrrq;
41873
41874 if (request_size) {
41875 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41876
41877 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41878 /* add resources only after host is added into system */
41879 - if (!atomic_read(&pinstance->expose_resources))
41880 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41881 return;
41882
41883 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
41884 @@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
41885 init_waitqueue_head(&pinstance->reset_wait_q);
41886
41887 atomic_set(&pinstance->outstanding_cmds, 0);
41888 - atomic_set(&pinstance->last_message_id, 0);
41889 - atomic_set(&pinstance->expose_resources, 0);
41890 + atomic_set_unchecked(&pinstance->last_message_id, 0);
41891 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41892
41893 INIT_LIST_HEAD(&pinstance->free_res_q);
41894 INIT_LIST_HEAD(&pinstance->used_res_q);
41895 @@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
41896 /* Schedule worker thread to handle CCN and take care of adding and
41897 * removing devices to OS
41898 */
41899 - atomic_set(&pinstance->expose_resources, 1);
41900 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41901 schedule_work(&pinstance->worker_q);
41902 return rc;
41903
41904 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41905 index e1d150f..6c6df44 100644
41906 --- a/drivers/scsi/pmcraid.h
41907 +++ b/drivers/scsi/pmcraid.h
41908 @@ -748,7 +748,7 @@ struct pmcraid_instance {
41909 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
41910
41911 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
41912 - atomic_t last_message_id;
41913 + atomic_unchecked_t last_message_id;
41914
41915 /* configuration table */
41916 struct pmcraid_config_table *cfg_table;
41917 @@ -777,7 +777,7 @@ struct pmcraid_instance {
41918 atomic_t outstanding_cmds;
41919
41920 /* should add/delete resources to mid-layer now ?*/
41921 - atomic_t expose_resources;
41922 + atomic_unchecked_t expose_resources;
41923
41924
41925
41926 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
41927 struct pmcraid_config_table_entry_ext cfg_entry_ext;
41928 };
41929 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41930 - atomic_t read_failures; /* count of failed READ commands */
41931 - atomic_t write_failures; /* count of failed WRITE commands */
41932 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41933 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41934
41935 /* To indicate add/delete/modify during CCN */
41936 u8 change_detected;
41937 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
41938 index b3db9dc..c3b1756 100644
41939 --- a/drivers/scsi/qla2xxx/qla_attr.c
41940 +++ b/drivers/scsi/qla2xxx/qla_attr.c
41941 @@ -1971,7 +1971,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
41942 return 0;
41943 }
41944
41945 -struct fc_function_template qla2xxx_transport_functions = {
41946 +fc_function_template_no_const qla2xxx_transport_functions = {
41947
41948 .show_host_node_name = 1,
41949 .show_host_port_name = 1,
41950 @@ -2018,7 +2018,7 @@ struct fc_function_template qla2xxx_transport_functions = {
41951 .bsg_timeout = qla24xx_bsg_timeout,
41952 };
41953
41954 -struct fc_function_template qla2xxx_transport_vport_functions = {
41955 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
41956
41957 .show_host_node_name = 1,
41958 .show_host_port_name = 1,
41959 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
41960 index b310fa9..b9b3944 100644
41961 --- a/drivers/scsi/qla2xxx/qla_gbl.h
41962 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
41963 @@ -523,8 +523,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
41964 struct device_attribute;
41965 extern struct device_attribute *qla2x00_host_attrs[];
41966 struct fc_function_template;
41967 -extern struct fc_function_template qla2xxx_transport_functions;
41968 -extern struct fc_function_template qla2xxx_transport_vport_functions;
41969 +extern fc_function_template_no_const qla2xxx_transport_functions;
41970 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
41971 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
41972 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
41973 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
41974 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
41975 index 2c6dd3d..e5ecd82 100644
41976 --- a/drivers/scsi/qla2xxx/qla_os.c
41977 +++ b/drivers/scsi/qla2xxx/qla_os.c
41978 @@ -1554,8 +1554,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
41979 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
41980 /* Ok, a 64bit DMA mask is applicable. */
41981 ha->flags.enable_64bit_addressing = 1;
41982 - ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41983 - ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41984 + pax_open_kernel();
41985 + *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41986 + *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41987 + pax_close_kernel();
41988 return;
41989 }
41990 }
41991 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41992 index 129f5dd..ade53e8 100644
41993 --- a/drivers/scsi/qla4xxx/ql4_def.h
41994 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41995 @@ -275,7 +275,7 @@ struct ddb_entry {
41996 * (4000 only) */
41997 atomic_t relogin_timer; /* Max Time to wait for
41998 * relogin to complete */
41999 - atomic_t relogin_retry_count; /* Num of times relogin has been
42000 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42001 * retried */
42002 uint32_t default_time2wait; /* Default Min time between
42003 * relogins (+aens) */
42004 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42005 index 6142729..b6a85c9 100644
42006 --- a/drivers/scsi/qla4xxx/ql4_os.c
42007 +++ b/drivers/scsi/qla4xxx/ql4_os.c
42008 @@ -2622,12 +2622,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42009 */
42010 if (!iscsi_is_session_online(cls_sess)) {
42011 /* Reset retry relogin timer */
42012 - atomic_inc(&ddb_entry->relogin_retry_count);
42013 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42014 DEBUG2(ql4_printk(KERN_INFO, ha,
42015 "%s: index[%d] relogin timed out-retrying"
42016 " relogin (%d), retry (%d)\n", __func__,
42017 ddb_entry->fw_ddb_index,
42018 - atomic_read(&ddb_entry->relogin_retry_count),
42019 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42020 ddb_entry->default_time2wait + 4));
42021 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42022 atomic_set(&ddb_entry->retry_relogin_timer,
42023 @@ -4742,7 +4742,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42024
42025 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42026 atomic_set(&ddb_entry->relogin_timer, 0);
42027 - atomic_set(&ddb_entry->relogin_retry_count, 0);
42028 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42029 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42030 ddb_entry->default_relogin_timeout =
42031 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42032 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42033 index 2c0d0ec..4e8681a 100644
42034 --- a/drivers/scsi/scsi.c
42035 +++ b/drivers/scsi/scsi.c
42036 @@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42037 unsigned long timeout;
42038 int rtn = 0;
42039
42040 - atomic_inc(&cmd->device->iorequest_cnt);
42041 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42042
42043 /* check if the device is still usable */
42044 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42045 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42046 index c31187d..0ead8c3 100644
42047 --- a/drivers/scsi/scsi_lib.c
42048 +++ b/drivers/scsi/scsi_lib.c
42049 @@ -1459,7 +1459,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42050 shost = sdev->host;
42051 scsi_init_cmd_errh(cmd);
42052 cmd->result = DID_NO_CONNECT << 16;
42053 - atomic_inc(&cmd->device->iorequest_cnt);
42054 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42055
42056 /*
42057 * SCSI request completion path will do scsi_device_unbusy(),
42058 @@ -1485,9 +1485,9 @@ static void scsi_softirq_done(struct request *rq)
42059
42060 INIT_LIST_HEAD(&cmd->eh_entry);
42061
42062 - atomic_inc(&cmd->device->iodone_cnt);
42063 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
42064 if (cmd->result)
42065 - atomic_inc(&cmd->device->ioerr_cnt);
42066 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42067
42068 disposition = scsi_decide_disposition(cmd);
42069 if (disposition != SUCCESS &&
42070 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42071 index 931a7d9..0c2a754 100644
42072 --- a/drivers/scsi/scsi_sysfs.c
42073 +++ b/drivers/scsi/scsi_sysfs.c
42074 @@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42075 char *buf) \
42076 { \
42077 struct scsi_device *sdev = to_scsi_device(dev); \
42078 - unsigned long long count = atomic_read(&sdev->field); \
42079 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
42080 return snprintf(buf, 20, "0x%llx\n", count); \
42081 } \
42082 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42083 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42084 index 84a1fdf..693b0d6 100644
42085 --- a/drivers/scsi/scsi_tgt_lib.c
42086 +++ b/drivers/scsi/scsi_tgt_lib.c
42087 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42088 int err;
42089
42090 dprintk("%lx %u\n", uaddr, len);
42091 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42092 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42093 if (err) {
42094 /*
42095 * TODO: need to fixup sg_tablesize, max_segment_size,
42096 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42097 index e894ca7..de9d7660 100644
42098 --- a/drivers/scsi/scsi_transport_fc.c
42099 +++ b/drivers/scsi/scsi_transport_fc.c
42100 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42101 * Netlink Infrastructure
42102 */
42103
42104 -static atomic_t fc_event_seq;
42105 +static atomic_unchecked_t fc_event_seq;
42106
42107 /**
42108 * fc_get_event_number - Obtain the next sequential FC event number
42109 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
42110 u32
42111 fc_get_event_number(void)
42112 {
42113 - return atomic_add_return(1, &fc_event_seq);
42114 + return atomic_add_return_unchecked(1, &fc_event_seq);
42115 }
42116 EXPORT_SYMBOL(fc_get_event_number);
42117
42118 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
42119 {
42120 int error;
42121
42122 - atomic_set(&fc_event_seq, 0);
42123 + atomic_set_unchecked(&fc_event_seq, 0);
42124
42125 error = transport_class_register(&fc_host_class);
42126 if (error)
42127 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42128 char *cp;
42129
42130 *val = simple_strtoul(buf, &cp, 0);
42131 - if ((*cp && (*cp != '\n')) || (*val < 0))
42132 + if (*cp && (*cp != '\n'))
42133 return -EINVAL;
42134 /*
42135 * Check for overflow; dev_loss_tmo is u32
42136 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42137 index 0a74b97..fa8d648 100644
42138 --- a/drivers/scsi/scsi_transport_iscsi.c
42139 +++ b/drivers/scsi/scsi_transport_iscsi.c
42140 @@ -79,7 +79,7 @@ struct iscsi_internal {
42141 struct transport_container session_cont;
42142 };
42143
42144 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42145 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42146 static struct workqueue_struct *iscsi_eh_timer_workq;
42147
42148 static DEFINE_IDA(iscsi_sess_ida);
42149 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42150 int err;
42151
42152 ihost = shost->shost_data;
42153 - session->sid = atomic_add_return(1, &iscsi_session_nr);
42154 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42155
42156 if (target_id == ISCSI_MAX_TARGET) {
42157 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42158 @@ -2955,7 +2955,7 @@ static __init int iscsi_transport_init(void)
42159 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42160 ISCSI_TRANSPORT_VERSION);
42161
42162 - atomic_set(&iscsi_session_nr, 0);
42163 + atomic_set_unchecked(&iscsi_session_nr, 0);
42164
42165 err = class_register(&iscsi_transport_class);
42166 if (err)
42167 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42168 index f379c7f..e8fc69c 100644
42169 --- a/drivers/scsi/scsi_transport_srp.c
42170 +++ b/drivers/scsi/scsi_transport_srp.c
42171 @@ -33,7 +33,7 @@
42172 #include "scsi_transport_srp_internal.h"
42173
42174 struct srp_host_attrs {
42175 - atomic_t next_port_id;
42176 + atomic_unchecked_t next_port_id;
42177 };
42178 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42179
42180 @@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42181 struct Scsi_Host *shost = dev_to_shost(dev);
42182 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42183
42184 - atomic_set(&srp_host->next_port_id, 0);
42185 + atomic_set_unchecked(&srp_host->next_port_id, 0);
42186 return 0;
42187 }
42188
42189 @@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42190 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42191 rport->roles = ids->roles;
42192
42193 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42194 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42195 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42196
42197 transport_setup_device(&rport->dev);
42198 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42199 index 7992635..609faf8 100644
42200 --- a/drivers/scsi/sd.c
42201 +++ b/drivers/scsi/sd.c
42202 @@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
42203 sdkp->disk = gd;
42204 sdkp->index = index;
42205 atomic_set(&sdkp->openers, 0);
42206 - atomic_set(&sdkp->device->ioerr_cnt, 0);
42207 + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42208
42209 if (!sdp->request_queue->rq_timeout) {
42210 if (sdp->type != TYPE_MOD)
42211 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42212 index 9f0c465..47194ee 100644
42213 --- a/drivers/scsi/sg.c
42214 +++ b/drivers/scsi/sg.c
42215 @@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42216 sdp->disk->disk_name,
42217 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42218 NULL,
42219 - (char *)arg);
42220 + (char __user *)arg);
42221 case BLKTRACESTART:
42222 return blk_trace_startstop(sdp->device->request_queue, 1);
42223 case BLKTRACESTOP:
42224 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42225 index 004b10f..7c98d51 100644
42226 --- a/drivers/spi/spi.c
42227 +++ b/drivers/spi/spi.c
42228 @@ -1620,7 +1620,7 @@ int spi_bus_unlock(struct spi_master *master)
42229 EXPORT_SYMBOL_GPL(spi_bus_unlock);
42230
42231 /* portable code must never pass more than 32 bytes */
42232 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42233 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
42234
42235 static u8 *buf;
42236
42237 diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
42238 index 93af756..a4bc5bf 100644
42239 --- a/drivers/staging/iio/iio_hwmon.c
42240 +++ b/drivers/staging/iio/iio_hwmon.c
42241 @@ -67,7 +67,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42242 {
42243 struct device *dev = &pdev->dev;
42244 struct iio_hwmon_state *st;
42245 - struct sensor_device_attribute *a;
42246 + sensor_device_attribute_no_const *a;
42247 int ret, i;
42248 int in_i = 1, temp_i = 1, curr_i = 1;
42249 enum iio_chan_type type;
42250 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42251 index 34afc16..ffe44dd 100644
42252 --- a/drivers/staging/octeon/ethernet-rx.c
42253 +++ b/drivers/staging/octeon/ethernet-rx.c
42254 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42255 /* Increment RX stats for virtual ports */
42256 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42257 #ifdef CONFIG_64BIT
42258 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42259 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42260 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42261 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42262 #else
42263 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42264 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42265 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42266 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42267 #endif
42268 }
42269 netif_receive_skb(skb);
42270 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42271 dev->name);
42272 */
42273 #ifdef CONFIG_64BIT
42274 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42275 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42276 #else
42277 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42278 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
42279 #endif
42280 dev_kfree_skb_irq(skb);
42281 }
42282 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42283 index c3a90e7..023619a 100644
42284 --- a/drivers/staging/octeon/ethernet.c
42285 +++ b/drivers/staging/octeon/ethernet.c
42286 @@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42287 * since the RX tasklet also increments it.
42288 */
42289 #ifdef CONFIG_64BIT
42290 - atomic64_add(rx_status.dropped_packets,
42291 - (atomic64_t *)&priv->stats.rx_dropped);
42292 + atomic64_add_unchecked(rx_status.dropped_packets,
42293 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42294 #else
42295 - atomic_add(rx_status.dropped_packets,
42296 - (atomic_t *)&priv->stats.rx_dropped);
42297 + atomic_add_unchecked(rx_status.dropped_packets,
42298 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42299 #endif
42300 }
42301
42302 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
42303 index dc23395..cf7e9b1 100644
42304 --- a/drivers/staging/rtl8712/rtl871x_io.h
42305 +++ b/drivers/staging/rtl8712/rtl871x_io.h
42306 @@ -108,7 +108,7 @@ struct _io_ops {
42307 u8 *pmem);
42308 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
42309 u8 *pmem);
42310 -};
42311 +} __no_const;
42312
42313 struct io_req {
42314 struct list_head list;
42315 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
42316 index 1f5088b..0e59820 100644
42317 --- a/drivers/staging/sbe-2t3e3/netdev.c
42318 +++ b/drivers/staging/sbe-2t3e3/netdev.c
42319 @@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42320 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
42321
42322 if (rlen)
42323 - if (copy_to_user(data, &resp, rlen))
42324 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
42325 return -EFAULT;
42326
42327 return 0;
42328 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42329 index 5dddc4d..34fcb2f 100644
42330 --- a/drivers/staging/usbip/vhci.h
42331 +++ b/drivers/staging/usbip/vhci.h
42332 @@ -83,7 +83,7 @@ struct vhci_hcd {
42333 unsigned resuming:1;
42334 unsigned long re_timeout;
42335
42336 - atomic_t seqnum;
42337 + atomic_unchecked_t seqnum;
42338
42339 /*
42340 * NOTE:
42341 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42342 index f1ca084..7b5c0c3 100644
42343 --- a/drivers/staging/usbip/vhci_hcd.c
42344 +++ b/drivers/staging/usbip/vhci_hcd.c
42345 @@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
42346
42347 spin_lock(&vdev->priv_lock);
42348
42349 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42350 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42351 if (priv->seqnum == 0xffff)
42352 dev_info(&urb->dev->dev, "seqnum max\n");
42353
42354 @@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42355 return -ENOMEM;
42356 }
42357
42358 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42359 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42360 if (unlink->seqnum == 0xffff)
42361 pr_info("seqnum max\n");
42362
42363 @@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
42364 vdev->rhport = rhport;
42365 }
42366
42367 - atomic_set(&vhci->seqnum, 0);
42368 + atomic_set_unchecked(&vhci->seqnum, 0);
42369 spin_lock_init(&vhci->lock);
42370
42371 hcd->power_budget = 0; /* no limit */
42372 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42373 index faf8e60..c46f8ab 100644
42374 --- a/drivers/staging/usbip/vhci_rx.c
42375 +++ b/drivers/staging/usbip/vhci_rx.c
42376 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42377 if (!urb) {
42378 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
42379 pr_info("max seqnum %d\n",
42380 - atomic_read(&the_controller->seqnum));
42381 + atomic_read_unchecked(&the_controller->seqnum));
42382 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42383 return;
42384 }
42385 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42386 index 5f13890..36a044b 100644
42387 --- a/drivers/staging/vt6655/hostap.c
42388 +++ b/drivers/staging/vt6655/hostap.c
42389 @@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
42390 *
42391 */
42392
42393 +static net_device_ops_no_const apdev_netdev_ops;
42394 +
42395 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42396 {
42397 PSDevice apdev_priv;
42398 struct net_device *dev = pDevice->dev;
42399 int ret;
42400 - const struct net_device_ops apdev_netdev_ops = {
42401 - .ndo_start_xmit = pDevice->tx_80211,
42402 - };
42403
42404 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42405
42406 @@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42407 *apdev_priv = *pDevice;
42408 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42409
42410 + /* only half broken now */
42411 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42412 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42413
42414 pDevice->apdev->type = ARPHRD_IEEE80211;
42415 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42416 index bc5e9da..dacd556 100644
42417 --- a/drivers/staging/vt6656/hostap.c
42418 +++ b/drivers/staging/vt6656/hostap.c
42419 @@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
42420 *
42421 */
42422
42423 +static net_device_ops_no_const apdev_netdev_ops;
42424 +
42425 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
42426 {
42427 struct vnt_private *apdev_priv;
42428 struct net_device *dev = pDevice->dev;
42429 int ret;
42430 - const struct net_device_ops apdev_netdev_ops = {
42431 - .ndo_start_xmit = pDevice->tx_80211,
42432 - };
42433
42434 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42435
42436 @@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
42437 *apdev_priv = *pDevice;
42438 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42439
42440 + /* only half broken now */
42441 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42442 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42443
42444 pDevice->apdev->type = ARPHRD_IEEE80211;
42445 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
42446 index a2b7e03..9ff4bbd 100644
42447 --- a/drivers/staging/zcache/tmem.c
42448 +++ b/drivers/staging/zcache/tmem.c
42449 @@ -50,7 +50,7 @@
42450 * A tmem host implementation must use this function to register callbacks
42451 * for memory allocation.
42452 */
42453 -static struct tmem_hostops tmem_hostops;
42454 +static tmem_hostops_no_const tmem_hostops;
42455
42456 static void tmem_objnode_tree_init(void);
42457
42458 @@ -64,7 +64,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
42459 * A tmem host implementation must use this function to register
42460 * callbacks for a page-accessible memory (PAM) implementation.
42461 */
42462 -static struct tmem_pamops tmem_pamops;
42463 +static tmem_pamops_no_const tmem_pamops;
42464
42465 void tmem_register_pamops(struct tmem_pamops *m)
42466 {
42467 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
42468 index adbe5a8..d387359 100644
42469 --- a/drivers/staging/zcache/tmem.h
42470 +++ b/drivers/staging/zcache/tmem.h
42471 @@ -226,6 +226,7 @@ struct tmem_pamops {
42472 int (*replace_in_obj)(void *, struct tmem_obj *);
42473 #endif
42474 };
42475 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
42476 extern void tmem_register_pamops(struct tmem_pamops *m);
42477
42478 /* memory allocation methods provided by the host implementation */
42479 @@ -235,6 +236,7 @@ struct tmem_hostops {
42480 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
42481 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
42482 };
42483 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
42484 extern void tmem_register_hostops(struct tmem_hostops *m);
42485
42486 /* core tmem accessor functions */
42487 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
42488 index 2e4d655..fd72e68 100644
42489 --- a/drivers/target/target_core_device.c
42490 +++ b/drivers/target/target_core_device.c
42491 @@ -1414,7 +1414,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
42492 spin_lock_init(&dev->se_port_lock);
42493 spin_lock_init(&dev->se_tmr_lock);
42494 spin_lock_init(&dev->qf_cmd_lock);
42495 - atomic_set(&dev->dev_ordered_id, 0);
42496 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
42497 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
42498 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
42499 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
42500 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
42501 index 3243ea7..4f19a6e 100644
42502 --- a/drivers/target/target_core_transport.c
42503 +++ b/drivers/target/target_core_transport.c
42504 @@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
42505 * Used to determine when ORDERED commands should go from
42506 * Dormant to Active status.
42507 */
42508 - cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
42509 + cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
42510 smp_mb__after_atomic_inc();
42511 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
42512 cmd->se_ordered_id, cmd->sam_task_attr,
42513 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
42514 index 345bd0e..61d5375 100644
42515 --- a/drivers/tty/cyclades.c
42516 +++ b/drivers/tty/cyclades.c
42517 @@ -1576,10 +1576,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
42518 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
42519 info->port.count);
42520 #endif
42521 - info->port.count++;
42522 + atomic_inc(&info->port.count);
42523 #ifdef CY_DEBUG_COUNT
42524 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
42525 - current->pid, info->port.count);
42526 + current->pid, atomic_read(&info->port.count));
42527 #endif
42528
42529 /*
42530 @@ -3978,7 +3978,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
42531 for (j = 0; j < cy_card[i].nports; j++) {
42532 info = &cy_card[i].ports[j];
42533
42534 - if (info->port.count) {
42535 + if (atomic_read(&info->port.count)) {
42536 /* XXX is the ldisc num worth this? */
42537 struct tty_struct *tty;
42538 struct tty_ldisc *ld;
42539 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
42540 index eb255e8..f637a57 100644
42541 --- a/drivers/tty/hvc/hvc_console.c
42542 +++ b/drivers/tty/hvc/hvc_console.c
42543 @@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
42544
42545 spin_lock_irqsave(&hp->port.lock, flags);
42546 /* Check and then increment for fast path open. */
42547 - if (hp->port.count++ > 0) {
42548 + if (atomic_inc_return(&hp->port.count) > 1) {
42549 spin_unlock_irqrestore(&hp->port.lock, flags);
42550 hvc_kick();
42551 return 0;
42552 @@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42553
42554 spin_lock_irqsave(&hp->port.lock, flags);
42555
42556 - if (--hp->port.count == 0) {
42557 + if (atomic_dec_return(&hp->port.count) == 0) {
42558 spin_unlock_irqrestore(&hp->port.lock, flags);
42559 /* We are done with the tty pointer now. */
42560 tty_port_tty_set(&hp->port, NULL);
42561 @@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42562 */
42563 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
42564 } else {
42565 - if (hp->port.count < 0)
42566 + if (atomic_read(&hp->port.count) < 0)
42567 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
42568 - hp->vtermno, hp->port.count);
42569 + hp->vtermno, atomic_read(&hp->port.count));
42570 spin_unlock_irqrestore(&hp->port.lock, flags);
42571 }
42572 }
42573 @@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
42574 * open->hangup case this can be called after the final close so prevent
42575 * that from happening for now.
42576 */
42577 - if (hp->port.count <= 0) {
42578 + if (atomic_read(&hp->port.count) <= 0) {
42579 spin_unlock_irqrestore(&hp->port.lock, flags);
42580 return;
42581 }
42582
42583 - hp->port.count = 0;
42584 + atomic_set(&hp->port.count, 0);
42585 spin_unlock_irqrestore(&hp->port.lock, flags);
42586 tty_port_tty_set(&hp->port, NULL);
42587
42588 @@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
42589 return -EPIPE;
42590
42591 /* FIXME what's this (unprotected) check for? */
42592 - if (hp->port.count <= 0)
42593 + if (atomic_read(&hp->port.count) <= 0)
42594 return -EIO;
42595
42596 spin_lock_irqsave(&hp->lock, flags);
42597 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
42598 index 81e939e..95ead10 100644
42599 --- a/drivers/tty/hvc/hvcs.c
42600 +++ b/drivers/tty/hvc/hvcs.c
42601 @@ -83,6 +83,7 @@
42602 #include <asm/hvcserver.h>
42603 #include <asm/uaccess.h>
42604 #include <asm/vio.h>
42605 +#include <asm/local.h>
42606
42607 /*
42608 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
42609 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
42610
42611 spin_lock_irqsave(&hvcsd->lock, flags);
42612
42613 - if (hvcsd->port.count > 0) {
42614 + if (atomic_read(&hvcsd->port.count) > 0) {
42615 spin_unlock_irqrestore(&hvcsd->lock, flags);
42616 printk(KERN_INFO "HVCS: vterm state unchanged. "
42617 "The hvcs device node is still in use.\n");
42618 @@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
42619 }
42620 }
42621
42622 - hvcsd->port.count = 0;
42623 + atomic_set(&hvcsd->port.count, 0);
42624 hvcsd->port.tty = tty;
42625 tty->driver_data = hvcsd;
42626
42627 @@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
42628 unsigned long flags;
42629
42630 spin_lock_irqsave(&hvcsd->lock, flags);
42631 - hvcsd->port.count++;
42632 + atomic_inc(&hvcsd->port.count);
42633 hvcsd->todo_mask |= HVCS_SCHED_READ;
42634 spin_unlock_irqrestore(&hvcsd->lock, flags);
42635
42636 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42637 hvcsd = tty->driver_data;
42638
42639 spin_lock_irqsave(&hvcsd->lock, flags);
42640 - if (--hvcsd->port.count == 0) {
42641 + if (atomic_dec_and_test(&hvcsd->port.count)) {
42642
42643 vio_disable_interrupts(hvcsd->vdev);
42644
42645 @@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42646
42647 free_irq(irq, hvcsd);
42648 return;
42649 - } else if (hvcsd->port.count < 0) {
42650 + } else if (atomic_read(&hvcsd->port.count) < 0) {
42651 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
42652 " is missmanaged.\n",
42653 - hvcsd->vdev->unit_address, hvcsd->port.count);
42654 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
42655 }
42656
42657 spin_unlock_irqrestore(&hvcsd->lock, flags);
42658 @@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42659
42660 spin_lock_irqsave(&hvcsd->lock, flags);
42661 /* Preserve this so that we know how many kref refs to put */
42662 - temp_open_count = hvcsd->port.count;
42663 + temp_open_count = atomic_read(&hvcsd->port.count);
42664
42665 /*
42666 * Don't kref put inside the spinlock because the destruction
42667 @@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42668 tty->driver_data = NULL;
42669 hvcsd->port.tty = NULL;
42670
42671 - hvcsd->port.count = 0;
42672 + atomic_set(&hvcsd->port.count, 0);
42673
42674 /* This will drop any buffered data on the floor which is OK in a hangup
42675 * scenario. */
42676 @@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
42677 * the middle of a write operation? This is a crummy place to do this
42678 * but we want to keep it all in the spinlock.
42679 */
42680 - if (hvcsd->port.count <= 0) {
42681 + if (atomic_read(&hvcsd->port.count) <= 0) {
42682 spin_unlock_irqrestore(&hvcsd->lock, flags);
42683 return -ENODEV;
42684 }
42685 @@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
42686 {
42687 struct hvcs_struct *hvcsd = tty->driver_data;
42688
42689 - if (!hvcsd || hvcsd->port.count <= 0)
42690 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
42691 return 0;
42692
42693 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
42694 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
42695 index 8fd72ff..34a0bed 100644
42696 --- a/drivers/tty/ipwireless/tty.c
42697 +++ b/drivers/tty/ipwireless/tty.c
42698 @@ -29,6 +29,7 @@
42699 #include <linux/tty_driver.h>
42700 #include <linux/tty_flip.h>
42701 #include <linux/uaccess.h>
42702 +#include <asm/local.h>
42703
42704 #include "tty.h"
42705 #include "network.h"
42706 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42707 mutex_unlock(&tty->ipw_tty_mutex);
42708 return -ENODEV;
42709 }
42710 - if (tty->port.count == 0)
42711 + if (atomic_read(&tty->port.count) == 0)
42712 tty->tx_bytes_queued = 0;
42713
42714 - tty->port.count++;
42715 + atomic_inc(&tty->port.count);
42716
42717 tty->port.tty = linux_tty;
42718 linux_tty->driver_data = tty;
42719 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42720
42721 static void do_ipw_close(struct ipw_tty *tty)
42722 {
42723 - tty->port.count--;
42724 -
42725 - if (tty->port.count == 0) {
42726 + if (atomic_dec_return(&tty->port.count) == 0) {
42727 struct tty_struct *linux_tty = tty->port.tty;
42728
42729 if (linux_tty != NULL) {
42730 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
42731 return;
42732
42733 mutex_lock(&tty->ipw_tty_mutex);
42734 - if (tty->port.count == 0) {
42735 + if (atomic_read(&tty->port.count) == 0) {
42736 mutex_unlock(&tty->ipw_tty_mutex);
42737 return;
42738 }
42739 @@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
42740
42741 mutex_lock(&tty->ipw_tty_mutex);
42742
42743 - if (!tty->port.count) {
42744 + if (!atomic_read(&tty->port.count)) {
42745 mutex_unlock(&tty->ipw_tty_mutex);
42746 return;
42747 }
42748 @@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
42749 return -ENODEV;
42750
42751 mutex_lock(&tty->ipw_tty_mutex);
42752 - if (!tty->port.count) {
42753 + if (!atomic_read(&tty->port.count)) {
42754 mutex_unlock(&tty->ipw_tty_mutex);
42755 return -EINVAL;
42756 }
42757 @@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
42758 if (!tty)
42759 return -ENODEV;
42760
42761 - if (!tty->port.count)
42762 + if (!atomic_read(&tty->port.count))
42763 return -EINVAL;
42764
42765 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
42766 @@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
42767 if (!tty)
42768 return 0;
42769
42770 - if (!tty->port.count)
42771 + if (!atomic_read(&tty->port.count))
42772 return 0;
42773
42774 return tty->tx_bytes_queued;
42775 @@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
42776 if (!tty)
42777 return -ENODEV;
42778
42779 - if (!tty->port.count)
42780 + if (!atomic_read(&tty->port.count))
42781 return -EINVAL;
42782
42783 return get_control_lines(tty);
42784 @@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
42785 if (!tty)
42786 return -ENODEV;
42787
42788 - if (!tty->port.count)
42789 + if (!atomic_read(&tty->port.count))
42790 return -EINVAL;
42791
42792 return set_control_lines(tty, set, clear);
42793 @@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
42794 if (!tty)
42795 return -ENODEV;
42796
42797 - if (!tty->port.count)
42798 + if (!atomic_read(&tty->port.count))
42799 return -EINVAL;
42800
42801 /* FIXME: Exactly how is the tty object locked here .. */
42802 @@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
42803 * are gone */
42804 mutex_lock(&ttyj->ipw_tty_mutex);
42805 }
42806 - while (ttyj->port.count)
42807 + while (atomic_read(&ttyj->port.count))
42808 do_ipw_close(ttyj);
42809 ipwireless_disassociate_network_ttys(network,
42810 ttyj->channel_idx);
42811 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
42812 index adeac25..787a0a1 100644
42813 --- a/drivers/tty/moxa.c
42814 +++ b/drivers/tty/moxa.c
42815 @@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
42816 }
42817
42818 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
42819 - ch->port.count++;
42820 + atomic_inc(&ch->port.count);
42821 tty->driver_data = ch;
42822 tty_port_tty_set(&ch->port, tty);
42823 mutex_lock(&ch->port.mutex);
42824 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
42825 index 4a43ef5d7..aa71f27 100644
42826 --- a/drivers/tty/n_gsm.c
42827 +++ b/drivers/tty/n_gsm.c
42828 @@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
42829 spin_lock_init(&dlci->lock);
42830 mutex_init(&dlci->mutex);
42831 dlci->fifo = &dlci->_fifo;
42832 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
42833 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
42834 kfree(dlci);
42835 return NULL;
42836 }
42837 @@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
42838 struct gsm_dlci *dlci = tty->driver_data;
42839 struct tty_port *port = &dlci->port;
42840
42841 - port->count++;
42842 + atomic_inc(&port->count);
42843 dlci_get(dlci);
42844 dlci_get(dlci->gsm->dlci[0]);
42845 mux_get(dlci->gsm);
42846 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
42847 index 05e72be..67f6a0f 100644
42848 --- a/drivers/tty/n_tty.c
42849 +++ b/drivers/tty/n_tty.c
42850 @@ -2197,6 +2197,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
42851 {
42852 *ops = tty_ldisc_N_TTY;
42853 ops->owner = NULL;
42854 - ops->refcount = ops->flags = 0;
42855 + atomic_set(&ops->refcount, 0);
42856 + ops->flags = 0;
42857 }
42858 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
42859 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
42860 index 125e0fd..8c50690 100644
42861 --- a/drivers/tty/pty.c
42862 +++ b/drivers/tty/pty.c
42863 @@ -800,8 +800,10 @@ static void __init unix98_pty_init(void)
42864 panic("Couldn't register Unix98 pts driver");
42865
42866 /* Now create the /dev/ptmx special device */
42867 + pax_open_kernel();
42868 tty_default_fops(&ptmx_fops);
42869 - ptmx_fops.open = ptmx_open;
42870 + *(void **)&ptmx_fops.open = ptmx_open;
42871 + pax_close_kernel();
42872
42873 cdev_init(&ptmx_cdev, &ptmx_fops);
42874 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
42875 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
42876 index 1d27003..959f452 100644
42877 --- a/drivers/tty/rocket.c
42878 +++ b/drivers/tty/rocket.c
42879 @@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
42880 tty->driver_data = info;
42881 tty_port_tty_set(port, tty);
42882
42883 - if (port->count++ == 0) {
42884 + if (atomic_inc_return(&port->count) == 1) {
42885 atomic_inc(&rp_num_ports_open);
42886
42887 #ifdef ROCKET_DEBUG_OPEN
42888 @@ -932,7 +932,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
42889 #endif
42890 }
42891 #ifdef ROCKET_DEBUG_OPEN
42892 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
42893 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
42894 #endif
42895
42896 /*
42897 @@ -1527,7 +1527,7 @@ static void rp_hangup(struct tty_struct *tty)
42898 spin_unlock_irqrestore(&info->port.lock, flags);
42899 return;
42900 }
42901 - if (info->port.count)
42902 + if (atomic_read(&info->port.count))
42903 atomic_dec(&rp_num_ports_open);
42904 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
42905 spin_unlock_irqrestore(&info->port.lock, flags);
42906 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
42907 index 1002054..dd644a8 100644
42908 --- a/drivers/tty/serial/kgdboc.c
42909 +++ b/drivers/tty/serial/kgdboc.c
42910 @@ -24,8 +24,9 @@
42911 #define MAX_CONFIG_LEN 40
42912
42913 static struct kgdb_io kgdboc_io_ops;
42914 +static struct kgdb_io kgdboc_io_ops_console;
42915
42916 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
42917 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
42918 static int configured = -1;
42919
42920 static char config[MAX_CONFIG_LEN];
42921 @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
42922 kgdboc_unregister_kbd();
42923 if (configured == 1)
42924 kgdb_unregister_io_module(&kgdboc_io_ops);
42925 + else if (configured == 2)
42926 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
42927 }
42928
42929 static int configure_kgdboc(void)
42930 @@ -160,13 +163,13 @@ static int configure_kgdboc(void)
42931 int err;
42932 char *cptr = config;
42933 struct console *cons;
42934 + int is_console = 0;
42935
42936 err = kgdboc_option_setup(config);
42937 if (err || !strlen(config) || isspace(config[0]))
42938 goto noconfig;
42939
42940 err = -ENODEV;
42941 - kgdboc_io_ops.is_console = 0;
42942 kgdb_tty_driver = NULL;
42943
42944 kgdboc_use_kms = 0;
42945 @@ -187,7 +190,7 @@ static int configure_kgdboc(void)
42946 int idx;
42947 if (cons->device && cons->device(cons, &idx) == p &&
42948 idx == tty_line) {
42949 - kgdboc_io_ops.is_console = 1;
42950 + is_console = 1;
42951 break;
42952 }
42953 cons = cons->next;
42954 @@ -197,7 +200,13 @@ static int configure_kgdboc(void)
42955 kgdb_tty_line = tty_line;
42956
42957 do_register:
42958 - err = kgdb_register_io_module(&kgdboc_io_ops);
42959 + if (is_console) {
42960 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
42961 + configured = 2;
42962 + } else {
42963 + err = kgdb_register_io_module(&kgdboc_io_ops);
42964 + configured = 1;
42965 + }
42966 if (err)
42967 goto noconfig;
42968
42969 @@ -205,8 +214,6 @@ do_register:
42970 if (err)
42971 goto nmi_con_failed;
42972
42973 - configured = 1;
42974 -
42975 return 0;
42976
42977 nmi_con_failed:
42978 @@ -223,7 +230,7 @@ noconfig:
42979 static int __init init_kgdboc(void)
42980 {
42981 /* Already configured? */
42982 - if (configured == 1)
42983 + if (configured >= 1)
42984 return 0;
42985
42986 return configure_kgdboc();
42987 @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
42988 if (config[len - 1] == '\n')
42989 config[len - 1] = '\0';
42990
42991 - if (configured == 1)
42992 + if (configured >= 1)
42993 cleanup_kgdboc();
42994
42995 /* Go and configure with the new params. */
42996 @@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
42997 .post_exception = kgdboc_post_exp_handler,
42998 };
42999
43000 +static struct kgdb_io kgdboc_io_ops_console = {
43001 + .name = "kgdboc",
43002 + .read_char = kgdboc_get_char,
43003 + .write_char = kgdboc_put_char,
43004 + .pre_exception = kgdboc_pre_exp_handler,
43005 + .post_exception = kgdboc_post_exp_handler,
43006 + .is_console = 1
43007 +};
43008 +
43009 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43010 /* This is only available if kgdboc is a built in for early debugging */
43011 static int __init kgdboc_early_init(char *opt)
43012 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43013 index 2769a38..f3dbe48 100644
43014 --- a/drivers/tty/serial/samsung.c
43015 +++ b/drivers/tty/serial/samsung.c
43016 @@ -451,11 +451,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43017 }
43018 }
43019
43020 +static int s3c64xx_serial_startup(struct uart_port *port);
43021 static int s3c24xx_serial_startup(struct uart_port *port)
43022 {
43023 struct s3c24xx_uart_port *ourport = to_ourport(port);
43024 int ret;
43025
43026 + /* Startup sequence is different for s3c64xx and higher SoC's */
43027 + if (s3c24xx_serial_has_interrupt_mask(port))
43028 + return s3c64xx_serial_startup(port);
43029 +
43030 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43031 port->mapbase, port->membase);
43032
43033 @@ -1120,10 +1125,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43034 /* setup info for port */
43035 port->dev = &platdev->dev;
43036
43037 - /* Startup sequence is different for s3c64xx and higher SoC's */
43038 - if (s3c24xx_serial_has_interrupt_mask(port))
43039 - s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43040 -
43041 port->uartclk = 1;
43042
43043 if (cfg->uart_flags & UPF_CONS_FLOW) {
43044 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43045 index 8fbb6d2..822a9e6 100644
43046 --- a/drivers/tty/serial/serial_core.c
43047 +++ b/drivers/tty/serial/serial_core.c
43048 @@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
43049 uart_flush_buffer(tty);
43050 uart_shutdown(tty, state);
43051 spin_lock_irqsave(&port->lock, flags);
43052 - port->count = 0;
43053 + atomic_set(&port->count, 0);
43054 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43055 spin_unlock_irqrestore(&port->lock, flags);
43056 tty_port_tty_set(port, NULL);
43057 @@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43058 goto end;
43059 }
43060
43061 - port->count++;
43062 + atomic_inc(&port->count);
43063 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43064 retval = -ENXIO;
43065 goto err_dec_count;
43066 @@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43067 /*
43068 * Make sure the device is in D0 state.
43069 */
43070 - if (port->count == 1)
43071 + if (atomic_read(&port->count) == 1)
43072 uart_change_pm(state, UART_PM_STATE_ON);
43073
43074 /*
43075 @@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43076 end:
43077 return retval;
43078 err_dec_count:
43079 - port->count--;
43080 + atomic_dec(&port->count);
43081 mutex_unlock(&port->mutex);
43082 goto end;
43083 }
43084 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43085 index 8983276..72a4090 100644
43086 --- a/drivers/tty/synclink.c
43087 +++ b/drivers/tty/synclink.c
43088 @@ -3093,7 +3093,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43089
43090 if (debug_level >= DEBUG_LEVEL_INFO)
43091 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43092 - __FILE__,__LINE__, info->device_name, info->port.count);
43093 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43094
43095 if (tty_port_close_start(&info->port, tty, filp) == 0)
43096 goto cleanup;
43097 @@ -3111,7 +3111,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43098 cleanup:
43099 if (debug_level >= DEBUG_LEVEL_INFO)
43100 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43101 - tty->driver->name, info->port.count);
43102 + tty->driver->name, atomic_read(&info->port.count));
43103
43104 } /* end of mgsl_close() */
43105
43106 @@ -3210,8 +3210,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43107
43108 mgsl_flush_buffer(tty);
43109 shutdown(info);
43110 -
43111 - info->port.count = 0;
43112 +
43113 + atomic_set(&info->port.count, 0);
43114 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43115 info->port.tty = NULL;
43116
43117 @@ -3300,12 +3300,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43118
43119 if (debug_level >= DEBUG_LEVEL_INFO)
43120 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43121 - __FILE__,__LINE__, tty->driver->name, port->count );
43122 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43123
43124 spin_lock_irqsave(&info->irq_spinlock, flags);
43125 if (!tty_hung_up_p(filp)) {
43126 extra_count = true;
43127 - port->count--;
43128 + atomic_dec(&port->count);
43129 }
43130 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43131 port->blocked_open++;
43132 @@ -3334,7 +3334,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43133
43134 if (debug_level >= DEBUG_LEVEL_INFO)
43135 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43136 - __FILE__,__LINE__, tty->driver->name, port->count );
43137 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43138
43139 tty_unlock(tty);
43140 schedule();
43141 @@ -3346,12 +3346,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43142
43143 /* FIXME: Racy on hangup during close wait */
43144 if (extra_count)
43145 - port->count++;
43146 + atomic_inc(&port->count);
43147 port->blocked_open--;
43148
43149 if (debug_level >= DEBUG_LEVEL_INFO)
43150 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43151 - __FILE__,__LINE__, tty->driver->name, port->count );
43152 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43153
43154 if (!retval)
43155 port->flags |= ASYNC_NORMAL_ACTIVE;
43156 @@ -3403,7 +3403,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43157
43158 if (debug_level >= DEBUG_LEVEL_INFO)
43159 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43160 - __FILE__,__LINE__,tty->driver->name, info->port.count);
43161 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43162
43163 /* If port is closing, signal caller to try again */
43164 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43165 @@ -3422,10 +3422,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43166 spin_unlock_irqrestore(&info->netlock, flags);
43167 goto cleanup;
43168 }
43169 - info->port.count++;
43170 + atomic_inc(&info->port.count);
43171 spin_unlock_irqrestore(&info->netlock, flags);
43172
43173 - if (info->port.count == 1) {
43174 + if (atomic_read(&info->port.count) == 1) {
43175 /* 1st open on this device, init hardware */
43176 retval = startup(info);
43177 if (retval < 0)
43178 @@ -3449,8 +3449,8 @@ cleanup:
43179 if (retval) {
43180 if (tty->count == 1)
43181 info->port.tty = NULL; /* tty layer will release tty struct */
43182 - if(info->port.count)
43183 - info->port.count--;
43184 + if (atomic_read(&info->port.count))
43185 + atomic_dec(&info->port.count);
43186 }
43187
43188 return retval;
43189 @@ -7668,7 +7668,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43190 unsigned short new_crctype;
43191
43192 /* return error if TTY interface open */
43193 - if (info->port.count)
43194 + if (atomic_read(&info->port.count))
43195 return -EBUSY;
43196
43197 switch (encoding)
43198 @@ -7763,7 +7763,7 @@ static int hdlcdev_open(struct net_device *dev)
43199
43200 /* arbitrate between network and tty opens */
43201 spin_lock_irqsave(&info->netlock, flags);
43202 - if (info->port.count != 0 || info->netcount != 0) {
43203 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43204 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43205 spin_unlock_irqrestore(&info->netlock, flags);
43206 return -EBUSY;
43207 @@ -7849,7 +7849,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43208 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43209
43210 /* return error if TTY interface open */
43211 - if (info->port.count)
43212 + if (atomic_read(&info->port.count))
43213 return -EBUSY;
43214
43215 if (cmd != SIOCWANDEV)
43216 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43217 index aa9eece..d8baaec 100644
43218 --- a/drivers/tty/synclink_gt.c
43219 +++ b/drivers/tty/synclink_gt.c
43220 @@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43221 tty->driver_data = info;
43222 info->port.tty = tty;
43223
43224 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43225 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
43226
43227 /* If port is closing, signal caller to try again */
43228 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43229 @@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43230 mutex_unlock(&info->port.mutex);
43231 goto cleanup;
43232 }
43233 - info->port.count++;
43234 + atomic_inc(&info->port.count);
43235 spin_unlock_irqrestore(&info->netlock, flags);
43236
43237 - if (info->port.count == 1) {
43238 + if (atomic_read(&info->port.count) == 1) {
43239 /* 1st open on this device, init hardware */
43240 retval = startup(info);
43241 if (retval < 0) {
43242 @@ -715,8 +715,8 @@ cleanup:
43243 if (retval) {
43244 if (tty->count == 1)
43245 info->port.tty = NULL; /* tty layer will release tty struct */
43246 - if(info->port.count)
43247 - info->port.count--;
43248 + if(atomic_read(&info->port.count))
43249 + atomic_dec(&info->port.count);
43250 }
43251
43252 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
43253 @@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43254
43255 if (sanity_check(info, tty->name, "close"))
43256 return;
43257 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
43258 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
43259
43260 if (tty_port_close_start(&info->port, tty, filp) == 0)
43261 goto cleanup;
43262 @@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43263 tty_port_close_end(&info->port, tty);
43264 info->port.tty = NULL;
43265 cleanup:
43266 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
43267 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
43268 }
43269
43270 static void hangup(struct tty_struct *tty)
43271 @@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
43272 shutdown(info);
43273
43274 spin_lock_irqsave(&info->port.lock, flags);
43275 - info->port.count = 0;
43276 + atomic_set(&info->port.count, 0);
43277 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43278 info->port.tty = NULL;
43279 spin_unlock_irqrestore(&info->port.lock, flags);
43280 @@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43281 unsigned short new_crctype;
43282
43283 /* return error if TTY interface open */
43284 - if (info->port.count)
43285 + if (atomic_read(&info->port.count))
43286 return -EBUSY;
43287
43288 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
43289 @@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
43290
43291 /* arbitrate between network and tty opens */
43292 spin_lock_irqsave(&info->netlock, flags);
43293 - if (info->port.count != 0 || info->netcount != 0) {
43294 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43295 DBGINFO(("%s hdlc_open busy\n", dev->name));
43296 spin_unlock_irqrestore(&info->netlock, flags);
43297 return -EBUSY;
43298 @@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43299 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
43300
43301 /* return error if TTY interface open */
43302 - if (info->port.count)
43303 + if (atomic_read(&info->port.count))
43304 return -EBUSY;
43305
43306 if (cmd != SIOCWANDEV)
43307 @@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
43308 if (port == NULL)
43309 continue;
43310 spin_lock(&port->lock);
43311 - if ((port->port.count || port->netcount) &&
43312 + if ((atomic_read(&port->port.count) || port->netcount) &&
43313 port->pending_bh && !port->bh_running &&
43314 !port->bh_requested) {
43315 DBGISR(("%s bh queued\n", port->device_name));
43316 @@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43317 spin_lock_irqsave(&info->lock, flags);
43318 if (!tty_hung_up_p(filp)) {
43319 extra_count = true;
43320 - port->count--;
43321 + atomic_dec(&port->count);
43322 }
43323 spin_unlock_irqrestore(&info->lock, flags);
43324 port->blocked_open++;
43325 @@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43326 remove_wait_queue(&port->open_wait, &wait);
43327
43328 if (extra_count)
43329 - port->count++;
43330 + atomic_inc(&port->count);
43331 port->blocked_open--;
43332
43333 if (!retval)
43334 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
43335 index 6d5780c..aa4d8cd 100644
43336 --- a/drivers/tty/synclinkmp.c
43337 +++ b/drivers/tty/synclinkmp.c
43338 @@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43339
43340 if (debug_level >= DEBUG_LEVEL_INFO)
43341 printk("%s(%d):%s open(), old ref count = %d\n",
43342 - __FILE__,__LINE__,tty->driver->name, info->port.count);
43343 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43344
43345 /* If port is closing, signal caller to try again */
43346 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43347 @@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43348 spin_unlock_irqrestore(&info->netlock, flags);
43349 goto cleanup;
43350 }
43351 - info->port.count++;
43352 + atomic_inc(&info->port.count);
43353 spin_unlock_irqrestore(&info->netlock, flags);
43354
43355 - if (info->port.count == 1) {
43356 + if (atomic_read(&info->port.count) == 1) {
43357 /* 1st open on this device, init hardware */
43358 retval = startup(info);
43359 if (retval < 0)
43360 @@ -796,8 +796,8 @@ cleanup:
43361 if (retval) {
43362 if (tty->count == 1)
43363 info->port.tty = NULL; /* tty layer will release tty struct */
43364 - if(info->port.count)
43365 - info->port.count--;
43366 + if(atomic_read(&info->port.count))
43367 + atomic_dec(&info->port.count);
43368 }
43369
43370 return retval;
43371 @@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43372
43373 if (debug_level >= DEBUG_LEVEL_INFO)
43374 printk("%s(%d):%s close() entry, count=%d\n",
43375 - __FILE__,__LINE__, info->device_name, info->port.count);
43376 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43377
43378 if (tty_port_close_start(&info->port, tty, filp) == 0)
43379 goto cleanup;
43380 @@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43381 cleanup:
43382 if (debug_level >= DEBUG_LEVEL_INFO)
43383 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
43384 - tty->driver->name, info->port.count);
43385 + tty->driver->name, atomic_read(&info->port.count));
43386 }
43387
43388 /* Called by tty_hangup() when a hangup is signaled.
43389 @@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
43390 shutdown(info);
43391
43392 spin_lock_irqsave(&info->port.lock, flags);
43393 - info->port.count = 0;
43394 + atomic_set(&info->port.count, 0);
43395 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43396 info->port.tty = NULL;
43397 spin_unlock_irqrestore(&info->port.lock, flags);
43398 @@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43399 unsigned short new_crctype;
43400
43401 /* return error if TTY interface open */
43402 - if (info->port.count)
43403 + if (atomic_read(&info->port.count))
43404 return -EBUSY;
43405
43406 switch (encoding)
43407 @@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
43408
43409 /* arbitrate between network and tty opens */
43410 spin_lock_irqsave(&info->netlock, flags);
43411 - if (info->port.count != 0 || info->netcount != 0) {
43412 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43413 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43414 spin_unlock_irqrestore(&info->netlock, flags);
43415 return -EBUSY;
43416 @@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43417 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43418
43419 /* return error if TTY interface open */
43420 - if (info->port.count)
43421 + if (atomic_read(&info->port.count))
43422 return -EBUSY;
43423
43424 if (cmd != SIOCWANDEV)
43425 @@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
43426 * do not request bottom half processing if the
43427 * device is not open in a normal mode.
43428 */
43429 - if ( port && (port->port.count || port->netcount) &&
43430 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
43431 port->pending_bh && !port->bh_running &&
43432 !port->bh_requested ) {
43433 if ( debug_level >= DEBUG_LEVEL_ISR )
43434 @@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43435
43436 if (debug_level >= DEBUG_LEVEL_INFO)
43437 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
43438 - __FILE__,__LINE__, tty->driver->name, port->count );
43439 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43440
43441 spin_lock_irqsave(&info->lock, flags);
43442 if (!tty_hung_up_p(filp)) {
43443 extra_count = true;
43444 - port->count--;
43445 + atomic_dec(&port->count);
43446 }
43447 spin_unlock_irqrestore(&info->lock, flags);
43448 port->blocked_open++;
43449 @@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43450
43451 if (debug_level >= DEBUG_LEVEL_INFO)
43452 printk("%s(%d):%s block_til_ready() count=%d\n",
43453 - __FILE__,__LINE__, tty->driver->name, port->count );
43454 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43455
43456 tty_unlock(tty);
43457 schedule();
43458 @@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43459 remove_wait_queue(&port->open_wait, &wait);
43460
43461 if (extra_count)
43462 - port->count++;
43463 + atomic_inc(&port->count);
43464 port->blocked_open--;
43465
43466 if (debug_level >= DEBUG_LEVEL_INFO)
43467 printk("%s(%d):%s block_til_ready() after, count=%d\n",
43468 - __FILE__,__LINE__, tty->driver->name, port->count );
43469 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43470
43471 if (!retval)
43472 port->flags |= ASYNC_NORMAL_ACTIVE;
43473 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
43474 index 3687f0c..6b9b808 100644
43475 --- a/drivers/tty/sysrq.c
43476 +++ b/drivers/tty/sysrq.c
43477 @@ -995,7 +995,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
43478 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
43479 size_t count, loff_t *ppos)
43480 {
43481 - if (count) {
43482 + if (count && capable(CAP_SYS_ADMIN)) {
43483 char c;
43484
43485 if (get_user(c, buf))
43486 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
43487 index a9cd0b9..47b9336 100644
43488 --- a/drivers/tty/tty_io.c
43489 +++ b/drivers/tty/tty_io.c
43490 @@ -3398,7 +3398,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
43491
43492 void tty_default_fops(struct file_operations *fops)
43493 {
43494 - *fops = tty_fops;
43495 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
43496 }
43497
43498 /*
43499 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
43500 index d794087..e4f49e5 100644
43501 --- a/drivers/tty/tty_ldisc.c
43502 +++ b/drivers/tty/tty_ldisc.c
43503 @@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
43504 if (atomic_dec_and_test(&ld->users)) {
43505 struct tty_ldisc_ops *ldo = ld->ops;
43506
43507 - ldo->refcount--;
43508 + atomic_dec(&ldo->refcount);
43509 module_put(ldo->owner);
43510 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43511
43512 @@ -93,7 +93,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
43513 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43514 tty_ldiscs[disc] = new_ldisc;
43515 new_ldisc->num = disc;
43516 - new_ldisc->refcount = 0;
43517 + atomic_set(&new_ldisc->refcount, 0);
43518 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43519
43520 return ret;
43521 @@ -121,7 +121,7 @@ int tty_unregister_ldisc(int disc)
43522 return -EINVAL;
43523
43524 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43525 - if (tty_ldiscs[disc]->refcount)
43526 + if (atomic_read(&tty_ldiscs[disc]->refcount))
43527 ret = -EBUSY;
43528 else
43529 tty_ldiscs[disc] = NULL;
43530 @@ -142,7 +142,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
43531 if (ldops) {
43532 ret = ERR_PTR(-EAGAIN);
43533 if (try_module_get(ldops->owner)) {
43534 - ldops->refcount++;
43535 + atomic_inc(&ldops->refcount);
43536 ret = ldops;
43537 }
43538 }
43539 @@ -155,7 +155,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
43540 unsigned long flags;
43541
43542 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43543 - ldops->refcount--;
43544 + atomic_dec(&ldops->refcount);
43545 module_put(ldops->owner);
43546 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43547 }
43548 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
43549 index b7ff59d..7c6105e 100644
43550 --- a/drivers/tty/tty_port.c
43551 +++ b/drivers/tty/tty_port.c
43552 @@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
43553 unsigned long flags;
43554
43555 spin_lock_irqsave(&port->lock, flags);
43556 - port->count = 0;
43557 + atomic_set(&port->count, 0);
43558 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43559 if (port->tty) {
43560 set_bit(TTY_IO_ERROR, &port->tty->flags);
43561 @@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43562 /* The port lock protects the port counts */
43563 spin_lock_irqsave(&port->lock, flags);
43564 if (!tty_hung_up_p(filp))
43565 - port->count--;
43566 + atomic_dec(&port->count);
43567 port->blocked_open++;
43568 spin_unlock_irqrestore(&port->lock, flags);
43569
43570 @@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43571 we must not mess that up further */
43572 spin_lock_irqsave(&port->lock, flags);
43573 if (!tty_hung_up_p(filp))
43574 - port->count++;
43575 + atomic_inc(&port->count);
43576 port->blocked_open--;
43577 if (retval == 0)
43578 port->flags |= ASYNC_NORMAL_ACTIVE;
43579 @@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
43580 return 0;
43581 }
43582
43583 - if (tty->count == 1 && port->count != 1) {
43584 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
43585 printk(KERN_WARNING
43586 "tty_port_close_start: tty->count = 1 port count = %d.\n",
43587 - port->count);
43588 - port->count = 1;
43589 + atomic_read(&port->count));
43590 + atomic_set(&port->count, 1);
43591 }
43592 - if (--port->count < 0) {
43593 + if (atomic_dec_return(&port->count) < 0) {
43594 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
43595 - port->count);
43596 - port->count = 0;
43597 + atomic_read(&port->count));
43598 + atomic_set(&port->count, 0);
43599 }
43600
43601 - if (port->count) {
43602 + if (atomic_read(&port->count)) {
43603 spin_unlock_irqrestore(&port->lock, flags);
43604 if (port->ops->drop)
43605 port->ops->drop(port);
43606 @@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
43607 {
43608 spin_lock_irq(&port->lock);
43609 if (!tty_hung_up_p(filp))
43610 - ++port->count;
43611 + atomic_inc(&port->count);
43612 spin_unlock_irq(&port->lock);
43613 tty_port_tty_set(port, tty);
43614
43615 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
43616 index a9af1b9a..1e08e7f 100644
43617 --- a/drivers/tty/vt/keyboard.c
43618 +++ b/drivers/tty/vt/keyboard.c
43619 @@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
43620 kbd->kbdmode == VC_OFF) &&
43621 value != KVAL(K_SAK))
43622 return; /* SAK is allowed even in raw mode */
43623 +
43624 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43625 + {
43626 + void *func = fn_handler[value];
43627 + if (func == fn_show_state || func == fn_show_ptregs ||
43628 + func == fn_show_mem)
43629 + return;
43630 + }
43631 +#endif
43632 +
43633 fn_handler[value](vc);
43634 }
43635
43636 @@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43637 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
43638 return -EFAULT;
43639
43640 - if (!capable(CAP_SYS_TTY_CONFIG))
43641 - perm = 0;
43642 -
43643 switch (cmd) {
43644 case KDGKBENT:
43645 /* Ensure another thread doesn't free it under us */
43646 @@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43647 spin_unlock_irqrestore(&kbd_event_lock, flags);
43648 return put_user(val, &user_kbe->kb_value);
43649 case KDSKBENT:
43650 + if (!capable(CAP_SYS_TTY_CONFIG))
43651 + perm = 0;
43652 +
43653 if (!perm)
43654 return -EPERM;
43655 if (!i && v == K_NOSUCHMAP) {
43656 @@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43657 int i, j, k;
43658 int ret;
43659
43660 - if (!capable(CAP_SYS_TTY_CONFIG))
43661 - perm = 0;
43662 -
43663 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
43664 if (!kbs) {
43665 ret = -ENOMEM;
43666 @@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43667 kfree(kbs);
43668 return ((p && *p) ? -EOVERFLOW : 0);
43669 case KDSKBSENT:
43670 + if (!capable(CAP_SYS_TTY_CONFIG))
43671 + perm = 0;
43672 +
43673 if (!perm) {
43674 ret = -EPERM;
43675 goto reterr;
43676 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
43677 index c8b9262..7e824e6 100644
43678 --- a/drivers/uio/uio.c
43679 +++ b/drivers/uio/uio.c
43680 @@ -25,6 +25,7 @@
43681 #include <linux/kobject.h>
43682 #include <linux/cdev.h>
43683 #include <linux/uio_driver.h>
43684 +#include <asm/local.h>
43685
43686 #define UIO_MAX_DEVICES (1U << MINORBITS)
43687
43688 @@ -32,10 +33,10 @@ struct uio_device {
43689 struct module *owner;
43690 struct device *dev;
43691 int minor;
43692 - atomic_t event;
43693 + atomic_unchecked_t event;
43694 struct fasync_struct *async_queue;
43695 wait_queue_head_t wait;
43696 - int vma_count;
43697 + local_t vma_count;
43698 struct uio_info *info;
43699 struct kobject *map_dir;
43700 struct kobject *portio_dir;
43701 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
43702 struct device_attribute *attr, char *buf)
43703 {
43704 struct uio_device *idev = dev_get_drvdata(dev);
43705 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
43706 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
43707 }
43708
43709 static struct device_attribute uio_class_attributes[] = {
43710 @@ -397,7 +398,7 @@ void uio_event_notify(struct uio_info *info)
43711 {
43712 struct uio_device *idev = info->uio_dev;
43713
43714 - atomic_inc(&idev->event);
43715 + atomic_inc_unchecked(&idev->event);
43716 wake_up_interruptible(&idev->wait);
43717 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
43718 }
43719 @@ -450,7 +451,7 @@ static int uio_open(struct inode *inode, struct file *filep)
43720 }
43721
43722 listener->dev = idev;
43723 - listener->event_count = atomic_read(&idev->event);
43724 + listener->event_count = atomic_read_unchecked(&idev->event);
43725 filep->private_data = listener;
43726
43727 if (idev->info->open) {
43728 @@ -501,7 +502,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
43729 return -EIO;
43730
43731 poll_wait(filep, &idev->wait, wait);
43732 - if (listener->event_count != atomic_read(&idev->event))
43733 + if (listener->event_count != atomic_read_unchecked(&idev->event))
43734 return POLLIN | POLLRDNORM;
43735 return 0;
43736 }
43737 @@ -526,7 +527,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
43738 do {
43739 set_current_state(TASK_INTERRUPTIBLE);
43740
43741 - event_count = atomic_read(&idev->event);
43742 + event_count = atomic_read_unchecked(&idev->event);
43743 if (event_count != listener->event_count) {
43744 if (copy_to_user(buf, &event_count, count))
43745 retval = -EFAULT;
43746 @@ -595,13 +596,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
43747 static void uio_vma_open(struct vm_area_struct *vma)
43748 {
43749 struct uio_device *idev = vma->vm_private_data;
43750 - idev->vma_count++;
43751 + local_inc(&idev->vma_count);
43752 }
43753
43754 static void uio_vma_close(struct vm_area_struct *vma)
43755 {
43756 struct uio_device *idev = vma->vm_private_data;
43757 - idev->vma_count--;
43758 + local_dec(&idev->vma_count);
43759 }
43760
43761 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
43762 @@ -808,7 +809,7 @@ int __uio_register_device(struct module *owner,
43763 idev->owner = owner;
43764 idev->info = info;
43765 init_waitqueue_head(&idev->wait);
43766 - atomic_set(&idev->event, 0);
43767 + atomic_set_unchecked(&idev->event, 0);
43768
43769 ret = uio_get_minor(idev);
43770 if (ret)
43771 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
43772 index b7eb86a..36d28af 100644
43773 --- a/drivers/usb/atm/cxacru.c
43774 +++ b/drivers/usb/atm/cxacru.c
43775 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
43776 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
43777 if (ret < 2)
43778 return -EINVAL;
43779 - if (index < 0 || index > 0x7f)
43780 + if (index > 0x7f)
43781 return -EINVAL;
43782 pos += tmp;
43783
43784 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
43785 index 35f10bf..6a38a0b 100644
43786 --- a/drivers/usb/atm/usbatm.c
43787 +++ b/drivers/usb/atm/usbatm.c
43788 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43789 if (printk_ratelimit())
43790 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
43791 __func__, vpi, vci);
43792 - atomic_inc(&vcc->stats->rx_err);
43793 + atomic_inc_unchecked(&vcc->stats->rx_err);
43794 return;
43795 }
43796
43797 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43798 if (length > ATM_MAX_AAL5_PDU) {
43799 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
43800 __func__, length, vcc);
43801 - atomic_inc(&vcc->stats->rx_err);
43802 + atomic_inc_unchecked(&vcc->stats->rx_err);
43803 goto out;
43804 }
43805
43806 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43807 if (sarb->len < pdu_length) {
43808 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
43809 __func__, pdu_length, sarb->len, vcc);
43810 - atomic_inc(&vcc->stats->rx_err);
43811 + atomic_inc_unchecked(&vcc->stats->rx_err);
43812 goto out;
43813 }
43814
43815 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
43816 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
43817 __func__, vcc);
43818 - atomic_inc(&vcc->stats->rx_err);
43819 + atomic_inc_unchecked(&vcc->stats->rx_err);
43820 goto out;
43821 }
43822
43823 @@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43824 if (printk_ratelimit())
43825 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
43826 __func__, length);
43827 - atomic_inc(&vcc->stats->rx_drop);
43828 + atomic_inc_unchecked(&vcc->stats->rx_drop);
43829 goto out;
43830 }
43831
43832 @@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43833
43834 vcc->push(vcc, skb);
43835
43836 - atomic_inc(&vcc->stats->rx);
43837 + atomic_inc_unchecked(&vcc->stats->rx);
43838 out:
43839 skb_trim(sarb, 0);
43840 }
43841 @@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
43842 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
43843
43844 usbatm_pop(vcc, skb);
43845 - atomic_inc(&vcc->stats->tx);
43846 + atomic_inc_unchecked(&vcc->stats->tx);
43847
43848 skb = skb_dequeue(&instance->sndqueue);
43849 }
43850 @@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
43851 if (!left--)
43852 return sprintf(page,
43853 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
43854 - atomic_read(&atm_dev->stats.aal5.tx),
43855 - atomic_read(&atm_dev->stats.aal5.tx_err),
43856 - atomic_read(&atm_dev->stats.aal5.rx),
43857 - atomic_read(&atm_dev->stats.aal5.rx_err),
43858 - atomic_read(&atm_dev->stats.aal5.rx_drop));
43859 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
43860 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
43861 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
43862 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
43863 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
43864
43865 if (!left--) {
43866 if (instance->disconnected)
43867 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
43868 index 2a3bbdf..91d72cf 100644
43869 --- a/drivers/usb/core/devices.c
43870 +++ b/drivers/usb/core/devices.c
43871 @@ -126,7 +126,7 @@ static const char format_endpt[] =
43872 * time it gets called.
43873 */
43874 static struct device_connect_event {
43875 - atomic_t count;
43876 + atomic_unchecked_t count;
43877 wait_queue_head_t wait;
43878 } device_event = {
43879 .count = ATOMIC_INIT(1),
43880 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
43881
43882 void usbfs_conn_disc_event(void)
43883 {
43884 - atomic_add(2, &device_event.count);
43885 + atomic_add_unchecked(2, &device_event.count);
43886 wake_up(&device_event.wait);
43887 }
43888
43889 @@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
43890
43891 poll_wait(file, &device_event.wait, wait);
43892
43893 - event_count = atomic_read(&device_event.count);
43894 + event_count = atomic_read_unchecked(&device_event.count);
43895 if (file->f_version != event_count) {
43896 file->f_version = event_count;
43897 return POLLIN | POLLRDNORM;
43898 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
43899 index f9ec44c..eb5779f 100644
43900 --- a/drivers/usb/core/hcd.c
43901 +++ b/drivers/usb/core/hcd.c
43902 @@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
43903 */
43904 usb_get_urb(urb);
43905 atomic_inc(&urb->use_count);
43906 - atomic_inc(&urb->dev->urbnum);
43907 + atomic_inc_unchecked(&urb->dev->urbnum);
43908 usbmon_urb_submit(&hcd->self, urb);
43909
43910 /* NOTE requirements on root-hub callers (usbfs and the hub
43911 @@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
43912 urb->hcpriv = NULL;
43913 INIT_LIST_HEAD(&urb->urb_list);
43914 atomic_dec(&urb->use_count);
43915 - atomic_dec(&urb->dev->urbnum);
43916 + atomic_dec_unchecked(&urb->dev->urbnum);
43917 if (atomic_read(&urb->reject))
43918 wake_up(&usb_kill_urb_queue);
43919 usb_put_urb(urb);
43920 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
43921 index 444d30e..f15c850 100644
43922 --- a/drivers/usb/core/message.c
43923 +++ b/drivers/usb/core/message.c
43924 @@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
43925 * method can wait for it to complete. Since you don't have a handle on the
43926 * URB used, you can't cancel the request.
43927 */
43928 -int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
43929 +int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
43930 __u8 requesttype, __u16 value, __u16 index, void *data,
43931 __u16 size, int timeout)
43932 {
43933 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
43934 index 3f81a3d..a3aa993 100644
43935 --- a/drivers/usb/core/sysfs.c
43936 +++ b/drivers/usb/core/sysfs.c
43937 @@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
43938 struct usb_device *udev;
43939
43940 udev = to_usb_device(dev);
43941 - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
43942 + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
43943 }
43944 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
43945
43946 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
43947 index f81b925..78d22ec 100644
43948 --- a/drivers/usb/core/usb.c
43949 +++ b/drivers/usb/core/usb.c
43950 @@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
43951 set_dev_node(&dev->dev, dev_to_node(bus->controller));
43952 dev->state = USB_STATE_ATTACHED;
43953 dev->lpm_disable_count = 1;
43954 - atomic_set(&dev->urbnum, 0);
43955 + atomic_set_unchecked(&dev->urbnum, 0);
43956
43957 INIT_LIST_HEAD(&dev->ep0.urb_list);
43958 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
43959 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
43960 index 5e29dde..eca992f 100644
43961 --- a/drivers/usb/early/ehci-dbgp.c
43962 +++ b/drivers/usb/early/ehci-dbgp.c
43963 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
43964
43965 #ifdef CONFIG_KGDB
43966 static struct kgdb_io kgdbdbgp_io_ops;
43967 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
43968 +static struct kgdb_io kgdbdbgp_io_ops_console;
43969 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
43970 #else
43971 #define dbgp_kgdb_mode (0)
43972 #endif
43973 @@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
43974 .write_char = kgdbdbgp_write_char,
43975 };
43976
43977 +static struct kgdb_io kgdbdbgp_io_ops_console = {
43978 + .name = "kgdbdbgp",
43979 + .read_char = kgdbdbgp_read_char,
43980 + .write_char = kgdbdbgp_write_char,
43981 + .is_console = 1
43982 +};
43983 +
43984 static int kgdbdbgp_wait_time;
43985
43986 static int __init kgdbdbgp_parse_config(char *str)
43987 @@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
43988 ptr++;
43989 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
43990 }
43991 - kgdb_register_io_module(&kgdbdbgp_io_ops);
43992 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
43993 + if (early_dbgp_console.index != -1)
43994 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
43995 + else
43996 + kgdb_register_io_module(&kgdbdbgp_io_ops);
43997
43998 return 0;
43999 }
44000 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44001 index b369292..9f3ba40 100644
44002 --- a/drivers/usb/gadget/u_serial.c
44003 +++ b/drivers/usb/gadget/u_serial.c
44004 @@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44005 spin_lock_irq(&port->port_lock);
44006
44007 /* already open? Great. */
44008 - if (port->port.count) {
44009 + if (atomic_read(&port->port.count)) {
44010 status = 0;
44011 - port->port.count++;
44012 + atomic_inc(&port->port.count);
44013
44014 /* currently opening/closing? wait ... */
44015 } else if (port->openclose) {
44016 @@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44017 tty->driver_data = port;
44018 port->port.tty = tty;
44019
44020 - port->port.count = 1;
44021 + atomic_set(&port->port.count, 1);
44022 port->openclose = false;
44023
44024 /* if connected, start the I/O stream */
44025 @@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44026
44027 spin_lock_irq(&port->port_lock);
44028
44029 - if (port->port.count != 1) {
44030 - if (port->port.count == 0)
44031 + if (atomic_read(&port->port.count) != 1) {
44032 + if (atomic_read(&port->port.count) == 0)
44033 WARN_ON(1);
44034 else
44035 - --port->port.count;
44036 + atomic_dec(&port->port.count);
44037 goto exit;
44038 }
44039
44040 @@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44041 * and sleep if necessary
44042 */
44043 port->openclose = true;
44044 - port->port.count = 0;
44045 + atomic_set(&port->port.count, 0);
44046
44047 gser = port->port_usb;
44048 if (gser && gser->disconnect)
44049 @@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
44050 int cond;
44051
44052 spin_lock_irq(&port->port_lock);
44053 - cond = (port->port.count == 0) && !port->openclose;
44054 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44055 spin_unlock_irq(&port->port_lock);
44056 return cond;
44057 }
44058 @@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44059 /* if it's already open, start I/O ... and notify the serial
44060 * protocol about open/close status (connect/disconnect).
44061 */
44062 - if (port->port.count) {
44063 + if (atomic_read(&port->port.count)) {
44064 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44065 gs_start_io(port);
44066 if (gser->connect)
44067 @@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
44068
44069 port->port_usb = NULL;
44070 gser->ioport = NULL;
44071 - if (port->port.count > 0 || port->openclose) {
44072 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
44073 wake_up_interruptible(&port->drain_wait);
44074 if (port->port.tty)
44075 tty_hangup(port->port.tty);
44076 @@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
44077
44078 /* finally, free any unused/unusable I/O buffers */
44079 spin_lock_irqsave(&port->port_lock, flags);
44080 - if (port->port.count == 0 && !port->openclose)
44081 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
44082 gs_buf_free(&port->port_write_buf);
44083 gs_free_requests(gser->out, &port->read_pool, NULL);
44084 gs_free_requests(gser->out, &port->read_queue, NULL);
44085 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44086 index 5f3bcd3..bfca43f 100644
44087 --- a/drivers/usb/serial/console.c
44088 +++ b/drivers/usb/serial/console.c
44089 @@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44090
44091 info->port = port;
44092
44093 - ++port->port.count;
44094 + atomic_inc(&port->port.count);
44095 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44096 if (serial->type->set_termios) {
44097 /*
44098 @@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44099 }
44100 /* Now that any required fake tty operations are completed restore
44101 * the tty port count */
44102 - --port->port.count;
44103 + atomic_dec(&port->port.count);
44104 /* The console is special in terms of closing the device so
44105 * indicate this port is now acting as a system console. */
44106 port->port.console = 1;
44107 @@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44108 free_tty:
44109 kfree(tty);
44110 reset_open_count:
44111 - port->port.count = 0;
44112 + atomic_set(&port->port.count, 0);
44113 usb_autopm_put_interface(serial->interface);
44114 error_get_interface:
44115 usb_serial_put(serial);
44116 diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
44117 index 6c3586a..a94e621 100644
44118 --- a/drivers/usb/storage/realtek_cr.c
44119 +++ b/drivers/usb/storage/realtek_cr.c
44120 @@ -429,7 +429,7 @@ static int rts51x_read_status(struct us_data *us,
44121
44122 buf = kmalloc(len, GFP_NOIO);
44123 if (buf == NULL)
44124 - return USB_STOR_TRANSPORT_ERROR;
44125 + return -ENOMEM;
44126
44127 US_DEBUGP("%s, lun = %d\n", __func__, lun);
44128
44129 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44130 index 75f70f0..d467e1a 100644
44131 --- a/drivers/usb/storage/usb.h
44132 +++ b/drivers/usb/storage/usb.h
44133 @@ -63,7 +63,7 @@ struct us_unusual_dev {
44134 __u8 useProtocol;
44135 __u8 useTransport;
44136 int (*initFunction)(struct us_data *);
44137 -};
44138 +} __do_const;
44139
44140
44141 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44142 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44143 index d6bea3e..60b250e 100644
44144 --- a/drivers/usb/wusbcore/wa-hc.h
44145 +++ b/drivers/usb/wusbcore/wa-hc.h
44146 @@ -192,7 +192,7 @@ struct wahc {
44147 struct list_head xfer_delayed_list;
44148 spinlock_t xfer_list_lock;
44149 struct work_struct xfer_work;
44150 - atomic_t xfer_id_count;
44151 + atomic_unchecked_t xfer_id_count;
44152 };
44153
44154
44155 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44156 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44157 spin_lock_init(&wa->xfer_list_lock);
44158 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44159 - atomic_set(&wa->xfer_id_count, 1);
44160 + atomic_set_unchecked(&wa->xfer_id_count, 1);
44161 }
44162
44163 /**
44164 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44165 index 6ef94bc..1b41265 100644
44166 --- a/drivers/usb/wusbcore/wa-xfer.c
44167 +++ b/drivers/usb/wusbcore/wa-xfer.c
44168 @@ -296,7 +296,7 @@ out:
44169 */
44170 static void wa_xfer_id_init(struct wa_xfer *xfer)
44171 {
44172 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44173 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44174 }
44175
44176 /*
44177 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44178 index 8c55011..eed4ae1a 100644
44179 --- a/drivers/video/aty/aty128fb.c
44180 +++ b/drivers/video/aty/aty128fb.c
44181 @@ -149,7 +149,7 @@ enum {
44182 };
44183
44184 /* Must match above enum */
44185 -static char * const r128_family[] = {
44186 +static const char * const r128_family[] = {
44187 "AGP",
44188 "PCI",
44189 "PRO AGP",
44190 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44191 index 4f27fdc..d3537e6 100644
44192 --- a/drivers/video/aty/atyfb_base.c
44193 +++ b/drivers/video/aty/atyfb_base.c
44194 @@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
44195 par->accel_flags = var->accel_flags; /* hack */
44196
44197 if (var->accel_flags) {
44198 - info->fbops->fb_sync = atyfb_sync;
44199 + pax_open_kernel();
44200 + *(void **)&info->fbops->fb_sync = atyfb_sync;
44201 + pax_close_kernel();
44202 info->flags &= ~FBINFO_HWACCEL_DISABLED;
44203 } else {
44204 - info->fbops->fb_sync = NULL;
44205 + pax_open_kernel();
44206 + *(void **)&info->fbops->fb_sync = NULL;
44207 + pax_close_kernel();
44208 info->flags |= FBINFO_HWACCEL_DISABLED;
44209 }
44210
44211 diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
44212 index 95ec042..e6affdd 100644
44213 --- a/drivers/video/aty/mach64_cursor.c
44214 +++ b/drivers/video/aty/mach64_cursor.c
44215 @@ -7,6 +7,7 @@
44216 #include <linux/string.h>
44217
44218 #include <asm/io.h>
44219 +#include <asm/pgtable.h>
44220
44221 #ifdef __sparc__
44222 #include <asm/fbio.h>
44223 @@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
44224 info->sprite.buf_align = 16; /* and 64 lines tall. */
44225 info->sprite.flags = FB_PIXMAP_IO;
44226
44227 - info->fbops->fb_cursor = atyfb_cursor;
44228 + pax_open_kernel();
44229 + *(void **)&info->fbops->fb_cursor = atyfb_cursor;
44230 + pax_close_kernel();
44231
44232 return 0;
44233 }
44234 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
44235 index 6c5ed6b..b727c88 100644
44236 --- a/drivers/video/backlight/kb3886_bl.c
44237 +++ b/drivers/video/backlight/kb3886_bl.c
44238 @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
44239 static unsigned long kb3886bl_flags;
44240 #define KB3886BL_SUSPENDED 0x01
44241
44242 -static struct dmi_system_id __initdata kb3886bl_device_table[] = {
44243 +static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
44244 {
44245 .ident = "Sahara Touch-iT",
44246 .matches = {
44247 diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
44248 index 900aa4e..6d49418 100644
44249 --- a/drivers/video/fb_defio.c
44250 +++ b/drivers/video/fb_defio.c
44251 @@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
44252
44253 BUG_ON(!fbdefio);
44254 mutex_init(&fbdefio->lock);
44255 - info->fbops->fb_mmap = fb_deferred_io_mmap;
44256 + pax_open_kernel();
44257 + *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
44258 + pax_close_kernel();
44259 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
44260 INIT_LIST_HEAD(&fbdefio->pagelist);
44261 if (fbdefio->delay == 0) /* set a default of 1 s */
44262 @@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
44263 page->mapping = NULL;
44264 }
44265
44266 - info->fbops->fb_mmap = NULL;
44267 + *(void **)&info->fbops->fb_mmap = NULL;
44268 mutex_destroy(&fbdefio->lock);
44269 }
44270 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
44271 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
44272 index 5c3960d..15cf8fc 100644
44273 --- a/drivers/video/fbcmap.c
44274 +++ b/drivers/video/fbcmap.c
44275 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
44276 rc = -ENODEV;
44277 goto out;
44278 }
44279 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
44280 - !info->fbops->fb_setcmap)) {
44281 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
44282 rc = -EINVAL;
44283 goto out1;
44284 }
44285 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
44286 index 86291dc..7cc5962 100644
44287 --- a/drivers/video/fbmem.c
44288 +++ b/drivers/video/fbmem.c
44289 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44290 image->dx += image->width + 8;
44291 }
44292 } else if (rotate == FB_ROTATE_UD) {
44293 - for (x = 0; x < num && image->dx >= 0; x++) {
44294 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
44295 info->fbops->fb_imageblit(info, image);
44296 image->dx -= image->width + 8;
44297 }
44298 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44299 image->dy += image->height + 8;
44300 }
44301 } else if (rotate == FB_ROTATE_CCW) {
44302 - for (x = 0; x < num && image->dy >= 0; x++) {
44303 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
44304 info->fbops->fb_imageblit(info, image);
44305 image->dy -= image->height + 8;
44306 }
44307 @@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
44308 return -EFAULT;
44309 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
44310 return -EINVAL;
44311 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
44312 + if (con2fb.framebuffer >= FB_MAX)
44313 return -EINVAL;
44314 if (!registered_fb[con2fb.framebuffer])
44315 request_module("fb%d", con2fb.framebuffer);
44316 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
44317 index 7672d2e..b56437f 100644
44318 --- a/drivers/video/i810/i810_accel.c
44319 +++ b/drivers/video/i810/i810_accel.c
44320 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
44321 }
44322 }
44323 printk("ringbuffer lockup!!!\n");
44324 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
44325 i810_report_error(mmio);
44326 par->dev_flags |= LOCKUP;
44327 info->pixmap.scan_align = 1;
44328 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
44329 index 3c14e43..eafa544 100644
44330 --- a/drivers/video/logo/logo_linux_clut224.ppm
44331 +++ b/drivers/video/logo/logo_linux_clut224.ppm
44332 @@ -1,1604 +1,1123 @@
44333 P3
44334 -# Standard 224-color Linux logo
44335 80 80
44336 255
44337 - 0 0 0 0 0 0 0 0 0 0 0 0
44338 - 0 0 0 0 0 0 0 0 0 0 0 0
44339 - 0 0 0 0 0 0 0 0 0 0 0 0
44340 - 0 0 0 0 0 0 0 0 0 0 0 0
44341 - 0 0 0 0 0 0 0 0 0 0 0 0
44342 - 0 0 0 0 0 0 0 0 0 0 0 0
44343 - 0 0 0 0 0 0 0 0 0 0 0 0
44344 - 0 0 0 0 0 0 0 0 0 0 0 0
44345 - 0 0 0 0 0 0 0 0 0 0 0 0
44346 - 6 6 6 6 6 6 10 10 10 10 10 10
44347 - 10 10 10 6 6 6 6 6 6 6 6 6
44348 - 0 0 0 0 0 0 0 0 0 0 0 0
44349 - 0 0 0 0 0 0 0 0 0 0 0 0
44350 - 0 0 0 0 0 0 0 0 0 0 0 0
44351 - 0 0 0 0 0 0 0 0 0 0 0 0
44352 - 0 0 0 0 0 0 0 0 0 0 0 0
44353 - 0 0 0 0 0 0 0 0 0 0 0 0
44354 - 0 0 0 0 0 0 0 0 0 0 0 0
44355 - 0 0 0 0 0 0 0 0 0 0 0 0
44356 - 0 0 0 0 0 0 0 0 0 0 0 0
44357 - 0 0 0 0 0 0 0 0 0 0 0 0
44358 - 0 0 0 0 0 0 0 0 0 0 0 0
44359 - 0 0 0 0 0 0 0 0 0 0 0 0
44360 - 0 0 0 0 0 0 0 0 0 0 0 0
44361 - 0 0 0 0 0 0 0 0 0 0 0 0
44362 - 0 0 0 0 0 0 0 0 0 0 0 0
44363 - 0 0 0 0 0 0 0 0 0 0 0 0
44364 - 0 0 0 0 0 0 0 0 0 0 0 0
44365 - 0 0 0 6 6 6 10 10 10 14 14 14
44366 - 22 22 22 26 26 26 30 30 30 34 34 34
44367 - 30 30 30 30 30 30 26 26 26 18 18 18
44368 - 14 14 14 10 10 10 6 6 6 0 0 0
44369 - 0 0 0 0 0 0 0 0 0 0 0 0
44370 - 0 0 0 0 0 0 0 0 0 0 0 0
44371 - 0 0 0 0 0 0 0 0 0 0 0 0
44372 - 0 0 0 0 0 0 0 0 0 0 0 0
44373 - 0 0 0 0 0 0 0 0 0 0 0 0
44374 - 0 0 0 0 0 0 0 0 0 0 0 0
44375 - 0 0 0 0 0 0 0 0 0 0 0 0
44376 - 0 0 0 0 0 0 0 0 0 0 0 0
44377 - 0 0 0 0 0 0 0 0 0 0 0 0
44378 - 0 0 0 0 0 1 0 0 1 0 0 0
44379 - 0 0 0 0 0 0 0 0 0 0 0 0
44380 - 0 0 0 0 0 0 0 0 0 0 0 0
44381 - 0 0 0 0 0 0 0 0 0 0 0 0
44382 - 0 0 0 0 0 0 0 0 0 0 0 0
44383 - 0 0 0 0 0 0 0 0 0 0 0 0
44384 - 0 0 0 0 0 0 0 0 0 0 0 0
44385 - 6 6 6 14 14 14 26 26 26 42 42 42
44386 - 54 54 54 66 66 66 78 78 78 78 78 78
44387 - 78 78 78 74 74 74 66 66 66 54 54 54
44388 - 42 42 42 26 26 26 18 18 18 10 10 10
44389 - 6 6 6 0 0 0 0 0 0 0 0 0
44390 - 0 0 0 0 0 0 0 0 0 0 0 0
44391 - 0 0 0 0 0 0 0 0 0 0 0 0
44392 - 0 0 0 0 0 0 0 0 0 0 0 0
44393 - 0 0 0 0 0 0 0 0 0 0 0 0
44394 - 0 0 0 0 0 0 0 0 0 0 0 0
44395 - 0 0 0 0 0 0 0 0 0 0 0 0
44396 - 0 0 0 0 0 0 0 0 0 0 0 0
44397 - 0 0 0 0 0 0 0 0 0 0 0 0
44398 - 0 0 1 0 0 0 0 0 0 0 0 0
44399 - 0 0 0 0 0 0 0 0 0 0 0 0
44400 - 0 0 0 0 0 0 0 0 0 0 0 0
44401 - 0 0 0 0 0 0 0 0 0 0 0 0
44402 - 0 0 0 0 0 0 0 0 0 0 0 0
44403 - 0 0 0 0 0 0 0 0 0 0 0 0
44404 - 0 0 0 0 0 0 0 0 0 10 10 10
44405 - 22 22 22 42 42 42 66 66 66 86 86 86
44406 - 66 66 66 38 38 38 38 38 38 22 22 22
44407 - 26 26 26 34 34 34 54 54 54 66 66 66
44408 - 86 86 86 70 70 70 46 46 46 26 26 26
44409 - 14 14 14 6 6 6 0 0 0 0 0 0
44410 - 0 0 0 0 0 0 0 0 0 0 0 0
44411 - 0 0 0 0 0 0 0 0 0 0 0 0
44412 - 0 0 0 0 0 0 0 0 0 0 0 0
44413 - 0 0 0 0 0 0 0 0 0 0 0 0
44414 - 0 0 0 0 0 0 0 0 0 0 0 0
44415 - 0 0 0 0 0 0 0 0 0 0 0 0
44416 - 0 0 0 0 0 0 0 0 0 0 0 0
44417 - 0 0 0 0 0 0 0 0 0 0 0 0
44418 - 0 0 1 0 0 1 0 0 1 0 0 0
44419 - 0 0 0 0 0 0 0 0 0 0 0 0
44420 - 0 0 0 0 0 0 0 0 0 0 0 0
44421 - 0 0 0 0 0 0 0 0 0 0 0 0
44422 - 0 0 0 0 0 0 0 0 0 0 0 0
44423 - 0 0 0 0 0 0 0 0 0 0 0 0
44424 - 0 0 0 0 0 0 10 10 10 26 26 26
44425 - 50 50 50 82 82 82 58 58 58 6 6 6
44426 - 2 2 6 2 2 6 2 2 6 2 2 6
44427 - 2 2 6 2 2 6 2 2 6 2 2 6
44428 - 6 6 6 54 54 54 86 86 86 66 66 66
44429 - 38 38 38 18 18 18 6 6 6 0 0 0
44430 - 0 0 0 0 0 0 0 0 0 0 0 0
44431 - 0 0 0 0 0 0 0 0 0 0 0 0
44432 - 0 0 0 0 0 0 0 0 0 0 0 0
44433 - 0 0 0 0 0 0 0 0 0 0 0 0
44434 - 0 0 0 0 0 0 0 0 0 0 0 0
44435 - 0 0 0 0 0 0 0 0 0 0 0 0
44436 - 0 0 0 0 0 0 0 0 0 0 0 0
44437 - 0 0 0 0 0 0 0 0 0 0 0 0
44438 - 0 0 0 0 0 0 0 0 0 0 0 0
44439 - 0 0 0 0 0 0 0 0 0 0 0 0
44440 - 0 0 0 0 0 0 0 0 0 0 0 0
44441 - 0 0 0 0 0 0 0 0 0 0 0 0
44442 - 0 0 0 0 0 0 0 0 0 0 0 0
44443 - 0 0 0 0 0 0 0 0 0 0 0 0
44444 - 0 0 0 6 6 6 22 22 22 50 50 50
44445 - 78 78 78 34 34 34 2 2 6 2 2 6
44446 - 2 2 6 2 2 6 2 2 6 2 2 6
44447 - 2 2 6 2 2 6 2 2 6 2 2 6
44448 - 2 2 6 2 2 6 6 6 6 70 70 70
44449 - 78 78 78 46 46 46 22 22 22 6 6 6
44450 - 0 0 0 0 0 0 0 0 0 0 0 0
44451 - 0 0 0 0 0 0 0 0 0 0 0 0
44452 - 0 0 0 0 0 0 0 0 0 0 0 0
44453 - 0 0 0 0 0 0 0 0 0 0 0 0
44454 - 0 0 0 0 0 0 0 0 0 0 0 0
44455 - 0 0 0 0 0 0 0 0 0 0 0 0
44456 - 0 0 0 0 0 0 0 0 0 0 0 0
44457 - 0 0 0 0 0 0 0 0 0 0 0 0
44458 - 0 0 1 0 0 1 0 0 1 0 0 0
44459 - 0 0 0 0 0 0 0 0 0 0 0 0
44460 - 0 0 0 0 0 0 0 0 0 0 0 0
44461 - 0 0 0 0 0 0 0 0 0 0 0 0
44462 - 0 0 0 0 0 0 0 0 0 0 0 0
44463 - 0 0 0 0 0 0 0 0 0 0 0 0
44464 - 6 6 6 18 18 18 42 42 42 82 82 82
44465 - 26 26 26 2 2 6 2 2 6 2 2 6
44466 - 2 2 6 2 2 6 2 2 6 2 2 6
44467 - 2 2 6 2 2 6 2 2 6 14 14 14
44468 - 46 46 46 34 34 34 6 6 6 2 2 6
44469 - 42 42 42 78 78 78 42 42 42 18 18 18
44470 - 6 6 6 0 0 0 0 0 0 0 0 0
44471 - 0 0 0 0 0 0 0 0 0 0 0 0
44472 - 0 0 0 0 0 0 0 0 0 0 0 0
44473 - 0 0 0 0 0 0 0 0 0 0 0 0
44474 - 0 0 0 0 0 0 0 0 0 0 0 0
44475 - 0 0 0 0 0 0 0 0 0 0 0 0
44476 - 0 0 0 0 0 0 0 0 0 0 0 0
44477 - 0 0 0 0 0 0 0 0 0 0 0 0
44478 - 0 0 1 0 0 0 0 0 1 0 0 0
44479 - 0 0 0 0 0 0 0 0 0 0 0 0
44480 - 0 0 0 0 0 0 0 0 0 0 0 0
44481 - 0 0 0 0 0 0 0 0 0 0 0 0
44482 - 0 0 0 0 0 0 0 0 0 0 0 0
44483 - 0 0 0 0 0 0 0 0 0 0 0 0
44484 - 10 10 10 30 30 30 66 66 66 58 58 58
44485 - 2 2 6 2 2 6 2 2 6 2 2 6
44486 - 2 2 6 2 2 6 2 2 6 2 2 6
44487 - 2 2 6 2 2 6 2 2 6 26 26 26
44488 - 86 86 86 101 101 101 46 46 46 10 10 10
44489 - 2 2 6 58 58 58 70 70 70 34 34 34
44490 - 10 10 10 0 0 0 0 0 0 0 0 0
44491 - 0 0 0 0 0 0 0 0 0 0 0 0
44492 - 0 0 0 0 0 0 0 0 0 0 0 0
44493 - 0 0 0 0 0 0 0 0 0 0 0 0
44494 - 0 0 0 0 0 0 0 0 0 0 0 0
44495 - 0 0 0 0 0 0 0 0 0 0 0 0
44496 - 0 0 0 0 0 0 0 0 0 0 0 0
44497 - 0 0 0 0 0 0 0 0 0 0 0 0
44498 - 0 0 1 0 0 1 0 0 1 0 0 0
44499 - 0 0 0 0 0 0 0 0 0 0 0 0
44500 - 0 0 0 0 0 0 0 0 0 0 0 0
44501 - 0 0 0 0 0 0 0 0 0 0 0 0
44502 - 0 0 0 0 0 0 0 0 0 0 0 0
44503 - 0 0 0 0 0 0 0 0 0 0 0 0
44504 - 14 14 14 42 42 42 86 86 86 10 10 10
44505 - 2 2 6 2 2 6 2 2 6 2 2 6
44506 - 2 2 6 2 2 6 2 2 6 2 2 6
44507 - 2 2 6 2 2 6 2 2 6 30 30 30
44508 - 94 94 94 94 94 94 58 58 58 26 26 26
44509 - 2 2 6 6 6 6 78 78 78 54 54 54
44510 - 22 22 22 6 6 6 0 0 0 0 0 0
44511 - 0 0 0 0 0 0 0 0 0 0 0 0
44512 - 0 0 0 0 0 0 0 0 0 0 0 0
44513 - 0 0 0 0 0 0 0 0 0 0 0 0
44514 - 0 0 0 0 0 0 0 0 0 0 0 0
44515 - 0 0 0 0 0 0 0 0 0 0 0 0
44516 - 0 0 0 0 0 0 0 0 0 0 0 0
44517 - 0 0 0 0 0 0 0 0 0 0 0 0
44518 - 0 0 0 0 0 0 0 0 0 0 0 0
44519 - 0 0 0 0 0 0 0 0 0 0 0 0
44520 - 0 0 0 0 0 0 0 0 0 0 0 0
44521 - 0 0 0 0 0 0 0 0 0 0 0 0
44522 - 0 0 0 0 0 0 0 0 0 0 0 0
44523 - 0 0 0 0 0 0 0 0 0 6 6 6
44524 - 22 22 22 62 62 62 62 62 62 2 2 6
44525 - 2 2 6 2 2 6 2 2 6 2 2 6
44526 - 2 2 6 2 2 6 2 2 6 2 2 6
44527 - 2 2 6 2 2 6 2 2 6 26 26 26
44528 - 54 54 54 38 38 38 18 18 18 10 10 10
44529 - 2 2 6 2 2 6 34 34 34 82 82 82
44530 - 38 38 38 14 14 14 0 0 0 0 0 0
44531 - 0 0 0 0 0 0 0 0 0 0 0 0
44532 - 0 0 0 0 0 0 0 0 0 0 0 0
44533 - 0 0 0 0 0 0 0 0 0 0 0 0
44534 - 0 0 0 0 0 0 0 0 0 0 0 0
44535 - 0 0 0 0 0 0 0 0 0 0 0 0
44536 - 0 0 0 0 0 0 0 0 0 0 0 0
44537 - 0 0 0 0 0 0 0 0 0 0 0 0
44538 - 0 0 0 0 0 1 0 0 1 0 0 0
44539 - 0 0 0 0 0 0 0 0 0 0 0 0
44540 - 0 0 0 0 0 0 0 0 0 0 0 0
44541 - 0 0 0 0 0 0 0 0 0 0 0 0
44542 - 0 0 0 0 0 0 0 0 0 0 0 0
44543 - 0 0 0 0 0 0 0 0 0 6 6 6
44544 - 30 30 30 78 78 78 30 30 30 2 2 6
44545 - 2 2 6 2 2 6 2 2 6 2 2 6
44546 - 2 2 6 2 2 6 2 2 6 2 2 6
44547 - 2 2 6 2 2 6 2 2 6 10 10 10
44548 - 10 10 10 2 2 6 2 2 6 2 2 6
44549 - 2 2 6 2 2 6 2 2 6 78 78 78
44550 - 50 50 50 18 18 18 6 6 6 0 0 0
44551 - 0 0 0 0 0 0 0 0 0 0 0 0
44552 - 0 0 0 0 0 0 0 0 0 0 0 0
44553 - 0 0 0 0 0 0 0 0 0 0 0 0
44554 - 0 0 0 0 0 0 0 0 0 0 0 0
44555 - 0 0 0 0 0 0 0 0 0 0 0 0
44556 - 0 0 0 0 0 0 0 0 0 0 0 0
44557 - 0 0 0 0 0 0 0 0 0 0 0 0
44558 - 0 0 1 0 0 0 0 0 0 0 0 0
44559 - 0 0 0 0 0 0 0 0 0 0 0 0
44560 - 0 0 0 0 0 0 0 0 0 0 0 0
44561 - 0 0 0 0 0 0 0 0 0 0 0 0
44562 - 0 0 0 0 0 0 0 0 0 0 0 0
44563 - 0 0 0 0 0 0 0 0 0 10 10 10
44564 - 38 38 38 86 86 86 14 14 14 2 2 6
44565 - 2 2 6 2 2 6 2 2 6 2 2 6
44566 - 2 2 6 2 2 6 2 2 6 2 2 6
44567 - 2 2 6 2 2 6 2 2 6 2 2 6
44568 - 2 2 6 2 2 6 2 2 6 2 2 6
44569 - 2 2 6 2 2 6 2 2 6 54 54 54
44570 - 66 66 66 26 26 26 6 6 6 0 0 0
44571 - 0 0 0 0 0 0 0 0 0 0 0 0
44572 - 0 0 0 0 0 0 0 0 0 0 0 0
44573 - 0 0 0 0 0 0 0 0 0 0 0 0
44574 - 0 0 0 0 0 0 0 0 0 0 0 0
44575 - 0 0 0 0 0 0 0 0 0 0 0 0
44576 - 0 0 0 0 0 0 0 0 0 0 0 0
44577 - 0 0 0 0 0 0 0 0 0 0 0 0
44578 - 0 0 0 0 0 1 0 0 1 0 0 0
44579 - 0 0 0 0 0 0 0 0 0 0 0 0
44580 - 0 0 0 0 0 0 0 0 0 0 0 0
44581 - 0 0 0 0 0 0 0 0 0 0 0 0
44582 - 0 0 0 0 0 0 0 0 0 0 0 0
44583 - 0 0 0 0 0 0 0 0 0 14 14 14
44584 - 42 42 42 82 82 82 2 2 6 2 2 6
44585 - 2 2 6 6 6 6 10 10 10 2 2 6
44586 - 2 2 6 2 2 6 2 2 6 2 2 6
44587 - 2 2 6 2 2 6 2 2 6 6 6 6
44588 - 14 14 14 10 10 10 2 2 6 2 2 6
44589 - 2 2 6 2 2 6 2 2 6 18 18 18
44590 - 82 82 82 34 34 34 10 10 10 0 0 0
44591 - 0 0 0 0 0 0 0 0 0 0 0 0
44592 - 0 0 0 0 0 0 0 0 0 0 0 0
44593 - 0 0 0 0 0 0 0 0 0 0 0 0
44594 - 0 0 0 0 0 0 0 0 0 0 0 0
44595 - 0 0 0 0 0 0 0 0 0 0 0 0
44596 - 0 0 0 0 0 0 0 0 0 0 0 0
44597 - 0 0 0 0 0 0 0 0 0 0 0 0
44598 - 0 0 1 0 0 0 0 0 0 0 0 0
44599 - 0 0 0 0 0 0 0 0 0 0 0 0
44600 - 0 0 0 0 0 0 0 0 0 0 0 0
44601 - 0 0 0 0 0 0 0 0 0 0 0 0
44602 - 0 0 0 0 0 0 0 0 0 0 0 0
44603 - 0 0 0 0 0 0 0 0 0 14 14 14
44604 - 46 46 46 86 86 86 2 2 6 2 2 6
44605 - 6 6 6 6 6 6 22 22 22 34 34 34
44606 - 6 6 6 2 2 6 2 2 6 2 2 6
44607 - 2 2 6 2 2 6 18 18 18 34 34 34
44608 - 10 10 10 50 50 50 22 22 22 2 2 6
44609 - 2 2 6 2 2 6 2 2 6 10 10 10
44610 - 86 86 86 42 42 42 14 14 14 0 0 0
44611 - 0 0 0 0 0 0 0 0 0 0 0 0
44612 - 0 0 0 0 0 0 0 0 0 0 0 0
44613 - 0 0 0 0 0 0 0 0 0 0 0 0
44614 - 0 0 0 0 0 0 0 0 0 0 0 0
44615 - 0 0 0 0 0 0 0 0 0 0 0 0
44616 - 0 0 0 0 0 0 0 0 0 0 0 0
44617 - 0 0 0 0 0 0 0 0 0 0 0 0
44618 - 0 0 1 0 0 1 0 0 1 0 0 0
44619 - 0 0 0 0 0 0 0 0 0 0 0 0
44620 - 0 0 0 0 0 0 0 0 0 0 0 0
44621 - 0 0 0 0 0 0 0 0 0 0 0 0
44622 - 0 0 0 0 0 0 0 0 0 0 0 0
44623 - 0 0 0 0 0 0 0 0 0 14 14 14
44624 - 46 46 46 86 86 86 2 2 6 2 2 6
44625 - 38 38 38 116 116 116 94 94 94 22 22 22
44626 - 22 22 22 2 2 6 2 2 6 2 2 6
44627 - 14 14 14 86 86 86 138 138 138 162 162 162
44628 -154 154 154 38 38 38 26 26 26 6 6 6
44629 - 2 2 6 2 2 6 2 2 6 2 2 6
44630 - 86 86 86 46 46 46 14 14 14 0 0 0
44631 - 0 0 0 0 0 0 0 0 0 0 0 0
44632 - 0 0 0 0 0 0 0 0 0 0 0 0
44633 - 0 0 0 0 0 0 0 0 0 0 0 0
44634 - 0 0 0 0 0 0 0 0 0 0 0 0
44635 - 0 0 0 0 0 0 0 0 0 0 0 0
44636 - 0 0 0 0 0 0 0 0 0 0 0 0
44637 - 0 0 0 0 0 0 0 0 0 0 0 0
44638 - 0 0 0 0 0 0 0 0 0 0 0 0
44639 - 0 0 0 0 0 0 0 0 0 0 0 0
44640 - 0 0 0 0 0 0 0 0 0 0 0 0
44641 - 0 0 0 0 0 0 0 0 0 0 0 0
44642 - 0 0 0 0 0 0 0 0 0 0 0 0
44643 - 0 0 0 0 0 0 0 0 0 14 14 14
44644 - 46 46 46 86 86 86 2 2 6 14 14 14
44645 -134 134 134 198 198 198 195 195 195 116 116 116
44646 - 10 10 10 2 2 6 2 2 6 6 6 6
44647 -101 98 89 187 187 187 210 210 210 218 218 218
44648 -214 214 214 134 134 134 14 14 14 6 6 6
44649 - 2 2 6 2 2 6 2 2 6 2 2 6
44650 - 86 86 86 50 50 50 18 18 18 6 6 6
44651 - 0 0 0 0 0 0 0 0 0 0 0 0
44652 - 0 0 0 0 0 0 0 0 0 0 0 0
44653 - 0 0 0 0 0 0 0 0 0 0 0 0
44654 - 0 0 0 0 0 0 0 0 0 0 0 0
44655 - 0 0 0 0 0 0 0 0 0 0 0 0
44656 - 0 0 0 0 0 0 0 0 0 0 0 0
44657 - 0 0 0 0 0 0 0 0 1 0 0 0
44658 - 0 0 1 0 0 1 0 0 1 0 0 0
44659 - 0 0 0 0 0 0 0 0 0 0 0 0
44660 - 0 0 0 0 0 0 0 0 0 0 0 0
44661 - 0 0 0 0 0 0 0 0 0 0 0 0
44662 - 0 0 0 0 0 0 0 0 0 0 0 0
44663 - 0 0 0 0 0 0 0 0 0 14 14 14
44664 - 46 46 46 86 86 86 2 2 6 54 54 54
44665 -218 218 218 195 195 195 226 226 226 246 246 246
44666 - 58 58 58 2 2 6 2 2 6 30 30 30
44667 -210 210 210 253 253 253 174 174 174 123 123 123
44668 -221 221 221 234 234 234 74 74 74 2 2 6
44669 - 2 2 6 2 2 6 2 2 6 2 2 6
44670 - 70 70 70 58 58 58 22 22 22 6 6 6
44671 - 0 0 0 0 0 0 0 0 0 0 0 0
44672 - 0 0 0 0 0 0 0 0 0 0 0 0
44673 - 0 0 0 0 0 0 0 0 0 0 0 0
44674 - 0 0 0 0 0 0 0 0 0 0 0 0
44675 - 0 0 0 0 0 0 0 0 0 0 0 0
44676 - 0 0 0 0 0 0 0 0 0 0 0 0
44677 - 0 0 0 0 0 0 0 0 0 0 0 0
44678 - 0 0 0 0 0 0 0 0 0 0 0 0
44679 - 0 0 0 0 0 0 0 0 0 0 0 0
44680 - 0 0 0 0 0 0 0 0 0 0 0 0
44681 - 0 0 0 0 0 0 0 0 0 0 0 0
44682 - 0 0 0 0 0 0 0 0 0 0 0 0
44683 - 0 0 0 0 0 0 0 0 0 14 14 14
44684 - 46 46 46 82 82 82 2 2 6 106 106 106
44685 -170 170 170 26 26 26 86 86 86 226 226 226
44686 -123 123 123 10 10 10 14 14 14 46 46 46
44687 -231 231 231 190 190 190 6 6 6 70 70 70
44688 - 90 90 90 238 238 238 158 158 158 2 2 6
44689 - 2 2 6 2 2 6 2 2 6 2 2 6
44690 - 70 70 70 58 58 58 22 22 22 6 6 6
44691 - 0 0 0 0 0 0 0 0 0 0 0 0
44692 - 0 0 0 0 0 0 0 0 0 0 0 0
44693 - 0 0 0 0 0 0 0 0 0 0 0 0
44694 - 0 0 0 0 0 0 0 0 0 0 0 0
44695 - 0 0 0 0 0 0 0 0 0 0 0 0
44696 - 0 0 0 0 0 0 0 0 0 0 0 0
44697 - 0 0 0 0 0 0 0 0 1 0 0 0
44698 - 0 0 1 0 0 1 0 0 1 0 0 0
44699 - 0 0 0 0 0 0 0 0 0 0 0 0
44700 - 0 0 0 0 0 0 0 0 0 0 0 0
44701 - 0 0 0 0 0 0 0 0 0 0 0 0
44702 - 0 0 0 0 0 0 0 0 0 0 0 0
44703 - 0 0 0 0 0 0 0 0 0 14 14 14
44704 - 42 42 42 86 86 86 6 6 6 116 116 116
44705 -106 106 106 6 6 6 70 70 70 149 149 149
44706 -128 128 128 18 18 18 38 38 38 54 54 54
44707 -221 221 221 106 106 106 2 2 6 14 14 14
44708 - 46 46 46 190 190 190 198 198 198 2 2 6
44709 - 2 2 6 2 2 6 2 2 6 2 2 6
44710 - 74 74 74 62 62 62 22 22 22 6 6 6
44711 - 0 0 0 0 0 0 0 0 0 0 0 0
44712 - 0 0 0 0 0 0 0 0 0 0 0 0
44713 - 0 0 0 0 0 0 0 0 0 0 0 0
44714 - 0 0 0 0 0 0 0 0 0 0 0 0
44715 - 0 0 0 0 0 0 0 0 0 0 0 0
44716 - 0 0 0 0 0 0 0 0 0 0 0 0
44717 - 0 0 0 0 0 0 0 0 1 0 0 0
44718 - 0 0 1 0 0 0 0 0 1 0 0 0
44719 - 0 0 0 0 0 0 0 0 0 0 0 0
44720 - 0 0 0 0 0 0 0 0 0 0 0 0
44721 - 0 0 0 0 0 0 0 0 0 0 0 0
44722 - 0 0 0 0 0 0 0 0 0 0 0 0
44723 - 0 0 0 0 0 0 0 0 0 14 14 14
44724 - 42 42 42 94 94 94 14 14 14 101 101 101
44725 -128 128 128 2 2 6 18 18 18 116 116 116
44726 -118 98 46 121 92 8 121 92 8 98 78 10
44727 -162 162 162 106 106 106 2 2 6 2 2 6
44728 - 2 2 6 195 195 195 195 195 195 6 6 6
44729 - 2 2 6 2 2 6 2 2 6 2 2 6
44730 - 74 74 74 62 62 62 22 22 22 6 6 6
44731 - 0 0 0 0 0 0 0 0 0 0 0 0
44732 - 0 0 0 0 0 0 0 0 0 0 0 0
44733 - 0 0 0 0 0 0 0 0 0 0 0 0
44734 - 0 0 0 0 0 0 0 0 0 0 0 0
44735 - 0 0 0 0 0 0 0 0 0 0 0 0
44736 - 0 0 0 0 0 0 0 0 0 0 0 0
44737 - 0 0 0 0 0 0 0 0 1 0 0 1
44738 - 0 0 1 0 0 0 0 0 1 0 0 0
44739 - 0 0 0 0 0 0 0 0 0 0 0 0
44740 - 0 0 0 0 0 0 0 0 0 0 0 0
44741 - 0 0 0 0 0 0 0 0 0 0 0 0
44742 - 0 0 0 0 0 0 0 0 0 0 0 0
44743 - 0 0 0 0 0 0 0 0 0 10 10 10
44744 - 38 38 38 90 90 90 14 14 14 58 58 58
44745 -210 210 210 26 26 26 54 38 6 154 114 10
44746 -226 170 11 236 186 11 225 175 15 184 144 12
44747 -215 174 15 175 146 61 37 26 9 2 2 6
44748 - 70 70 70 246 246 246 138 138 138 2 2 6
44749 - 2 2 6 2 2 6 2 2 6 2 2 6
44750 - 70 70 70 66 66 66 26 26 26 6 6 6
44751 - 0 0 0 0 0 0 0 0 0 0 0 0
44752 - 0 0 0 0 0 0 0 0 0 0 0 0
44753 - 0 0 0 0 0 0 0 0 0 0 0 0
44754 - 0 0 0 0 0 0 0 0 0 0 0 0
44755 - 0 0 0 0 0 0 0 0 0 0 0 0
44756 - 0 0 0 0 0 0 0 0 0 0 0 0
44757 - 0 0 0 0 0 0 0 0 0 0 0 0
44758 - 0 0 0 0 0 0 0 0 0 0 0 0
44759 - 0 0 0 0 0 0 0 0 0 0 0 0
44760 - 0 0 0 0 0 0 0 0 0 0 0 0
44761 - 0 0 0 0 0 0 0 0 0 0 0 0
44762 - 0 0 0 0 0 0 0 0 0 0 0 0
44763 - 0 0 0 0 0 0 0 0 0 10 10 10
44764 - 38 38 38 86 86 86 14 14 14 10 10 10
44765 -195 195 195 188 164 115 192 133 9 225 175 15
44766 -239 182 13 234 190 10 232 195 16 232 200 30
44767 -245 207 45 241 208 19 232 195 16 184 144 12
44768 -218 194 134 211 206 186 42 42 42 2 2 6
44769 - 2 2 6 2 2 6 2 2 6 2 2 6
44770 - 50 50 50 74 74 74 30 30 30 6 6 6
44771 - 0 0 0 0 0 0 0 0 0 0 0 0
44772 - 0 0 0 0 0 0 0 0 0 0 0 0
44773 - 0 0 0 0 0 0 0 0 0 0 0 0
44774 - 0 0 0 0 0 0 0 0 0 0 0 0
44775 - 0 0 0 0 0 0 0 0 0 0 0 0
44776 - 0 0 0 0 0 0 0 0 0 0 0 0
44777 - 0 0 0 0 0 0 0 0 0 0 0 0
44778 - 0 0 0 0 0 0 0 0 0 0 0 0
44779 - 0 0 0 0 0 0 0 0 0 0 0 0
44780 - 0 0 0 0 0 0 0 0 0 0 0 0
44781 - 0 0 0 0 0 0 0 0 0 0 0 0
44782 - 0 0 0 0 0 0 0 0 0 0 0 0
44783 - 0 0 0 0 0 0 0 0 0 10 10 10
44784 - 34 34 34 86 86 86 14 14 14 2 2 6
44785 -121 87 25 192 133 9 219 162 10 239 182 13
44786 -236 186 11 232 195 16 241 208 19 244 214 54
44787 -246 218 60 246 218 38 246 215 20 241 208 19
44788 -241 208 19 226 184 13 121 87 25 2 2 6
44789 - 2 2 6 2 2 6 2 2 6 2 2 6
44790 - 50 50 50 82 82 82 34 34 34 10 10 10
44791 - 0 0 0 0 0 0 0 0 0 0 0 0
44792 - 0 0 0 0 0 0 0 0 0 0 0 0
44793 - 0 0 0 0 0 0 0 0 0 0 0 0
44794 - 0 0 0 0 0 0 0 0 0 0 0 0
44795 - 0 0 0 0 0 0 0 0 0 0 0 0
44796 - 0 0 0 0 0 0 0 0 0 0 0 0
44797 - 0 0 0 0 0 0 0 0 0 0 0 0
44798 - 0 0 0 0 0 0 0 0 0 0 0 0
44799 - 0 0 0 0 0 0 0 0 0 0 0 0
44800 - 0 0 0 0 0 0 0 0 0 0 0 0
44801 - 0 0 0 0 0 0 0 0 0 0 0 0
44802 - 0 0 0 0 0 0 0 0 0 0 0 0
44803 - 0 0 0 0 0 0 0 0 0 10 10 10
44804 - 34 34 34 82 82 82 30 30 30 61 42 6
44805 -180 123 7 206 145 10 230 174 11 239 182 13
44806 -234 190 10 238 202 15 241 208 19 246 218 74
44807 -246 218 38 246 215 20 246 215 20 246 215 20
44808 -226 184 13 215 174 15 184 144 12 6 6 6
44809 - 2 2 6 2 2 6 2 2 6 2 2 6
44810 - 26 26 26 94 94 94 42 42 42 14 14 14
44811 - 0 0 0 0 0 0 0 0 0 0 0 0
44812 - 0 0 0 0 0 0 0 0 0 0 0 0
44813 - 0 0 0 0 0 0 0 0 0 0 0 0
44814 - 0 0 0 0 0 0 0 0 0 0 0 0
44815 - 0 0 0 0 0 0 0 0 0 0 0 0
44816 - 0 0 0 0 0 0 0 0 0 0 0 0
44817 - 0 0 0 0 0 0 0 0 0 0 0 0
44818 - 0 0 0 0 0 0 0 0 0 0 0 0
44819 - 0 0 0 0 0 0 0 0 0 0 0 0
44820 - 0 0 0 0 0 0 0 0 0 0 0 0
44821 - 0 0 0 0 0 0 0 0 0 0 0 0
44822 - 0 0 0 0 0 0 0 0 0 0 0 0
44823 - 0 0 0 0 0 0 0 0 0 10 10 10
44824 - 30 30 30 78 78 78 50 50 50 104 69 6
44825 -192 133 9 216 158 10 236 178 12 236 186 11
44826 -232 195 16 241 208 19 244 214 54 245 215 43
44827 -246 215 20 246 215 20 241 208 19 198 155 10
44828 -200 144 11 216 158 10 156 118 10 2 2 6
44829 - 2 2 6 2 2 6 2 2 6 2 2 6
44830 - 6 6 6 90 90 90 54 54 54 18 18 18
44831 - 6 6 6 0 0 0 0 0 0 0 0 0
44832 - 0 0 0 0 0 0 0 0 0 0 0 0
44833 - 0 0 0 0 0 0 0 0 0 0 0 0
44834 - 0 0 0 0 0 0 0 0 0 0 0 0
44835 - 0 0 0 0 0 0 0 0 0 0 0 0
44836 - 0 0 0 0 0 0 0 0 0 0 0 0
44837 - 0 0 0 0 0 0 0 0 0 0 0 0
44838 - 0 0 0 0 0 0 0 0 0 0 0 0
44839 - 0 0 0 0 0 0 0 0 0 0 0 0
44840 - 0 0 0 0 0 0 0 0 0 0 0 0
44841 - 0 0 0 0 0 0 0 0 0 0 0 0
44842 - 0 0 0 0 0 0 0 0 0 0 0 0
44843 - 0 0 0 0 0 0 0 0 0 10 10 10
44844 - 30 30 30 78 78 78 46 46 46 22 22 22
44845 -137 92 6 210 162 10 239 182 13 238 190 10
44846 -238 202 15 241 208 19 246 215 20 246 215 20
44847 -241 208 19 203 166 17 185 133 11 210 150 10
44848 -216 158 10 210 150 10 102 78 10 2 2 6
44849 - 6 6 6 54 54 54 14 14 14 2 2 6
44850 - 2 2 6 62 62 62 74 74 74 30 30 30
44851 - 10 10 10 0 0 0 0 0 0 0 0 0
44852 - 0 0 0 0 0 0 0 0 0 0 0 0
44853 - 0 0 0 0 0 0 0 0 0 0 0 0
44854 - 0 0 0 0 0 0 0 0 0 0 0 0
44855 - 0 0 0 0 0 0 0 0 0 0 0 0
44856 - 0 0 0 0 0 0 0 0 0 0 0 0
44857 - 0 0 0 0 0 0 0 0 0 0 0 0
44858 - 0 0 0 0 0 0 0 0 0 0 0 0
44859 - 0 0 0 0 0 0 0 0 0 0 0 0
44860 - 0 0 0 0 0 0 0 0 0 0 0 0
44861 - 0 0 0 0 0 0 0 0 0 0 0 0
44862 - 0 0 0 0 0 0 0 0 0 0 0 0
44863 - 0 0 0 0 0 0 0 0 0 10 10 10
44864 - 34 34 34 78 78 78 50 50 50 6 6 6
44865 - 94 70 30 139 102 15 190 146 13 226 184 13
44866 -232 200 30 232 195 16 215 174 15 190 146 13
44867 -168 122 10 192 133 9 210 150 10 213 154 11
44868 -202 150 34 182 157 106 101 98 89 2 2 6
44869 - 2 2 6 78 78 78 116 116 116 58 58 58
44870 - 2 2 6 22 22 22 90 90 90 46 46 46
44871 - 18 18 18 6 6 6 0 0 0 0 0 0
44872 - 0 0 0 0 0 0 0 0 0 0 0 0
44873 - 0 0 0 0 0 0 0 0 0 0 0 0
44874 - 0 0 0 0 0 0 0 0 0 0 0 0
44875 - 0 0 0 0 0 0 0 0 0 0 0 0
44876 - 0 0 0 0 0 0 0 0 0 0 0 0
44877 - 0 0 0 0 0 0 0 0 0 0 0 0
44878 - 0 0 0 0 0 0 0 0 0 0 0 0
44879 - 0 0 0 0 0 0 0 0 0 0 0 0
44880 - 0 0 0 0 0 0 0 0 0 0 0 0
44881 - 0 0 0 0 0 0 0 0 0 0 0 0
44882 - 0 0 0 0 0 0 0 0 0 0 0 0
44883 - 0 0 0 0 0 0 0 0 0 10 10 10
44884 - 38 38 38 86 86 86 50 50 50 6 6 6
44885 -128 128 128 174 154 114 156 107 11 168 122 10
44886 -198 155 10 184 144 12 197 138 11 200 144 11
44887 -206 145 10 206 145 10 197 138 11 188 164 115
44888 -195 195 195 198 198 198 174 174 174 14 14 14
44889 - 2 2 6 22 22 22 116 116 116 116 116 116
44890 - 22 22 22 2 2 6 74 74 74 70 70 70
44891 - 30 30 30 10 10 10 0 0 0 0 0 0
44892 - 0 0 0 0 0 0 0 0 0 0 0 0
44893 - 0 0 0 0 0 0 0 0 0 0 0 0
44894 - 0 0 0 0 0 0 0 0 0 0 0 0
44895 - 0 0 0 0 0 0 0 0 0 0 0 0
44896 - 0 0 0 0 0 0 0 0 0 0 0 0
44897 - 0 0 0 0 0 0 0 0 0 0 0 0
44898 - 0 0 0 0 0 0 0 0 0 0 0 0
44899 - 0 0 0 0 0 0 0 0 0 0 0 0
44900 - 0 0 0 0 0 0 0 0 0 0 0 0
44901 - 0 0 0 0 0 0 0 0 0 0 0 0
44902 - 0 0 0 0 0 0 0 0 0 0 0 0
44903 - 0 0 0 0 0 0 6 6 6 18 18 18
44904 - 50 50 50 101 101 101 26 26 26 10 10 10
44905 -138 138 138 190 190 190 174 154 114 156 107 11
44906 -197 138 11 200 144 11 197 138 11 192 133 9
44907 -180 123 7 190 142 34 190 178 144 187 187 187
44908 -202 202 202 221 221 221 214 214 214 66 66 66
44909 - 2 2 6 2 2 6 50 50 50 62 62 62
44910 - 6 6 6 2 2 6 10 10 10 90 90 90
44911 - 50 50 50 18 18 18 6 6 6 0 0 0
44912 - 0 0 0 0 0 0 0 0 0 0 0 0
44913 - 0 0 0 0 0 0 0 0 0 0 0 0
44914 - 0 0 0 0 0 0 0 0 0 0 0 0
44915 - 0 0 0 0 0 0 0 0 0 0 0 0
44916 - 0 0 0 0 0 0 0 0 0 0 0 0
44917 - 0 0 0 0 0 0 0 0 0 0 0 0
44918 - 0 0 0 0 0 0 0 0 0 0 0 0
44919 - 0 0 0 0 0 0 0 0 0 0 0 0
44920 - 0 0 0 0 0 0 0 0 0 0 0 0
44921 - 0 0 0 0 0 0 0 0 0 0 0 0
44922 - 0 0 0 0 0 0 0 0 0 0 0 0
44923 - 0 0 0 0 0 0 10 10 10 34 34 34
44924 - 74 74 74 74 74 74 2 2 6 6 6 6
44925 -144 144 144 198 198 198 190 190 190 178 166 146
44926 -154 121 60 156 107 11 156 107 11 168 124 44
44927 -174 154 114 187 187 187 190 190 190 210 210 210
44928 -246 246 246 253 253 253 253 253 253 182 182 182
44929 - 6 6 6 2 2 6 2 2 6 2 2 6
44930 - 2 2 6 2 2 6 2 2 6 62 62 62
44931 - 74 74 74 34 34 34 14 14 14 0 0 0
44932 - 0 0 0 0 0 0 0 0 0 0 0 0
44933 - 0 0 0 0 0 0 0 0 0 0 0 0
44934 - 0 0 0 0 0 0 0 0 0 0 0 0
44935 - 0 0 0 0 0 0 0 0 0 0 0 0
44936 - 0 0 0 0 0 0 0 0 0 0 0 0
44937 - 0 0 0 0 0 0 0 0 0 0 0 0
44938 - 0 0 0 0 0 0 0 0 0 0 0 0
44939 - 0 0 0 0 0 0 0 0 0 0 0 0
44940 - 0 0 0 0 0 0 0 0 0 0 0 0
44941 - 0 0 0 0 0 0 0 0 0 0 0 0
44942 - 0 0 0 0 0 0 0 0 0 0 0 0
44943 - 0 0 0 10 10 10 22 22 22 54 54 54
44944 - 94 94 94 18 18 18 2 2 6 46 46 46
44945 -234 234 234 221 221 221 190 190 190 190 190 190
44946 -190 190 190 187 187 187 187 187 187 190 190 190
44947 -190 190 190 195 195 195 214 214 214 242 242 242
44948 -253 253 253 253 253 253 253 253 253 253 253 253
44949 - 82 82 82 2 2 6 2 2 6 2 2 6
44950 - 2 2 6 2 2 6 2 2 6 14 14 14
44951 - 86 86 86 54 54 54 22 22 22 6 6 6
44952 - 0 0 0 0 0 0 0 0 0 0 0 0
44953 - 0 0 0 0 0 0 0 0 0 0 0 0
44954 - 0 0 0 0 0 0 0 0 0 0 0 0
44955 - 0 0 0 0 0 0 0 0 0 0 0 0
44956 - 0 0 0 0 0 0 0 0 0 0 0 0
44957 - 0 0 0 0 0 0 0 0 0 0 0 0
44958 - 0 0 0 0 0 0 0 0 0 0 0 0
44959 - 0 0 0 0 0 0 0 0 0 0 0 0
44960 - 0 0 0 0 0 0 0 0 0 0 0 0
44961 - 0 0 0 0 0 0 0 0 0 0 0 0
44962 - 0 0 0 0 0 0 0 0 0 0 0 0
44963 - 6 6 6 18 18 18 46 46 46 90 90 90
44964 - 46 46 46 18 18 18 6 6 6 182 182 182
44965 -253 253 253 246 246 246 206 206 206 190 190 190
44966 -190 190 190 190 190 190 190 190 190 190 190 190
44967 -206 206 206 231 231 231 250 250 250 253 253 253
44968 -253 253 253 253 253 253 253 253 253 253 253 253
44969 -202 202 202 14 14 14 2 2 6 2 2 6
44970 - 2 2 6 2 2 6 2 2 6 2 2 6
44971 - 42 42 42 86 86 86 42 42 42 18 18 18
44972 - 6 6 6 0 0 0 0 0 0 0 0 0
44973 - 0 0 0 0 0 0 0 0 0 0 0 0
44974 - 0 0 0 0 0 0 0 0 0 0 0 0
44975 - 0 0 0 0 0 0 0 0 0 0 0 0
44976 - 0 0 0 0 0 0 0 0 0 0 0 0
44977 - 0 0 0 0 0 0 0 0 0 0 0 0
44978 - 0 0 0 0 0 0 0 0 0 0 0 0
44979 - 0 0 0 0 0 0 0 0 0 0 0 0
44980 - 0 0 0 0 0 0 0 0 0 0 0 0
44981 - 0 0 0 0 0 0 0 0 0 0 0 0
44982 - 0 0 0 0 0 0 0 0 0 6 6 6
44983 - 14 14 14 38 38 38 74 74 74 66 66 66
44984 - 2 2 6 6 6 6 90 90 90 250 250 250
44985 -253 253 253 253 253 253 238 238 238 198 198 198
44986 -190 190 190 190 190 190 195 195 195 221 221 221
44987 -246 246 246 253 253 253 253 253 253 253 253 253
44988 -253 253 253 253 253 253 253 253 253 253 253 253
44989 -253 253 253 82 82 82 2 2 6 2 2 6
44990 - 2 2 6 2 2 6 2 2 6 2 2 6
44991 - 2 2 6 78 78 78 70 70 70 34 34 34
44992 - 14 14 14 6 6 6 0 0 0 0 0 0
44993 - 0 0 0 0 0 0 0 0 0 0 0 0
44994 - 0 0 0 0 0 0 0 0 0 0 0 0
44995 - 0 0 0 0 0 0 0 0 0 0 0 0
44996 - 0 0 0 0 0 0 0 0 0 0 0 0
44997 - 0 0 0 0 0 0 0 0 0 0 0 0
44998 - 0 0 0 0 0 0 0 0 0 0 0 0
44999 - 0 0 0 0 0 0 0 0 0 0 0 0
45000 - 0 0 0 0 0 0 0 0 0 0 0 0
45001 - 0 0 0 0 0 0 0 0 0 0 0 0
45002 - 0 0 0 0 0 0 0 0 0 14 14 14
45003 - 34 34 34 66 66 66 78 78 78 6 6 6
45004 - 2 2 6 18 18 18 218 218 218 253 253 253
45005 -253 253 253 253 253 253 253 253 253 246 246 246
45006 -226 226 226 231 231 231 246 246 246 253 253 253
45007 -253 253 253 253 253 253 253 253 253 253 253 253
45008 -253 253 253 253 253 253 253 253 253 253 253 253
45009 -253 253 253 178 178 178 2 2 6 2 2 6
45010 - 2 2 6 2 2 6 2 2 6 2 2 6
45011 - 2 2 6 18 18 18 90 90 90 62 62 62
45012 - 30 30 30 10 10 10 0 0 0 0 0 0
45013 - 0 0 0 0 0 0 0 0 0 0 0 0
45014 - 0 0 0 0 0 0 0 0 0 0 0 0
45015 - 0 0 0 0 0 0 0 0 0 0 0 0
45016 - 0 0 0 0 0 0 0 0 0 0 0 0
45017 - 0 0 0 0 0 0 0 0 0 0 0 0
45018 - 0 0 0 0 0 0 0 0 0 0 0 0
45019 - 0 0 0 0 0 0 0 0 0 0 0 0
45020 - 0 0 0 0 0 0 0 0 0 0 0 0
45021 - 0 0 0 0 0 0 0 0 0 0 0 0
45022 - 0 0 0 0 0 0 10 10 10 26 26 26
45023 - 58 58 58 90 90 90 18 18 18 2 2 6
45024 - 2 2 6 110 110 110 253 253 253 253 253 253
45025 -253 253 253 253 253 253 253 253 253 253 253 253
45026 -250 250 250 253 253 253 253 253 253 253 253 253
45027 -253 253 253 253 253 253 253 253 253 253 253 253
45028 -253 253 253 253 253 253 253 253 253 253 253 253
45029 -253 253 253 231 231 231 18 18 18 2 2 6
45030 - 2 2 6 2 2 6 2 2 6 2 2 6
45031 - 2 2 6 2 2 6 18 18 18 94 94 94
45032 - 54 54 54 26 26 26 10 10 10 0 0 0
45033 - 0 0 0 0 0 0 0 0 0 0 0 0
45034 - 0 0 0 0 0 0 0 0 0 0 0 0
45035 - 0 0 0 0 0 0 0 0 0 0 0 0
45036 - 0 0 0 0 0 0 0 0 0 0 0 0
45037 - 0 0 0 0 0 0 0 0 0 0 0 0
45038 - 0 0 0 0 0 0 0 0 0 0 0 0
45039 - 0 0 0 0 0 0 0 0 0 0 0 0
45040 - 0 0 0 0 0 0 0 0 0 0 0 0
45041 - 0 0 0 0 0 0 0 0 0 0 0 0
45042 - 0 0 0 6 6 6 22 22 22 50 50 50
45043 - 90 90 90 26 26 26 2 2 6 2 2 6
45044 - 14 14 14 195 195 195 250 250 250 253 253 253
45045 -253 253 253 253 253 253 253 253 253 253 253 253
45046 -253 253 253 253 253 253 253 253 253 253 253 253
45047 -253 253 253 253 253 253 253 253 253 253 253 253
45048 -253 253 253 253 253 253 253 253 253 253 253 253
45049 -250 250 250 242 242 242 54 54 54 2 2 6
45050 - 2 2 6 2 2 6 2 2 6 2 2 6
45051 - 2 2 6 2 2 6 2 2 6 38 38 38
45052 - 86 86 86 50 50 50 22 22 22 6 6 6
45053 - 0 0 0 0 0 0 0 0 0 0 0 0
45054 - 0 0 0 0 0 0 0 0 0 0 0 0
45055 - 0 0 0 0 0 0 0 0 0 0 0 0
45056 - 0 0 0 0 0 0 0 0 0 0 0 0
45057 - 0 0 0 0 0 0 0 0 0 0 0 0
45058 - 0 0 0 0 0 0 0 0 0 0 0 0
45059 - 0 0 0 0 0 0 0 0 0 0 0 0
45060 - 0 0 0 0 0 0 0 0 0 0 0 0
45061 - 0 0 0 0 0 0 0 0 0 0 0 0
45062 - 6 6 6 14 14 14 38 38 38 82 82 82
45063 - 34 34 34 2 2 6 2 2 6 2 2 6
45064 - 42 42 42 195 195 195 246 246 246 253 253 253
45065 -253 253 253 253 253 253 253 253 253 250 250 250
45066 -242 242 242 242 242 242 250 250 250 253 253 253
45067 -253 253 253 253 253 253 253 253 253 253 253 253
45068 -253 253 253 250 250 250 246 246 246 238 238 238
45069 -226 226 226 231 231 231 101 101 101 6 6 6
45070 - 2 2 6 2 2 6 2 2 6 2 2 6
45071 - 2 2 6 2 2 6 2 2 6 2 2 6
45072 - 38 38 38 82 82 82 42 42 42 14 14 14
45073 - 6 6 6 0 0 0 0 0 0 0 0 0
45074 - 0 0 0 0 0 0 0 0 0 0 0 0
45075 - 0 0 0 0 0 0 0 0 0 0 0 0
45076 - 0 0 0 0 0 0 0 0 0 0 0 0
45077 - 0 0 0 0 0 0 0 0 0 0 0 0
45078 - 0 0 0 0 0 0 0 0 0 0 0 0
45079 - 0 0 0 0 0 0 0 0 0 0 0 0
45080 - 0 0 0 0 0 0 0 0 0 0 0 0
45081 - 0 0 0 0 0 0 0 0 0 0 0 0
45082 - 10 10 10 26 26 26 62 62 62 66 66 66
45083 - 2 2 6 2 2 6 2 2 6 6 6 6
45084 - 70 70 70 170 170 170 206 206 206 234 234 234
45085 -246 246 246 250 250 250 250 250 250 238 238 238
45086 -226 226 226 231 231 231 238 238 238 250 250 250
45087 -250 250 250 250 250 250 246 246 246 231 231 231
45088 -214 214 214 206 206 206 202 202 202 202 202 202
45089 -198 198 198 202 202 202 182 182 182 18 18 18
45090 - 2 2 6 2 2 6 2 2 6 2 2 6
45091 - 2 2 6 2 2 6 2 2 6 2 2 6
45092 - 2 2 6 62 62 62 66 66 66 30 30 30
45093 - 10 10 10 0 0 0 0 0 0 0 0 0
45094 - 0 0 0 0 0 0 0 0 0 0 0 0
45095 - 0 0 0 0 0 0 0 0 0 0 0 0
45096 - 0 0 0 0 0 0 0 0 0 0 0 0
45097 - 0 0 0 0 0 0 0 0 0 0 0 0
45098 - 0 0 0 0 0 0 0 0 0 0 0 0
45099 - 0 0 0 0 0 0 0 0 0 0 0 0
45100 - 0 0 0 0 0 0 0 0 0 0 0 0
45101 - 0 0 0 0 0 0 0 0 0 0 0 0
45102 - 14 14 14 42 42 42 82 82 82 18 18 18
45103 - 2 2 6 2 2 6 2 2 6 10 10 10
45104 - 94 94 94 182 182 182 218 218 218 242 242 242
45105 -250 250 250 253 253 253 253 253 253 250 250 250
45106 -234 234 234 253 253 253 253 253 253 253 253 253
45107 -253 253 253 253 253 253 253 253 253 246 246 246
45108 -238 238 238 226 226 226 210 210 210 202 202 202
45109 -195 195 195 195 195 195 210 210 210 158 158 158
45110 - 6 6 6 14 14 14 50 50 50 14 14 14
45111 - 2 2 6 2 2 6 2 2 6 2 2 6
45112 - 2 2 6 6 6 6 86 86 86 46 46 46
45113 - 18 18 18 6 6 6 0 0 0 0 0 0
45114 - 0 0 0 0 0 0 0 0 0 0 0 0
45115 - 0 0 0 0 0 0 0 0 0 0 0 0
45116 - 0 0 0 0 0 0 0 0 0 0 0 0
45117 - 0 0 0 0 0 0 0 0 0 0 0 0
45118 - 0 0 0 0 0 0 0 0 0 0 0 0
45119 - 0 0 0 0 0 0 0 0 0 0 0 0
45120 - 0 0 0 0 0 0 0 0 0 0 0 0
45121 - 0 0 0 0 0 0 0 0 0 6 6 6
45122 - 22 22 22 54 54 54 70 70 70 2 2 6
45123 - 2 2 6 10 10 10 2 2 6 22 22 22
45124 -166 166 166 231 231 231 250 250 250 253 253 253
45125 -253 253 253 253 253 253 253 253 253 250 250 250
45126 -242 242 242 253 253 253 253 253 253 253 253 253
45127 -253 253 253 253 253 253 253 253 253 253 253 253
45128 -253 253 253 253 253 253 253 253 253 246 246 246
45129 -231 231 231 206 206 206 198 198 198 226 226 226
45130 - 94 94 94 2 2 6 6 6 6 38 38 38
45131 - 30 30 30 2 2 6 2 2 6 2 2 6
45132 - 2 2 6 2 2 6 62 62 62 66 66 66
45133 - 26 26 26 10 10 10 0 0 0 0 0 0
45134 - 0 0 0 0 0 0 0 0 0 0 0 0
45135 - 0 0 0 0 0 0 0 0 0 0 0 0
45136 - 0 0 0 0 0 0 0 0 0 0 0 0
45137 - 0 0 0 0 0 0 0 0 0 0 0 0
45138 - 0 0 0 0 0 0 0 0 0 0 0 0
45139 - 0 0 0 0 0 0 0 0 0 0 0 0
45140 - 0 0 0 0 0 0 0 0 0 0 0 0
45141 - 0 0 0 0 0 0 0 0 0 10 10 10
45142 - 30 30 30 74 74 74 50 50 50 2 2 6
45143 - 26 26 26 26 26 26 2 2 6 106 106 106
45144 -238 238 238 253 253 253 253 253 253 253 253 253
45145 -253 253 253 253 253 253 253 253 253 253 253 253
45146 -253 253 253 253 253 253 253 253 253 253 253 253
45147 -253 253 253 253 253 253 253 253 253 253 253 253
45148 -253 253 253 253 253 253 253 253 253 253 253 253
45149 -253 253 253 246 246 246 218 218 218 202 202 202
45150 -210 210 210 14 14 14 2 2 6 2 2 6
45151 - 30 30 30 22 22 22 2 2 6 2 2 6
45152 - 2 2 6 2 2 6 18 18 18 86 86 86
45153 - 42 42 42 14 14 14 0 0 0 0 0 0
45154 - 0 0 0 0 0 0 0 0 0 0 0 0
45155 - 0 0 0 0 0 0 0 0 0 0 0 0
45156 - 0 0 0 0 0 0 0 0 0 0 0 0
45157 - 0 0 0 0 0 0 0 0 0 0 0 0
45158 - 0 0 0 0 0 0 0 0 0 0 0 0
45159 - 0 0 0 0 0 0 0 0 0 0 0 0
45160 - 0 0 0 0 0 0 0 0 0 0 0 0
45161 - 0 0 0 0 0 0 0 0 0 14 14 14
45162 - 42 42 42 90 90 90 22 22 22 2 2 6
45163 - 42 42 42 2 2 6 18 18 18 218 218 218
45164 -253 253 253 253 253 253 253 253 253 253 253 253
45165 -253 253 253 253 253 253 253 253 253 253 253 253
45166 -253 253 253 253 253 253 253 253 253 253 253 253
45167 -253 253 253 253 253 253 253 253 253 253 253 253
45168 -253 253 253 253 253 253 253 253 253 253 253 253
45169 -253 253 253 253 253 253 250 250 250 221 221 221
45170 -218 218 218 101 101 101 2 2 6 14 14 14
45171 - 18 18 18 38 38 38 10 10 10 2 2 6
45172 - 2 2 6 2 2 6 2 2 6 78 78 78
45173 - 58 58 58 22 22 22 6 6 6 0 0 0
45174 - 0 0 0 0 0 0 0 0 0 0 0 0
45175 - 0 0 0 0 0 0 0 0 0 0 0 0
45176 - 0 0 0 0 0 0 0 0 0 0 0 0
45177 - 0 0 0 0 0 0 0 0 0 0 0 0
45178 - 0 0 0 0 0 0 0 0 0 0 0 0
45179 - 0 0 0 0 0 0 0 0 0 0 0 0
45180 - 0 0 0 0 0 0 0 0 0 0 0 0
45181 - 0 0 0 0 0 0 6 6 6 18 18 18
45182 - 54 54 54 82 82 82 2 2 6 26 26 26
45183 - 22 22 22 2 2 6 123 123 123 253 253 253
45184 -253 253 253 253 253 253 253 253 253 253 253 253
45185 -253 253 253 253 253 253 253 253 253 253 253 253
45186 -253 253 253 253 253 253 253 253 253 253 253 253
45187 -253 253 253 253 253 253 253 253 253 253 253 253
45188 -253 253 253 253 253 253 253 253 253 253 253 253
45189 -253 253 253 253 253 253 253 253 253 250 250 250
45190 -238 238 238 198 198 198 6 6 6 38 38 38
45191 - 58 58 58 26 26 26 38 38 38 2 2 6
45192 - 2 2 6 2 2 6 2 2 6 46 46 46
45193 - 78 78 78 30 30 30 10 10 10 0 0 0
45194 - 0 0 0 0 0 0 0 0 0 0 0 0
45195 - 0 0 0 0 0 0 0 0 0 0 0 0
45196 - 0 0 0 0 0 0 0 0 0 0 0 0
45197 - 0 0 0 0 0 0 0 0 0 0 0 0
45198 - 0 0 0 0 0 0 0 0 0 0 0 0
45199 - 0 0 0 0 0 0 0 0 0 0 0 0
45200 - 0 0 0 0 0 0 0 0 0 0 0 0
45201 - 0 0 0 0 0 0 10 10 10 30 30 30
45202 - 74 74 74 58 58 58 2 2 6 42 42 42
45203 - 2 2 6 22 22 22 231 231 231 253 253 253
45204 -253 253 253 253 253 253 253 253 253 253 253 253
45205 -253 253 253 253 253 253 253 253 253 250 250 250
45206 -253 253 253 253 253 253 253 253 253 253 253 253
45207 -253 253 253 253 253 253 253 253 253 253 253 253
45208 -253 253 253 253 253 253 253 253 253 253 253 253
45209 -253 253 253 253 253 253 253 253 253 253 253 253
45210 -253 253 253 246 246 246 46 46 46 38 38 38
45211 - 42 42 42 14 14 14 38 38 38 14 14 14
45212 - 2 2 6 2 2 6 2 2 6 6 6 6
45213 - 86 86 86 46 46 46 14 14 14 0 0 0
45214 - 0 0 0 0 0 0 0 0 0 0 0 0
45215 - 0 0 0 0 0 0 0 0 0 0 0 0
45216 - 0 0 0 0 0 0 0 0 0 0 0 0
45217 - 0 0 0 0 0 0 0 0 0 0 0 0
45218 - 0 0 0 0 0 0 0 0 0 0 0 0
45219 - 0 0 0 0 0 0 0 0 0 0 0 0
45220 - 0 0 0 0 0 0 0 0 0 0 0 0
45221 - 0 0 0 6 6 6 14 14 14 42 42 42
45222 - 90 90 90 18 18 18 18 18 18 26 26 26
45223 - 2 2 6 116 116 116 253 253 253 253 253 253
45224 -253 253 253 253 253 253 253 253 253 253 253 253
45225 -253 253 253 253 253 253 250 250 250 238 238 238
45226 -253 253 253 253 253 253 253 253 253 253 253 253
45227 -253 253 253 253 253 253 253 253 253 253 253 253
45228 -253 253 253 253 253 253 253 253 253 253 253 253
45229 -253 253 253 253 253 253 253 253 253 253 253 253
45230 -253 253 253 253 253 253 94 94 94 6 6 6
45231 - 2 2 6 2 2 6 10 10 10 34 34 34
45232 - 2 2 6 2 2 6 2 2 6 2 2 6
45233 - 74 74 74 58 58 58 22 22 22 6 6 6
45234 - 0 0 0 0 0 0 0 0 0 0 0 0
45235 - 0 0 0 0 0 0 0 0 0 0 0 0
45236 - 0 0 0 0 0 0 0 0 0 0 0 0
45237 - 0 0 0 0 0 0 0 0 0 0 0 0
45238 - 0 0 0 0 0 0 0 0 0 0 0 0
45239 - 0 0 0 0 0 0 0 0 0 0 0 0
45240 - 0 0 0 0 0 0 0 0 0 0 0 0
45241 - 0 0 0 10 10 10 26 26 26 66 66 66
45242 - 82 82 82 2 2 6 38 38 38 6 6 6
45243 - 14 14 14 210 210 210 253 253 253 253 253 253
45244 -253 253 253 253 253 253 253 253 253 253 253 253
45245 -253 253 253 253 253 253 246 246 246 242 242 242
45246 -253 253 253 253 253 253 253 253 253 253 253 253
45247 -253 253 253 253 253 253 253 253 253 253 253 253
45248 -253 253 253 253 253 253 253 253 253 253 253 253
45249 -253 253 253 253 253 253 253 253 253 253 253 253
45250 -253 253 253 253 253 253 144 144 144 2 2 6
45251 - 2 2 6 2 2 6 2 2 6 46 46 46
45252 - 2 2 6 2 2 6 2 2 6 2 2 6
45253 - 42 42 42 74 74 74 30 30 30 10 10 10
45254 - 0 0 0 0 0 0 0 0 0 0 0 0
45255 - 0 0 0 0 0 0 0 0 0 0 0 0
45256 - 0 0 0 0 0 0 0 0 0 0 0 0
45257 - 0 0 0 0 0 0 0 0 0 0 0 0
45258 - 0 0 0 0 0 0 0 0 0 0 0 0
45259 - 0 0 0 0 0 0 0 0 0 0 0 0
45260 - 0 0 0 0 0 0 0 0 0 0 0 0
45261 - 6 6 6 14 14 14 42 42 42 90 90 90
45262 - 26 26 26 6 6 6 42 42 42 2 2 6
45263 - 74 74 74 250 250 250 253 253 253 253 253 253
45264 -253 253 253 253 253 253 253 253 253 253 253 253
45265 -253 253 253 253 253 253 242 242 242 242 242 242
45266 -253 253 253 253 253 253 253 253 253 253 253 253
45267 -253 253 253 253 253 253 253 253 253 253 253 253
45268 -253 253 253 253 253 253 253 253 253 253 253 253
45269 -253 253 253 253 253 253 253 253 253 253 253 253
45270 -253 253 253 253 253 253 182 182 182 2 2 6
45271 - 2 2 6 2 2 6 2 2 6 46 46 46
45272 - 2 2 6 2 2 6 2 2 6 2 2 6
45273 - 10 10 10 86 86 86 38 38 38 10 10 10
45274 - 0 0 0 0 0 0 0 0 0 0 0 0
45275 - 0 0 0 0 0 0 0 0 0 0 0 0
45276 - 0 0 0 0 0 0 0 0 0 0 0 0
45277 - 0 0 0 0 0 0 0 0 0 0 0 0
45278 - 0 0 0 0 0 0 0 0 0 0 0 0
45279 - 0 0 0 0 0 0 0 0 0 0 0 0
45280 - 0 0 0 0 0 0 0 0 0 0 0 0
45281 - 10 10 10 26 26 26 66 66 66 82 82 82
45282 - 2 2 6 22 22 22 18 18 18 2 2 6
45283 -149 149 149 253 253 253 253 253 253 253 253 253
45284 -253 253 253 253 253 253 253 253 253 253 253 253
45285 -253 253 253 253 253 253 234 234 234 242 242 242
45286 -253 253 253 253 253 253 253 253 253 253 253 253
45287 -253 253 253 253 253 253 253 253 253 253 253 253
45288 -253 253 253 253 253 253 253 253 253 253 253 253
45289 -253 253 253 253 253 253 253 253 253 253 253 253
45290 -253 253 253 253 253 253 206 206 206 2 2 6
45291 - 2 2 6 2 2 6 2 2 6 38 38 38
45292 - 2 2 6 2 2 6 2 2 6 2 2 6
45293 - 6 6 6 86 86 86 46 46 46 14 14 14
45294 - 0 0 0 0 0 0 0 0 0 0 0 0
45295 - 0 0 0 0 0 0 0 0 0 0 0 0
45296 - 0 0 0 0 0 0 0 0 0 0 0 0
45297 - 0 0 0 0 0 0 0 0 0 0 0 0
45298 - 0 0 0 0 0 0 0 0 0 0 0 0
45299 - 0 0 0 0 0 0 0 0 0 0 0 0
45300 - 0 0 0 0 0 0 0 0 0 6 6 6
45301 - 18 18 18 46 46 46 86 86 86 18 18 18
45302 - 2 2 6 34 34 34 10 10 10 6 6 6
45303 -210 210 210 253 253 253 253 253 253 253 253 253
45304 -253 253 253 253 253 253 253 253 253 253 253 253
45305 -253 253 253 253 253 253 234 234 234 242 242 242
45306 -253 253 253 253 253 253 253 253 253 253 253 253
45307 -253 253 253 253 253 253 253 253 253 253 253 253
45308 -253 253 253 253 253 253 253 253 253 253 253 253
45309 -253 253 253 253 253 253 253 253 253 253 253 253
45310 -253 253 253 253 253 253 221 221 221 6 6 6
45311 - 2 2 6 2 2 6 6 6 6 30 30 30
45312 - 2 2 6 2 2 6 2 2 6 2 2 6
45313 - 2 2 6 82 82 82 54 54 54 18 18 18
45314 - 6 6 6 0 0 0 0 0 0 0 0 0
45315 - 0 0 0 0 0 0 0 0 0 0 0 0
45316 - 0 0 0 0 0 0 0 0 0 0 0 0
45317 - 0 0 0 0 0 0 0 0 0 0 0 0
45318 - 0 0 0 0 0 0 0 0 0 0 0 0
45319 - 0 0 0 0 0 0 0 0 0 0 0 0
45320 - 0 0 0 0 0 0 0 0 0 10 10 10
45321 - 26 26 26 66 66 66 62 62 62 2 2 6
45322 - 2 2 6 38 38 38 10 10 10 26 26 26
45323 -238 238 238 253 253 253 253 253 253 253 253 253
45324 -253 253 253 253 253 253 253 253 253 253 253 253
45325 -253 253 253 253 253 253 231 231 231 238 238 238
45326 -253 253 253 253 253 253 253 253 253 253 253 253
45327 -253 253 253 253 253 253 253 253 253 253 253 253
45328 -253 253 253 253 253 253 253 253 253 253 253 253
45329 -253 253 253 253 253 253 253 253 253 253 253 253
45330 -253 253 253 253 253 253 231 231 231 6 6 6
45331 - 2 2 6 2 2 6 10 10 10 30 30 30
45332 - 2 2 6 2 2 6 2 2 6 2 2 6
45333 - 2 2 6 66 66 66 58 58 58 22 22 22
45334 - 6 6 6 0 0 0 0 0 0 0 0 0
45335 - 0 0 0 0 0 0 0 0 0 0 0 0
45336 - 0 0 0 0 0 0 0 0 0 0 0 0
45337 - 0 0 0 0 0 0 0 0 0 0 0 0
45338 - 0 0 0 0 0 0 0 0 0 0 0 0
45339 - 0 0 0 0 0 0 0 0 0 0 0 0
45340 - 0 0 0 0 0 0 0 0 0 10 10 10
45341 - 38 38 38 78 78 78 6 6 6 2 2 6
45342 - 2 2 6 46 46 46 14 14 14 42 42 42
45343 -246 246 246 253 253 253 253 253 253 253 253 253
45344 -253 253 253 253 253 253 253 253 253 253 253 253
45345 -253 253 253 253 253 253 231 231 231 242 242 242
45346 -253 253 253 253 253 253 253 253 253 253 253 253
45347 -253 253 253 253 253 253 253 253 253 253 253 253
45348 -253 253 253 253 253 253 253 253 253 253 253 253
45349 -253 253 253 253 253 253 253 253 253 253 253 253
45350 -253 253 253 253 253 253 234 234 234 10 10 10
45351 - 2 2 6 2 2 6 22 22 22 14 14 14
45352 - 2 2 6 2 2 6 2 2 6 2 2 6
45353 - 2 2 6 66 66 66 62 62 62 22 22 22
45354 - 6 6 6 0 0 0 0 0 0 0 0 0
45355 - 0 0 0 0 0 0 0 0 0 0 0 0
45356 - 0 0 0 0 0 0 0 0 0 0 0 0
45357 - 0 0 0 0 0 0 0 0 0 0 0 0
45358 - 0 0 0 0 0 0 0 0 0 0 0 0
45359 - 0 0 0 0 0 0 0 0 0 0 0 0
45360 - 0 0 0 0 0 0 6 6 6 18 18 18
45361 - 50 50 50 74 74 74 2 2 6 2 2 6
45362 - 14 14 14 70 70 70 34 34 34 62 62 62
45363 -250 250 250 253 253 253 253 253 253 253 253 253
45364 -253 253 253 253 253 253 253 253 253 253 253 253
45365 -253 253 253 253 253 253 231 231 231 246 246 246
45366 -253 253 253 253 253 253 253 253 253 253 253 253
45367 -253 253 253 253 253 253 253 253 253 253 253 253
45368 -253 253 253 253 253 253 253 253 253 253 253 253
45369 -253 253 253 253 253 253 253 253 253 253 253 253
45370 -253 253 253 253 253 253 234 234 234 14 14 14
45371 - 2 2 6 2 2 6 30 30 30 2 2 6
45372 - 2 2 6 2 2 6 2 2 6 2 2 6
45373 - 2 2 6 66 66 66 62 62 62 22 22 22
45374 - 6 6 6 0 0 0 0 0 0 0 0 0
45375 - 0 0 0 0 0 0 0 0 0 0 0 0
45376 - 0 0 0 0 0 0 0 0 0 0 0 0
45377 - 0 0 0 0 0 0 0 0 0 0 0 0
45378 - 0 0 0 0 0 0 0 0 0 0 0 0
45379 - 0 0 0 0 0 0 0 0 0 0 0 0
45380 - 0 0 0 0 0 0 6 6 6 18 18 18
45381 - 54 54 54 62 62 62 2 2 6 2 2 6
45382 - 2 2 6 30 30 30 46 46 46 70 70 70
45383 -250 250 250 253 253 253 253 253 253 253 253 253
45384 -253 253 253 253 253 253 253 253 253 253 253 253
45385 -253 253 253 253 253 253 231 231 231 246 246 246
45386 -253 253 253 253 253 253 253 253 253 253 253 253
45387 -253 253 253 253 253 253 253 253 253 253 253 253
45388 -253 253 253 253 253 253 253 253 253 253 253 253
45389 -253 253 253 253 253 253 253 253 253 253 253 253
45390 -253 253 253 253 253 253 226 226 226 10 10 10
45391 - 2 2 6 6 6 6 30 30 30 2 2 6
45392 - 2 2 6 2 2 6 2 2 6 2 2 6
45393 - 2 2 6 66 66 66 58 58 58 22 22 22
45394 - 6 6 6 0 0 0 0 0 0 0 0 0
45395 - 0 0 0 0 0 0 0 0 0 0 0 0
45396 - 0 0 0 0 0 0 0 0 0 0 0 0
45397 - 0 0 0 0 0 0 0 0 0 0 0 0
45398 - 0 0 0 0 0 0 0 0 0 0 0 0
45399 - 0 0 0 0 0 0 0 0 0 0 0 0
45400 - 0 0 0 0 0 0 6 6 6 22 22 22
45401 - 58 58 58 62 62 62 2 2 6 2 2 6
45402 - 2 2 6 2 2 6 30 30 30 78 78 78
45403 -250 250 250 253 253 253 253 253 253 253 253 253
45404 -253 253 253 253 253 253 253 253 253 253 253 253
45405 -253 253 253 253 253 253 231 231 231 246 246 246
45406 -253 253 253 253 253 253 253 253 253 253 253 253
45407 -253 253 253 253 253 253 253 253 253 253 253 253
45408 -253 253 253 253 253 253 253 253 253 253 253 253
45409 -253 253 253 253 253 253 253 253 253 253 253 253
45410 -253 253 253 253 253 253 206 206 206 2 2 6
45411 - 22 22 22 34 34 34 18 14 6 22 22 22
45412 - 26 26 26 18 18 18 6 6 6 2 2 6
45413 - 2 2 6 82 82 82 54 54 54 18 18 18
45414 - 6 6 6 0 0 0 0 0 0 0 0 0
45415 - 0 0 0 0 0 0 0 0 0 0 0 0
45416 - 0 0 0 0 0 0 0 0 0 0 0 0
45417 - 0 0 0 0 0 0 0 0 0 0 0 0
45418 - 0 0 0 0 0 0 0 0 0 0 0 0
45419 - 0 0 0 0 0 0 0 0 0 0 0 0
45420 - 0 0 0 0 0 0 6 6 6 26 26 26
45421 - 62 62 62 106 106 106 74 54 14 185 133 11
45422 -210 162 10 121 92 8 6 6 6 62 62 62
45423 -238 238 238 253 253 253 253 253 253 253 253 253
45424 -253 253 253 253 253 253 253 253 253 253 253 253
45425 -253 253 253 253 253 253 231 231 231 246 246 246
45426 -253 253 253 253 253 253 253 253 253 253 253 253
45427 -253 253 253 253 253 253 253 253 253 253 253 253
45428 -253 253 253 253 253 253 253 253 253 253 253 253
45429 -253 253 253 253 253 253 253 253 253 253 253 253
45430 -253 253 253 253 253 253 158 158 158 18 18 18
45431 - 14 14 14 2 2 6 2 2 6 2 2 6
45432 - 6 6 6 18 18 18 66 66 66 38 38 38
45433 - 6 6 6 94 94 94 50 50 50 18 18 18
45434 - 6 6 6 0 0 0 0 0 0 0 0 0
45435 - 0 0 0 0 0 0 0 0 0 0 0 0
45436 - 0 0 0 0 0 0 0 0 0 0 0 0
45437 - 0 0 0 0 0 0 0 0 0 0 0 0
45438 - 0 0 0 0 0 0 0 0 0 0 0 0
45439 - 0 0 0 0 0 0 0 0 0 6 6 6
45440 - 10 10 10 10 10 10 18 18 18 38 38 38
45441 - 78 78 78 142 134 106 216 158 10 242 186 14
45442 -246 190 14 246 190 14 156 118 10 10 10 10
45443 - 90 90 90 238 238 238 253 253 253 253 253 253
45444 -253 253 253 253 253 253 253 253 253 253 253 253
45445 -253 253 253 253 253 253 231 231 231 250 250 250
45446 -253 253 253 253 253 253 253 253 253 253 253 253
45447 -253 253 253 253 253 253 253 253 253 253 253 253
45448 -253 253 253 253 253 253 253 253 253 253 253 253
45449 -253 253 253 253 253 253 253 253 253 246 230 190
45450 -238 204 91 238 204 91 181 142 44 37 26 9
45451 - 2 2 6 2 2 6 2 2 6 2 2 6
45452 - 2 2 6 2 2 6 38 38 38 46 46 46
45453 - 26 26 26 106 106 106 54 54 54 18 18 18
45454 - 6 6 6 0 0 0 0 0 0 0 0 0
45455 - 0 0 0 0 0 0 0 0 0 0 0 0
45456 - 0 0 0 0 0 0 0 0 0 0 0 0
45457 - 0 0 0 0 0 0 0 0 0 0 0 0
45458 - 0 0 0 0 0 0 0 0 0 0 0 0
45459 - 0 0 0 6 6 6 14 14 14 22 22 22
45460 - 30 30 30 38 38 38 50 50 50 70 70 70
45461 -106 106 106 190 142 34 226 170 11 242 186 14
45462 -246 190 14 246 190 14 246 190 14 154 114 10
45463 - 6 6 6 74 74 74 226 226 226 253 253 253
45464 -253 253 253 253 253 253 253 253 253 253 253 253
45465 -253 253 253 253 253 253 231 231 231 250 250 250
45466 -253 253 253 253 253 253 253 253 253 253 253 253
45467 -253 253 253 253 253 253 253 253 253 253 253 253
45468 -253 253 253 253 253 253 253 253 253 253 253 253
45469 -253 253 253 253 253 253 253 253 253 228 184 62
45470 -241 196 14 241 208 19 232 195 16 38 30 10
45471 - 2 2 6 2 2 6 2 2 6 2 2 6
45472 - 2 2 6 6 6 6 30 30 30 26 26 26
45473 -203 166 17 154 142 90 66 66 66 26 26 26
45474 - 6 6 6 0 0 0 0 0 0 0 0 0
45475 - 0 0 0 0 0 0 0 0 0 0 0 0
45476 - 0 0 0 0 0 0 0 0 0 0 0 0
45477 - 0 0 0 0 0 0 0 0 0 0 0 0
45478 - 0 0 0 0 0 0 0 0 0 0 0 0
45479 - 6 6 6 18 18 18 38 38 38 58 58 58
45480 - 78 78 78 86 86 86 101 101 101 123 123 123
45481 -175 146 61 210 150 10 234 174 13 246 186 14
45482 -246 190 14 246 190 14 246 190 14 238 190 10
45483 -102 78 10 2 2 6 46 46 46 198 198 198
45484 -253 253 253 253 253 253 253 253 253 253 253 253
45485 -253 253 253 253 253 253 234 234 234 242 242 242
45486 -253 253 253 253 253 253 253 253 253 253 253 253
45487 -253 253 253 253 253 253 253 253 253 253 253 253
45488 -253 253 253 253 253 253 253 253 253 253 253 253
45489 -253 253 253 253 253 253 253 253 253 224 178 62
45490 -242 186 14 241 196 14 210 166 10 22 18 6
45491 - 2 2 6 2 2 6 2 2 6 2 2 6
45492 - 2 2 6 2 2 6 6 6 6 121 92 8
45493 -238 202 15 232 195 16 82 82 82 34 34 34
45494 - 10 10 10 0 0 0 0 0 0 0 0 0
45495 - 0 0 0 0 0 0 0 0 0 0 0 0
45496 - 0 0 0 0 0 0 0 0 0 0 0 0
45497 - 0 0 0 0 0 0 0 0 0 0 0 0
45498 - 0 0 0 0 0 0 0 0 0 0 0 0
45499 - 14 14 14 38 38 38 70 70 70 154 122 46
45500 -190 142 34 200 144 11 197 138 11 197 138 11
45501 -213 154 11 226 170 11 242 186 14 246 190 14
45502 -246 190 14 246 190 14 246 190 14 246 190 14
45503 -225 175 15 46 32 6 2 2 6 22 22 22
45504 -158 158 158 250 250 250 253 253 253 253 253 253
45505 -253 253 253 253 253 253 253 253 253 253 253 253
45506 -253 253 253 253 253 253 253 253 253 253 253 253
45507 -253 253 253 253 253 253 253 253 253 253 253 253
45508 -253 253 253 253 253 253 253 253 253 253 253 253
45509 -253 253 253 250 250 250 242 242 242 224 178 62
45510 -239 182 13 236 186 11 213 154 11 46 32 6
45511 - 2 2 6 2 2 6 2 2 6 2 2 6
45512 - 2 2 6 2 2 6 61 42 6 225 175 15
45513 -238 190 10 236 186 11 112 100 78 42 42 42
45514 - 14 14 14 0 0 0 0 0 0 0 0 0
45515 - 0 0 0 0 0 0 0 0 0 0 0 0
45516 - 0 0 0 0 0 0 0 0 0 0 0 0
45517 - 0 0 0 0 0 0 0 0 0 0 0 0
45518 - 0 0 0 0 0 0 0 0 0 6 6 6
45519 - 22 22 22 54 54 54 154 122 46 213 154 11
45520 -226 170 11 230 174 11 226 170 11 226 170 11
45521 -236 178 12 242 186 14 246 190 14 246 190 14
45522 -246 190 14 246 190 14 246 190 14 246 190 14
45523 -241 196 14 184 144 12 10 10 10 2 2 6
45524 - 6 6 6 116 116 116 242 242 242 253 253 253
45525 -253 253 253 253 253 253 253 253 253 253 253 253
45526 -253 253 253 253 253 253 253 253 253 253 253 253
45527 -253 253 253 253 253 253 253 253 253 253 253 253
45528 -253 253 253 253 253 253 253 253 253 253 253 253
45529 -253 253 253 231 231 231 198 198 198 214 170 54
45530 -236 178 12 236 178 12 210 150 10 137 92 6
45531 - 18 14 6 2 2 6 2 2 6 2 2 6
45532 - 6 6 6 70 47 6 200 144 11 236 178 12
45533 -239 182 13 239 182 13 124 112 88 58 58 58
45534 - 22 22 22 6 6 6 0 0 0 0 0 0
45535 - 0 0 0 0 0 0 0 0 0 0 0 0
45536 - 0 0 0 0 0 0 0 0 0 0 0 0
45537 - 0 0 0 0 0 0 0 0 0 0 0 0
45538 - 0 0 0 0 0 0 0 0 0 10 10 10
45539 - 30 30 30 70 70 70 180 133 36 226 170 11
45540 -239 182 13 242 186 14 242 186 14 246 186 14
45541 -246 190 14 246 190 14 246 190 14 246 190 14
45542 -246 190 14 246 190 14 246 190 14 246 190 14
45543 -246 190 14 232 195 16 98 70 6 2 2 6
45544 - 2 2 6 2 2 6 66 66 66 221 221 221
45545 -253 253 253 253 253 253 253 253 253 253 253 253
45546 -253 253 253 253 253 253 253 253 253 253 253 253
45547 -253 253 253 253 253 253 253 253 253 253 253 253
45548 -253 253 253 253 253 253 253 253 253 253 253 253
45549 -253 253 253 206 206 206 198 198 198 214 166 58
45550 -230 174 11 230 174 11 216 158 10 192 133 9
45551 -163 110 8 116 81 8 102 78 10 116 81 8
45552 -167 114 7 197 138 11 226 170 11 239 182 13
45553 -242 186 14 242 186 14 162 146 94 78 78 78
45554 - 34 34 34 14 14 14 6 6 6 0 0 0
45555 - 0 0 0 0 0 0 0 0 0 0 0 0
45556 - 0 0 0 0 0 0 0 0 0 0 0 0
45557 - 0 0 0 0 0 0 0 0 0 0 0 0
45558 - 0 0 0 0 0 0 0 0 0 6 6 6
45559 - 30 30 30 78 78 78 190 142 34 226 170 11
45560 -239 182 13 246 190 14 246 190 14 246 190 14
45561 -246 190 14 246 190 14 246 190 14 246 190 14
45562 -246 190 14 246 190 14 246 190 14 246 190 14
45563 -246 190 14 241 196 14 203 166 17 22 18 6
45564 - 2 2 6 2 2 6 2 2 6 38 38 38
45565 -218 218 218 253 253 253 253 253 253 253 253 253
45566 -253 253 253 253 253 253 253 253 253 253 253 253
45567 -253 253 253 253 253 253 253 253 253 253 253 253
45568 -253 253 253 253 253 253 253 253 253 253 253 253
45569 -250 250 250 206 206 206 198 198 198 202 162 69
45570 -226 170 11 236 178 12 224 166 10 210 150 10
45571 -200 144 11 197 138 11 192 133 9 197 138 11
45572 -210 150 10 226 170 11 242 186 14 246 190 14
45573 -246 190 14 246 186 14 225 175 15 124 112 88
45574 - 62 62 62 30 30 30 14 14 14 6 6 6
45575 - 0 0 0 0 0 0 0 0 0 0 0 0
45576 - 0 0 0 0 0 0 0 0 0 0 0 0
45577 - 0 0 0 0 0 0 0 0 0 0 0 0
45578 - 0 0 0 0 0 0 0 0 0 10 10 10
45579 - 30 30 30 78 78 78 174 135 50 224 166 10
45580 -239 182 13 246 190 14 246 190 14 246 190 14
45581 -246 190 14 246 190 14 246 190 14 246 190 14
45582 -246 190 14 246 190 14 246 190 14 246 190 14
45583 -246 190 14 246 190 14 241 196 14 139 102 15
45584 - 2 2 6 2 2 6 2 2 6 2 2 6
45585 - 78 78 78 250 250 250 253 253 253 253 253 253
45586 -253 253 253 253 253 253 253 253 253 253 253 253
45587 -253 253 253 253 253 253 253 253 253 253 253 253
45588 -253 253 253 253 253 253 253 253 253 253 253 253
45589 -250 250 250 214 214 214 198 198 198 190 150 46
45590 -219 162 10 236 178 12 234 174 13 224 166 10
45591 -216 158 10 213 154 11 213 154 11 216 158 10
45592 -226 170 11 239 182 13 246 190 14 246 190 14
45593 -246 190 14 246 190 14 242 186 14 206 162 42
45594 -101 101 101 58 58 58 30 30 30 14 14 14
45595 - 6 6 6 0 0 0 0 0 0 0 0 0
45596 - 0 0 0 0 0 0 0 0 0 0 0 0
45597 - 0 0 0 0 0 0 0 0 0 0 0 0
45598 - 0 0 0 0 0 0 0 0 0 10 10 10
45599 - 30 30 30 74 74 74 174 135 50 216 158 10
45600 -236 178 12 246 190 14 246 190 14 246 190 14
45601 -246 190 14 246 190 14 246 190 14 246 190 14
45602 -246 190 14 246 190 14 246 190 14 246 190 14
45603 -246 190 14 246 190 14 241 196 14 226 184 13
45604 - 61 42 6 2 2 6 2 2 6 2 2 6
45605 - 22 22 22 238 238 238 253 253 253 253 253 253
45606 -253 253 253 253 253 253 253 253 253 253 253 253
45607 -253 253 253 253 253 253 253 253 253 253 253 253
45608 -253 253 253 253 253 253 253 253 253 253 253 253
45609 -253 253 253 226 226 226 187 187 187 180 133 36
45610 -216 158 10 236 178 12 239 182 13 236 178 12
45611 -230 174 11 226 170 11 226 170 11 230 174 11
45612 -236 178 12 242 186 14 246 190 14 246 190 14
45613 -246 190 14 246 190 14 246 186 14 239 182 13
45614 -206 162 42 106 106 106 66 66 66 34 34 34
45615 - 14 14 14 6 6 6 0 0 0 0 0 0
45616 - 0 0 0 0 0 0 0 0 0 0 0 0
45617 - 0 0 0 0 0 0 0 0 0 0 0 0
45618 - 0 0 0 0 0 0 0 0 0 6 6 6
45619 - 26 26 26 70 70 70 163 133 67 213 154 11
45620 -236 178 12 246 190 14 246 190 14 246 190 14
45621 -246 190 14 246 190 14 246 190 14 246 190 14
45622 -246 190 14 246 190 14 246 190 14 246 190 14
45623 -246 190 14 246 190 14 246 190 14 241 196 14
45624 -190 146 13 18 14 6 2 2 6 2 2 6
45625 - 46 46 46 246 246 246 253 253 253 253 253 253
45626 -253 253 253 253 253 253 253 253 253 253 253 253
45627 -253 253 253 253 253 253 253 253 253 253 253 253
45628 -253 253 253 253 253 253 253 253 253 253 253 253
45629 -253 253 253 221 221 221 86 86 86 156 107 11
45630 -216 158 10 236 178 12 242 186 14 246 186 14
45631 -242 186 14 239 182 13 239 182 13 242 186 14
45632 -242 186 14 246 186 14 246 190 14 246 190 14
45633 -246 190 14 246 190 14 246 190 14 246 190 14
45634 -242 186 14 225 175 15 142 122 72 66 66 66
45635 - 30 30 30 10 10 10 0 0 0 0 0 0
45636 - 0 0 0 0 0 0 0 0 0 0 0 0
45637 - 0 0 0 0 0 0 0 0 0 0 0 0
45638 - 0 0 0 0 0 0 0 0 0 6 6 6
45639 - 26 26 26 70 70 70 163 133 67 210 150 10
45640 -236 178 12 246 190 14 246 190 14 246 190 14
45641 -246 190 14 246 190 14 246 190 14 246 190 14
45642 -246 190 14 246 190 14 246 190 14 246 190 14
45643 -246 190 14 246 190 14 246 190 14 246 190 14
45644 -232 195 16 121 92 8 34 34 34 106 106 106
45645 -221 221 221 253 253 253 253 253 253 253 253 253
45646 -253 253 253 253 253 253 253 253 253 253 253 253
45647 -253 253 253 253 253 253 253 253 253 253 253 253
45648 -253 253 253 253 253 253 253 253 253 253 253 253
45649 -242 242 242 82 82 82 18 14 6 163 110 8
45650 -216 158 10 236 178 12 242 186 14 246 190 14
45651 -246 190 14 246 190 14 246 190 14 246 190 14
45652 -246 190 14 246 190 14 246 190 14 246 190 14
45653 -246 190 14 246 190 14 246 190 14 246 190 14
45654 -246 190 14 246 190 14 242 186 14 163 133 67
45655 - 46 46 46 18 18 18 6 6 6 0 0 0
45656 - 0 0 0 0 0 0 0 0 0 0 0 0
45657 - 0 0 0 0 0 0 0 0 0 0 0 0
45658 - 0 0 0 0 0 0 0 0 0 10 10 10
45659 - 30 30 30 78 78 78 163 133 67 210 150 10
45660 -236 178 12 246 186 14 246 190 14 246 190 14
45661 -246 190 14 246 190 14 246 190 14 246 190 14
45662 -246 190 14 246 190 14 246 190 14 246 190 14
45663 -246 190 14 246 190 14 246 190 14 246 190 14
45664 -241 196 14 215 174 15 190 178 144 253 253 253
45665 -253 253 253 253 253 253 253 253 253 253 253 253
45666 -253 253 253 253 253 253 253 253 253 253 253 253
45667 -253 253 253 253 253 253 253 253 253 253 253 253
45668 -253 253 253 253 253 253 253 253 253 218 218 218
45669 - 58 58 58 2 2 6 22 18 6 167 114 7
45670 -216 158 10 236 178 12 246 186 14 246 190 14
45671 -246 190 14 246 190 14 246 190 14 246 190 14
45672 -246 190 14 246 190 14 246 190 14 246 190 14
45673 -246 190 14 246 190 14 246 190 14 246 190 14
45674 -246 190 14 246 186 14 242 186 14 190 150 46
45675 - 54 54 54 22 22 22 6 6 6 0 0 0
45676 - 0 0 0 0 0 0 0 0 0 0 0 0
45677 - 0 0 0 0 0 0 0 0 0 0 0 0
45678 - 0 0 0 0 0 0 0 0 0 14 14 14
45679 - 38 38 38 86 86 86 180 133 36 213 154 11
45680 -236 178 12 246 186 14 246 190 14 246 190 14
45681 -246 190 14 246 190 14 246 190 14 246 190 14
45682 -246 190 14 246 190 14 246 190 14 246 190 14
45683 -246 190 14 246 190 14 246 190 14 246 190 14
45684 -246 190 14 232 195 16 190 146 13 214 214 214
45685 -253 253 253 253 253 253 253 253 253 253 253 253
45686 -253 253 253 253 253 253 253 253 253 253 253 253
45687 -253 253 253 253 253 253 253 253 253 253 253 253
45688 -253 253 253 250 250 250 170 170 170 26 26 26
45689 - 2 2 6 2 2 6 37 26 9 163 110 8
45690 -219 162 10 239 182 13 246 186 14 246 190 14
45691 -246 190 14 246 190 14 246 190 14 246 190 14
45692 -246 190 14 246 190 14 246 190 14 246 190 14
45693 -246 190 14 246 190 14 246 190 14 246 190 14
45694 -246 186 14 236 178 12 224 166 10 142 122 72
45695 - 46 46 46 18 18 18 6 6 6 0 0 0
45696 - 0 0 0 0 0 0 0 0 0 0 0 0
45697 - 0 0 0 0 0 0 0 0 0 0 0 0
45698 - 0 0 0 0 0 0 6 6 6 18 18 18
45699 - 50 50 50 109 106 95 192 133 9 224 166 10
45700 -242 186 14 246 190 14 246 190 14 246 190 14
45701 -246 190 14 246 190 14 246 190 14 246 190 14
45702 -246 190 14 246 190 14 246 190 14 246 190 14
45703 -246 190 14 246 190 14 246 190 14 246 190 14
45704 -242 186 14 226 184 13 210 162 10 142 110 46
45705 -226 226 226 253 253 253 253 253 253 253 253 253
45706 -253 253 253 253 253 253 253 253 253 253 253 253
45707 -253 253 253 253 253 253 253 253 253 253 253 253
45708 -198 198 198 66 66 66 2 2 6 2 2 6
45709 - 2 2 6 2 2 6 50 34 6 156 107 11
45710 -219 162 10 239 182 13 246 186 14 246 190 14
45711 -246 190 14 246 190 14 246 190 14 246 190 14
45712 -246 190 14 246 190 14 246 190 14 246 190 14
45713 -246 190 14 246 190 14 246 190 14 242 186 14
45714 -234 174 13 213 154 11 154 122 46 66 66 66
45715 - 30 30 30 10 10 10 0 0 0 0 0 0
45716 - 0 0 0 0 0 0 0 0 0 0 0 0
45717 - 0 0 0 0 0 0 0 0 0 0 0 0
45718 - 0 0 0 0 0 0 6 6 6 22 22 22
45719 - 58 58 58 154 121 60 206 145 10 234 174 13
45720 -242 186 14 246 186 14 246 190 14 246 190 14
45721 -246 190 14 246 190 14 246 190 14 246 190 14
45722 -246 190 14 246 190 14 246 190 14 246 190 14
45723 -246 190 14 246 190 14 246 190 14 246 190 14
45724 -246 186 14 236 178 12 210 162 10 163 110 8
45725 - 61 42 6 138 138 138 218 218 218 250 250 250
45726 -253 253 253 253 253 253 253 253 253 250 250 250
45727 -242 242 242 210 210 210 144 144 144 66 66 66
45728 - 6 6 6 2 2 6 2 2 6 2 2 6
45729 - 2 2 6 2 2 6 61 42 6 163 110 8
45730 -216 158 10 236 178 12 246 190 14 246 190 14
45731 -246 190 14 246 190 14 246 190 14 246 190 14
45732 -246 190 14 246 190 14 246 190 14 246 190 14
45733 -246 190 14 239 182 13 230 174 11 216 158 10
45734 -190 142 34 124 112 88 70 70 70 38 38 38
45735 - 18 18 18 6 6 6 0 0 0 0 0 0
45736 - 0 0 0 0 0 0 0 0 0 0 0 0
45737 - 0 0 0 0 0 0 0 0 0 0 0 0
45738 - 0 0 0 0 0 0 6 6 6 22 22 22
45739 - 62 62 62 168 124 44 206 145 10 224 166 10
45740 -236 178 12 239 182 13 242 186 14 242 186 14
45741 -246 186 14 246 190 14 246 190 14 246 190 14
45742 -246 190 14 246 190 14 246 190 14 246 190 14
45743 -246 190 14 246 190 14 246 190 14 246 190 14
45744 -246 190 14 236 178 12 216 158 10 175 118 6
45745 - 80 54 7 2 2 6 6 6 6 30 30 30
45746 - 54 54 54 62 62 62 50 50 50 38 38 38
45747 - 14 14 14 2 2 6 2 2 6 2 2 6
45748 - 2 2 6 2 2 6 2 2 6 2 2 6
45749 - 2 2 6 6 6 6 80 54 7 167 114 7
45750 -213 154 11 236 178 12 246 190 14 246 190 14
45751 -246 190 14 246 190 14 246 190 14 246 190 14
45752 -246 190 14 242 186 14 239 182 13 239 182 13
45753 -230 174 11 210 150 10 174 135 50 124 112 88
45754 - 82 82 82 54 54 54 34 34 34 18 18 18
45755 - 6 6 6 0 0 0 0 0 0 0 0 0
45756 - 0 0 0 0 0 0 0 0 0 0 0 0
45757 - 0 0 0 0 0 0 0 0 0 0 0 0
45758 - 0 0 0 0 0 0 6 6 6 18 18 18
45759 - 50 50 50 158 118 36 192 133 9 200 144 11
45760 -216 158 10 219 162 10 224 166 10 226 170 11
45761 -230 174 11 236 178 12 239 182 13 239 182 13
45762 -242 186 14 246 186 14 246 190 14 246 190 14
45763 -246 190 14 246 190 14 246 190 14 246 190 14
45764 -246 186 14 230 174 11 210 150 10 163 110 8
45765 -104 69 6 10 10 10 2 2 6 2 2 6
45766 - 2 2 6 2 2 6 2 2 6 2 2 6
45767 - 2 2 6 2 2 6 2 2 6 2 2 6
45768 - 2 2 6 2 2 6 2 2 6 2 2 6
45769 - 2 2 6 6 6 6 91 60 6 167 114 7
45770 -206 145 10 230 174 11 242 186 14 246 190 14
45771 -246 190 14 246 190 14 246 186 14 242 186 14
45772 -239 182 13 230 174 11 224 166 10 213 154 11
45773 -180 133 36 124 112 88 86 86 86 58 58 58
45774 - 38 38 38 22 22 22 10 10 10 6 6 6
45775 - 0 0 0 0 0 0 0 0 0 0 0 0
45776 - 0 0 0 0 0 0 0 0 0 0 0 0
45777 - 0 0 0 0 0 0 0 0 0 0 0 0
45778 - 0 0 0 0 0 0 0 0 0 14 14 14
45779 - 34 34 34 70 70 70 138 110 50 158 118 36
45780 -167 114 7 180 123 7 192 133 9 197 138 11
45781 -200 144 11 206 145 10 213 154 11 219 162 10
45782 -224 166 10 230 174 11 239 182 13 242 186 14
45783 -246 186 14 246 186 14 246 186 14 246 186 14
45784 -239 182 13 216 158 10 185 133 11 152 99 6
45785 -104 69 6 18 14 6 2 2 6 2 2 6
45786 - 2 2 6 2 2 6 2 2 6 2 2 6
45787 - 2 2 6 2 2 6 2 2 6 2 2 6
45788 - 2 2 6 2 2 6 2 2 6 2 2 6
45789 - 2 2 6 6 6 6 80 54 7 152 99 6
45790 -192 133 9 219 162 10 236 178 12 239 182 13
45791 -246 186 14 242 186 14 239 182 13 236 178 12
45792 -224 166 10 206 145 10 192 133 9 154 121 60
45793 - 94 94 94 62 62 62 42 42 42 22 22 22
45794 - 14 14 14 6 6 6 0 0 0 0 0 0
45795 - 0 0 0 0 0 0 0 0 0 0 0 0
45796 - 0 0 0 0 0 0 0 0 0 0 0 0
45797 - 0 0 0 0 0 0 0 0 0 0 0 0
45798 - 0 0 0 0 0 0 0 0 0 6 6 6
45799 - 18 18 18 34 34 34 58 58 58 78 78 78
45800 -101 98 89 124 112 88 142 110 46 156 107 11
45801 -163 110 8 167 114 7 175 118 6 180 123 7
45802 -185 133 11 197 138 11 210 150 10 219 162 10
45803 -226 170 11 236 178 12 236 178 12 234 174 13
45804 -219 162 10 197 138 11 163 110 8 130 83 6
45805 - 91 60 6 10 10 10 2 2 6 2 2 6
45806 - 18 18 18 38 38 38 38 38 38 38 38 38
45807 - 38 38 38 38 38 38 38 38 38 38 38 38
45808 - 38 38 38 38 38 38 26 26 26 2 2 6
45809 - 2 2 6 6 6 6 70 47 6 137 92 6
45810 -175 118 6 200 144 11 219 162 10 230 174 11
45811 -234 174 13 230 174 11 219 162 10 210 150 10
45812 -192 133 9 163 110 8 124 112 88 82 82 82
45813 - 50 50 50 30 30 30 14 14 14 6 6 6
45814 - 0 0 0 0 0 0 0 0 0 0 0 0
45815 - 0 0 0 0 0 0 0 0 0 0 0 0
45816 - 0 0 0 0 0 0 0 0 0 0 0 0
45817 - 0 0 0 0 0 0 0 0 0 0 0 0
45818 - 0 0 0 0 0 0 0 0 0 0 0 0
45819 - 6 6 6 14 14 14 22 22 22 34 34 34
45820 - 42 42 42 58 58 58 74 74 74 86 86 86
45821 -101 98 89 122 102 70 130 98 46 121 87 25
45822 -137 92 6 152 99 6 163 110 8 180 123 7
45823 -185 133 11 197 138 11 206 145 10 200 144 11
45824 -180 123 7 156 107 11 130 83 6 104 69 6
45825 - 50 34 6 54 54 54 110 110 110 101 98 89
45826 - 86 86 86 82 82 82 78 78 78 78 78 78
45827 - 78 78 78 78 78 78 78 78 78 78 78 78
45828 - 78 78 78 82 82 82 86 86 86 94 94 94
45829 -106 106 106 101 101 101 86 66 34 124 80 6
45830 -156 107 11 180 123 7 192 133 9 200 144 11
45831 -206 145 10 200 144 11 192 133 9 175 118 6
45832 -139 102 15 109 106 95 70 70 70 42 42 42
45833 - 22 22 22 10 10 10 0 0 0 0 0 0
45834 - 0 0 0 0 0 0 0 0 0 0 0 0
45835 - 0 0 0 0 0 0 0 0 0 0 0 0
45836 - 0 0 0 0 0 0 0 0 0 0 0 0
45837 - 0 0 0 0 0 0 0 0 0 0 0 0
45838 - 0 0 0 0 0 0 0 0 0 0 0 0
45839 - 0 0 0 0 0 0 6 6 6 10 10 10
45840 - 14 14 14 22 22 22 30 30 30 38 38 38
45841 - 50 50 50 62 62 62 74 74 74 90 90 90
45842 -101 98 89 112 100 78 121 87 25 124 80 6
45843 -137 92 6 152 99 6 152 99 6 152 99 6
45844 -138 86 6 124 80 6 98 70 6 86 66 30
45845 -101 98 89 82 82 82 58 58 58 46 46 46
45846 - 38 38 38 34 34 34 34 34 34 34 34 34
45847 - 34 34 34 34 34 34 34 34 34 34 34 34
45848 - 34 34 34 34 34 34 38 38 38 42 42 42
45849 - 54 54 54 82 82 82 94 86 76 91 60 6
45850 -134 86 6 156 107 11 167 114 7 175 118 6
45851 -175 118 6 167 114 7 152 99 6 121 87 25
45852 -101 98 89 62 62 62 34 34 34 18 18 18
45853 - 6 6 6 0 0 0 0 0 0 0 0 0
45854 - 0 0 0 0 0 0 0 0 0 0 0 0
45855 - 0 0 0 0 0 0 0 0 0 0 0 0
45856 - 0 0 0 0 0 0 0 0 0 0 0 0
45857 - 0 0 0 0 0 0 0 0 0 0 0 0
45858 - 0 0 0 0 0 0 0 0 0 0 0 0
45859 - 0 0 0 0 0 0 0 0 0 0 0 0
45860 - 0 0 0 6 6 6 6 6 6 10 10 10
45861 - 18 18 18 22 22 22 30 30 30 42 42 42
45862 - 50 50 50 66 66 66 86 86 86 101 98 89
45863 -106 86 58 98 70 6 104 69 6 104 69 6
45864 -104 69 6 91 60 6 82 62 34 90 90 90
45865 - 62 62 62 38 38 38 22 22 22 14 14 14
45866 - 10 10 10 10 10 10 10 10 10 10 10 10
45867 - 10 10 10 10 10 10 6 6 6 10 10 10
45868 - 10 10 10 10 10 10 10 10 10 14 14 14
45869 - 22 22 22 42 42 42 70 70 70 89 81 66
45870 - 80 54 7 104 69 6 124 80 6 137 92 6
45871 -134 86 6 116 81 8 100 82 52 86 86 86
45872 - 58 58 58 30 30 30 14 14 14 6 6 6
45873 - 0 0 0 0 0 0 0 0 0 0 0 0
45874 - 0 0 0 0 0 0 0 0 0 0 0 0
45875 - 0 0 0 0 0 0 0 0 0 0 0 0
45876 - 0 0 0 0 0 0 0 0 0 0 0 0
45877 - 0 0 0 0 0 0 0 0 0 0 0 0
45878 - 0 0 0 0 0 0 0 0 0 0 0 0
45879 - 0 0 0 0 0 0 0 0 0 0 0 0
45880 - 0 0 0 0 0 0 0 0 0 0 0 0
45881 - 0 0 0 6 6 6 10 10 10 14 14 14
45882 - 18 18 18 26 26 26 38 38 38 54 54 54
45883 - 70 70 70 86 86 86 94 86 76 89 81 66
45884 - 89 81 66 86 86 86 74 74 74 50 50 50
45885 - 30 30 30 14 14 14 6 6 6 0 0 0
45886 - 0 0 0 0 0 0 0 0 0 0 0 0
45887 - 0 0 0 0 0 0 0 0 0 0 0 0
45888 - 0 0 0 0 0 0 0 0 0 0 0 0
45889 - 6 6 6 18 18 18 34 34 34 58 58 58
45890 - 82 82 82 89 81 66 89 81 66 89 81 66
45891 - 94 86 66 94 86 76 74 74 74 50 50 50
45892 - 26 26 26 14 14 14 6 6 6 0 0 0
45893 - 0 0 0 0 0 0 0 0 0 0 0 0
45894 - 0 0 0 0 0 0 0 0 0 0 0 0
45895 - 0 0 0 0 0 0 0 0 0 0 0 0
45896 - 0 0 0 0 0 0 0 0 0 0 0 0
45897 - 0 0 0 0 0 0 0 0 0 0 0 0
45898 - 0 0 0 0 0 0 0 0 0 0 0 0
45899 - 0 0 0 0 0 0 0 0 0 0 0 0
45900 - 0 0 0 0 0 0 0 0 0 0 0 0
45901 - 0 0 0 0 0 0 0 0 0 0 0 0
45902 - 6 6 6 6 6 6 14 14 14 18 18 18
45903 - 30 30 30 38 38 38 46 46 46 54 54 54
45904 - 50 50 50 42 42 42 30 30 30 18 18 18
45905 - 10 10 10 0 0 0 0 0 0 0 0 0
45906 - 0 0 0 0 0 0 0 0 0 0 0 0
45907 - 0 0 0 0 0 0 0 0 0 0 0 0
45908 - 0 0 0 0 0 0 0 0 0 0 0 0
45909 - 0 0 0 6 6 6 14 14 14 26 26 26
45910 - 38 38 38 50 50 50 58 58 58 58 58 58
45911 - 54 54 54 42 42 42 30 30 30 18 18 18
45912 - 10 10 10 0 0 0 0 0 0 0 0 0
45913 - 0 0 0 0 0 0 0 0 0 0 0 0
45914 - 0 0 0 0 0 0 0 0 0 0 0 0
45915 - 0 0 0 0 0 0 0 0 0 0 0 0
45916 - 0 0 0 0 0 0 0 0 0 0 0 0
45917 - 0 0 0 0 0 0 0 0 0 0 0 0
45918 - 0 0 0 0 0 0 0 0 0 0 0 0
45919 - 0 0 0 0 0 0 0 0 0 0 0 0
45920 - 0 0 0 0 0 0 0 0 0 0 0 0
45921 - 0 0 0 0 0 0 0 0 0 0 0 0
45922 - 0 0 0 0 0 0 0 0 0 6 6 6
45923 - 6 6 6 10 10 10 14 14 14 18 18 18
45924 - 18 18 18 14 14 14 10 10 10 6 6 6
45925 - 0 0 0 0 0 0 0 0 0 0 0 0
45926 - 0 0 0 0 0 0 0 0 0 0 0 0
45927 - 0 0 0 0 0 0 0 0 0 0 0 0
45928 - 0 0 0 0 0 0 0 0 0 0 0 0
45929 - 0 0 0 0 0 0 0 0 0 6 6 6
45930 - 14 14 14 18 18 18 22 22 22 22 22 22
45931 - 18 18 18 14 14 14 10 10 10 6 6 6
45932 - 0 0 0 0 0 0 0 0 0 0 0 0
45933 - 0 0 0 0 0 0 0 0 0 0 0 0
45934 - 0 0 0 0 0 0 0 0 0 0 0 0
45935 - 0 0 0 0 0 0 0 0 0 0 0 0
45936 - 0 0 0 0 0 0 0 0 0 0 0 0
45937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45950 +4 4 4 4 4 4
45951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45964 +4 4 4 4 4 4
45965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45978 +4 4 4 4 4 4
45979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45992 +4 4 4 4 4 4
45993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46006 +4 4 4 4 4 4
46007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46020 +4 4 4 4 4 4
46021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
46026 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
46027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
46031 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46032 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
46033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46034 +4 4 4 4 4 4
46035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
46040 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
46041 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
46045 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
46046 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
46047 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46048 +4 4 4 4 4 4
46049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
46054 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
46055 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
46059 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
46060 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
46061 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
46062 +4 4 4 4 4 4
46063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
46067 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
46068 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
46069 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
46070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46072 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
46073 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
46074 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
46075 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
46076 +4 4 4 4 4 4
46077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
46081 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
46082 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
46083 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
46084 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46085 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
46086 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
46087 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
46088 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
46089 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
46090 +4 4 4 4 4 4
46091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46094 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
46095 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
46096 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
46097 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
46098 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46099 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
46100 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
46101 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
46102 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
46103 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
46104 +4 4 4 4 4 4
46105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46107 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
46108 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
46109 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
46110 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
46111 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
46112 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
46113 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
46114 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
46115 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
46116 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
46117 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
46118 +4 4 4 4 4 4
46119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46121 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
46122 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
46123 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
46124 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
46125 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
46126 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
46127 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
46128 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
46129 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
46130 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
46131 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
46132 +4 4 4 4 4 4
46133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46135 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
46136 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
46137 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
46138 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
46139 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
46140 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
46141 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
46142 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
46143 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
46144 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
46145 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46146 +4 4 4 4 4 4
46147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46149 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
46150 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
46151 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
46152 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
46153 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
46154 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
46155 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
46156 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
46157 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
46158 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
46159 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
46160 +4 4 4 4 4 4
46161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46162 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
46163 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
46164 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
46165 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
46166 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
46167 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
46168 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
46169 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
46170 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
46171 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
46172 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
46173 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
46174 +4 4 4 4 4 4
46175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46176 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
46177 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
46178 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
46179 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46180 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
46181 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
46182 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
46183 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
46184 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
46185 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
46186 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
46187 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
46188 +0 0 0 4 4 4
46189 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46190 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
46191 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
46192 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
46193 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
46194 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
46195 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
46196 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
46197 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
46198 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
46199 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
46200 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
46201 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
46202 +2 0 0 0 0 0
46203 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
46204 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
46205 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
46206 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
46207 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
46208 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
46209 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
46210 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
46211 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
46212 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
46213 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
46214 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
46215 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
46216 +37 38 37 0 0 0
46217 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
46218 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
46219 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
46220 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
46221 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
46222 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
46223 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
46224 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
46225 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
46226 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
46227 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
46228 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
46229 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
46230 +85 115 134 4 0 0
46231 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
46232 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
46233 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
46234 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
46235 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
46236 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
46237 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
46238 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
46239 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
46240 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
46241 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
46242 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
46243 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
46244 +60 73 81 4 0 0
46245 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
46246 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
46247 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
46248 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
46249 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
46250 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
46251 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
46252 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
46253 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
46254 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
46255 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
46256 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
46257 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
46258 +16 19 21 4 0 0
46259 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
46260 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
46261 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
46262 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
46263 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
46264 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
46265 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
46266 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
46267 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
46268 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
46269 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
46270 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
46271 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
46272 +4 0 0 4 3 3
46273 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
46274 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
46275 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
46276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
46277 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
46278 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
46279 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
46280 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
46281 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
46282 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
46283 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
46284 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
46285 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
46286 +3 2 2 4 4 4
46287 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
46288 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
46289 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
46290 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
46291 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
46292 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
46293 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
46294 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
46295 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
46296 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
46297 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
46298 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
46299 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
46300 +4 4 4 4 4 4
46301 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
46302 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
46303 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
46304 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
46305 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
46306 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
46307 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
46308 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
46309 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
46310 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
46311 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
46312 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
46313 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
46314 +4 4 4 4 4 4
46315 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
46316 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
46317 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
46318 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
46319 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
46320 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46321 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
46322 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
46323 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
46324 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
46325 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
46326 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
46327 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
46328 +5 5 5 5 5 5
46329 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
46330 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
46331 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
46332 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
46333 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
46334 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46335 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
46336 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
46337 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
46338 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
46339 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
46340 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
46341 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
46342 +5 5 5 4 4 4
46343 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
46344 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
46345 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
46346 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
46347 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46348 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
46349 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
46350 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
46351 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
46352 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
46353 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
46354 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46356 +4 4 4 4 4 4
46357 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
46358 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
46359 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
46360 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
46361 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
46362 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46363 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46364 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
46365 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
46366 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
46367 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
46368 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
46369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46370 +4 4 4 4 4 4
46371 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
46372 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
46373 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
46374 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
46375 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46376 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
46377 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
46378 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
46379 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
46380 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
46381 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
46382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46383 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46384 +4 4 4 4 4 4
46385 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
46386 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
46387 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
46388 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
46389 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46390 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46391 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46392 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
46393 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
46394 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
46395 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
46396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46397 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46398 +4 4 4 4 4 4
46399 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
46400 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
46401 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
46402 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
46403 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46404 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
46405 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46406 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
46407 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
46408 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
46409 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46411 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46412 +4 4 4 4 4 4
46413 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
46414 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
46415 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
46416 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
46417 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46418 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
46419 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
46420 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
46421 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
46422 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
46423 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
46424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46425 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46426 +4 4 4 4 4 4
46427 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
46428 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
46429 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
46430 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
46431 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46432 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
46433 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
46434 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
46435 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
46436 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
46437 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
46438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46439 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46440 +4 4 4 4 4 4
46441 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
46442 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
46443 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
46444 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46445 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
46446 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
46447 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
46448 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
46449 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
46450 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
46451 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46454 +4 4 4 4 4 4
46455 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
46456 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
46457 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
46458 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46459 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46460 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
46461 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
46462 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
46463 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
46464 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
46465 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46468 +4 4 4 4 4 4
46469 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
46470 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
46471 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46472 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46473 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46474 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
46475 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
46476 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
46477 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
46478 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
46479 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46482 +4 4 4 4 4 4
46483 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
46484 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
46485 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46486 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46487 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46488 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
46489 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
46490 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
46491 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46492 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46493 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46496 +4 4 4 4 4 4
46497 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46498 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
46499 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46500 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
46501 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
46502 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
46503 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
46504 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
46505 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46506 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46507 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46510 +4 4 4 4 4 4
46511 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46512 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
46513 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46514 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
46515 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46516 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
46517 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
46518 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
46519 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46520 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46521 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46524 +4 4 4 4 4 4
46525 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
46526 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
46527 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46528 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
46529 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
46530 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
46531 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
46532 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
46533 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46534 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46535 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46538 +4 4 4 4 4 4
46539 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
46540 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
46541 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46542 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
46543 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
46544 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
46545 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
46546 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
46547 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46548 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46549 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46552 +4 4 4 4 4 4
46553 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46554 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
46555 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46556 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
46557 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
46558 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
46559 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
46560 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
46561 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46562 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46563 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46566 +4 4 4 4 4 4
46567 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
46568 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
46569 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46570 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
46571 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
46572 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
46573 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
46574 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
46575 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
46576 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46577 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46580 +4 4 4 4 4 4
46581 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46582 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
46583 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
46584 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
46585 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
46586 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
46587 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
46588 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
46589 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46590 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46591 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46594 +4 4 4 4 4 4
46595 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46596 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
46597 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46598 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
46599 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
46600 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
46601 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
46602 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
46603 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46604 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46605 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46608 +4 4 4 4 4 4
46609 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46610 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
46611 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
46612 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
46613 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
46614 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
46615 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46616 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
46617 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46618 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46619 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46622 +4 4 4 4 4 4
46623 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46624 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
46625 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
46626 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46627 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
46628 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
46629 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46630 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
46631 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46632 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46633 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46636 +4 4 4 4 4 4
46637 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46638 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
46639 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
46640 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
46641 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
46642 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
46643 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
46644 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
46645 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
46646 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46647 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46650 +4 4 4 4 4 4
46651 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46652 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
46653 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
46654 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
46655 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
46656 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
46657 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
46658 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
46659 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
46660 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46661 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46664 +4 4 4 4 4 4
46665 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
46666 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
46667 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
46668 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
46669 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46670 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
46671 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
46672 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
46673 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
46674 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46675 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46678 +4 4 4 4 4 4
46679 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46680 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
46681 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
46682 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
46683 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
46684 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
46685 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
46686 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
46687 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
46688 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46689 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46692 +4 4 4 4 4 4
46693 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
46694 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
46695 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
46696 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
46697 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
46698 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
46699 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
46700 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
46701 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
46702 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
46703 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46706 +4 4 4 4 4 4
46707 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
46708 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46709 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
46710 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
46711 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
46712 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
46713 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
46714 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
46715 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
46716 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
46717 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46720 +4 4 4 4 4 4
46721 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
46722 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46723 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
46724 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
46725 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
46726 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
46727 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46728 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
46729 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
46730 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
46731 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46734 +4 4 4 4 4 4
46735 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
46736 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
46737 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
46738 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
46739 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
46740 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
46741 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
46742 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
46743 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
46744 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
46745 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46748 +4 4 4 4 4 4
46749 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
46750 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
46751 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46752 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
46753 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
46754 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
46755 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
46756 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
46757 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
46758 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
46759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46762 +4 4 4 4 4 4
46763 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46764 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
46765 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
46766 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
46767 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
46768 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
46769 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
46770 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
46771 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
46772 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46776 +4 4 4 4 4 4
46777 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
46778 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
46779 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
46780 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
46781 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
46782 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
46783 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
46784 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
46785 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
46786 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46790 +4 4 4 4 4 4
46791 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
46792 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
46793 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
46794 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
46795 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
46796 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
46797 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
46798 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
46799 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46800 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46804 +4 4 4 4 4 4
46805 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
46806 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46807 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
46808 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46809 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
46810 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
46811 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
46812 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
46813 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
46814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46818 +4 4 4 4 4 4
46819 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
46820 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
46821 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
46822 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
46823 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
46824 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
46825 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
46826 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
46827 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
46828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46832 +4 4 4 4 4 4
46833 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46834 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
46835 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
46836 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
46837 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
46838 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
46839 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
46840 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
46841 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46846 +4 4 4 4 4 4
46847 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
46848 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
46849 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46850 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
46851 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
46852 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
46853 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
46854 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
46855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46860 +4 4 4 4 4 4
46861 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46862 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
46863 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
46864 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
46865 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
46866 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
46867 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
46868 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46874 +4 4 4 4 4 4
46875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46876 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
46877 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46878 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
46879 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
46880 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
46881 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
46882 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
46883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46888 +4 4 4 4 4 4
46889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46890 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
46891 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
46892 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
46893 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
46894 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
46895 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
46896 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
46897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46902 +4 4 4 4 4 4
46903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46904 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
46905 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
46906 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46907 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
46908 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
46909 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
46910 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46916 +4 4 4 4 4 4
46917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46919 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46920 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
46921 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
46922 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
46923 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
46924 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46930 +4 4 4 4 4 4
46931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46934 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46935 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46936 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46937 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46944 +4 4 4 4 4 4
46945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46948 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46949 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46950 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46951 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46958 +4 4 4 4 4 4
46959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46962 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46963 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46964 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46965 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46972 +4 4 4 4 4 4
46973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46976 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46977 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46978 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46979 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46986 +4 4 4 4 4 4
46987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46991 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46992 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46993 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47000 +4 4 4 4 4 4
47001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47005 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
47006 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
47007 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47014 +4 4 4 4 4 4
47015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47019 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
47020 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
47021 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47028 +4 4 4 4 4 4
47029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47033 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
47034 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
47035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47042 +4 4 4 4 4 4
47043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47047 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47048 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
47049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47050 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47056 +4 4 4 4 4 4
47057 diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47058 index fe92eed..106e085 100644
47059 --- a/drivers/video/mb862xx/mb862xxfb_accel.c
47060 +++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47061 @@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47062 struct mb862xxfb_par *par = info->par;
47063
47064 if (info->var.bits_per_pixel == 32) {
47065 - info->fbops->fb_fillrect = cfb_fillrect;
47066 - info->fbops->fb_copyarea = cfb_copyarea;
47067 - info->fbops->fb_imageblit = cfb_imageblit;
47068 + pax_open_kernel();
47069 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47070 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47071 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47072 + pax_close_kernel();
47073 } else {
47074 outreg(disp, GC_L0EM, 3);
47075 - info->fbops->fb_fillrect = mb86290fb_fillrect;
47076 - info->fbops->fb_copyarea = mb86290fb_copyarea;
47077 - info->fbops->fb_imageblit = mb86290fb_imageblit;
47078 + pax_open_kernel();
47079 + *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47080 + *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47081 + *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47082 + pax_close_kernel();
47083 }
47084 outreg(draw, GDC_REG_DRAW_BASE, 0);
47085 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
47086 diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47087 index ff22871..b129bed 100644
47088 --- a/drivers/video/nvidia/nvidia.c
47089 +++ b/drivers/video/nvidia/nvidia.c
47090 @@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47091 info->fix.line_length = (info->var.xres_virtual *
47092 info->var.bits_per_pixel) >> 3;
47093 if (info->var.accel_flags) {
47094 - info->fbops->fb_imageblit = nvidiafb_imageblit;
47095 - info->fbops->fb_fillrect = nvidiafb_fillrect;
47096 - info->fbops->fb_copyarea = nvidiafb_copyarea;
47097 - info->fbops->fb_sync = nvidiafb_sync;
47098 + pax_open_kernel();
47099 + *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47100 + *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47101 + *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47102 + *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47103 + pax_close_kernel();
47104 info->pixmap.scan_align = 4;
47105 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47106 info->flags |= FBINFO_READS_FAST;
47107 NVResetGraphics(info);
47108 } else {
47109 - info->fbops->fb_imageblit = cfb_imageblit;
47110 - info->fbops->fb_fillrect = cfb_fillrect;
47111 - info->fbops->fb_copyarea = cfb_copyarea;
47112 - info->fbops->fb_sync = NULL;
47113 + pax_open_kernel();
47114 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47115 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47116 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47117 + *(void **)&info->fbops->fb_sync = NULL;
47118 + pax_close_kernel();
47119 info->pixmap.scan_align = 1;
47120 info->flags |= FBINFO_HWACCEL_DISABLED;
47121 info->flags &= ~FBINFO_READS_FAST;
47122 @@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47123 info->pixmap.size = 8 * 1024;
47124 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47125
47126 - if (!hwcur)
47127 - info->fbops->fb_cursor = NULL;
47128 + if (!hwcur) {
47129 + pax_open_kernel();
47130 + *(void **)&info->fbops->fb_cursor = NULL;
47131 + pax_close_kernel();
47132 + }
47133
47134 info->var.accel_flags = (!noaccel);
47135
47136 diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47137 index 76d9053..dec2bfd 100644
47138 --- a/drivers/video/s1d13xxxfb.c
47139 +++ b/drivers/video/s1d13xxxfb.c
47140 @@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47141
47142 switch(prod_id) {
47143 case S1D13506_PROD_ID: /* activate acceleration */
47144 - s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47145 - s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47146 + pax_open_kernel();
47147 + *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47148 + *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47149 + pax_close_kernel();
47150 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47151 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47152 break;
47153 diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47154 index 97bd662..39fab85 100644
47155 --- a/drivers/video/smscufx.c
47156 +++ b/drivers/video/smscufx.c
47157 @@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47158 fb_deferred_io_cleanup(info);
47159 kfree(info->fbdefio);
47160 info->fbdefio = NULL;
47161 - info->fbops->fb_mmap = ufx_ops_mmap;
47162 + pax_open_kernel();
47163 + *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47164 + pax_close_kernel();
47165 }
47166
47167 pr_debug("released /dev/fb%d user=%d count=%d",
47168 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47169 index 86d449e..8e04dc5 100644
47170 --- a/drivers/video/udlfb.c
47171 +++ b/drivers/video/udlfb.c
47172 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47173 dlfb_urb_completion(urb);
47174
47175 error:
47176 - atomic_add(bytes_sent, &dev->bytes_sent);
47177 - atomic_add(bytes_identical, &dev->bytes_identical);
47178 - atomic_add(width*height*2, &dev->bytes_rendered);
47179 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47180 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47181 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
47182 end_cycles = get_cycles();
47183 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
47184 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47185 >> 10)), /* Kcycles */
47186 &dev->cpu_kcycles_used);
47187
47188 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
47189 dlfb_urb_completion(urb);
47190
47191 error:
47192 - atomic_add(bytes_sent, &dev->bytes_sent);
47193 - atomic_add(bytes_identical, &dev->bytes_identical);
47194 - atomic_add(bytes_rendered, &dev->bytes_rendered);
47195 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47196 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47197 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
47198 end_cycles = get_cycles();
47199 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
47200 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47201 >> 10)), /* Kcycles */
47202 &dev->cpu_kcycles_used);
47203 }
47204 @@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
47205 fb_deferred_io_cleanup(info);
47206 kfree(info->fbdefio);
47207 info->fbdefio = NULL;
47208 - info->fbops->fb_mmap = dlfb_ops_mmap;
47209 + pax_open_kernel();
47210 + *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
47211 + pax_close_kernel();
47212 }
47213
47214 pr_warn("released /dev/fb%d user=%d count=%d\n",
47215 @@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
47216 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47217 struct dlfb_data *dev = fb_info->par;
47218 return snprintf(buf, PAGE_SIZE, "%u\n",
47219 - atomic_read(&dev->bytes_rendered));
47220 + atomic_read_unchecked(&dev->bytes_rendered));
47221 }
47222
47223 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47224 @@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47225 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47226 struct dlfb_data *dev = fb_info->par;
47227 return snprintf(buf, PAGE_SIZE, "%u\n",
47228 - atomic_read(&dev->bytes_identical));
47229 + atomic_read_unchecked(&dev->bytes_identical));
47230 }
47231
47232 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47233 @@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47234 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47235 struct dlfb_data *dev = fb_info->par;
47236 return snprintf(buf, PAGE_SIZE, "%u\n",
47237 - atomic_read(&dev->bytes_sent));
47238 + atomic_read_unchecked(&dev->bytes_sent));
47239 }
47240
47241 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47242 @@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47243 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47244 struct dlfb_data *dev = fb_info->par;
47245 return snprintf(buf, PAGE_SIZE, "%u\n",
47246 - atomic_read(&dev->cpu_kcycles_used));
47247 + atomic_read_unchecked(&dev->cpu_kcycles_used));
47248 }
47249
47250 static ssize_t edid_show(
47251 @@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
47252 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47253 struct dlfb_data *dev = fb_info->par;
47254
47255 - atomic_set(&dev->bytes_rendered, 0);
47256 - atomic_set(&dev->bytes_identical, 0);
47257 - atomic_set(&dev->bytes_sent, 0);
47258 - atomic_set(&dev->cpu_kcycles_used, 0);
47259 + atomic_set_unchecked(&dev->bytes_rendered, 0);
47260 + atomic_set_unchecked(&dev->bytes_identical, 0);
47261 + atomic_set_unchecked(&dev->bytes_sent, 0);
47262 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
47263
47264 return count;
47265 }
47266 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
47267 index d428445..79a78df 100644
47268 --- a/drivers/video/uvesafb.c
47269 +++ b/drivers/video/uvesafb.c
47270 @@ -19,6 +19,7 @@
47271 #include <linux/io.h>
47272 #include <linux/mutex.h>
47273 #include <linux/slab.h>
47274 +#include <linux/moduleloader.h>
47275 #include <video/edid.h>
47276 #include <video/uvesafb.h>
47277 #ifdef CONFIG_X86
47278 @@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
47279 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
47280 par->pmi_setpal = par->ypan = 0;
47281 } else {
47282 +
47283 +#ifdef CONFIG_PAX_KERNEXEC
47284 +#ifdef CONFIG_MODULES
47285 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
47286 +#endif
47287 + if (!par->pmi_code) {
47288 + par->pmi_setpal = par->ypan = 0;
47289 + return 0;
47290 + }
47291 +#endif
47292 +
47293 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
47294 + task->t.regs.edi);
47295 +
47296 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47297 + pax_open_kernel();
47298 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
47299 + pax_close_kernel();
47300 +
47301 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
47302 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
47303 +#else
47304 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
47305 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
47306 +#endif
47307 +
47308 printk(KERN_INFO "uvesafb: protected mode interface info at "
47309 "%04x:%04x\n",
47310 (u16)task->t.regs.es, (u16)task->t.regs.edi);
47311 @@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
47312 par->ypan = ypan;
47313
47314 if (par->pmi_setpal || par->ypan) {
47315 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
47316 if (__supported_pte_mask & _PAGE_NX) {
47317 par->pmi_setpal = par->ypan = 0;
47318 printk(KERN_WARNING "uvesafb: NX protection is actively."
47319 "We have better not to use the PMI.\n");
47320 - } else {
47321 + } else
47322 +#endif
47323 uvesafb_vbe_getpmi(task, par);
47324 - }
47325 }
47326 #else
47327 /* The protected mode interface is not available on non-x86. */
47328 @@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47329 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
47330
47331 /* Disable blanking if the user requested so. */
47332 - if (!blank)
47333 - info->fbops->fb_blank = NULL;
47334 + if (!blank) {
47335 + pax_open_kernel();
47336 + *(void **)&info->fbops->fb_blank = NULL;
47337 + pax_close_kernel();
47338 + }
47339
47340 /*
47341 * Find out how much IO memory is required for the mode with
47342 @@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47343 info->flags = FBINFO_FLAG_DEFAULT |
47344 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
47345
47346 - if (!par->ypan)
47347 - info->fbops->fb_pan_display = NULL;
47348 + if (!par->ypan) {
47349 + pax_open_kernel();
47350 + *(void **)&info->fbops->fb_pan_display = NULL;
47351 + pax_close_kernel();
47352 + }
47353 }
47354
47355 static void uvesafb_init_mtrr(struct fb_info *info)
47356 @@ -1836,6 +1866,11 @@ out:
47357 if (par->vbe_modes)
47358 kfree(par->vbe_modes);
47359
47360 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47361 + if (par->pmi_code)
47362 + module_free_exec(NULL, par->pmi_code);
47363 +#endif
47364 +
47365 framebuffer_release(info);
47366 return err;
47367 }
47368 @@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
47369 kfree(par->vbe_state_orig);
47370 if (par->vbe_state_saved)
47371 kfree(par->vbe_state_saved);
47372 +
47373 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47374 + if (par->pmi_code)
47375 + module_free_exec(NULL, par->pmi_code);
47376 +#endif
47377 +
47378 }
47379
47380 framebuffer_release(info);
47381 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
47382 index 501b340..d80aa17 100644
47383 --- a/drivers/video/vesafb.c
47384 +++ b/drivers/video/vesafb.c
47385 @@ -9,6 +9,7 @@
47386 */
47387
47388 #include <linux/module.h>
47389 +#include <linux/moduleloader.h>
47390 #include <linux/kernel.h>
47391 #include <linux/errno.h>
47392 #include <linux/string.h>
47393 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
47394 static int vram_total __initdata; /* Set total amount of memory */
47395 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
47396 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
47397 -static void (*pmi_start)(void) __read_mostly;
47398 -static void (*pmi_pal) (void) __read_mostly;
47399 +static void (*pmi_start)(void) __read_only;
47400 +static void (*pmi_pal) (void) __read_only;
47401 static int depth __read_mostly;
47402 static int vga_compat __read_mostly;
47403 /* --------------------------------------------------------------------- */
47404 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
47405 unsigned int size_vmode;
47406 unsigned int size_remap;
47407 unsigned int size_total;
47408 + void *pmi_code = NULL;
47409
47410 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
47411 return -ENODEV;
47412 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
47413 size_remap = size_total;
47414 vesafb_fix.smem_len = size_remap;
47415
47416 -#ifndef __i386__
47417 - screen_info.vesapm_seg = 0;
47418 -#endif
47419 -
47420 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
47421 printk(KERN_WARNING
47422 "vesafb: cannot reserve video memory at 0x%lx\n",
47423 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
47424 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
47425 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
47426
47427 +#ifdef __i386__
47428 +
47429 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47430 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
47431 + if (!pmi_code)
47432 +#elif !defined(CONFIG_PAX_KERNEXEC)
47433 + if (0)
47434 +#endif
47435 +
47436 +#endif
47437 + screen_info.vesapm_seg = 0;
47438 +
47439 if (screen_info.vesapm_seg) {
47440 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
47441 - screen_info.vesapm_seg,screen_info.vesapm_off);
47442 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
47443 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
47444 }
47445
47446 if (screen_info.vesapm_seg < 0xc000)
47447 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
47448
47449 if (ypan || pmi_setpal) {
47450 unsigned short *pmi_base;
47451 +
47452 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
47453 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
47454 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
47455 +
47456 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47457 + pax_open_kernel();
47458 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
47459 +#else
47460 + pmi_code = pmi_base;
47461 +#endif
47462 +
47463 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
47464 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
47465 +
47466 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47467 + pmi_start = ktva_ktla(pmi_start);
47468 + pmi_pal = ktva_ktla(pmi_pal);
47469 + pax_close_kernel();
47470 +#endif
47471 +
47472 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
47473 if (pmi_base[3]) {
47474 printk(KERN_INFO "vesafb: pmi: ports = ");
47475 @@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47476 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
47477 (ypan ? FBINFO_HWACCEL_YPAN : 0);
47478
47479 - if (!ypan)
47480 - info->fbops->fb_pan_display = NULL;
47481 + if (!ypan) {
47482 + pax_open_kernel();
47483 + *(void **)&info->fbops->fb_pan_display = NULL;
47484 + pax_close_kernel();
47485 + }
47486
47487 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
47488 err = -ENOMEM;
47489 @@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47490 info->node, info->fix.id);
47491 return 0;
47492 err:
47493 +
47494 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47495 + module_free_exec(NULL, pmi_code);
47496 +#endif
47497 +
47498 if (info->screen_base)
47499 iounmap(info->screen_base);
47500 framebuffer_release(info);
47501 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
47502 index 88714ae..16c2e11 100644
47503 --- a/drivers/video/via/via_clock.h
47504 +++ b/drivers/video/via/via_clock.h
47505 @@ -56,7 +56,7 @@ struct via_clock {
47506
47507 void (*set_engine_pll_state)(u8 state);
47508 void (*set_engine_pll)(struct via_pll_config config);
47509 -};
47510 +} __no_const;
47511
47512
47513 static inline u32 get_pll_internal_frequency(u32 ref_freq,
47514 diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
47515 index fef20db..d28b1ab 100644
47516 --- a/drivers/xen/xenfs/xenstored.c
47517 +++ b/drivers/xen/xenfs/xenstored.c
47518 @@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
47519 static int xsd_kva_open(struct inode *inode, struct file *file)
47520 {
47521 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
47522 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47523 + NULL);
47524 +#else
47525 xen_store_interface);
47526 +#endif
47527 +
47528 if (!file->private_data)
47529 return -ENOMEM;
47530 return 0;
47531 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
47532 index d86edc8..40ff2fb 100644
47533 --- a/fs/9p/vfs_inode.c
47534 +++ b/fs/9p/vfs_inode.c
47535 @@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47536 void
47537 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47538 {
47539 - char *s = nd_get_link(nd);
47540 + const char *s = nd_get_link(nd);
47541
47542 p9_debug(P9_DEBUG_VFS, " %s %s\n",
47543 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
47544 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
47545 index 0efd152..b5802ad 100644
47546 --- a/fs/Kconfig.binfmt
47547 +++ b/fs/Kconfig.binfmt
47548 @@ -89,7 +89,7 @@ config HAVE_AOUT
47549
47550 config BINFMT_AOUT
47551 tristate "Kernel support for a.out and ECOFF binaries"
47552 - depends on HAVE_AOUT
47553 + depends on HAVE_AOUT && BROKEN
47554 ---help---
47555 A.out (Assembler.OUTput) is a set of formats for libraries and
47556 executables used in the earliest versions of UNIX. Linux used
47557 diff --git a/fs/aio.c b/fs/aio.c
47558 index 1dc8786..d3b29e8 100644
47559 --- a/fs/aio.c
47560 +++ b/fs/aio.c
47561 @@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
47562 size += sizeof(struct io_event) * nr_events;
47563 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
47564
47565 - if (nr_pages < 0)
47566 + if (nr_pages <= 0)
47567 return -EINVAL;
47568
47569 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
47570 @@ -1375,18 +1375,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
47571 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47572 {
47573 ssize_t ret;
47574 + struct iovec iovstack;
47575
47576 #ifdef CONFIG_COMPAT
47577 if (compat)
47578 ret = compat_rw_copy_check_uvector(type,
47579 (struct compat_iovec __user *)kiocb->ki_buf,
47580 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47581 + kiocb->ki_nbytes, 1, &iovstack,
47582 &kiocb->ki_iovec);
47583 else
47584 #endif
47585 ret = rw_copy_check_uvector(type,
47586 (struct iovec __user *)kiocb->ki_buf,
47587 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47588 + kiocb->ki_nbytes, 1, &iovstack,
47589 &kiocb->ki_iovec);
47590 if (ret < 0)
47591 goto out;
47592 @@ -1395,6 +1396,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47593 if (ret < 0)
47594 goto out;
47595
47596 + if (kiocb->ki_iovec == &iovstack) {
47597 + kiocb->ki_inline_vec = iovstack;
47598 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
47599 + }
47600 kiocb->ki_nr_segs = kiocb->ki_nbytes;
47601 kiocb->ki_cur_seg = 0;
47602 /* ki_nbytes/left now reflect bytes instead of segs */
47603 diff --git a/fs/attr.c b/fs/attr.c
47604 index 1449adb..a2038c2 100644
47605 --- a/fs/attr.c
47606 +++ b/fs/attr.c
47607 @@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
47608 unsigned long limit;
47609
47610 limit = rlimit(RLIMIT_FSIZE);
47611 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
47612 if (limit != RLIM_INFINITY && offset > limit)
47613 goto out_sig;
47614 if (offset > inode->i_sb->s_maxbytes)
47615 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
47616 index 3db70da..7aeec5b 100644
47617 --- a/fs/autofs4/waitq.c
47618 +++ b/fs/autofs4/waitq.c
47619 @@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
47620 {
47621 unsigned long sigpipe, flags;
47622 mm_segment_t fs;
47623 - const char *data = (const char *)addr;
47624 + const char __user *data = (const char __force_user *)addr;
47625 ssize_t wr = 0;
47626
47627 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
47628 @@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
47629 return 1;
47630 }
47631
47632 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47633 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
47634 +#endif
47635 +
47636 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47637 enum autofs_notify notify)
47638 {
47639 @@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47640
47641 /* If this is a direct mount request create a dummy name */
47642 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
47643 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47644 + /* this name does get written to userland via autofs4_write() */
47645 + qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
47646 +#else
47647 qstr.len = sprintf(name, "%p", dentry);
47648 +#endif
47649 else {
47650 qstr.len = autofs4_getpath(sbi, dentry, &name);
47651 if (!qstr.len) {
47652 diff --git a/fs/befs/endian.h b/fs/befs/endian.h
47653 index 2722387..c8dd2a7 100644
47654 --- a/fs/befs/endian.h
47655 +++ b/fs/befs/endian.h
47656 @@ -11,7 +11,7 @@
47657
47658 #include <asm/byteorder.h>
47659
47660 -static inline u64
47661 +static inline u64 __intentional_overflow(-1)
47662 fs64_to_cpu(const struct super_block *sb, fs64 n)
47663 {
47664 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
47665 @@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
47666 return (__force fs64)cpu_to_be64(n);
47667 }
47668
47669 -static inline u32
47670 +static inline u32 __intentional_overflow(-1)
47671 fs32_to_cpu(const struct super_block *sb, fs32 n)
47672 {
47673 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
47674 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
47675 index 8615ee8..388ed68 100644
47676 --- a/fs/befs/linuxvfs.c
47677 +++ b/fs/befs/linuxvfs.c
47678 @@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47679 {
47680 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
47681 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
47682 - char *link = nd_get_link(nd);
47683 + const char *link = nd_get_link(nd);
47684 if (!IS_ERR(link))
47685 kfree(link);
47686 }
47687 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
47688 index bbc8f88..7c7ac97 100644
47689 --- a/fs/binfmt_aout.c
47690 +++ b/fs/binfmt_aout.c
47691 @@ -16,6 +16,7 @@
47692 #include <linux/string.h>
47693 #include <linux/fs.h>
47694 #include <linux/file.h>
47695 +#include <linux/security.h>
47696 #include <linux/stat.h>
47697 #include <linux/fcntl.h>
47698 #include <linux/ptrace.h>
47699 @@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
47700 #endif
47701 # define START_STACK(u) ((void __user *)u.start_stack)
47702
47703 + memset(&dump, 0, sizeof(dump));
47704 +
47705 fs = get_fs();
47706 set_fs(KERNEL_DS);
47707 has_dumped = 1;
47708 @@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
47709
47710 /* If the size of the dump file exceeds the rlimit, then see what would happen
47711 if we wrote the stack, but not the data area. */
47712 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
47713 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
47714 dump.u_dsize = 0;
47715
47716 /* Make sure we have enough room to write the stack and data areas. */
47717 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
47718 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
47719 dump.u_ssize = 0;
47720
47721 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
47722 rlim = rlimit(RLIMIT_DATA);
47723 if (rlim >= RLIM_INFINITY)
47724 rlim = ~0;
47725 +
47726 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
47727 if (ex.a_data + ex.a_bss > rlim)
47728 return -ENOMEM;
47729
47730 @@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
47731
47732 install_exec_creds(bprm);
47733
47734 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47735 + current->mm->pax_flags = 0UL;
47736 +#endif
47737 +
47738 +#ifdef CONFIG_PAX_PAGEEXEC
47739 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
47740 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
47741 +
47742 +#ifdef CONFIG_PAX_EMUTRAMP
47743 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
47744 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
47745 +#endif
47746 +
47747 +#ifdef CONFIG_PAX_MPROTECT
47748 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
47749 + current->mm->pax_flags |= MF_PAX_MPROTECT;
47750 +#endif
47751 +
47752 + }
47753 +#endif
47754 +
47755 if (N_MAGIC(ex) == OMAGIC) {
47756 unsigned long text_addr, map_size;
47757 loff_t pos;
47758 @@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
47759 }
47760
47761 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
47762 - PROT_READ | PROT_WRITE | PROT_EXEC,
47763 + PROT_READ | PROT_WRITE,
47764 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
47765 fd_offset + ex.a_text);
47766 if (error != N_DATADDR(ex)) {
47767 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
47768 index 86af964..8a1da7e 100644
47769 --- a/fs/binfmt_elf.c
47770 +++ b/fs/binfmt_elf.c
47771 @@ -34,6 +34,7 @@
47772 #include <linux/utsname.h>
47773 #include <linux/coredump.h>
47774 #include <linux/sched.h>
47775 +#include <linux/xattr.h>
47776 #include <asm/uaccess.h>
47777 #include <asm/param.h>
47778 #include <asm/page.h>
47779 @@ -60,6 +61,10 @@ static int elf_core_dump(struct coredump_params *cprm);
47780 #define elf_core_dump NULL
47781 #endif
47782
47783 +#ifdef CONFIG_PAX_MPROTECT
47784 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
47785 +#endif
47786 +
47787 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
47788 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
47789 #else
47790 @@ -79,6 +84,11 @@ static struct linux_binfmt elf_format = {
47791 .load_binary = load_elf_binary,
47792 .load_shlib = load_elf_library,
47793 .core_dump = elf_core_dump,
47794 +
47795 +#ifdef CONFIG_PAX_MPROTECT
47796 + .handle_mprotect= elf_handle_mprotect,
47797 +#endif
47798 +
47799 .min_coredump = ELF_EXEC_PAGESIZE,
47800 };
47801
47802 @@ -86,6 +96,8 @@ static struct linux_binfmt elf_format = {
47803
47804 static int set_brk(unsigned long start, unsigned long end)
47805 {
47806 + unsigned long e = end;
47807 +
47808 start = ELF_PAGEALIGN(start);
47809 end = ELF_PAGEALIGN(end);
47810 if (end > start) {
47811 @@ -94,7 +106,7 @@ static int set_brk(unsigned long start, unsigned long end)
47812 if (BAD_ADDR(addr))
47813 return addr;
47814 }
47815 - current->mm->start_brk = current->mm->brk = end;
47816 + current->mm->start_brk = current->mm->brk = e;
47817 return 0;
47818 }
47819
47820 @@ -155,12 +167,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47821 elf_addr_t __user *u_rand_bytes;
47822 const char *k_platform = ELF_PLATFORM;
47823 const char *k_base_platform = ELF_BASE_PLATFORM;
47824 - unsigned char k_rand_bytes[16];
47825 + u32 k_rand_bytes[4];
47826 int items;
47827 elf_addr_t *elf_info;
47828 int ei_index = 0;
47829 const struct cred *cred = current_cred();
47830 struct vm_area_struct *vma;
47831 + unsigned long saved_auxv[AT_VECTOR_SIZE];
47832
47833 /*
47834 * In some cases (e.g. Hyper-Threading), we want to avoid L1
47835 @@ -202,8 +215,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47836 * Generate 16 random bytes for userspace PRNG seeding.
47837 */
47838 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
47839 - u_rand_bytes = (elf_addr_t __user *)
47840 - STACK_ALLOC(p, sizeof(k_rand_bytes));
47841 + srandom32(k_rand_bytes[0] ^ random32());
47842 + srandom32(k_rand_bytes[1] ^ random32());
47843 + srandom32(k_rand_bytes[2] ^ random32());
47844 + srandom32(k_rand_bytes[3] ^ random32());
47845 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
47846 + u_rand_bytes = (elf_addr_t __user *) p;
47847 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
47848 return -EFAULT;
47849
47850 @@ -315,9 +332,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47851 return -EFAULT;
47852 current->mm->env_end = p;
47853
47854 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
47855 +
47856 /* Put the elf_info on the stack in the right place. */
47857 sp = (elf_addr_t __user *)envp + 1;
47858 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
47859 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
47860 return -EFAULT;
47861 return 0;
47862 }
47863 @@ -385,15 +404,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
47864 an ELF header */
47865
47866 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47867 - struct file *interpreter, unsigned long *interp_map_addr,
47868 - unsigned long no_base)
47869 + struct file *interpreter, unsigned long no_base)
47870 {
47871 struct elf_phdr *elf_phdata;
47872 struct elf_phdr *eppnt;
47873 - unsigned long load_addr = 0;
47874 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
47875 int load_addr_set = 0;
47876 unsigned long last_bss = 0, elf_bss = 0;
47877 - unsigned long error = ~0UL;
47878 + unsigned long error = -EINVAL;
47879 unsigned long total_size;
47880 int retval, i, size;
47881
47882 @@ -439,6 +457,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47883 goto out_close;
47884 }
47885
47886 +#ifdef CONFIG_PAX_SEGMEXEC
47887 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
47888 + pax_task_size = SEGMEXEC_TASK_SIZE;
47889 +#endif
47890 +
47891 eppnt = elf_phdata;
47892 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
47893 if (eppnt->p_type == PT_LOAD) {
47894 @@ -462,8 +485,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47895 map_addr = elf_map(interpreter, load_addr + vaddr,
47896 eppnt, elf_prot, elf_type, total_size);
47897 total_size = 0;
47898 - if (!*interp_map_addr)
47899 - *interp_map_addr = map_addr;
47900 error = map_addr;
47901 if (BAD_ADDR(map_addr))
47902 goto out_close;
47903 @@ -482,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47904 k = load_addr + eppnt->p_vaddr;
47905 if (BAD_ADDR(k) ||
47906 eppnt->p_filesz > eppnt->p_memsz ||
47907 - eppnt->p_memsz > TASK_SIZE ||
47908 - TASK_SIZE - eppnt->p_memsz < k) {
47909 + eppnt->p_memsz > pax_task_size ||
47910 + pax_task_size - eppnt->p_memsz < k) {
47911 error = -ENOMEM;
47912 goto out_close;
47913 }
47914 @@ -535,6 +556,315 @@ out:
47915 return error;
47916 }
47917
47918 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
47919 +#ifdef CONFIG_PAX_SOFTMODE
47920 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
47921 +{
47922 + unsigned long pax_flags = 0UL;
47923 +
47924 +#ifdef CONFIG_PAX_PAGEEXEC
47925 + if (elf_phdata->p_flags & PF_PAGEEXEC)
47926 + pax_flags |= MF_PAX_PAGEEXEC;
47927 +#endif
47928 +
47929 +#ifdef CONFIG_PAX_SEGMEXEC
47930 + if (elf_phdata->p_flags & PF_SEGMEXEC)
47931 + pax_flags |= MF_PAX_SEGMEXEC;
47932 +#endif
47933 +
47934 +#ifdef CONFIG_PAX_EMUTRAMP
47935 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
47936 + pax_flags |= MF_PAX_EMUTRAMP;
47937 +#endif
47938 +
47939 +#ifdef CONFIG_PAX_MPROTECT
47940 + if (elf_phdata->p_flags & PF_MPROTECT)
47941 + pax_flags |= MF_PAX_MPROTECT;
47942 +#endif
47943 +
47944 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47945 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
47946 + pax_flags |= MF_PAX_RANDMMAP;
47947 +#endif
47948 +
47949 + return pax_flags;
47950 +}
47951 +#endif
47952 +
47953 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
47954 +{
47955 + unsigned long pax_flags = 0UL;
47956 +
47957 +#ifdef CONFIG_PAX_PAGEEXEC
47958 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
47959 + pax_flags |= MF_PAX_PAGEEXEC;
47960 +#endif
47961 +
47962 +#ifdef CONFIG_PAX_SEGMEXEC
47963 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
47964 + pax_flags |= MF_PAX_SEGMEXEC;
47965 +#endif
47966 +
47967 +#ifdef CONFIG_PAX_EMUTRAMP
47968 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
47969 + pax_flags |= MF_PAX_EMUTRAMP;
47970 +#endif
47971 +
47972 +#ifdef CONFIG_PAX_MPROTECT
47973 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
47974 + pax_flags |= MF_PAX_MPROTECT;
47975 +#endif
47976 +
47977 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47978 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
47979 + pax_flags |= MF_PAX_RANDMMAP;
47980 +#endif
47981 +
47982 + return pax_flags;
47983 +}
47984 +#endif
47985 +
47986 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47987 +#ifdef CONFIG_PAX_SOFTMODE
47988 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
47989 +{
47990 + unsigned long pax_flags = 0UL;
47991 +
47992 +#ifdef CONFIG_PAX_PAGEEXEC
47993 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
47994 + pax_flags |= MF_PAX_PAGEEXEC;
47995 +#endif
47996 +
47997 +#ifdef CONFIG_PAX_SEGMEXEC
47998 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
47999 + pax_flags |= MF_PAX_SEGMEXEC;
48000 +#endif
48001 +
48002 +#ifdef CONFIG_PAX_EMUTRAMP
48003 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
48004 + pax_flags |= MF_PAX_EMUTRAMP;
48005 +#endif
48006 +
48007 +#ifdef CONFIG_PAX_MPROTECT
48008 + if (pax_flags_softmode & MF_PAX_MPROTECT)
48009 + pax_flags |= MF_PAX_MPROTECT;
48010 +#endif
48011 +
48012 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48013 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48014 + pax_flags |= MF_PAX_RANDMMAP;
48015 +#endif
48016 +
48017 + return pax_flags;
48018 +}
48019 +#endif
48020 +
48021 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48022 +{
48023 + unsigned long pax_flags = 0UL;
48024 +
48025 +#ifdef CONFIG_PAX_PAGEEXEC
48026 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48027 + pax_flags |= MF_PAX_PAGEEXEC;
48028 +#endif
48029 +
48030 +#ifdef CONFIG_PAX_SEGMEXEC
48031 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48032 + pax_flags |= MF_PAX_SEGMEXEC;
48033 +#endif
48034 +
48035 +#ifdef CONFIG_PAX_EMUTRAMP
48036 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48037 + pax_flags |= MF_PAX_EMUTRAMP;
48038 +#endif
48039 +
48040 +#ifdef CONFIG_PAX_MPROTECT
48041 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48042 + pax_flags |= MF_PAX_MPROTECT;
48043 +#endif
48044 +
48045 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48046 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48047 + pax_flags |= MF_PAX_RANDMMAP;
48048 +#endif
48049 +
48050 + return pax_flags;
48051 +}
48052 +#endif
48053 +
48054 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48055 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48056 +{
48057 + unsigned long pax_flags = 0UL;
48058 +
48059 +#ifdef CONFIG_PAX_EI_PAX
48060 +
48061 +#ifdef CONFIG_PAX_PAGEEXEC
48062 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48063 + pax_flags |= MF_PAX_PAGEEXEC;
48064 +#endif
48065 +
48066 +#ifdef CONFIG_PAX_SEGMEXEC
48067 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48068 + pax_flags |= MF_PAX_SEGMEXEC;
48069 +#endif
48070 +
48071 +#ifdef CONFIG_PAX_EMUTRAMP
48072 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48073 + pax_flags |= MF_PAX_EMUTRAMP;
48074 +#endif
48075 +
48076 +#ifdef CONFIG_PAX_MPROTECT
48077 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48078 + pax_flags |= MF_PAX_MPROTECT;
48079 +#endif
48080 +
48081 +#ifdef CONFIG_PAX_ASLR
48082 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48083 + pax_flags |= MF_PAX_RANDMMAP;
48084 +#endif
48085 +
48086 +#else
48087 +
48088 +#ifdef CONFIG_PAX_PAGEEXEC
48089 + pax_flags |= MF_PAX_PAGEEXEC;
48090 +#endif
48091 +
48092 +#ifdef CONFIG_PAX_SEGMEXEC
48093 + pax_flags |= MF_PAX_SEGMEXEC;
48094 +#endif
48095 +
48096 +#ifdef CONFIG_PAX_MPROTECT
48097 + pax_flags |= MF_PAX_MPROTECT;
48098 +#endif
48099 +
48100 +#ifdef CONFIG_PAX_RANDMMAP
48101 + if (randomize_va_space)
48102 + pax_flags |= MF_PAX_RANDMMAP;
48103 +#endif
48104 +
48105 +#endif
48106 +
48107 + return pax_flags;
48108 +}
48109 +
48110 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48111 +{
48112 +
48113 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
48114 + unsigned long i;
48115 +
48116 + for (i = 0UL; i < elf_ex->e_phnum; i++)
48117 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48118 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48119 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48120 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48121 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48122 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48123 + return ~0UL;
48124 +
48125 +#ifdef CONFIG_PAX_SOFTMODE
48126 + if (pax_softmode)
48127 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48128 + else
48129 +#endif
48130 +
48131 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48132 + break;
48133 + }
48134 +#endif
48135 +
48136 + return ~0UL;
48137 +}
48138 +
48139 +static unsigned long pax_parse_xattr_pax(struct file * const file)
48140 +{
48141 +
48142 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48143 + ssize_t xattr_size, i;
48144 + unsigned char xattr_value[sizeof("pemrs") - 1];
48145 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48146 +
48147 + xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
48148 + if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
48149 + return ~0UL;
48150 +
48151 + for (i = 0; i < xattr_size; i++)
48152 + switch (xattr_value[i]) {
48153 + default:
48154 + return ~0UL;
48155 +
48156 +#define parse_flag(option1, option2, flag) \
48157 + case option1: \
48158 + if (pax_flags_hardmode & MF_PAX_##flag) \
48159 + return ~0UL; \
48160 + pax_flags_hardmode |= MF_PAX_##flag; \
48161 + break; \
48162 + case option2: \
48163 + if (pax_flags_softmode & MF_PAX_##flag) \
48164 + return ~0UL; \
48165 + pax_flags_softmode |= MF_PAX_##flag; \
48166 + break;
48167 +
48168 + parse_flag('p', 'P', PAGEEXEC);
48169 + parse_flag('e', 'E', EMUTRAMP);
48170 + parse_flag('m', 'M', MPROTECT);
48171 + parse_flag('r', 'R', RANDMMAP);
48172 + parse_flag('s', 'S', SEGMEXEC);
48173 +
48174 +#undef parse_flag
48175 + }
48176 +
48177 + if (pax_flags_hardmode & pax_flags_softmode)
48178 + return ~0UL;
48179 +
48180 +#ifdef CONFIG_PAX_SOFTMODE
48181 + if (pax_softmode)
48182 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
48183 + else
48184 +#endif
48185 +
48186 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
48187 +#else
48188 + return ~0UL;
48189 +#endif
48190 +
48191 +}
48192 +
48193 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
48194 +{
48195 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
48196 +
48197 + pax_flags = pax_parse_ei_pax(elf_ex);
48198 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
48199 + xattr_pax_flags = pax_parse_xattr_pax(file);
48200 +
48201 + if (pt_pax_flags == ~0UL)
48202 + pt_pax_flags = xattr_pax_flags;
48203 + else if (xattr_pax_flags == ~0UL)
48204 + xattr_pax_flags = pt_pax_flags;
48205 + if (pt_pax_flags != xattr_pax_flags)
48206 + return -EINVAL;
48207 + if (pt_pax_flags != ~0UL)
48208 + pax_flags = pt_pax_flags;
48209 +
48210 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
48211 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48212 + if ((__supported_pte_mask & _PAGE_NX))
48213 + pax_flags &= ~MF_PAX_SEGMEXEC;
48214 + else
48215 + pax_flags &= ~MF_PAX_PAGEEXEC;
48216 + }
48217 +#endif
48218 +
48219 + if (0 > pax_check_flags(&pax_flags))
48220 + return -EINVAL;
48221 +
48222 + current->mm->pax_flags = pax_flags;
48223 + return 0;
48224 +}
48225 +#endif
48226 +
48227 /*
48228 * These are the functions used to load ELF style executables and shared
48229 * libraries. There is no binary dependent code anywhere else.
48230 @@ -551,6 +881,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
48231 {
48232 unsigned int random_variable = 0;
48233
48234 +#ifdef CONFIG_PAX_RANDUSTACK
48235 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
48236 + return stack_top - current->mm->delta_stack;
48237 +#endif
48238 +
48239 if ((current->flags & PF_RANDOMIZE) &&
48240 !(current->personality & ADDR_NO_RANDOMIZE)) {
48241 random_variable = get_random_int() & STACK_RND_MASK;
48242 @@ -569,7 +904,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
48243 unsigned long load_addr = 0, load_bias = 0;
48244 int load_addr_set = 0;
48245 char * elf_interpreter = NULL;
48246 - unsigned long error;
48247 + unsigned long error = 0;
48248 struct elf_phdr *elf_ppnt, *elf_phdata;
48249 unsigned long elf_bss, elf_brk;
48250 int retval, i;
48251 @@ -579,12 +914,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
48252 unsigned long start_code, end_code, start_data, end_data;
48253 unsigned long reloc_func_desc __maybe_unused = 0;
48254 int executable_stack = EXSTACK_DEFAULT;
48255 - unsigned long def_flags = 0;
48256 struct pt_regs *regs = current_pt_regs();
48257 struct {
48258 struct elfhdr elf_ex;
48259 struct elfhdr interp_elf_ex;
48260 } *loc;
48261 + unsigned long pax_task_size = TASK_SIZE;
48262
48263 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
48264 if (!loc) {
48265 @@ -720,11 +1055,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
48266 goto out_free_dentry;
48267
48268 /* OK, This is the point of no return */
48269 - current->mm->def_flags = def_flags;
48270 +
48271 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48272 + current->mm->pax_flags = 0UL;
48273 +#endif
48274 +
48275 +#ifdef CONFIG_PAX_DLRESOLVE
48276 + current->mm->call_dl_resolve = 0UL;
48277 +#endif
48278 +
48279 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
48280 + current->mm->call_syscall = 0UL;
48281 +#endif
48282 +
48283 +#ifdef CONFIG_PAX_ASLR
48284 + current->mm->delta_mmap = 0UL;
48285 + current->mm->delta_stack = 0UL;
48286 +#endif
48287 +
48288 + current->mm->def_flags = 0;
48289 +
48290 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48291 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
48292 + send_sig(SIGKILL, current, 0);
48293 + goto out_free_dentry;
48294 + }
48295 +#endif
48296 +
48297 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
48298 + pax_set_initial_flags(bprm);
48299 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
48300 + if (pax_set_initial_flags_func)
48301 + (pax_set_initial_flags_func)(bprm);
48302 +#endif
48303 +
48304 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48305 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
48306 + current->mm->context.user_cs_limit = PAGE_SIZE;
48307 + current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
48308 + }
48309 +#endif
48310 +
48311 +#ifdef CONFIG_PAX_SEGMEXEC
48312 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
48313 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
48314 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
48315 + pax_task_size = SEGMEXEC_TASK_SIZE;
48316 + current->mm->def_flags |= VM_NOHUGEPAGE;
48317 + }
48318 +#endif
48319 +
48320 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
48321 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48322 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
48323 + put_cpu();
48324 + }
48325 +#endif
48326
48327 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
48328 may depend on the personality. */
48329 SET_PERSONALITY(loc->elf_ex);
48330 +
48331 +#ifdef CONFIG_PAX_ASLR
48332 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48333 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
48334 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
48335 + }
48336 +#endif
48337 +
48338 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48339 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48340 + executable_stack = EXSTACK_DISABLE_X;
48341 + current->personality &= ~READ_IMPLIES_EXEC;
48342 + } else
48343 +#endif
48344 +
48345 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
48346 current->personality |= READ_IMPLIES_EXEC;
48347
48348 @@ -815,6 +1220,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
48349 #else
48350 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
48351 #endif
48352 +
48353 +#ifdef CONFIG_PAX_RANDMMAP
48354 + /* PaX: randomize base address at the default exe base if requested */
48355 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
48356 +#ifdef CONFIG_SPARC64
48357 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
48358 +#else
48359 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
48360 +#endif
48361 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
48362 + elf_flags |= MAP_FIXED;
48363 + }
48364 +#endif
48365 +
48366 }
48367
48368 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
48369 @@ -847,9 +1266,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
48370 * allowed task size. Note that p_filesz must always be
48371 * <= p_memsz so it is only necessary to check p_memsz.
48372 */
48373 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48374 - elf_ppnt->p_memsz > TASK_SIZE ||
48375 - TASK_SIZE - elf_ppnt->p_memsz < k) {
48376 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48377 + elf_ppnt->p_memsz > pax_task_size ||
48378 + pax_task_size - elf_ppnt->p_memsz < k) {
48379 /* set_brk can never work. Avoid overflows. */
48380 send_sig(SIGKILL, current, 0);
48381 retval = -EINVAL;
48382 @@ -888,17 +1307,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
48383 goto out_free_dentry;
48384 }
48385 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
48386 - send_sig(SIGSEGV, current, 0);
48387 - retval = -EFAULT; /* Nobody gets to see this, but.. */
48388 - goto out_free_dentry;
48389 + /*
48390 + * This bss-zeroing can fail if the ELF
48391 + * file specifies odd protections. So
48392 + * we don't check the return value
48393 + */
48394 }
48395
48396 +#ifdef CONFIG_PAX_RANDMMAP
48397 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48398 + unsigned long start, size, flags;
48399 + vm_flags_t vm_flags;
48400 +
48401 + start = ELF_PAGEALIGN(elf_brk);
48402 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
48403 + flags = MAP_FIXED | MAP_PRIVATE;
48404 + vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
48405 +
48406 + down_write(&current->mm->mmap_sem);
48407 + start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
48408 + retval = -ENOMEM;
48409 + if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
48410 +// if (current->personality & ADDR_NO_RANDOMIZE)
48411 +// vm_flags |= VM_READ | VM_MAYREAD;
48412 + start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
48413 + retval = IS_ERR_VALUE(start) ? start : 0;
48414 + }
48415 + up_write(&current->mm->mmap_sem);
48416 + if (retval == 0)
48417 + retval = set_brk(start + size, start + size + PAGE_SIZE);
48418 + if (retval < 0) {
48419 + send_sig(SIGKILL, current, 0);
48420 + goto out_free_dentry;
48421 + }
48422 + }
48423 +#endif
48424 +
48425 if (elf_interpreter) {
48426 - unsigned long interp_map_addr = 0;
48427 -
48428 elf_entry = load_elf_interp(&loc->interp_elf_ex,
48429 interpreter,
48430 - &interp_map_addr,
48431 load_bias);
48432 if (!IS_ERR((void *)elf_entry)) {
48433 /*
48434 @@ -1120,7 +1567,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
48435 * Decide what to dump of a segment, part, all or none.
48436 */
48437 static unsigned long vma_dump_size(struct vm_area_struct *vma,
48438 - unsigned long mm_flags)
48439 + unsigned long mm_flags, long signr)
48440 {
48441 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
48442
48443 @@ -1158,7 +1605,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
48444 if (vma->vm_file == NULL)
48445 return 0;
48446
48447 - if (FILTER(MAPPED_PRIVATE))
48448 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
48449 goto whole;
48450
48451 /*
48452 @@ -1383,9 +1830,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
48453 {
48454 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
48455 int i = 0;
48456 - do
48457 + do {
48458 i += 2;
48459 - while (auxv[i - 2] != AT_NULL);
48460 + } while (auxv[i - 2] != AT_NULL);
48461 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
48462 }
48463
48464 @@ -2015,14 +2462,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
48465 }
48466
48467 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
48468 - unsigned long mm_flags)
48469 + struct coredump_params *cprm)
48470 {
48471 struct vm_area_struct *vma;
48472 size_t size = 0;
48473
48474 for (vma = first_vma(current, gate_vma); vma != NULL;
48475 vma = next_vma(vma, gate_vma))
48476 - size += vma_dump_size(vma, mm_flags);
48477 + size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48478 return size;
48479 }
48480
48481 @@ -2116,7 +2563,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48482
48483 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
48484
48485 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
48486 + offset += elf_core_vma_data_size(gate_vma, cprm);
48487 offset += elf_core_extra_data_size();
48488 e_shoff = offset;
48489
48490 @@ -2130,10 +2577,12 @@ static int elf_core_dump(struct coredump_params *cprm)
48491 offset = dataoff;
48492
48493 size += sizeof(*elf);
48494 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
48495 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
48496 goto end_coredump;
48497
48498 size += sizeof(*phdr4note);
48499 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
48500 if (size > cprm->limit
48501 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
48502 goto end_coredump;
48503 @@ -2147,7 +2596,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48504 phdr.p_offset = offset;
48505 phdr.p_vaddr = vma->vm_start;
48506 phdr.p_paddr = 0;
48507 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
48508 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48509 phdr.p_memsz = vma->vm_end - vma->vm_start;
48510 offset += phdr.p_filesz;
48511 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
48512 @@ -2158,6 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48513 phdr.p_align = ELF_EXEC_PAGESIZE;
48514
48515 size += sizeof(phdr);
48516 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
48517 if (size > cprm->limit
48518 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
48519 goto end_coredump;
48520 @@ -2182,7 +2632,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48521 unsigned long addr;
48522 unsigned long end;
48523
48524 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
48525 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48526
48527 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
48528 struct page *page;
48529 @@ -2191,6 +2641,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48530 page = get_dump_page(addr);
48531 if (page) {
48532 void *kaddr = kmap(page);
48533 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
48534 stop = ((size += PAGE_SIZE) > cprm->limit) ||
48535 !dump_write(cprm->file, kaddr,
48536 PAGE_SIZE);
48537 @@ -2208,6 +2659,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48538
48539 if (e_phnum == PN_XNUM) {
48540 size += sizeof(*shdr4extnum);
48541 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
48542 if (size > cprm->limit
48543 || !dump_write(cprm->file, shdr4extnum,
48544 sizeof(*shdr4extnum)))
48545 @@ -2228,6 +2680,97 @@ out:
48546
48547 #endif /* CONFIG_ELF_CORE */
48548
48549 +#ifdef CONFIG_PAX_MPROTECT
48550 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
48551 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
48552 + * we'll remove VM_MAYWRITE for good on RELRO segments.
48553 + *
48554 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
48555 + * basis because we want to allow the common case and not the special ones.
48556 + */
48557 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
48558 +{
48559 + struct elfhdr elf_h;
48560 + struct elf_phdr elf_p;
48561 + unsigned long i;
48562 + unsigned long oldflags;
48563 + bool is_textrel_rw, is_textrel_rx, is_relro;
48564 +
48565 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
48566 + return;
48567 +
48568 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
48569 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
48570 +
48571 +#ifdef CONFIG_PAX_ELFRELOCS
48572 + /* possible TEXTREL */
48573 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
48574 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
48575 +#else
48576 + is_textrel_rw = false;
48577 + is_textrel_rx = false;
48578 +#endif
48579 +
48580 + /* possible RELRO */
48581 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
48582 +
48583 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
48584 + return;
48585 +
48586 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
48587 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
48588 +
48589 +#ifdef CONFIG_PAX_ETEXECRELOCS
48590 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48591 +#else
48592 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
48593 +#endif
48594 +
48595 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48596 + !elf_check_arch(&elf_h) ||
48597 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
48598 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
48599 + return;
48600 +
48601 + for (i = 0UL; i < elf_h.e_phnum; i++) {
48602 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
48603 + return;
48604 + switch (elf_p.p_type) {
48605 + case PT_DYNAMIC:
48606 + if (!is_textrel_rw && !is_textrel_rx)
48607 + continue;
48608 + i = 0UL;
48609 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
48610 + elf_dyn dyn;
48611 +
48612 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
48613 + return;
48614 + if (dyn.d_tag == DT_NULL)
48615 + return;
48616 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
48617 + gr_log_textrel(vma);
48618 + if (is_textrel_rw)
48619 + vma->vm_flags |= VM_MAYWRITE;
48620 + else
48621 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
48622 + vma->vm_flags &= ~VM_MAYWRITE;
48623 + return;
48624 + }
48625 + i++;
48626 + }
48627 + return;
48628 +
48629 + case PT_GNU_RELRO:
48630 + if (!is_relro)
48631 + continue;
48632 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
48633 + vma->vm_flags &= ~VM_MAYWRITE;
48634 + return;
48635 + }
48636 + }
48637 +}
48638 +#endif
48639 +
48640 static int __init init_elf_binfmt(void)
48641 {
48642 register_binfmt(&elf_format);
48643 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
48644 index 2036d21..b0430d0 100644
48645 --- a/fs/binfmt_flat.c
48646 +++ b/fs/binfmt_flat.c
48647 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
48648 realdatastart = (unsigned long) -ENOMEM;
48649 printk("Unable to allocate RAM for process data, errno %d\n",
48650 (int)-realdatastart);
48651 + down_write(&current->mm->mmap_sem);
48652 vm_munmap(textpos, text_len);
48653 + up_write(&current->mm->mmap_sem);
48654 ret = realdatastart;
48655 goto err;
48656 }
48657 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48658 }
48659 if (IS_ERR_VALUE(result)) {
48660 printk("Unable to read data+bss, errno %d\n", (int)-result);
48661 + down_write(&current->mm->mmap_sem);
48662 vm_munmap(textpos, text_len);
48663 vm_munmap(realdatastart, len);
48664 + up_write(&current->mm->mmap_sem);
48665 ret = result;
48666 goto err;
48667 }
48668 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48669 }
48670 if (IS_ERR_VALUE(result)) {
48671 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
48672 + down_write(&current->mm->mmap_sem);
48673 vm_munmap(textpos, text_len + data_len + extra +
48674 MAX_SHARED_LIBS * sizeof(unsigned long));
48675 + up_write(&current->mm->mmap_sem);
48676 ret = result;
48677 goto err;
48678 }
48679 diff --git a/fs/bio.c b/fs/bio.c
48680 index b96fc6c..431d628 100644
48681 --- a/fs/bio.c
48682 +++ b/fs/bio.c
48683 @@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
48684 /*
48685 * Overflow, abort
48686 */
48687 - if (end < start)
48688 + if (end < start || end - start > INT_MAX - nr_pages)
48689 return ERR_PTR(-EINVAL);
48690
48691 nr_pages += end - start;
48692 @@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
48693 /*
48694 * Overflow, abort
48695 */
48696 - if (end < start)
48697 + if (end < start || end - start > INT_MAX - nr_pages)
48698 return ERR_PTR(-EINVAL);
48699
48700 nr_pages += end - start;
48701 @@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
48702 const int read = bio_data_dir(bio) == READ;
48703 struct bio_map_data *bmd = bio->bi_private;
48704 int i;
48705 - char *p = bmd->sgvecs[0].iov_base;
48706 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
48707
48708 __bio_for_each_segment(bvec, bio, i, 0) {
48709 char *addr = page_address(bvec->bv_page);
48710 diff --git a/fs/block_dev.c b/fs/block_dev.c
48711 index aae187a..fd790ba 100644
48712 --- a/fs/block_dev.c
48713 +++ b/fs/block_dev.c
48714 @@ -652,7 +652,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
48715 else if (bdev->bd_contains == bdev)
48716 return true; /* is a whole device which isn't held */
48717
48718 - else if (whole->bd_holder == bd_may_claim)
48719 + else if (whole->bd_holder == (void *)bd_may_claim)
48720 return true; /* is a partition of a device that is being partitioned */
48721 else if (whole->bd_holder != NULL)
48722 return false; /* is a partition of a held device */
48723 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
48724 index ca9d8f1..8c0142d 100644
48725 --- a/fs/btrfs/ctree.c
48726 +++ b/fs/btrfs/ctree.c
48727 @@ -1036,9 +1036,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
48728 free_extent_buffer(buf);
48729 add_root_to_dirty_list(root);
48730 } else {
48731 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
48732 - parent_start = parent->start;
48733 - else
48734 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
48735 + if (parent)
48736 + parent_start = parent->start;
48737 + else
48738 + parent_start = 0;
48739 + } else
48740 parent_start = 0;
48741
48742 WARN_ON(trans->transid != btrfs_header_generation(parent));
48743 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
48744 index 2c02310..a0c895e 100644
48745 --- a/fs/btrfs/ioctl.c
48746 +++ b/fs/btrfs/ioctl.c
48747 @@ -3077,9 +3077,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48748 for (i = 0; i < num_types; i++) {
48749 struct btrfs_space_info *tmp;
48750
48751 + /* Don't copy in more than we allocated */
48752 if (!slot_count)
48753 break;
48754
48755 + slot_count--;
48756 +
48757 info = NULL;
48758 rcu_read_lock();
48759 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
48760 @@ -3101,10 +3104,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48761 memcpy(dest, &space, sizeof(space));
48762 dest++;
48763 space_args.total_spaces++;
48764 - slot_count--;
48765 }
48766 - if (!slot_count)
48767 - break;
48768 }
48769 up_read(&info->groups_sem);
48770 }
48771 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
48772 index f6b8859..54fe8c5 100644
48773 --- a/fs/btrfs/super.c
48774 +++ b/fs/btrfs/super.c
48775 @@ -266,7 +266,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
48776 function, line, errstr);
48777 return;
48778 }
48779 - ACCESS_ONCE(trans->transaction->aborted) = errno;
48780 + ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
48781 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
48782 }
48783 /*
48784 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
48785 index 622f469..e8d2d55 100644
48786 --- a/fs/cachefiles/bind.c
48787 +++ b/fs/cachefiles/bind.c
48788 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
48789 args);
48790
48791 /* start by checking things over */
48792 - ASSERT(cache->fstop_percent >= 0 &&
48793 - cache->fstop_percent < cache->fcull_percent &&
48794 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
48795 cache->fcull_percent < cache->frun_percent &&
48796 cache->frun_percent < 100);
48797
48798 - ASSERT(cache->bstop_percent >= 0 &&
48799 - cache->bstop_percent < cache->bcull_percent &&
48800 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
48801 cache->bcull_percent < cache->brun_percent &&
48802 cache->brun_percent < 100);
48803
48804 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
48805 index 0a1467b..6a53245 100644
48806 --- a/fs/cachefiles/daemon.c
48807 +++ b/fs/cachefiles/daemon.c
48808 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
48809 if (n > buflen)
48810 return -EMSGSIZE;
48811
48812 - if (copy_to_user(_buffer, buffer, n) != 0)
48813 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
48814 return -EFAULT;
48815
48816 return n;
48817 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
48818 if (test_bit(CACHEFILES_DEAD, &cache->flags))
48819 return -EIO;
48820
48821 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
48822 + if (datalen > PAGE_SIZE - 1)
48823 return -EOPNOTSUPP;
48824
48825 /* drag the command string into the kernel so we can parse it */
48826 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
48827 if (args[0] != '%' || args[1] != '\0')
48828 return -EINVAL;
48829
48830 - if (fstop < 0 || fstop >= cache->fcull_percent)
48831 + if (fstop >= cache->fcull_percent)
48832 return cachefiles_daemon_range_error(cache, args);
48833
48834 cache->fstop_percent = fstop;
48835 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
48836 if (args[0] != '%' || args[1] != '\0')
48837 return -EINVAL;
48838
48839 - if (bstop < 0 || bstop >= cache->bcull_percent)
48840 + if (bstop >= cache->bcull_percent)
48841 return cachefiles_daemon_range_error(cache, args);
48842
48843 cache->bstop_percent = bstop;
48844 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
48845 index 4938251..7e01445 100644
48846 --- a/fs/cachefiles/internal.h
48847 +++ b/fs/cachefiles/internal.h
48848 @@ -59,7 +59,7 @@ struct cachefiles_cache {
48849 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
48850 struct rb_root active_nodes; /* active nodes (can't be culled) */
48851 rwlock_t active_lock; /* lock for active_nodes */
48852 - atomic_t gravecounter; /* graveyard uniquifier */
48853 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
48854 unsigned frun_percent; /* when to stop culling (% files) */
48855 unsigned fcull_percent; /* when to start culling (% files) */
48856 unsigned fstop_percent; /* when to stop allocating (% files) */
48857 @@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
48858 * proc.c
48859 */
48860 #ifdef CONFIG_CACHEFILES_HISTOGRAM
48861 -extern atomic_t cachefiles_lookup_histogram[HZ];
48862 -extern atomic_t cachefiles_mkdir_histogram[HZ];
48863 -extern atomic_t cachefiles_create_histogram[HZ];
48864 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48865 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48866 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
48867
48868 extern int __init cachefiles_proc_init(void);
48869 extern void cachefiles_proc_cleanup(void);
48870 static inline
48871 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
48872 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
48873 {
48874 unsigned long jif = jiffies - start_jif;
48875 if (jif >= HZ)
48876 jif = HZ - 1;
48877 - atomic_inc(&histogram[jif]);
48878 + atomic_inc_unchecked(&histogram[jif]);
48879 }
48880
48881 #else
48882 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
48883 index 8c01c5fc..15f982e 100644
48884 --- a/fs/cachefiles/namei.c
48885 +++ b/fs/cachefiles/namei.c
48886 @@ -317,7 +317,7 @@ try_again:
48887 /* first step is to make up a grave dentry in the graveyard */
48888 sprintf(nbuffer, "%08x%08x",
48889 (uint32_t) get_seconds(),
48890 - (uint32_t) atomic_inc_return(&cache->gravecounter));
48891 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
48892
48893 /* do the multiway lock magic */
48894 trap = lock_rename(cache->graveyard, dir);
48895 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
48896 index eccd339..4c1d995 100644
48897 --- a/fs/cachefiles/proc.c
48898 +++ b/fs/cachefiles/proc.c
48899 @@ -14,9 +14,9 @@
48900 #include <linux/seq_file.h>
48901 #include "internal.h"
48902
48903 -atomic_t cachefiles_lookup_histogram[HZ];
48904 -atomic_t cachefiles_mkdir_histogram[HZ];
48905 -atomic_t cachefiles_create_histogram[HZ];
48906 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48907 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48908 +atomic_unchecked_t cachefiles_create_histogram[HZ];
48909
48910 /*
48911 * display the latency histogram
48912 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
48913 return 0;
48914 default:
48915 index = (unsigned long) v - 3;
48916 - x = atomic_read(&cachefiles_lookup_histogram[index]);
48917 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
48918 - z = atomic_read(&cachefiles_create_histogram[index]);
48919 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
48920 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
48921 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
48922 if (x == 0 && y == 0 && z == 0)
48923 return 0;
48924
48925 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
48926 index 4809922..aab2c39 100644
48927 --- a/fs/cachefiles/rdwr.c
48928 +++ b/fs/cachefiles/rdwr.c
48929 @@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
48930 old_fs = get_fs();
48931 set_fs(KERNEL_DS);
48932 ret = file->f_op->write(
48933 - file, (const void __user *) data, len, &pos);
48934 + file, (const void __force_user *) data, len, &pos);
48935 set_fs(old_fs);
48936 kunmap(page);
48937 if (ret != len)
48938 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
48939 index 6d797f4..0ace2e5 100644
48940 --- a/fs/ceph/dir.c
48941 +++ b/fs/ceph/dir.c
48942 @@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
48943 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
48944 struct ceph_mds_client *mdsc = fsc->mdsc;
48945 unsigned frag = fpos_frag(filp->f_pos);
48946 - int off = fpos_off(filp->f_pos);
48947 + unsigned int off = fpos_off(filp->f_pos);
48948 int err;
48949 u32 ftype;
48950 struct ceph_mds_reply_info_parsed *rinfo;
48951 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
48952 index d9ea6ed..1e6c8ac 100644
48953 --- a/fs/cifs/cifs_debug.c
48954 +++ b/fs/cifs/cifs_debug.c
48955 @@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48956
48957 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
48958 #ifdef CONFIG_CIFS_STATS2
48959 - atomic_set(&totBufAllocCount, 0);
48960 - atomic_set(&totSmBufAllocCount, 0);
48961 + atomic_set_unchecked(&totBufAllocCount, 0);
48962 + atomic_set_unchecked(&totSmBufAllocCount, 0);
48963 #endif /* CONFIG_CIFS_STATS2 */
48964 spin_lock(&cifs_tcp_ses_lock);
48965 list_for_each(tmp1, &cifs_tcp_ses_list) {
48966 @@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48967 tcon = list_entry(tmp3,
48968 struct cifs_tcon,
48969 tcon_list);
48970 - atomic_set(&tcon->num_smbs_sent, 0);
48971 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
48972 if (server->ops->clear_stats)
48973 server->ops->clear_stats(tcon);
48974 }
48975 @@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48976 smBufAllocCount.counter, cifs_min_small);
48977 #ifdef CONFIG_CIFS_STATS2
48978 seq_printf(m, "Total Large %d Small %d Allocations\n",
48979 - atomic_read(&totBufAllocCount),
48980 - atomic_read(&totSmBufAllocCount));
48981 + atomic_read_unchecked(&totBufAllocCount),
48982 + atomic_read_unchecked(&totSmBufAllocCount));
48983 #endif /* CONFIG_CIFS_STATS2 */
48984
48985 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
48986 @@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48987 if (tcon->need_reconnect)
48988 seq_puts(m, "\tDISCONNECTED ");
48989 seq_printf(m, "\nSMBs: %d",
48990 - atomic_read(&tcon->num_smbs_sent));
48991 + atomic_read_unchecked(&tcon->num_smbs_sent));
48992 if (server->ops->print_stats)
48993 server->ops->print_stats(m, tcon);
48994 }
48995 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48996 index 345fc89..b2acae5 100644
48997 --- a/fs/cifs/cifsfs.c
48998 +++ b/fs/cifs/cifsfs.c
48999 @@ -1033,7 +1033,7 @@ cifs_init_request_bufs(void)
49000 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
49001 cifs_req_cachep = kmem_cache_create("cifs_request",
49002 CIFSMaxBufSize + max_hdr_size, 0,
49003 - SLAB_HWCACHE_ALIGN, NULL);
49004 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49005 if (cifs_req_cachep == NULL)
49006 return -ENOMEM;
49007
49008 @@ -1060,7 +1060,7 @@ cifs_init_request_bufs(void)
49009 efficient to alloc 1 per page off the slab compared to 17K (5page)
49010 alloc of large cifs buffers even when page debugging is on */
49011 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49012 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49013 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49014 NULL);
49015 if (cifs_sm_req_cachep == NULL) {
49016 mempool_destroy(cifs_req_poolp);
49017 @@ -1145,8 +1145,8 @@ init_cifs(void)
49018 atomic_set(&bufAllocCount, 0);
49019 atomic_set(&smBufAllocCount, 0);
49020 #ifdef CONFIG_CIFS_STATS2
49021 - atomic_set(&totBufAllocCount, 0);
49022 - atomic_set(&totSmBufAllocCount, 0);
49023 + atomic_set_unchecked(&totBufAllocCount, 0);
49024 + atomic_set_unchecked(&totSmBufAllocCount, 0);
49025 #endif /* CONFIG_CIFS_STATS2 */
49026
49027 atomic_set(&midCount, 0);
49028 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49029 index 4f07f6f..55de8ce 100644
49030 --- a/fs/cifs/cifsglob.h
49031 +++ b/fs/cifs/cifsglob.h
49032 @@ -751,35 +751,35 @@ struct cifs_tcon {
49033 __u16 Flags; /* optional support bits */
49034 enum statusEnum tidStatus;
49035 #ifdef CONFIG_CIFS_STATS
49036 - atomic_t num_smbs_sent;
49037 + atomic_unchecked_t num_smbs_sent;
49038 union {
49039 struct {
49040 - atomic_t num_writes;
49041 - atomic_t num_reads;
49042 - atomic_t num_flushes;
49043 - atomic_t num_oplock_brks;
49044 - atomic_t num_opens;
49045 - atomic_t num_closes;
49046 - atomic_t num_deletes;
49047 - atomic_t num_mkdirs;
49048 - atomic_t num_posixopens;
49049 - atomic_t num_posixmkdirs;
49050 - atomic_t num_rmdirs;
49051 - atomic_t num_renames;
49052 - atomic_t num_t2renames;
49053 - atomic_t num_ffirst;
49054 - atomic_t num_fnext;
49055 - atomic_t num_fclose;
49056 - atomic_t num_hardlinks;
49057 - atomic_t num_symlinks;
49058 - atomic_t num_locks;
49059 - atomic_t num_acl_get;
49060 - atomic_t num_acl_set;
49061 + atomic_unchecked_t num_writes;
49062 + atomic_unchecked_t num_reads;
49063 + atomic_unchecked_t num_flushes;
49064 + atomic_unchecked_t num_oplock_brks;
49065 + atomic_unchecked_t num_opens;
49066 + atomic_unchecked_t num_closes;
49067 + atomic_unchecked_t num_deletes;
49068 + atomic_unchecked_t num_mkdirs;
49069 + atomic_unchecked_t num_posixopens;
49070 + atomic_unchecked_t num_posixmkdirs;
49071 + atomic_unchecked_t num_rmdirs;
49072 + atomic_unchecked_t num_renames;
49073 + atomic_unchecked_t num_t2renames;
49074 + atomic_unchecked_t num_ffirst;
49075 + atomic_unchecked_t num_fnext;
49076 + atomic_unchecked_t num_fclose;
49077 + atomic_unchecked_t num_hardlinks;
49078 + atomic_unchecked_t num_symlinks;
49079 + atomic_unchecked_t num_locks;
49080 + atomic_unchecked_t num_acl_get;
49081 + atomic_unchecked_t num_acl_set;
49082 } cifs_stats;
49083 #ifdef CONFIG_CIFS_SMB2
49084 struct {
49085 - atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49086 - atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49087 + atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49088 + atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49089 } smb2_stats;
49090 #endif /* CONFIG_CIFS_SMB2 */
49091 } stats;
49092 @@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49093 }
49094
49095 #ifdef CONFIG_CIFS_STATS
49096 -#define cifs_stats_inc atomic_inc
49097 +#define cifs_stats_inc atomic_inc_unchecked
49098
49099 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49100 unsigned int bytes)
49101 @@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49102 /* Various Debug counters */
49103 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49104 #ifdef CONFIG_CIFS_STATS2
49105 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49106 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49107 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49108 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49109 #endif
49110 GLOBAL_EXTERN atomic_t smBufAllocCount;
49111 GLOBAL_EXTERN atomic_t midCount;
49112 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49113 index 9f6c4c4..8de307a 100644
49114 --- a/fs/cifs/link.c
49115 +++ b/fs/cifs/link.c
49116 @@ -616,7 +616,7 @@ symlink_exit:
49117
49118 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49119 {
49120 - char *p = nd_get_link(nd);
49121 + const char *p = nd_get_link(nd);
49122 if (!IS_ERR(p))
49123 kfree(p);
49124 }
49125 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49126 index 1b15bf8..1ce489e 100644
49127 --- a/fs/cifs/misc.c
49128 +++ b/fs/cifs/misc.c
49129 @@ -169,7 +169,7 @@ cifs_buf_get(void)
49130 memset(ret_buf, 0, buf_size + 3);
49131 atomic_inc(&bufAllocCount);
49132 #ifdef CONFIG_CIFS_STATS2
49133 - atomic_inc(&totBufAllocCount);
49134 + atomic_inc_unchecked(&totBufAllocCount);
49135 #endif /* CONFIG_CIFS_STATS2 */
49136 }
49137
49138 @@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49139 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49140 atomic_inc(&smBufAllocCount);
49141 #ifdef CONFIG_CIFS_STATS2
49142 - atomic_inc(&totSmBufAllocCount);
49143 + atomic_inc_unchecked(&totSmBufAllocCount);
49144 #endif /* CONFIG_CIFS_STATS2 */
49145
49146 }
49147 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49148 index 47bc5a8..10decbe 100644
49149 --- a/fs/cifs/smb1ops.c
49150 +++ b/fs/cifs/smb1ops.c
49151 @@ -586,27 +586,27 @@ static void
49152 cifs_clear_stats(struct cifs_tcon *tcon)
49153 {
49154 #ifdef CONFIG_CIFS_STATS
49155 - atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49156 - atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
49157 - atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
49158 - atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49159 - atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
49160 - atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
49161 - atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49162 - atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
49163 - atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
49164 - atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
49165 - atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
49166 - atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
49167 - atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
49168 - atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
49169 - atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
49170 - atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
49171 - atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
49172 - atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
49173 - atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
49174 - atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
49175 - atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
49176 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
49177 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
49178 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
49179 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49180 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
49181 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
49182 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49183 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
49184 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
49185 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
49186 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
49187 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
49188 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
49189 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
49190 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
49191 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
49192 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
49193 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
49194 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
49195 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
49196 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
49197 #endif
49198 }
49199
49200 @@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49201 {
49202 #ifdef CONFIG_CIFS_STATS
49203 seq_printf(m, " Oplocks breaks: %d",
49204 - atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
49205 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
49206 seq_printf(m, "\nReads: %d Bytes: %llu",
49207 - atomic_read(&tcon->stats.cifs_stats.num_reads),
49208 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
49209 (long long)(tcon->bytes_read));
49210 seq_printf(m, "\nWrites: %d Bytes: %llu",
49211 - atomic_read(&tcon->stats.cifs_stats.num_writes),
49212 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
49213 (long long)(tcon->bytes_written));
49214 seq_printf(m, "\nFlushes: %d",
49215 - atomic_read(&tcon->stats.cifs_stats.num_flushes));
49216 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
49217 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
49218 - atomic_read(&tcon->stats.cifs_stats.num_locks),
49219 - atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
49220 - atomic_read(&tcon->stats.cifs_stats.num_symlinks));
49221 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
49222 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
49223 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
49224 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
49225 - atomic_read(&tcon->stats.cifs_stats.num_opens),
49226 - atomic_read(&tcon->stats.cifs_stats.num_closes),
49227 - atomic_read(&tcon->stats.cifs_stats.num_deletes));
49228 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
49229 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
49230 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
49231 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
49232 - atomic_read(&tcon->stats.cifs_stats.num_posixopens),
49233 - atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
49234 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
49235 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
49236 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
49237 - atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
49238 - atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
49239 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
49240 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
49241 seq_printf(m, "\nRenames: %d T2 Renames %d",
49242 - atomic_read(&tcon->stats.cifs_stats.num_renames),
49243 - atomic_read(&tcon->stats.cifs_stats.num_t2renames));
49244 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
49245 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
49246 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
49247 - atomic_read(&tcon->stats.cifs_stats.num_ffirst),
49248 - atomic_read(&tcon->stats.cifs_stats.num_fnext),
49249 - atomic_read(&tcon->stats.cifs_stats.num_fclose));
49250 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
49251 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
49252 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
49253 #endif
49254 }
49255
49256 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
49257 index bceffe7..cd1ae59 100644
49258 --- a/fs/cifs/smb2ops.c
49259 +++ b/fs/cifs/smb2ops.c
49260 @@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
49261 #ifdef CONFIG_CIFS_STATS
49262 int i;
49263 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
49264 - atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49265 - atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49266 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49267 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49268 }
49269 #endif
49270 }
49271 @@ -284,66 +284,66 @@ static void
49272 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49273 {
49274 #ifdef CONFIG_CIFS_STATS
49275 - atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49276 - atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49277 + atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49278 + atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49279 seq_printf(m, "\nNegotiates: %d sent %d failed",
49280 - atomic_read(&sent[SMB2_NEGOTIATE_HE]),
49281 - atomic_read(&failed[SMB2_NEGOTIATE_HE]));
49282 + atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
49283 + atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
49284 seq_printf(m, "\nSessionSetups: %d sent %d failed",
49285 - atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
49286 - atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
49287 + atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
49288 + atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
49289 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
49290 seq_printf(m, "\nLogoffs: %d sent %d failed",
49291 - atomic_read(&sent[SMB2_LOGOFF_HE]),
49292 - atomic_read(&failed[SMB2_LOGOFF_HE]));
49293 + atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
49294 + atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
49295 seq_printf(m, "\nTreeConnects: %d sent %d failed",
49296 - atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
49297 - atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
49298 + atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
49299 + atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
49300 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
49301 - atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
49302 - atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
49303 + atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
49304 + atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
49305 seq_printf(m, "\nCreates: %d sent %d failed",
49306 - atomic_read(&sent[SMB2_CREATE_HE]),
49307 - atomic_read(&failed[SMB2_CREATE_HE]));
49308 + atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
49309 + atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
49310 seq_printf(m, "\nCloses: %d sent %d failed",
49311 - atomic_read(&sent[SMB2_CLOSE_HE]),
49312 - atomic_read(&failed[SMB2_CLOSE_HE]));
49313 + atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
49314 + atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
49315 seq_printf(m, "\nFlushes: %d sent %d failed",
49316 - atomic_read(&sent[SMB2_FLUSH_HE]),
49317 - atomic_read(&failed[SMB2_FLUSH_HE]));
49318 + atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
49319 + atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
49320 seq_printf(m, "\nReads: %d sent %d failed",
49321 - atomic_read(&sent[SMB2_READ_HE]),
49322 - atomic_read(&failed[SMB2_READ_HE]));
49323 + atomic_read_unchecked(&sent[SMB2_READ_HE]),
49324 + atomic_read_unchecked(&failed[SMB2_READ_HE]));
49325 seq_printf(m, "\nWrites: %d sent %d failed",
49326 - atomic_read(&sent[SMB2_WRITE_HE]),
49327 - atomic_read(&failed[SMB2_WRITE_HE]));
49328 + atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
49329 + atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
49330 seq_printf(m, "\nLocks: %d sent %d failed",
49331 - atomic_read(&sent[SMB2_LOCK_HE]),
49332 - atomic_read(&failed[SMB2_LOCK_HE]));
49333 + atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
49334 + atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
49335 seq_printf(m, "\nIOCTLs: %d sent %d failed",
49336 - atomic_read(&sent[SMB2_IOCTL_HE]),
49337 - atomic_read(&failed[SMB2_IOCTL_HE]));
49338 + atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
49339 + atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
49340 seq_printf(m, "\nCancels: %d sent %d failed",
49341 - atomic_read(&sent[SMB2_CANCEL_HE]),
49342 - atomic_read(&failed[SMB2_CANCEL_HE]));
49343 + atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
49344 + atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
49345 seq_printf(m, "\nEchos: %d sent %d failed",
49346 - atomic_read(&sent[SMB2_ECHO_HE]),
49347 - atomic_read(&failed[SMB2_ECHO_HE]));
49348 + atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
49349 + atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
49350 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
49351 - atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
49352 - atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
49353 + atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
49354 + atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
49355 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
49356 - atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
49357 - atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
49358 + atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
49359 + atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
49360 seq_printf(m, "\nQueryInfos: %d sent %d failed",
49361 - atomic_read(&sent[SMB2_QUERY_INFO_HE]),
49362 - atomic_read(&failed[SMB2_QUERY_INFO_HE]));
49363 + atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
49364 + atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
49365 seq_printf(m, "\nSetInfos: %d sent %d failed",
49366 - atomic_read(&sent[SMB2_SET_INFO_HE]),
49367 - atomic_read(&failed[SMB2_SET_INFO_HE]));
49368 + atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
49369 + atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
49370 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
49371 - atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
49372 - atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
49373 + atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
49374 + atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
49375 #endif
49376 }
49377
49378 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
49379 index 41d9d07..dbb4772 100644
49380 --- a/fs/cifs/smb2pdu.c
49381 +++ b/fs/cifs/smb2pdu.c
49382 @@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
49383 default:
49384 cERROR(1, "info level %u isn't supported",
49385 srch_inf->info_level);
49386 - rc = -EINVAL;
49387 - goto qdir_exit;
49388 + return -EINVAL;
49389 }
49390
49391 req->FileIndex = cpu_to_le32(index);
49392 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
49393 index 1da168c..8bc7ff6 100644
49394 --- a/fs/coda/cache.c
49395 +++ b/fs/coda/cache.c
49396 @@ -24,7 +24,7 @@
49397 #include "coda_linux.h"
49398 #include "coda_cache.h"
49399
49400 -static atomic_t permission_epoch = ATOMIC_INIT(0);
49401 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
49402
49403 /* replace or extend an acl cache hit */
49404 void coda_cache_enter(struct inode *inode, int mask)
49405 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
49406 struct coda_inode_info *cii = ITOC(inode);
49407
49408 spin_lock(&cii->c_lock);
49409 - cii->c_cached_epoch = atomic_read(&permission_epoch);
49410 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
49411 if (!uid_eq(cii->c_uid, current_fsuid())) {
49412 cii->c_uid = current_fsuid();
49413 cii->c_cached_perm = mask;
49414 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
49415 {
49416 struct coda_inode_info *cii = ITOC(inode);
49417 spin_lock(&cii->c_lock);
49418 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
49419 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
49420 spin_unlock(&cii->c_lock);
49421 }
49422
49423 /* remove all acl caches */
49424 void coda_cache_clear_all(struct super_block *sb)
49425 {
49426 - atomic_inc(&permission_epoch);
49427 + atomic_inc_unchecked(&permission_epoch);
49428 }
49429
49430
49431 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
49432 spin_lock(&cii->c_lock);
49433 hit = (mask & cii->c_cached_perm) == mask &&
49434 uid_eq(cii->c_uid, current_fsuid()) &&
49435 - cii->c_cached_epoch == atomic_read(&permission_epoch);
49436 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
49437 spin_unlock(&cii->c_lock);
49438
49439 return hit;
49440 diff --git a/fs/compat.c b/fs/compat.c
49441 index d487985..c9e04b1 100644
49442 --- a/fs/compat.c
49443 +++ b/fs/compat.c
49444 @@ -54,7 +54,7 @@
49445 #include <asm/ioctls.h>
49446 #include "internal.h"
49447
49448 -int compat_log = 1;
49449 +int compat_log = 0;
49450
49451 int compat_printk(const char *fmt, ...)
49452 {
49453 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
49454
49455 set_fs(KERNEL_DS);
49456 /* The __user pointer cast is valid because of the set_fs() */
49457 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
49458 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
49459 set_fs(oldfs);
49460 /* truncating is ok because it's a user address */
49461 if (!ret)
49462 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
49463 goto out;
49464
49465 ret = -EINVAL;
49466 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
49467 + if (nr_segs > UIO_MAXIOV)
49468 goto out;
49469 if (nr_segs > fast_segs) {
49470 ret = -ENOMEM;
49471 @@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
49472
49473 struct compat_readdir_callback {
49474 struct compat_old_linux_dirent __user *dirent;
49475 + struct file * file;
49476 int result;
49477 };
49478
49479 @@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
49480 buf->result = -EOVERFLOW;
49481 return -EOVERFLOW;
49482 }
49483 +
49484 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49485 + return 0;
49486 +
49487 buf->result++;
49488 dirent = buf->dirent;
49489 if (!access_ok(VERIFY_WRITE, dirent,
49490 @@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
49491
49492 buf.result = 0;
49493 buf.dirent = dirent;
49494 + buf.file = f.file;
49495
49496 error = vfs_readdir(f.file, compat_fillonedir, &buf);
49497 if (buf.result)
49498 @@ -901,6 +907,7 @@ struct compat_linux_dirent {
49499 struct compat_getdents_callback {
49500 struct compat_linux_dirent __user *current_dir;
49501 struct compat_linux_dirent __user *previous;
49502 + struct file * file;
49503 int count;
49504 int error;
49505 };
49506 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
49507 buf->error = -EOVERFLOW;
49508 return -EOVERFLOW;
49509 }
49510 +
49511 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49512 + return 0;
49513 +
49514 dirent = buf->previous;
49515 if (dirent) {
49516 if (__put_user(offset, &dirent->d_off))
49517 @@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49518 buf.previous = NULL;
49519 buf.count = count;
49520 buf.error = 0;
49521 + buf.file = f.file;
49522
49523 error = vfs_readdir(f.file, compat_filldir, &buf);
49524 if (error >= 0)
49525 @@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49526 struct compat_getdents_callback64 {
49527 struct linux_dirent64 __user *current_dir;
49528 struct linux_dirent64 __user *previous;
49529 + struct file * file;
49530 int count;
49531 int error;
49532 };
49533 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
49534 buf->error = -EINVAL; /* only used if we fail.. */
49535 if (reclen > buf->count)
49536 return -EINVAL;
49537 +
49538 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49539 + return 0;
49540 +
49541 dirent = buf->previous;
49542
49543 if (dirent) {
49544 @@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
49545 buf.previous = NULL;
49546 buf.count = count;
49547 buf.error = 0;
49548 + buf.file = f.file;
49549
49550 error = vfs_readdir(f.file, compat_filldir64, &buf);
49551 if (error >= 0)
49552 error = buf.error;
49553 lastdirent = buf.previous;
49554 if (lastdirent) {
49555 - typeof(lastdirent->d_off) d_off = f.file->f_pos;
49556 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
49557 if (__put_user_unaligned(d_off, &lastdirent->d_off))
49558 error = -EFAULT;
49559 else
49560 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
49561 index a81147e..20bf2b5 100644
49562 --- a/fs/compat_binfmt_elf.c
49563 +++ b/fs/compat_binfmt_elf.c
49564 @@ -30,11 +30,13 @@
49565 #undef elf_phdr
49566 #undef elf_shdr
49567 #undef elf_note
49568 +#undef elf_dyn
49569 #undef elf_addr_t
49570 #define elfhdr elf32_hdr
49571 #define elf_phdr elf32_phdr
49572 #define elf_shdr elf32_shdr
49573 #define elf_note elf32_note
49574 +#define elf_dyn Elf32_Dyn
49575 #define elf_addr_t Elf32_Addr
49576
49577 /*
49578 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
49579 index 3ced75f..1eeca06 100644
49580 --- a/fs/compat_ioctl.c
49581 +++ b/fs/compat_ioctl.c
49582 @@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
49583 return -EFAULT;
49584 if (__get_user(udata, &ss32->iomem_base))
49585 return -EFAULT;
49586 - ss.iomem_base = compat_ptr(udata);
49587 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
49588 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
49589 __get_user(ss.port_high, &ss32->port_high))
49590 return -EFAULT;
49591 @@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
49592 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
49593 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
49594 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
49595 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49596 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49597 return -EFAULT;
49598
49599 return ioctl_preallocate(file, p);
49600 @@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
49601 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
49602 {
49603 unsigned int a, b;
49604 - a = *(unsigned int *)p;
49605 - b = *(unsigned int *)q;
49606 + a = *(const unsigned int *)p;
49607 + b = *(const unsigned int *)q;
49608 if (a > b)
49609 return 1;
49610 if (a < b)
49611 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
49612 index 7aabc6a..34c1197 100644
49613 --- a/fs/configfs/dir.c
49614 +++ b/fs/configfs/dir.c
49615 @@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49616 }
49617 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
49618 struct configfs_dirent *next;
49619 - const char * name;
49620 + const unsigned char * name;
49621 + char d_name[sizeof(next->s_dentry->d_iname)];
49622 int len;
49623 struct inode *inode = NULL;
49624
49625 @@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49626 continue;
49627
49628 name = configfs_get_name(next);
49629 - len = strlen(name);
49630 + if (next->s_dentry && name == next->s_dentry->d_iname) {
49631 + len = next->s_dentry->d_name.len;
49632 + memcpy(d_name, name, len);
49633 + name = d_name;
49634 + } else
49635 + len = strlen(name);
49636
49637 /*
49638 * We'll have a dentry and an inode for
49639 diff --git a/fs/coredump.c b/fs/coredump.c
49640 index c647965..a77bff3 100644
49641 --- a/fs/coredump.c
49642 +++ b/fs/coredump.c
49643 @@ -52,7 +52,7 @@ struct core_name {
49644 char *corename;
49645 int used, size;
49646 };
49647 -static atomic_t call_count = ATOMIC_INIT(1);
49648 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
49649
49650 /* The maximal length of core_pattern is also specified in sysctl.c */
49651
49652 @@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
49653 {
49654 char *old_corename = cn->corename;
49655
49656 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
49657 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
49658 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
49659
49660 if (!cn->corename) {
49661 @@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
49662 int pid_in_pattern = 0;
49663 int err = 0;
49664
49665 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
49666 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
49667 cn->corename = kmalloc(cn->size, GFP_KERNEL);
49668 cn->used = 0;
49669
49670 @@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
49671 pipe = file_inode(file)->i_pipe;
49672
49673 pipe_lock(pipe);
49674 - pipe->readers++;
49675 - pipe->writers--;
49676 + atomic_inc(&pipe->readers);
49677 + atomic_dec(&pipe->writers);
49678
49679 - while ((pipe->readers > 1) && (!signal_pending(current))) {
49680 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49681 wake_up_interruptible_sync(&pipe->wait);
49682 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49683 pipe_wait(pipe);
49684 }
49685
49686 - pipe->readers--;
49687 - pipe->writers++;
49688 + atomic_dec(&pipe->readers);
49689 + atomic_inc(&pipe->writers);
49690 pipe_unlock(pipe);
49691
49692 }
49693 @@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
49694 int ispipe;
49695 struct files_struct *displaced;
49696 bool need_nonrelative = false;
49697 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49698 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49699 + long signr = siginfo->si_signo;
49700 struct coredump_params cprm = {
49701 .siginfo = siginfo,
49702 .regs = signal_pt_regs(),
49703 @@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
49704 .mm_flags = mm->flags,
49705 };
49706
49707 - audit_core_dumps(siginfo->si_signo);
49708 + audit_core_dumps(signr);
49709 +
49710 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49711 + gr_handle_brute_attach(cprm.mm_flags);
49712
49713 binfmt = mm->binfmt;
49714 if (!binfmt || !binfmt->core_dump)
49715 @@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
49716 need_nonrelative = true;
49717 }
49718
49719 - retval = coredump_wait(siginfo->si_signo, &core_state);
49720 + retval = coredump_wait(signr, &core_state);
49721 if (retval < 0)
49722 goto fail_creds;
49723
49724 @@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
49725 }
49726 cprm.limit = RLIM_INFINITY;
49727
49728 - dump_count = atomic_inc_return(&core_dump_count);
49729 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49730 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49731 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49732 task_tgid_vnr(current), current->comm);
49733 @@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
49734 } else {
49735 struct inode *inode;
49736
49737 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49738 +
49739 if (cprm.limit < binfmt->min_coredump)
49740 goto fail_unlock;
49741
49742 @@ -640,7 +646,7 @@ close_fail:
49743 filp_close(cprm.file, NULL);
49744 fail_dropcount:
49745 if (ispipe)
49746 - atomic_dec(&core_dump_count);
49747 + atomic_dec_unchecked(&core_dump_count);
49748 fail_unlock:
49749 kfree(cn.corename);
49750 fail_corename:
49751 @@ -659,7 +665,7 @@ fail:
49752 */
49753 int dump_write(struct file *file, const void *addr, int nr)
49754 {
49755 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
49756 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
49757 }
49758 EXPORT_SYMBOL(dump_write);
49759
49760 diff --git a/fs/dcache.c b/fs/dcache.c
49761 index e689268..f36956e 100644
49762 --- a/fs/dcache.c
49763 +++ b/fs/dcache.c
49764 @@ -3100,7 +3100,7 @@ void __init vfs_caches_init(unsigned long mempages)
49765 mempages -= reserve;
49766
49767 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
49768 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
49769 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
49770
49771 dcache_init();
49772 inode_init();
49773 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
49774 index 4888cb3..e0f7cf8 100644
49775 --- a/fs/debugfs/inode.c
49776 +++ b/fs/debugfs/inode.c
49777 @@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
49778 */
49779 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
49780 {
49781 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49782 + return __create_file(name, S_IFDIR | S_IRWXU,
49783 +#else
49784 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49785 +#endif
49786 parent, NULL, NULL);
49787 }
49788 EXPORT_SYMBOL_GPL(debugfs_create_dir);
49789 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
49790 index 5eab400..810a3f5 100644
49791 --- a/fs/ecryptfs/inode.c
49792 +++ b/fs/ecryptfs/inode.c
49793 @@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
49794 old_fs = get_fs();
49795 set_fs(get_ds());
49796 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
49797 - (char __user *)lower_buf,
49798 + (char __force_user *)lower_buf,
49799 PATH_MAX);
49800 set_fs(old_fs);
49801 if (rc < 0)
49802 @@ -706,7 +706,7 @@ out:
49803 static void
49804 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
49805 {
49806 - char *buf = nd_get_link(nd);
49807 + const char *buf = nd_get_link(nd);
49808 if (!IS_ERR(buf)) {
49809 /* Free the char* */
49810 kfree(buf);
49811 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
49812 index e4141f2..d8263e8 100644
49813 --- a/fs/ecryptfs/miscdev.c
49814 +++ b/fs/ecryptfs/miscdev.c
49815 @@ -304,7 +304,7 @@ check_list:
49816 goto out_unlock_msg_ctx;
49817 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
49818 if (msg_ctx->msg) {
49819 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
49820 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
49821 goto out_unlock_msg_ctx;
49822 i += packet_length_size;
49823 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
49824 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
49825 index 6a16053..2155147 100644
49826 --- a/fs/ecryptfs/read_write.c
49827 +++ b/fs/ecryptfs/read_write.c
49828 @@ -240,7 +240,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
49829 return -EIO;
49830 fs_save = get_fs();
49831 set_fs(get_ds());
49832 - rc = vfs_read(lower_file, data, size, &offset);
49833 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
49834 set_fs(fs_save);
49835 return rc;
49836 }
49837 diff --git a/fs/exec.c b/fs/exec.c
49838 index 6d56ff2..b56586d 100644
49839 --- a/fs/exec.c
49840 +++ b/fs/exec.c
49841 @@ -55,8 +55,20 @@
49842 #include <linux/pipe_fs_i.h>
49843 #include <linux/oom.h>
49844 #include <linux/compat.h>
49845 +#include <linux/random.h>
49846 +#include <linux/seq_file.h>
49847 +#include <linux/coredump.h>
49848 +#include <linux/mman.h>
49849 +
49850 +#ifdef CONFIG_PAX_REFCOUNT
49851 +#include <linux/kallsyms.h>
49852 +#include <linux/kdebug.h>
49853 +#endif
49854 +
49855 +#include <trace/events/fs.h>
49856
49857 #include <asm/uaccess.h>
49858 +#include <asm/sections.h>
49859 #include <asm/mmu_context.h>
49860 #include <asm/tlb.h>
49861
49862 @@ -66,6 +78,18 @@
49863
49864 #include <trace/events/sched.h>
49865
49866 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49867 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
49868 +{
49869 + pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
49870 +}
49871 +#endif
49872 +
49873 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
49874 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
49875 +EXPORT_SYMBOL(pax_set_initial_flags_func);
49876 +#endif
49877 +
49878 int suid_dumpable = 0;
49879
49880 static LIST_HEAD(formats);
49881 @@ -75,8 +99,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
49882 {
49883 BUG_ON(!fmt);
49884 write_lock(&binfmt_lock);
49885 - insert ? list_add(&fmt->lh, &formats) :
49886 - list_add_tail(&fmt->lh, &formats);
49887 + insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
49888 + pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
49889 write_unlock(&binfmt_lock);
49890 }
49891
49892 @@ -85,7 +109,7 @@ EXPORT_SYMBOL(__register_binfmt);
49893 void unregister_binfmt(struct linux_binfmt * fmt)
49894 {
49895 write_lock(&binfmt_lock);
49896 - list_del(&fmt->lh);
49897 + pax_list_del((struct list_head *)&fmt->lh);
49898 write_unlock(&binfmt_lock);
49899 }
49900
49901 @@ -180,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49902 int write)
49903 {
49904 struct page *page;
49905 - int ret;
49906
49907 -#ifdef CONFIG_STACK_GROWSUP
49908 - if (write) {
49909 - ret = expand_downwards(bprm->vma, pos);
49910 - if (ret < 0)
49911 - return NULL;
49912 - }
49913 -#endif
49914 - ret = get_user_pages(current, bprm->mm, pos,
49915 - 1, write, 1, &page, NULL);
49916 - if (ret <= 0)
49917 + if (0 > expand_downwards(bprm->vma, pos))
49918 + return NULL;
49919 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
49920 return NULL;
49921
49922 if (write) {
49923 @@ -207,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49924 if (size <= ARG_MAX)
49925 return page;
49926
49927 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49928 + // only allow 512KB for argv+env on suid/sgid binaries
49929 + // to prevent easy ASLR exhaustion
49930 + if (((!uid_eq(bprm->cred->euid, current_euid())) ||
49931 + (!gid_eq(bprm->cred->egid, current_egid()))) &&
49932 + (size > (512 * 1024))) {
49933 + put_page(page);
49934 + return NULL;
49935 + }
49936 +#endif
49937 +
49938 /*
49939 * Limit to 1/4-th the stack size for the argv+env strings.
49940 * This ensures that:
49941 @@ -266,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49942 vma->vm_end = STACK_TOP_MAX;
49943 vma->vm_start = vma->vm_end - PAGE_SIZE;
49944 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
49945 +
49946 +#ifdef CONFIG_PAX_SEGMEXEC
49947 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
49948 +#endif
49949 +
49950 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
49951 INIT_LIST_HEAD(&vma->anon_vma_chain);
49952
49953 @@ -276,6 +308,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49954 mm->stack_vm = mm->total_vm = 1;
49955 up_write(&mm->mmap_sem);
49956 bprm->p = vma->vm_end - sizeof(void *);
49957 +
49958 +#ifdef CONFIG_PAX_RANDUSTACK
49959 + if (randomize_va_space)
49960 + bprm->p ^= random32() & ~PAGE_MASK;
49961 +#endif
49962 +
49963 return 0;
49964 err:
49965 up_write(&mm->mmap_sem);
49966 @@ -396,7 +434,7 @@ struct user_arg_ptr {
49967 } ptr;
49968 };
49969
49970 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49971 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49972 {
49973 const char __user *native;
49974
49975 @@ -405,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49976 compat_uptr_t compat;
49977
49978 if (get_user(compat, argv.ptr.compat + nr))
49979 - return ERR_PTR(-EFAULT);
49980 + return (const char __force_user *)ERR_PTR(-EFAULT);
49981
49982 return compat_ptr(compat);
49983 }
49984 #endif
49985
49986 if (get_user(native, argv.ptr.native + nr))
49987 - return ERR_PTR(-EFAULT);
49988 + return (const char __force_user *)ERR_PTR(-EFAULT);
49989
49990 return native;
49991 }
49992 @@ -431,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
49993 if (!p)
49994 break;
49995
49996 - if (IS_ERR(p))
49997 + if (IS_ERR((const char __force_kernel *)p))
49998 return -EFAULT;
49999
50000 if (i >= max)
50001 @@ -466,7 +504,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50002
50003 ret = -EFAULT;
50004 str = get_user_arg_ptr(argv, argc);
50005 - if (IS_ERR(str))
50006 + if (IS_ERR((const char __force_kernel *)str))
50007 goto out;
50008
50009 len = strnlen_user(str, MAX_ARG_STRLEN);
50010 @@ -548,7 +586,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50011 int r;
50012 mm_segment_t oldfs = get_fs();
50013 struct user_arg_ptr argv = {
50014 - .ptr.native = (const char __user *const __user *)__argv,
50015 + .ptr.native = (const char __force_user *const __force_user *)__argv,
50016 };
50017
50018 set_fs(KERNEL_DS);
50019 @@ -583,7 +621,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50020 unsigned long new_end = old_end - shift;
50021 struct mmu_gather tlb;
50022
50023 - BUG_ON(new_start > new_end);
50024 + if (new_start >= new_end || new_start < mmap_min_addr)
50025 + return -ENOMEM;
50026
50027 /*
50028 * ensure there are no vmas between where we want to go
50029 @@ -592,6 +631,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50030 if (vma != find_vma(mm, new_start))
50031 return -EFAULT;
50032
50033 +#ifdef CONFIG_PAX_SEGMEXEC
50034 + BUG_ON(pax_find_mirror_vma(vma));
50035 +#endif
50036 +
50037 /*
50038 * cover the whole range: [new_start, old_end)
50039 */
50040 @@ -672,10 +715,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50041 stack_top = arch_align_stack(stack_top);
50042 stack_top = PAGE_ALIGN(stack_top);
50043
50044 - if (unlikely(stack_top < mmap_min_addr) ||
50045 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50046 - return -ENOMEM;
50047 -
50048 stack_shift = vma->vm_end - stack_top;
50049
50050 bprm->p -= stack_shift;
50051 @@ -687,8 +726,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50052 bprm->exec -= stack_shift;
50053
50054 down_write(&mm->mmap_sem);
50055 +
50056 + /* Move stack pages down in memory. */
50057 + if (stack_shift) {
50058 + ret = shift_arg_pages(vma, stack_shift);
50059 + if (ret)
50060 + goto out_unlock;
50061 + }
50062 +
50063 vm_flags = VM_STACK_FLAGS;
50064
50065 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50066 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50067 + vm_flags &= ~VM_EXEC;
50068 +
50069 +#ifdef CONFIG_PAX_MPROTECT
50070 + if (mm->pax_flags & MF_PAX_MPROTECT)
50071 + vm_flags &= ~VM_MAYEXEC;
50072 +#endif
50073 +
50074 + }
50075 +#endif
50076 +
50077 /*
50078 * Adjust stack execute permissions; explicitly enable for
50079 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50080 @@ -707,13 +766,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50081 goto out_unlock;
50082 BUG_ON(prev != vma);
50083
50084 - /* Move stack pages down in memory. */
50085 - if (stack_shift) {
50086 - ret = shift_arg_pages(vma, stack_shift);
50087 - if (ret)
50088 - goto out_unlock;
50089 - }
50090 -
50091 /* mprotect_fixup is overkill to remove the temporary stack flags */
50092 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50093
50094 @@ -737,6 +789,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50095 #endif
50096 current->mm->start_stack = bprm->p;
50097 ret = expand_stack(vma, stack_base);
50098 +
50099 +#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50100 + if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50101 + unsigned long size;
50102 + vm_flags_t vm_flags;
50103 +
50104 + size = STACK_TOP - vma->vm_end;
50105 + vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50106 +
50107 + ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
50108 +
50109 +#ifdef CONFIG_X86
50110 + if (!ret) {
50111 + size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50112 + ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
50113 + }
50114 +#endif
50115 +
50116 + }
50117 +#endif
50118 +
50119 if (ret)
50120 ret = -EFAULT;
50121
50122 @@ -772,6 +845,8 @@ struct file *open_exec(const char *name)
50123
50124 fsnotify_open(file);
50125
50126 + trace_open_exec(name);
50127 +
50128 err = deny_write_access(file);
50129 if (err)
50130 goto exit;
50131 @@ -795,7 +870,7 @@ int kernel_read(struct file *file, loff_t offset,
50132 old_fs = get_fs();
50133 set_fs(get_ds());
50134 /* The cast to a user pointer is valid due to the set_fs() */
50135 - result = vfs_read(file, (void __user *)addr, count, &pos);
50136 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
50137 set_fs(old_fs);
50138 return result;
50139 }
50140 @@ -1250,7 +1325,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50141 }
50142 rcu_read_unlock();
50143
50144 - if (p->fs->users > n_fs) {
50145 + if (atomic_read(&p->fs->users) > n_fs) {
50146 bprm->unsafe |= LSM_UNSAFE_SHARE;
50147 } else {
50148 res = -EAGAIN;
50149 @@ -1450,6 +1525,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50150
50151 EXPORT_SYMBOL(search_binary_handler);
50152
50153 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50154 +static DEFINE_PER_CPU(u64, exec_counter);
50155 +static int __init init_exec_counters(void)
50156 +{
50157 + unsigned int cpu;
50158 +
50159 + for_each_possible_cpu(cpu) {
50160 + per_cpu(exec_counter, cpu) = (u64)cpu;
50161 + }
50162 +
50163 + return 0;
50164 +}
50165 +early_initcall(init_exec_counters);
50166 +static inline void increment_exec_counter(void)
50167 +{
50168 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
50169 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
50170 +}
50171 +#else
50172 +static inline void increment_exec_counter(void) {}
50173 +#endif
50174 +
50175 +extern void gr_handle_exec_args(struct linux_binprm *bprm,
50176 + struct user_arg_ptr argv);
50177 +
50178 /*
50179 * sys_execve() executes a new program.
50180 */
50181 @@ -1457,6 +1557,11 @@ static int do_execve_common(const char *filename,
50182 struct user_arg_ptr argv,
50183 struct user_arg_ptr envp)
50184 {
50185 +#ifdef CONFIG_GRKERNSEC
50186 + struct file *old_exec_file;
50187 + struct acl_subject_label *old_acl;
50188 + struct rlimit old_rlim[RLIM_NLIMITS];
50189 +#endif
50190 struct linux_binprm *bprm;
50191 struct file *file;
50192 struct files_struct *displaced;
50193 @@ -1464,6 +1569,8 @@ static int do_execve_common(const char *filename,
50194 int retval;
50195 const struct cred *cred = current_cred();
50196
50197 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
50198 +
50199 /*
50200 * We move the actual failure in case of RLIMIT_NPROC excess from
50201 * set*uid() to execve() because too many poorly written programs
50202 @@ -1504,12 +1611,27 @@ static int do_execve_common(const char *filename,
50203 if (IS_ERR(file))
50204 goto out_unmark;
50205
50206 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
50207 + retval = -EPERM;
50208 + goto out_file;
50209 + }
50210 +
50211 sched_exec();
50212
50213 bprm->file = file;
50214 bprm->filename = filename;
50215 bprm->interp = filename;
50216
50217 + if (gr_process_user_ban()) {
50218 + retval = -EPERM;
50219 + goto out_file;
50220 + }
50221 +
50222 + if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
50223 + retval = -EACCES;
50224 + goto out_file;
50225 + }
50226 +
50227 retval = bprm_mm_init(bprm);
50228 if (retval)
50229 goto out_file;
50230 @@ -1526,24 +1648,65 @@ static int do_execve_common(const char *filename,
50231 if (retval < 0)
50232 goto out;
50233
50234 +#ifdef CONFIG_GRKERNSEC
50235 + old_acl = current->acl;
50236 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
50237 + old_exec_file = current->exec_file;
50238 + get_file(file);
50239 + current->exec_file = file;
50240 +#endif
50241 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50242 + /* limit suid stack to 8MB
50243 + * we saved the old limits above and will restore them if this exec fails
50244 + */
50245 + if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
50246 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
50247 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
50248 +#endif
50249 +
50250 + if (!gr_tpe_allow(file)) {
50251 + retval = -EACCES;
50252 + goto out_fail;
50253 + }
50254 +
50255 + if (gr_check_crash_exec(file)) {
50256 + retval = -EACCES;
50257 + goto out_fail;
50258 + }
50259 +
50260 + retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
50261 + bprm->unsafe);
50262 + if (retval < 0)
50263 + goto out_fail;
50264 +
50265 retval = copy_strings_kernel(1, &bprm->filename, bprm);
50266 if (retval < 0)
50267 - goto out;
50268 + goto out_fail;
50269
50270 bprm->exec = bprm->p;
50271 retval = copy_strings(bprm->envc, envp, bprm);
50272 if (retval < 0)
50273 - goto out;
50274 + goto out_fail;
50275
50276 retval = copy_strings(bprm->argc, argv, bprm);
50277 if (retval < 0)
50278 - goto out;
50279 + goto out_fail;
50280 +
50281 + gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
50282 +
50283 + gr_handle_exec_args(bprm, argv);
50284
50285 retval = search_binary_handler(bprm);
50286 if (retval < 0)
50287 - goto out;
50288 + goto out_fail;
50289 +#ifdef CONFIG_GRKERNSEC
50290 + if (old_exec_file)
50291 + fput(old_exec_file);
50292 +#endif
50293
50294 /* execve succeeded */
50295 +
50296 + increment_exec_counter();
50297 current->fs->in_exec = 0;
50298 current->in_execve = 0;
50299 acct_update_integrals(current);
50300 @@ -1552,6 +1715,14 @@ static int do_execve_common(const char *filename,
50301 put_files_struct(displaced);
50302 return retval;
50303
50304 +out_fail:
50305 +#ifdef CONFIG_GRKERNSEC
50306 + current->acl = old_acl;
50307 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
50308 + fput(current->exec_file);
50309 + current->exec_file = old_exec_file;
50310 +#endif
50311 +
50312 out:
50313 if (bprm->mm) {
50314 acct_arg_size(bprm, 0);
50315 @@ -1700,3 +1871,283 @@ asmlinkage long compat_sys_execve(const char __user * filename,
50316 return error;
50317 }
50318 #endif
50319 +
50320 +int pax_check_flags(unsigned long *flags)
50321 +{
50322 + int retval = 0;
50323 +
50324 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
50325 + if (*flags & MF_PAX_SEGMEXEC)
50326 + {
50327 + *flags &= ~MF_PAX_SEGMEXEC;
50328 + retval = -EINVAL;
50329 + }
50330 +#endif
50331 +
50332 + if ((*flags & MF_PAX_PAGEEXEC)
50333 +
50334 +#ifdef CONFIG_PAX_PAGEEXEC
50335 + && (*flags & MF_PAX_SEGMEXEC)
50336 +#endif
50337 +
50338 + )
50339 + {
50340 + *flags &= ~MF_PAX_PAGEEXEC;
50341 + retval = -EINVAL;
50342 + }
50343 +
50344 + if ((*flags & MF_PAX_MPROTECT)
50345 +
50346 +#ifdef CONFIG_PAX_MPROTECT
50347 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50348 +#endif
50349 +
50350 + )
50351 + {
50352 + *flags &= ~MF_PAX_MPROTECT;
50353 + retval = -EINVAL;
50354 + }
50355 +
50356 + if ((*flags & MF_PAX_EMUTRAMP)
50357 +
50358 +#ifdef CONFIG_PAX_EMUTRAMP
50359 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50360 +#endif
50361 +
50362 + )
50363 + {
50364 + *flags &= ~MF_PAX_EMUTRAMP;
50365 + retval = -EINVAL;
50366 + }
50367 +
50368 + return retval;
50369 +}
50370 +
50371 +EXPORT_SYMBOL(pax_check_flags);
50372 +
50373 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50374 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
50375 +{
50376 + struct task_struct *tsk = current;
50377 + struct mm_struct *mm = current->mm;
50378 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
50379 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
50380 + char *path_exec = NULL;
50381 + char *path_fault = NULL;
50382 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
50383 + siginfo_t info = { };
50384 +
50385 + if (buffer_exec && buffer_fault) {
50386 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
50387 +
50388 + down_read(&mm->mmap_sem);
50389 + vma = mm->mmap;
50390 + while (vma && (!vma_exec || !vma_fault)) {
50391 + if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
50392 + vma_exec = vma;
50393 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
50394 + vma_fault = vma;
50395 + vma = vma->vm_next;
50396 + }
50397 + if (vma_exec) {
50398 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
50399 + if (IS_ERR(path_exec))
50400 + path_exec = "<path too long>";
50401 + else {
50402 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
50403 + if (path_exec) {
50404 + *path_exec = 0;
50405 + path_exec = buffer_exec;
50406 + } else
50407 + path_exec = "<path too long>";
50408 + }
50409 + }
50410 + if (vma_fault) {
50411 + start = vma_fault->vm_start;
50412 + end = vma_fault->vm_end;
50413 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
50414 + if (vma_fault->vm_file) {
50415 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
50416 + if (IS_ERR(path_fault))
50417 + path_fault = "<path too long>";
50418 + else {
50419 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
50420 + if (path_fault) {
50421 + *path_fault = 0;
50422 + path_fault = buffer_fault;
50423 + } else
50424 + path_fault = "<path too long>";
50425 + }
50426 + } else
50427 + path_fault = "<anonymous mapping>";
50428 + }
50429 + up_read(&mm->mmap_sem);
50430 + }
50431 + if (tsk->signal->curr_ip)
50432 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
50433 + else
50434 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
50435 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
50436 + from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
50437 + free_page((unsigned long)buffer_exec);
50438 + free_page((unsigned long)buffer_fault);
50439 + pax_report_insns(regs, pc, sp);
50440 + info.si_signo = SIGKILL;
50441 + info.si_errno = 0;
50442 + info.si_code = SI_KERNEL;
50443 + info.si_pid = 0;
50444 + info.si_uid = 0;
50445 + do_coredump(&info);
50446 +}
50447 +#endif
50448 +
50449 +#ifdef CONFIG_PAX_REFCOUNT
50450 +void pax_report_refcount_overflow(struct pt_regs *regs)
50451 +{
50452 + if (current->signal->curr_ip)
50453 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
50454 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
50455 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
50456 + else
50457 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
50458 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
50459 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
50460 + show_regs(regs);
50461 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
50462 +}
50463 +#endif
50464 +
50465 +#ifdef CONFIG_PAX_USERCOPY
50466 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
50467 +static noinline int check_stack_object(const void *obj, unsigned long len)
50468 +{
50469 + const void * const stack = task_stack_page(current);
50470 + const void * const stackend = stack + THREAD_SIZE;
50471 +
50472 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50473 + const void *frame = NULL;
50474 + const void *oldframe;
50475 +#endif
50476 +
50477 + if (obj + len < obj)
50478 + return -1;
50479 +
50480 + if (obj + len <= stack || stackend <= obj)
50481 + return 0;
50482 +
50483 + if (obj < stack || stackend < obj + len)
50484 + return -1;
50485 +
50486 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50487 + oldframe = __builtin_frame_address(1);
50488 + if (oldframe)
50489 + frame = __builtin_frame_address(2);
50490 + /*
50491 + low ----------------------------------------------> high
50492 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
50493 + ^----------------^
50494 + allow copies only within here
50495 + */
50496 + while (stack <= frame && frame < stackend) {
50497 + /* if obj + len extends past the last frame, this
50498 + check won't pass and the next frame will be 0,
50499 + causing us to bail out and correctly report
50500 + the copy as invalid
50501 + */
50502 + if (obj + len <= frame)
50503 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
50504 + oldframe = frame;
50505 + frame = *(const void * const *)frame;
50506 + }
50507 + return -1;
50508 +#else
50509 + return 1;
50510 +#endif
50511 +}
50512 +
50513 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
50514 +{
50515 + if (current->signal->curr_ip)
50516 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50517 + &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
50518 + else
50519 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50520 + to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
50521 + dump_stack();
50522 + gr_handle_kernel_exploit();
50523 + do_group_exit(SIGKILL);
50524 +}
50525 +#endif
50526 +
50527 +#ifdef CONFIG_PAX_USERCOPY
50528 +static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
50529 +{
50530 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
50531 + unsigned long textlow = ktla_ktva((unsigned long)_stext);
50532 +#ifdef CONFIG_MODULES
50533 + unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
50534 +#else
50535 + unsigned long texthigh = ktla_ktva((unsigned long)_etext);
50536 +#endif
50537 +
50538 +#else
50539 + unsigned long textlow = _stext;
50540 + unsigned long texthigh = _etext;
50541 +#endif
50542 +
50543 + if (high <= textlow || low > texthigh)
50544 + return false;
50545 + else
50546 + return true;
50547 +}
50548 +#endif
50549 +
50550 +void __check_object_size(const void *ptr, unsigned long n, bool to_user)
50551 +{
50552 +
50553 +#ifdef CONFIG_PAX_USERCOPY
50554 + const char *type;
50555 +
50556 + if (!n)
50557 + return;
50558 +
50559 + type = check_heap_object(ptr, n);
50560 + if (!type) {
50561 + int ret = check_stack_object(ptr, n);
50562 + if (ret == 1 || ret == 2)
50563 + return;
50564 + if (ret == 0) {
50565 + if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
50566 + type = "<kernel text>";
50567 + else
50568 + return;
50569 + } else
50570 + type = "<process stack>";
50571 + }
50572 +
50573 + pax_report_usercopy(ptr, n, to_user, type);
50574 +#endif
50575 +
50576 +}
50577 +EXPORT_SYMBOL(__check_object_size);
50578 +
50579 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
50580 +void pax_track_stack(void)
50581 +{
50582 + unsigned long sp = (unsigned long)&sp;
50583 + if (sp < current_thread_info()->lowest_stack &&
50584 + sp > (unsigned long)task_stack_page(current))
50585 + current_thread_info()->lowest_stack = sp;
50586 +}
50587 +EXPORT_SYMBOL(pax_track_stack);
50588 +#endif
50589 +
50590 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
50591 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
50592 +{
50593 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
50594 + dump_stack();
50595 + do_group_exit(SIGKILL);
50596 +}
50597 +EXPORT_SYMBOL(report_size_overflow);
50598 +#endif
50599 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
50600 index 9f9992b..8b59411 100644
50601 --- a/fs/ext2/balloc.c
50602 +++ b/fs/ext2/balloc.c
50603 @@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
50604
50605 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50606 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50607 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50608 + if (free_blocks < root_blocks + 1 &&
50609 !uid_eq(sbi->s_resuid, current_fsuid()) &&
50610 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50611 - !in_group_p (sbi->s_resgid))) {
50612 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50613 return 0;
50614 }
50615 return 1;
50616 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
50617 index 22548f5..41521d8 100644
50618 --- a/fs/ext3/balloc.c
50619 +++ b/fs/ext3/balloc.c
50620 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
50621
50622 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50623 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50624 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50625 + if (free_blocks < root_blocks + 1 &&
50626 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
50627 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50628 - !in_group_p (sbi->s_resgid))) {
50629 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50630 return 0;
50631 }
50632 return 1;
50633 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
50634 index 92e68b3..115d987 100644
50635 --- a/fs/ext4/balloc.c
50636 +++ b/fs/ext4/balloc.c
50637 @@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
50638 /* Hm, nope. Are (enough) root reserved clusters available? */
50639 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
50640 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
50641 - capable(CAP_SYS_RESOURCE) ||
50642 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
50643 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
50644 + capable_nolog(CAP_SYS_RESOURCE)) {
50645
50646 if (free_clusters >= (nclusters + dirty_clusters))
50647 return 1;
50648 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
50649 index 3b83cd6..0f34dcd 100644
50650 --- a/fs/ext4/ext4.h
50651 +++ b/fs/ext4/ext4.h
50652 @@ -1254,19 +1254,19 @@ struct ext4_sb_info {
50653 unsigned long s_mb_last_start;
50654
50655 /* stats for buddy allocator */
50656 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
50657 - atomic_t s_bal_success; /* we found long enough chunks */
50658 - atomic_t s_bal_allocated; /* in blocks */
50659 - atomic_t s_bal_ex_scanned; /* total extents scanned */
50660 - atomic_t s_bal_goals; /* goal hits */
50661 - atomic_t s_bal_breaks; /* too long searches */
50662 - atomic_t s_bal_2orders; /* 2^order hits */
50663 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
50664 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
50665 + atomic_unchecked_t s_bal_allocated; /* in blocks */
50666 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
50667 + atomic_unchecked_t s_bal_goals; /* goal hits */
50668 + atomic_unchecked_t s_bal_breaks; /* too long searches */
50669 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
50670 spinlock_t s_bal_lock;
50671 unsigned long s_mb_buddies_generated;
50672 unsigned long long s_mb_generation_time;
50673 - atomic_t s_mb_lost_chunks;
50674 - atomic_t s_mb_preallocated;
50675 - atomic_t s_mb_discarded;
50676 + atomic_unchecked_t s_mb_lost_chunks;
50677 + atomic_unchecked_t s_mb_preallocated;
50678 + atomic_unchecked_t s_mb_discarded;
50679 atomic_t s_lock_busy;
50680
50681 /* locality groups */
50682 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
50683 index cf3025c..cac6011 100644
50684 --- a/fs/ext4/mballoc.c
50685 +++ b/fs/ext4/mballoc.c
50686 @@ -1754,7 +1754,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
50687 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
50688
50689 if (EXT4_SB(sb)->s_mb_stats)
50690 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
50691 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
50692
50693 break;
50694 }
50695 @@ -2055,7 +2055,7 @@ repeat:
50696 ac->ac_status = AC_STATUS_CONTINUE;
50697 ac->ac_flags |= EXT4_MB_HINT_FIRST;
50698 cr = 3;
50699 - atomic_inc(&sbi->s_mb_lost_chunks);
50700 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
50701 goto repeat;
50702 }
50703 }
50704 @@ -2563,25 +2563,25 @@ int ext4_mb_release(struct super_block *sb)
50705 if (sbi->s_mb_stats) {
50706 ext4_msg(sb, KERN_INFO,
50707 "mballoc: %u blocks %u reqs (%u success)",
50708 - atomic_read(&sbi->s_bal_allocated),
50709 - atomic_read(&sbi->s_bal_reqs),
50710 - atomic_read(&sbi->s_bal_success));
50711 + atomic_read_unchecked(&sbi->s_bal_allocated),
50712 + atomic_read_unchecked(&sbi->s_bal_reqs),
50713 + atomic_read_unchecked(&sbi->s_bal_success));
50714 ext4_msg(sb, KERN_INFO,
50715 "mballoc: %u extents scanned, %u goal hits, "
50716 "%u 2^N hits, %u breaks, %u lost",
50717 - atomic_read(&sbi->s_bal_ex_scanned),
50718 - atomic_read(&sbi->s_bal_goals),
50719 - atomic_read(&sbi->s_bal_2orders),
50720 - atomic_read(&sbi->s_bal_breaks),
50721 - atomic_read(&sbi->s_mb_lost_chunks));
50722 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
50723 + atomic_read_unchecked(&sbi->s_bal_goals),
50724 + atomic_read_unchecked(&sbi->s_bal_2orders),
50725 + atomic_read_unchecked(&sbi->s_bal_breaks),
50726 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
50727 ext4_msg(sb, KERN_INFO,
50728 "mballoc: %lu generated and it took %Lu",
50729 sbi->s_mb_buddies_generated,
50730 sbi->s_mb_generation_time);
50731 ext4_msg(sb, KERN_INFO,
50732 "mballoc: %u preallocated, %u discarded",
50733 - atomic_read(&sbi->s_mb_preallocated),
50734 - atomic_read(&sbi->s_mb_discarded));
50735 + atomic_read_unchecked(&sbi->s_mb_preallocated),
50736 + atomic_read_unchecked(&sbi->s_mb_discarded));
50737 }
50738
50739 free_percpu(sbi->s_locality_groups);
50740 @@ -3035,16 +3035,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
50741 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
50742
50743 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
50744 - atomic_inc(&sbi->s_bal_reqs);
50745 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50746 + atomic_inc_unchecked(&sbi->s_bal_reqs);
50747 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50748 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
50749 - atomic_inc(&sbi->s_bal_success);
50750 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
50751 + atomic_inc_unchecked(&sbi->s_bal_success);
50752 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
50753 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
50754 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
50755 - atomic_inc(&sbi->s_bal_goals);
50756 + atomic_inc_unchecked(&sbi->s_bal_goals);
50757 if (ac->ac_found > sbi->s_mb_max_to_scan)
50758 - atomic_inc(&sbi->s_bal_breaks);
50759 + atomic_inc_unchecked(&sbi->s_bal_breaks);
50760 }
50761
50762 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
50763 @@ -3444,7 +3444,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
50764 trace_ext4_mb_new_inode_pa(ac, pa);
50765
50766 ext4_mb_use_inode_pa(ac, pa);
50767 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
50768 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
50769
50770 ei = EXT4_I(ac->ac_inode);
50771 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50772 @@ -3504,7 +3504,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
50773 trace_ext4_mb_new_group_pa(ac, pa);
50774
50775 ext4_mb_use_group_pa(ac, pa);
50776 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50777 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50778
50779 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50780 lg = ac->ac_lg;
50781 @@ -3593,7 +3593,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
50782 * from the bitmap and continue.
50783 */
50784 }
50785 - atomic_add(free, &sbi->s_mb_discarded);
50786 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
50787
50788 return err;
50789 }
50790 @@ -3611,7 +3611,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
50791 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
50792 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
50793 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
50794 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50795 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50796 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
50797
50798 return 0;
50799 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
50800 index febbe0e..782c4fd 100644
50801 --- a/fs/ext4/super.c
50802 +++ b/fs/ext4/super.c
50803 @@ -2380,7 +2380,7 @@ struct ext4_attr {
50804 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
50805 const char *, size_t);
50806 int offset;
50807 -};
50808 +} __do_const;
50809
50810 static int parse_strtoul(const char *buf,
50811 unsigned long max, unsigned long *value)
50812 diff --git a/fs/fcntl.c b/fs/fcntl.c
50813 index 6599222..e7bf0de 100644
50814 --- a/fs/fcntl.c
50815 +++ b/fs/fcntl.c
50816 @@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
50817 if (err)
50818 return err;
50819
50820 + if (gr_handle_chroot_fowner(pid, type))
50821 + return -ENOENT;
50822 + if (gr_check_protected_task_fowner(pid, type))
50823 + return -EACCES;
50824 +
50825 f_modown(filp, pid, type, force);
50826 return 0;
50827 }
50828 diff --git a/fs/fhandle.c b/fs/fhandle.c
50829 index 999ff5c..41f4109 100644
50830 --- a/fs/fhandle.c
50831 +++ b/fs/fhandle.c
50832 @@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
50833 } else
50834 retval = 0;
50835 /* copy the mount id */
50836 - if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
50837 - sizeof(*mnt_id)) ||
50838 + if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
50839 copy_to_user(ufh, handle,
50840 sizeof(struct file_handle) + handle_bytes))
50841 retval = -EFAULT;
50842 diff --git a/fs/fifo.c b/fs/fifo.c
50843 index cf6f434..3d7942c 100644
50844 --- a/fs/fifo.c
50845 +++ b/fs/fifo.c
50846 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
50847 */
50848 filp->f_op = &read_pipefifo_fops;
50849 pipe->r_counter++;
50850 - if (pipe->readers++ == 0)
50851 + if (atomic_inc_return(&pipe->readers) == 1)
50852 wake_up_partner(inode);
50853
50854 - if (!pipe->writers) {
50855 + if (!atomic_read(&pipe->writers)) {
50856 if ((filp->f_flags & O_NONBLOCK)) {
50857 /* suppress POLLHUP until we have
50858 * seen a writer */
50859 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
50860 * errno=ENXIO when there is no process reading the FIFO.
50861 */
50862 ret = -ENXIO;
50863 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
50864 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
50865 goto err;
50866
50867 filp->f_op = &write_pipefifo_fops;
50868 pipe->w_counter++;
50869 - if (!pipe->writers++)
50870 + if (atomic_inc_return(&pipe->writers) == 1)
50871 wake_up_partner(inode);
50872
50873 - if (!pipe->readers) {
50874 + if (!atomic_read(&pipe->readers)) {
50875 if (wait_for_partner(inode, &pipe->r_counter))
50876 goto err_wr;
50877 }
50878 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
50879 */
50880 filp->f_op = &rdwr_pipefifo_fops;
50881
50882 - pipe->readers++;
50883 - pipe->writers++;
50884 + atomic_inc(&pipe->readers);
50885 + atomic_inc(&pipe->writers);
50886 pipe->r_counter++;
50887 pipe->w_counter++;
50888 - if (pipe->readers == 1 || pipe->writers == 1)
50889 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
50890 wake_up_partner(inode);
50891 break;
50892
50893 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
50894 return 0;
50895
50896 err_rd:
50897 - if (!--pipe->readers)
50898 + if (atomic_dec_and_test(&pipe->readers))
50899 wake_up_interruptible(&pipe->wait);
50900 ret = -ERESTARTSYS;
50901 goto err;
50902
50903 err_wr:
50904 - if (!--pipe->writers)
50905 + if (atomic_dec_and_test(&pipe->writers))
50906 wake_up_interruptible(&pipe->wait);
50907 ret = -ERESTARTSYS;
50908 goto err;
50909
50910 err:
50911 - if (!pipe->readers && !pipe->writers)
50912 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
50913 free_pipe_info(inode);
50914
50915 err_nocleanup:
50916 diff --git a/fs/file.c b/fs/file.c
50917 index 3906d95..5fe379b 100644
50918 --- a/fs/file.c
50919 +++ b/fs/file.c
50920 @@ -16,6 +16,7 @@
50921 #include <linux/slab.h>
50922 #include <linux/vmalloc.h>
50923 #include <linux/file.h>
50924 +#include <linux/security.h>
50925 #include <linux/fdtable.h>
50926 #include <linux/bitops.h>
50927 #include <linux/interrupt.h>
50928 @@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
50929 if (!file)
50930 return __close_fd(files, fd);
50931
50932 + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
50933 if (fd >= rlimit(RLIMIT_NOFILE))
50934 return -EBADF;
50935
50936 @@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
50937 if (unlikely(oldfd == newfd))
50938 return -EINVAL;
50939
50940 + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
50941 if (newfd >= rlimit(RLIMIT_NOFILE))
50942 return -EBADF;
50943
50944 @@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
50945 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
50946 {
50947 int err;
50948 + gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
50949 if (from >= rlimit(RLIMIT_NOFILE))
50950 return -EINVAL;
50951 err = alloc_fd(from, flags);
50952 diff --git a/fs/filesystems.c b/fs/filesystems.c
50953 index 92567d9..fcd8cbf 100644
50954 --- a/fs/filesystems.c
50955 +++ b/fs/filesystems.c
50956 @@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
50957 int len = dot ? dot - name : strlen(name);
50958
50959 fs = __get_fs_type(name, len);
50960 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
50961 + if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
50962 +#else
50963 if (!fs && (request_module("fs-%.*s", len, name) == 0))
50964 +#endif
50965 fs = __get_fs_type(name, len);
50966
50967 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
50968 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
50969 index d8ac61d..79a36f0 100644
50970 --- a/fs/fs_struct.c
50971 +++ b/fs/fs_struct.c
50972 @@ -4,6 +4,7 @@
50973 #include <linux/path.h>
50974 #include <linux/slab.h>
50975 #include <linux/fs_struct.h>
50976 +#include <linux/grsecurity.h>
50977 #include "internal.h"
50978
50979 /*
50980 @@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
50981 write_seqcount_begin(&fs->seq);
50982 old_root = fs->root;
50983 fs->root = *path;
50984 + gr_set_chroot_entries(current, path);
50985 write_seqcount_end(&fs->seq);
50986 spin_unlock(&fs->lock);
50987 if (old_root.dentry)
50988 @@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
50989 int hits = 0;
50990 spin_lock(&fs->lock);
50991 write_seqcount_begin(&fs->seq);
50992 + /* this root replacement is only done by pivot_root,
50993 + leave grsec's chroot tagging alone for this task
50994 + so that a pivoted root isn't treated as a chroot
50995 + */
50996 hits += replace_path(&fs->root, old_root, new_root);
50997 hits += replace_path(&fs->pwd, old_root, new_root);
50998 write_seqcount_end(&fs->seq);
50999 @@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
51000 task_lock(tsk);
51001 spin_lock(&fs->lock);
51002 tsk->fs = NULL;
51003 - kill = !--fs->users;
51004 + gr_clear_chroot_entries(tsk);
51005 + kill = !atomic_dec_return(&fs->users);
51006 spin_unlock(&fs->lock);
51007 task_unlock(tsk);
51008 if (kill)
51009 @@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51010 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51011 /* We don't need to lock fs - think why ;-) */
51012 if (fs) {
51013 - fs->users = 1;
51014 + atomic_set(&fs->users, 1);
51015 fs->in_exec = 0;
51016 spin_lock_init(&fs->lock);
51017 seqcount_init(&fs->seq);
51018 @@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51019 spin_lock(&old->lock);
51020 fs->root = old->root;
51021 path_get(&fs->root);
51022 + /* instead of calling gr_set_chroot_entries here,
51023 + we call it from every caller of this function
51024 + */
51025 fs->pwd = old->pwd;
51026 path_get(&fs->pwd);
51027 spin_unlock(&old->lock);
51028 @@ -139,8 +149,9 @@ int unshare_fs_struct(void)
51029
51030 task_lock(current);
51031 spin_lock(&fs->lock);
51032 - kill = !--fs->users;
51033 + kill = !atomic_dec_return(&fs->users);
51034 current->fs = new_fs;
51035 + gr_set_chroot_entries(current, &new_fs->root);
51036 spin_unlock(&fs->lock);
51037 task_unlock(current);
51038
51039 @@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51040
51041 int current_umask(void)
51042 {
51043 - return current->fs->umask;
51044 + return current->fs->umask | gr_acl_umask();
51045 }
51046 EXPORT_SYMBOL(current_umask);
51047
51048 /* to be mentioned only in INIT_TASK */
51049 struct fs_struct init_fs = {
51050 - .users = 1,
51051 + .users = ATOMIC_INIT(1),
51052 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51053 .seq = SEQCNT_ZERO,
51054 .umask = 0022,
51055 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51056 index e2cba1f..17a25bb 100644
51057 --- a/fs/fscache/cookie.c
51058 +++ b/fs/fscache/cookie.c
51059 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51060 parent ? (char *) parent->def->name : "<no-parent>",
51061 def->name, netfs_data);
51062
51063 - fscache_stat(&fscache_n_acquires);
51064 + fscache_stat_unchecked(&fscache_n_acquires);
51065
51066 /* if there's no parent cookie, then we don't create one here either */
51067 if (!parent) {
51068 - fscache_stat(&fscache_n_acquires_null);
51069 + fscache_stat_unchecked(&fscache_n_acquires_null);
51070 _leave(" [no parent]");
51071 return NULL;
51072 }
51073 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51074 /* allocate and initialise a cookie */
51075 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51076 if (!cookie) {
51077 - fscache_stat(&fscache_n_acquires_oom);
51078 + fscache_stat_unchecked(&fscache_n_acquires_oom);
51079 _leave(" [ENOMEM]");
51080 return NULL;
51081 }
51082 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51083
51084 switch (cookie->def->type) {
51085 case FSCACHE_COOKIE_TYPE_INDEX:
51086 - fscache_stat(&fscache_n_cookie_index);
51087 + fscache_stat_unchecked(&fscache_n_cookie_index);
51088 break;
51089 case FSCACHE_COOKIE_TYPE_DATAFILE:
51090 - fscache_stat(&fscache_n_cookie_data);
51091 + fscache_stat_unchecked(&fscache_n_cookie_data);
51092 break;
51093 default:
51094 - fscache_stat(&fscache_n_cookie_special);
51095 + fscache_stat_unchecked(&fscache_n_cookie_special);
51096 break;
51097 }
51098
51099 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51100 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51101 atomic_dec(&parent->n_children);
51102 __fscache_cookie_put(cookie);
51103 - fscache_stat(&fscache_n_acquires_nobufs);
51104 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51105 _leave(" = NULL");
51106 return NULL;
51107 }
51108 }
51109
51110 - fscache_stat(&fscache_n_acquires_ok);
51111 + fscache_stat_unchecked(&fscache_n_acquires_ok);
51112 _leave(" = %p", cookie);
51113 return cookie;
51114 }
51115 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51116 cache = fscache_select_cache_for_object(cookie->parent);
51117 if (!cache) {
51118 up_read(&fscache_addremove_sem);
51119 - fscache_stat(&fscache_n_acquires_no_cache);
51120 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51121 _leave(" = -ENOMEDIUM [no cache]");
51122 return -ENOMEDIUM;
51123 }
51124 @@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51125 object = cache->ops->alloc_object(cache, cookie);
51126 fscache_stat_d(&fscache_n_cop_alloc_object);
51127 if (IS_ERR(object)) {
51128 - fscache_stat(&fscache_n_object_no_alloc);
51129 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
51130 ret = PTR_ERR(object);
51131 goto error;
51132 }
51133
51134 - fscache_stat(&fscache_n_object_alloc);
51135 + fscache_stat_unchecked(&fscache_n_object_alloc);
51136
51137 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51138
51139 @@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51140
51141 _enter("{%s}", cookie->def->name);
51142
51143 - fscache_stat(&fscache_n_invalidates);
51144 + fscache_stat_unchecked(&fscache_n_invalidates);
51145
51146 /* Only permit invalidation of data files. Invalidating an index will
51147 * require the caller to release all its attachments to the tree rooted
51148 @@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51149 {
51150 struct fscache_object *object;
51151
51152 - fscache_stat(&fscache_n_updates);
51153 + fscache_stat_unchecked(&fscache_n_updates);
51154
51155 if (!cookie) {
51156 - fscache_stat(&fscache_n_updates_null);
51157 + fscache_stat_unchecked(&fscache_n_updates_null);
51158 _leave(" [no cookie]");
51159 return;
51160 }
51161 @@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51162 struct fscache_object *object;
51163 unsigned long event;
51164
51165 - fscache_stat(&fscache_n_relinquishes);
51166 + fscache_stat_unchecked(&fscache_n_relinquishes);
51167 if (retire)
51168 - fscache_stat(&fscache_n_relinquishes_retire);
51169 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
51170
51171 if (!cookie) {
51172 - fscache_stat(&fscache_n_relinquishes_null);
51173 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
51174 _leave(" [no cookie]");
51175 return;
51176 }
51177 @@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51178
51179 /* wait for the cookie to finish being instantiated (or to fail) */
51180 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
51181 - fscache_stat(&fscache_n_relinquishes_waitcrt);
51182 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
51183 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
51184 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
51185 }
51186 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
51187 index ee38fef..0a326d4 100644
51188 --- a/fs/fscache/internal.h
51189 +++ b/fs/fscache/internal.h
51190 @@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
51191 * stats.c
51192 */
51193 #ifdef CONFIG_FSCACHE_STATS
51194 -extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51195 -extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51196 +extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51197 +extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51198
51199 -extern atomic_t fscache_n_op_pend;
51200 -extern atomic_t fscache_n_op_run;
51201 -extern atomic_t fscache_n_op_enqueue;
51202 -extern atomic_t fscache_n_op_deferred_release;
51203 -extern atomic_t fscache_n_op_release;
51204 -extern atomic_t fscache_n_op_gc;
51205 -extern atomic_t fscache_n_op_cancelled;
51206 -extern atomic_t fscache_n_op_rejected;
51207 +extern atomic_unchecked_t fscache_n_op_pend;
51208 +extern atomic_unchecked_t fscache_n_op_run;
51209 +extern atomic_unchecked_t fscache_n_op_enqueue;
51210 +extern atomic_unchecked_t fscache_n_op_deferred_release;
51211 +extern atomic_unchecked_t fscache_n_op_release;
51212 +extern atomic_unchecked_t fscache_n_op_gc;
51213 +extern atomic_unchecked_t fscache_n_op_cancelled;
51214 +extern atomic_unchecked_t fscache_n_op_rejected;
51215
51216 -extern atomic_t fscache_n_attr_changed;
51217 -extern atomic_t fscache_n_attr_changed_ok;
51218 -extern atomic_t fscache_n_attr_changed_nobufs;
51219 -extern atomic_t fscache_n_attr_changed_nomem;
51220 -extern atomic_t fscache_n_attr_changed_calls;
51221 +extern atomic_unchecked_t fscache_n_attr_changed;
51222 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
51223 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
51224 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
51225 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
51226
51227 -extern atomic_t fscache_n_allocs;
51228 -extern atomic_t fscache_n_allocs_ok;
51229 -extern atomic_t fscache_n_allocs_wait;
51230 -extern atomic_t fscache_n_allocs_nobufs;
51231 -extern atomic_t fscache_n_allocs_intr;
51232 -extern atomic_t fscache_n_allocs_object_dead;
51233 -extern atomic_t fscache_n_alloc_ops;
51234 -extern atomic_t fscache_n_alloc_op_waits;
51235 +extern atomic_unchecked_t fscache_n_allocs;
51236 +extern atomic_unchecked_t fscache_n_allocs_ok;
51237 +extern atomic_unchecked_t fscache_n_allocs_wait;
51238 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
51239 +extern atomic_unchecked_t fscache_n_allocs_intr;
51240 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
51241 +extern atomic_unchecked_t fscache_n_alloc_ops;
51242 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
51243
51244 -extern atomic_t fscache_n_retrievals;
51245 -extern atomic_t fscache_n_retrievals_ok;
51246 -extern atomic_t fscache_n_retrievals_wait;
51247 -extern atomic_t fscache_n_retrievals_nodata;
51248 -extern atomic_t fscache_n_retrievals_nobufs;
51249 -extern atomic_t fscache_n_retrievals_intr;
51250 -extern atomic_t fscache_n_retrievals_nomem;
51251 -extern atomic_t fscache_n_retrievals_object_dead;
51252 -extern atomic_t fscache_n_retrieval_ops;
51253 -extern atomic_t fscache_n_retrieval_op_waits;
51254 +extern atomic_unchecked_t fscache_n_retrievals;
51255 +extern atomic_unchecked_t fscache_n_retrievals_ok;
51256 +extern atomic_unchecked_t fscache_n_retrievals_wait;
51257 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
51258 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
51259 +extern atomic_unchecked_t fscache_n_retrievals_intr;
51260 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
51261 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
51262 +extern atomic_unchecked_t fscache_n_retrieval_ops;
51263 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
51264
51265 -extern atomic_t fscache_n_stores;
51266 -extern atomic_t fscache_n_stores_ok;
51267 -extern atomic_t fscache_n_stores_again;
51268 -extern atomic_t fscache_n_stores_nobufs;
51269 -extern atomic_t fscache_n_stores_oom;
51270 -extern atomic_t fscache_n_store_ops;
51271 -extern atomic_t fscache_n_store_calls;
51272 -extern atomic_t fscache_n_store_pages;
51273 -extern atomic_t fscache_n_store_radix_deletes;
51274 -extern atomic_t fscache_n_store_pages_over_limit;
51275 +extern atomic_unchecked_t fscache_n_stores;
51276 +extern atomic_unchecked_t fscache_n_stores_ok;
51277 +extern atomic_unchecked_t fscache_n_stores_again;
51278 +extern atomic_unchecked_t fscache_n_stores_nobufs;
51279 +extern atomic_unchecked_t fscache_n_stores_oom;
51280 +extern atomic_unchecked_t fscache_n_store_ops;
51281 +extern atomic_unchecked_t fscache_n_store_calls;
51282 +extern atomic_unchecked_t fscache_n_store_pages;
51283 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
51284 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
51285
51286 -extern atomic_t fscache_n_store_vmscan_not_storing;
51287 -extern atomic_t fscache_n_store_vmscan_gone;
51288 -extern atomic_t fscache_n_store_vmscan_busy;
51289 -extern atomic_t fscache_n_store_vmscan_cancelled;
51290 -extern atomic_t fscache_n_store_vmscan_wait;
51291 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51292 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
51293 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
51294 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51295 +extern atomic_unchecked_t fscache_n_store_vmscan_wait;
51296
51297 -extern atomic_t fscache_n_marks;
51298 -extern atomic_t fscache_n_uncaches;
51299 +extern atomic_unchecked_t fscache_n_marks;
51300 +extern atomic_unchecked_t fscache_n_uncaches;
51301
51302 -extern atomic_t fscache_n_acquires;
51303 -extern atomic_t fscache_n_acquires_null;
51304 -extern atomic_t fscache_n_acquires_no_cache;
51305 -extern atomic_t fscache_n_acquires_ok;
51306 -extern atomic_t fscache_n_acquires_nobufs;
51307 -extern atomic_t fscache_n_acquires_oom;
51308 +extern atomic_unchecked_t fscache_n_acquires;
51309 +extern atomic_unchecked_t fscache_n_acquires_null;
51310 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
51311 +extern atomic_unchecked_t fscache_n_acquires_ok;
51312 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
51313 +extern atomic_unchecked_t fscache_n_acquires_oom;
51314
51315 -extern atomic_t fscache_n_invalidates;
51316 -extern atomic_t fscache_n_invalidates_run;
51317 +extern atomic_unchecked_t fscache_n_invalidates;
51318 +extern atomic_unchecked_t fscache_n_invalidates_run;
51319
51320 -extern atomic_t fscache_n_updates;
51321 -extern atomic_t fscache_n_updates_null;
51322 -extern atomic_t fscache_n_updates_run;
51323 +extern atomic_unchecked_t fscache_n_updates;
51324 +extern atomic_unchecked_t fscache_n_updates_null;
51325 +extern atomic_unchecked_t fscache_n_updates_run;
51326
51327 -extern atomic_t fscache_n_relinquishes;
51328 -extern atomic_t fscache_n_relinquishes_null;
51329 -extern atomic_t fscache_n_relinquishes_waitcrt;
51330 -extern atomic_t fscache_n_relinquishes_retire;
51331 +extern atomic_unchecked_t fscache_n_relinquishes;
51332 +extern atomic_unchecked_t fscache_n_relinquishes_null;
51333 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51334 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
51335
51336 -extern atomic_t fscache_n_cookie_index;
51337 -extern atomic_t fscache_n_cookie_data;
51338 -extern atomic_t fscache_n_cookie_special;
51339 +extern atomic_unchecked_t fscache_n_cookie_index;
51340 +extern atomic_unchecked_t fscache_n_cookie_data;
51341 +extern atomic_unchecked_t fscache_n_cookie_special;
51342
51343 -extern atomic_t fscache_n_object_alloc;
51344 -extern atomic_t fscache_n_object_no_alloc;
51345 -extern atomic_t fscache_n_object_lookups;
51346 -extern atomic_t fscache_n_object_lookups_negative;
51347 -extern atomic_t fscache_n_object_lookups_positive;
51348 -extern atomic_t fscache_n_object_lookups_timed_out;
51349 -extern atomic_t fscache_n_object_created;
51350 -extern atomic_t fscache_n_object_avail;
51351 -extern atomic_t fscache_n_object_dead;
51352 +extern atomic_unchecked_t fscache_n_object_alloc;
51353 +extern atomic_unchecked_t fscache_n_object_no_alloc;
51354 +extern atomic_unchecked_t fscache_n_object_lookups;
51355 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
51356 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
51357 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
51358 +extern atomic_unchecked_t fscache_n_object_created;
51359 +extern atomic_unchecked_t fscache_n_object_avail;
51360 +extern atomic_unchecked_t fscache_n_object_dead;
51361
51362 -extern atomic_t fscache_n_checkaux_none;
51363 -extern atomic_t fscache_n_checkaux_okay;
51364 -extern atomic_t fscache_n_checkaux_update;
51365 -extern atomic_t fscache_n_checkaux_obsolete;
51366 +extern atomic_unchecked_t fscache_n_checkaux_none;
51367 +extern atomic_unchecked_t fscache_n_checkaux_okay;
51368 +extern atomic_unchecked_t fscache_n_checkaux_update;
51369 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
51370
51371 extern atomic_t fscache_n_cop_alloc_object;
51372 extern atomic_t fscache_n_cop_lookup_object;
51373 @@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
51374 atomic_inc(stat);
51375 }
51376
51377 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
51378 +{
51379 + atomic_inc_unchecked(stat);
51380 +}
51381 +
51382 static inline void fscache_stat_d(atomic_t *stat)
51383 {
51384 atomic_dec(stat);
51385 @@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
51386
51387 #define __fscache_stat(stat) (NULL)
51388 #define fscache_stat(stat) do {} while (0)
51389 +#define fscache_stat_unchecked(stat) do {} while (0)
51390 #define fscache_stat_d(stat) do {} while (0)
51391 #endif
51392
51393 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
51394 index 50d41c1..10ee117 100644
51395 --- a/fs/fscache/object.c
51396 +++ b/fs/fscache/object.c
51397 @@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51398 /* Invalidate an object on disk */
51399 case FSCACHE_OBJECT_INVALIDATING:
51400 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
51401 - fscache_stat(&fscache_n_invalidates_run);
51402 + fscache_stat_unchecked(&fscache_n_invalidates_run);
51403 fscache_stat(&fscache_n_cop_invalidate_object);
51404 fscache_invalidate_object(object);
51405 fscache_stat_d(&fscache_n_cop_invalidate_object);
51406 @@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51407 /* update the object metadata on disk */
51408 case FSCACHE_OBJECT_UPDATING:
51409 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
51410 - fscache_stat(&fscache_n_updates_run);
51411 + fscache_stat_unchecked(&fscache_n_updates_run);
51412 fscache_stat(&fscache_n_cop_update_object);
51413 object->cache->ops->update_object(object);
51414 fscache_stat_d(&fscache_n_cop_update_object);
51415 @@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51416 spin_lock(&object->lock);
51417 object->state = FSCACHE_OBJECT_DEAD;
51418 spin_unlock(&object->lock);
51419 - fscache_stat(&fscache_n_object_dead);
51420 + fscache_stat_unchecked(&fscache_n_object_dead);
51421 goto terminal_transit;
51422
51423 /* handle the parent cache of this object being withdrawn from
51424 @@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51425 spin_lock(&object->lock);
51426 object->state = FSCACHE_OBJECT_DEAD;
51427 spin_unlock(&object->lock);
51428 - fscache_stat(&fscache_n_object_dead);
51429 + fscache_stat_unchecked(&fscache_n_object_dead);
51430 goto terminal_transit;
51431
51432 /* complain about the object being woken up once it is
51433 @@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51434 parent->cookie->def->name, cookie->def->name,
51435 object->cache->tag->name);
51436
51437 - fscache_stat(&fscache_n_object_lookups);
51438 + fscache_stat_unchecked(&fscache_n_object_lookups);
51439 fscache_stat(&fscache_n_cop_lookup_object);
51440 ret = object->cache->ops->lookup_object(object);
51441 fscache_stat_d(&fscache_n_cop_lookup_object);
51442 @@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51443 if (ret == -ETIMEDOUT) {
51444 /* probably stuck behind another object, so move this one to
51445 * the back of the queue */
51446 - fscache_stat(&fscache_n_object_lookups_timed_out);
51447 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
51448 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51449 }
51450
51451 @@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
51452
51453 spin_lock(&object->lock);
51454 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51455 - fscache_stat(&fscache_n_object_lookups_negative);
51456 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
51457
51458 /* transit here to allow write requests to begin stacking up
51459 * and read requests to begin returning ENODATA */
51460 @@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
51461 * result, in which case there may be data available */
51462 spin_lock(&object->lock);
51463 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51464 - fscache_stat(&fscache_n_object_lookups_positive);
51465 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
51466
51467 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
51468
51469 @@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
51470 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51471 } else {
51472 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
51473 - fscache_stat(&fscache_n_object_created);
51474 + fscache_stat_unchecked(&fscache_n_object_created);
51475
51476 object->state = FSCACHE_OBJECT_AVAILABLE;
51477 spin_unlock(&object->lock);
51478 @@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
51479 fscache_enqueue_dependents(object);
51480
51481 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
51482 - fscache_stat(&fscache_n_object_avail);
51483 + fscache_stat_unchecked(&fscache_n_object_avail);
51484
51485 _leave("");
51486 }
51487 @@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51488 enum fscache_checkaux result;
51489
51490 if (!object->cookie->def->check_aux) {
51491 - fscache_stat(&fscache_n_checkaux_none);
51492 + fscache_stat_unchecked(&fscache_n_checkaux_none);
51493 return FSCACHE_CHECKAUX_OKAY;
51494 }
51495
51496 @@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51497 switch (result) {
51498 /* entry okay as is */
51499 case FSCACHE_CHECKAUX_OKAY:
51500 - fscache_stat(&fscache_n_checkaux_okay);
51501 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
51502 break;
51503
51504 /* entry requires update */
51505 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
51506 - fscache_stat(&fscache_n_checkaux_update);
51507 + fscache_stat_unchecked(&fscache_n_checkaux_update);
51508 break;
51509
51510 /* entry requires deletion */
51511 case FSCACHE_CHECKAUX_OBSOLETE:
51512 - fscache_stat(&fscache_n_checkaux_obsolete);
51513 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
51514 break;
51515
51516 default:
51517 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
51518 index 762a9ec..2023284 100644
51519 --- a/fs/fscache/operation.c
51520 +++ b/fs/fscache/operation.c
51521 @@ -17,7 +17,7 @@
51522 #include <linux/slab.h>
51523 #include "internal.h"
51524
51525 -atomic_t fscache_op_debug_id;
51526 +atomic_unchecked_t fscache_op_debug_id;
51527 EXPORT_SYMBOL(fscache_op_debug_id);
51528
51529 /**
51530 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
51531 ASSERTCMP(atomic_read(&op->usage), >, 0);
51532 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
51533
51534 - fscache_stat(&fscache_n_op_enqueue);
51535 + fscache_stat_unchecked(&fscache_n_op_enqueue);
51536 switch (op->flags & FSCACHE_OP_TYPE) {
51537 case FSCACHE_OP_ASYNC:
51538 _debug("queue async");
51539 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
51540 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
51541 if (op->processor)
51542 fscache_enqueue_operation(op);
51543 - fscache_stat(&fscache_n_op_run);
51544 + fscache_stat_unchecked(&fscache_n_op_run);
51545 }
51546
51547 /*
51548 @@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51549 if (object->n_in_progress > 0) {
51550 atomic_inc(&op->usage);
51551 list_add_tail(&op->pend_link, &object->pending_ops);
51552 - fscache_stat(&fscache_n_op_pend);
51553 + fscache_stat_unchecked(&fscache_n_op_pend);
51554 } else if (!list_empty(&object->pending_ops)) {
51555 atomic_inc(&op->usage);
51556 list_add_tail(&op->pend_link, &object->pending_ops);
51557 - fscache_stat(&fscache_n_op_pend);
51558 + fscache_stat_unchecked(&fscache_n_op_pend);
51559 fscache_start_operations(object);
51560 } else {
51561 ASSERTCMP(object->n_in_progress, ==, 0);
51562 @@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51563 object->n_exclusive++; /* reads and writes must wait */
51564 atomic_inc(&op->usage);
51565 list_add_tail(&op->pend_link, &object->pending_ops);
51566 - fscache_stat(&fscache_n_op_pend);
51567 + fscache_stat_unchecked(&fscache_n_op_pend);
51568 ret = 0;
51569 } else {
51570 /* If we're in any other state, there must have been an I/O
51571 @@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
51572 if (object->n_exclusive > 0) {
51573 atomic_inc(&op->usage);
51574 list_add_tail(&op->pend_link, &object->pending_ops);
51575 - fscache_stat(&fscache_n_op_pend);
51576 + fscache_stat_unchecked(&fscache_n_op_pend);
51577 } else if (!list_empty(&object->pending_ops)) {
51578 atomic_inc(&op->usage);
51579 list_add_tail(&op->pend_link, &object->pending_ops);
51580 - fscache_stat(&fscache_n_op_pend);
51581 + fscache_stat_unchecked(&fscache_n_op_pend);
51582 fscache_start_operations(object);
51583 } else {
51584 ASSERTCMP(object->n_exclusive, ==, 0);
51585 @@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
51586 object->n_ops++;
51587 atomic_inc(&op->usage);
51588 list_add_tail(&op->pend_link, &object->pending_ops);
51589 - fscache_stat(&fscache_n_op_pend);
51590 + fscache_stat_unchecked(&fscache_n_op_pend);
51591 ret = 0;
51592 } else if (object->state == FSCACHE_OBJECT_DYING ||
51593 object->state == FSCACHE_OBJECT_LC_DYING ||
51594 object->state == FSCACHE_OBJECT_WITHDRAWING) {
51595 - fscache_stat(&fscache_n_op_rejected);
51596 + fscache_stat_unchecked(&fscache_n_op_rejected);
51597 op->state = FSCACHE_OP_ST_CANCELLED;
51598 ret = -ENOBUFS;
51599 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
51600 @@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
51601 ret = -EBUSY;
51602 if (op->state == FSCACHE_OP_ST_PENDING) {
51603 ASSERT(!list_empty(&op->pend_link));
51604 - fscache_stat(&fscache_n_op_cancelled);
51605 + fscache_stat_unchecked(&fscache_n_op_cancelled);
51606 list_del_init(&op->pend_link);
51607 if (do_cancel)
51608 do_cancel(op);
51609 @@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
51610 while (!list_empty(&object->pending_ops)) {
51611 op = list_entry(object->pending_ops.next,
51612 struct fscache_operation, pend_link);
51613 - fscache_stat(&fscache_n_op_cancelled);
51614 + fscache_stat_unchecked(&fscache_n_op_cancelled);
51615 list_del_init(&op->pend_link);
51616
51617 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
51618 @@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
51619 op->state, ==, FSCACHE_OP_ST_CANCELLED);
51620 op->state = FSCACHE_OP_ST_DEAD;
51621
51622 - fscache_stat(&fscache_n_op_release);
51623 + fscache_stat_unchecked(&fscache_n_op_release);
51624
51625 if (op->release) {
51626 op->release(op);
51627 @@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
51628 * lock, and defer it otherwise */
51629 if (!spin_trylock(&object->lock)) {
51630 _debug("defer put");
51631 - fscache_stat(&fscache_n_op_deferred_release);
51632 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
51633
51634 cache = object->cache;
51635 spin_lock(&cache->op_gc_list_lock);
51636 @@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
51637
51638 _debug("GC DEFERRED REL OBJ%x OP%x",
51639 object->debug_id, op->debug_id);
51640 - fscache_stat(&fscache_n_op_gc);
51641 + fscache_stat_unchecked(&fscache_n_op_gc);
51642
51643 ASSERTCMP(atomic_read(&op->usage), ==, 0);
51644 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
51645 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
51646 index ff000e5..c44ec6d 100644
51647 --- a/fs/fscache/page.c
51648 +++ b/fs/fscache/page.c
51649 @@ -61,7 +61,7 @@ try_again:
51650 val = radix_tree_lookup(&cookie->stores, page->index);
51651 if (!val) {
51652 rcu_read_unlock();
51653 - fscache_stat(&fscache_n_store_vmscan_not_storing);
51654 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
51655 __fscache_uncache_page(cookie, page);
51656 return true;
51657 }
51658 @@ -91,11 +91,11 @@ try_again:
51659 spin_unlock(&cookie->stores_lock);
51660
51661 if (xpage) {
51662 - fscache_stat(&fscache_n_store_vmscan_cancelled);
51663 - fscache_stat(&fscache_n_store_radix_deletes);
51664 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
51665 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51666 ASSERTCMP(xpage, ==, page);
51667 } else {
51668 - fscache_stat(&fscache_n_store_vmscan_gone);
51669 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
51670 }
51671
51672 wake_up_bit(&cookie->flags, 0);
51673 @@ -110,11 +110,11 @@ page_busy:
51674 * sleeping on memory allocation, so we may need to impose a timeout
51675 * too. */
51676 if (!(gfp & __GFP_WAIT)) {
51677 - fscache_stat(&fscache_n_store_vmscan_busy);
51678 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
51679 return false;
51680 }
51681
51682 - fscache_stat(&fscache_n_store_vmscan_wait);
51683 + fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
51684 __fscache_wait_on_page_write(cookie, page);
51685 gfp &= ~__GFP_WAIT;
51686 goto try_again;
51687 @@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
51688 FSCACHE_COOKIE_STORING_TAG);
51689 if (!radix_tree_tag_get(&cookie->stores, page->index,
51690 FSCACHE_COOKIE_PENDING_TAG)) {
51691 - fscache_stat(&fscache_n_store_radix_deletes);
51692 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51693 xpage = radix_tree_delete(&cookie->stores, page->index);
51694 }
51695 spin_unlock(&cookie->stores_lock);
51696 @@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
51697
51698 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
51699
51700 - fscache_stat(&fscache_n_attr_changed_calls);
51701 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
51702
51703 if (fscache_object_is_active(object)) {
51704 fscache_stat(&fscache_n_cop_attr_changed);
51705 @@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51706
51707 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51708
51709 - fscache_stat(&fscache_n_attr_changed);
51710 + fscache_stat_unchecked(&fscache_n_attr_changed);
51711
51712 op = kzalloc(sizeof(*op), GFP_KERNEL);
51713 if (!op) {
51714 - fscache_stat(&fscache_n_attr_changed_nomem);
51715 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
51716 _leave(" = -ENOMEM");
51717 return -ENOMEM;
51718 }
51719 @@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51720 if (fscache_submit_exclusive_op(object, op) < 0)
51721 goto nobufs;
51722 spin_unlock(&cookie->lock);
51723 - fscache_stat(&fscache_n_attr_changed_ok);
51724 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
51725 fscache_put_operation(op);
51726 _leave(" = 0");
51727 return 0;
51728 @@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51729 nobufs:
51730 spin_unlock(&cookie->lock);
51731 kfree(op);
51732 - fscache_stat(&fscache_n_attr_changed_nobufs);
51733 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
51734 _leave(" = %d", -ENOBUFS);
51735 return -ENOBUFS;
51736 }
51737 @@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
51738 /* allocate a retrieval operation and attempt to submit it */
51739 op = kzalloc(sizeof(*op), GFP_NOIO);
51740 if (!op) {
51741 - fscache_stat(&fscache_n_retrievals_nomem);
51742 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51743 return NULL;
51744 }
51745
51746 @@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
51747 return 0;
51748 }
51749
51750 - fscache_stat(&fscache_n_retrievals_wait);
51751 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
51752
51753 jif = jiffies;
51754 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
51755 fscache_wait_bit_interruptible,
51756 TASK_INTERRUPTIBLE) != 0) {
51757 - fscache_stat(&fscache_n_retrievals_intr);
51758 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
51759 _leave(" = -ERESTARTSYS");
51760 return -ERESTARTSYS;
51761 }
51762 @@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
51763 */
51764 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51765 struct fscache_retrieval *op,
51766 - atomic_t *stat_op_waits,
51767 - atomic_t *stat_object_dead)
51768 + atomic_unchecked_t *stat_op_waits,
51769 + atomic_unchecked_t *stat_object_dead)
51770 {
51771 int ret;
51772
51773 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51774 goto check_if_dead;
51775
51776 _debug(">>> WT");
51777 - fscache_stat(stat_op_waits);
51778 + fscache_stat_unchecked(stat_op_waits);
51779 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
51780 fscache_wait_bit_interruptible,
51781 TASK_INTERRUPTIBLE) != 0) {
51782 @@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51783
51784 check_if_dead:
51785 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
51786 - fscache_stat(stat_object_dead);
51787 + fscache_stat_unchecked(stat_object_dead);
51788 _leave(" = -ENOBUFS [cancelled]");
51789 return -ENOBUFS;
51790 }
51791 if (unlikely(fscache_object_is_dead(object))) {
51792 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
51793 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
51794 - fscache_stat(stat_object_dead);
51795 + fscache_stat_unchecked(stat_object_dead);
51796 return -ENOBUFS;
51797 }
51798 return 0;
51799 @@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51800
51801 _enter("%p,%p,,,", cookie, page);
51802
51803 - fscache_stat(&fscache_n_retrievals);
51804 + fscache_stat_unchecked(&fscache_n_retrievals);
51805
51806 if (hlist_empty(&cookie->backing_objects))
51807 goto nobufs;
51808 @@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51809 goto nobufs_unlock_dec;
51810 spin_unlock(&cookie->lock);
51811
51812 - fscache_stat(&fscache_n_retrieval_ops);
51813 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
51814
51815 /* pin the netfs read context in case we need to do the actual netfs
51816 * read because we've encountered a cache read failure */
51817 @@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51818
51819 error:
51820 if (ret == -ENOMEM)
51821 - fscache_stat(&fscache_n_retrievals_nomem);
51822 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51823 else if (ret == -ERESTARTSYS)
51824 - fscache_stat(&fscache_n_retrievals_intr);
51825 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
51826 else if (ret == -ENODATA)
51827 - fscache_stat(&fscache_n_retrievals_nodata);
51828 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51829 else if (ret < 0)
51830 - fscache_stat(&fscache_n_retrievals_nobufs);
51831 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51832 else
51833 - fscache_stat(&fscache_n_retrievals_ok);
51834 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
51835
51836 fscache_put_retrieval(op);
51837 _leave(" = %d", ret);
51838 @@ -467,7 +467,7 @@ nobufs_unlock:
51839 spin_unlock(&cookie->lock);
51840 kfree(op);
51841 nobufs:
51842 - fscache_stat(&fscache_n_retrievals_nobufs);
51843 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51844 _leave(" = -ENOBUFS");
51845 return -ENOBUFS;
51846 }
51847 @@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51848
51849 _enter("%p,,%d,,,", cookie, *nr_pages);
51850
51851 - fscache_stat(&fscache_n_retrievals);
51852 + fscache_stat_unchecked(&fscache_n_retrievals);
51853
51854 if (hlist_empty(&cookie->backing_objects))
51855 goto nobufs;
51856 @@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51857 goto nobufs_unlock_dec;
51858 spin_unlock(&cookie->lock);
51859
51860 - fscache_stat(&fscache_n_retrieval_ops);
51861 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
51862
51863 /* pin the netfs read context in case we need to do the actual netfs
51864 * read because we've encountered a cache read failure */
51865 @@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51866
51867 error:
51868 if (ret == -ENOMEM)
51869 - fscache_stat(&fscache_n_retrievals_nomem);
51870 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51871 else if (ret == -ERESTARTSYS)
51872 - fscache_stat(&fscache_n_retrievals_intr);
51873 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
51874 else if (ret == -ENODATA)
51875 - fscache_stat(&fscache_n_retrievals_nodata);
51876 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51877 else if (ret < 0)
51878 - fscache_stat(&fscache_n_retrievals_nobufs);
51879 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51880 else
51881 - fscache_stat(&fscache_n_retrievals_ok);
51882 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
51883
51884 fscache_put_retrieval(op);
51885 _leave(" = %d", ret);
51886 @@ -591,7 +591,7 @@ nobufs_unlock:
51887 spin_unlock(&cookie->lock);
51888 kfree(op);
51889 nobufs:
51890 - fscache_stat(&fscache_n_retrievals_nobufs);
51891 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51892 _leave(" = -ENOBUFS");
51893 return -ENOBUFS;
51894 }
51895 @@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51896
51897 _enter("%p,%p,,,", cookie, page);
51898
51899 - fscache_stat(&fscache_n_allocs);
51900 + fscache_stat_unchecked(&fscache_n_allocs);
51901
51902 if (hlist_empty(&cookie->backing_objects))
51903 goto nobufs;
51904 @@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51905 goto nobufs_unlock;
51906 spin_unlock(&cookie->lock);
51907
51908 - fscache_stat(&fscache_n_alloc_ops);
51909 + fscache_stat_unchecked(&fscache_n_alloc_ops);
51910
51911 ret = fscache_wait_for_retrieval_activation(
51912 object, op,
51913 @@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51914
51915 error:
51916 if (ret == -ERESTARTSYS)
51917 - fscache_stat(&fscache_n_allocs_intr);
51918 + fscache_stat_unchecked(&fscache_n_allocs_intr);
51919 else if (ret < 0)
51920 - fscache_stat(&fscache_n_allocs_nobufs);
51921 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51922 else
51923 - fscache_stat(&fscache_n_allocs_ok);
51924 + fscache_stat_unchecked(&fscache_n_allocs_ok);
51925
51926 fscache_put_retrieval(op);
51927 _leave(" = %d", ret);
51928 @@ -677,7 +677,7 @@ nobufs_unlock:
51929 spin_unlock(&cookie->lock);
51930 kfree(op);
51931 nobufs:
51932 - fscache_stat(&fscache_n_allocs_nobufs);
51933 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51934 _leave(" = -ENOBUFS");
51935 return -ENOBUFS;
51936 }
51937 @@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51938
51939 spin_lock(&cookie->stores_lock);
51940
51941 - fscache_stat(&fscache_n_store_calls);
51942 + fscache_stat_unchecked(&fscache_n_store_calls);
51943
51944 /* find a page to store */
51945 page = NULL;
51946 @@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51947 page = results[0];
51948 _debug("gang %d [%lx]", n, page->index);
51949 if (page->index > op->store_limit) {
51950 - fscache_stat(&fscache_n_store_pages_over_limit);
51951 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
51952 goto superseded;
51953 }
51954
51955 @@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51956 spin_unlock(&cookie->stores_lock);
51957 spin_unlock(&object->lock);
51958
51959 - fscache_stat(&fscache_n_store_pages);
51960 + fscache_stat_unchecked(&fscache_n_store_pages);
51961 fscache_stat(&fscache_n_cop_write_page);
51962 ret = object->cache->ops->write_page(op, page);
51963 fscache_stat_d(&fscache_n_cop_write_page);
51964 @@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51965 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51966 ASSERT(PageFsCache(page));
51967
51968 - fscache_stat(&fscache_n_stores);
51969 + fscache_stat_unchecked(&fscache_n_stores);
51970
51971 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
51972 _leave(" = -ENOBUFS [invalidating]");
51973 @@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51974 spin_unlock(&cookie->stores_lock);
51975 spin_unlock(&object->lock);
51976
51977 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
51978 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51979 op->store_limit = object->store_limit;
51980
51981 if (fscache_submit_op(object, &op->op) < 0)
51982 @@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51983
51984 spin_unlock(&cookie->lock);
51985 radix_tree_preload_end();
51986 - fscache_stat(&fscache_n_store_ops);
51987 - fscache_stat(&fscache_n_stores_ok);
51988 + fscache_stat_unchecked(&fscache_n_store_ops);
51989 + fscache_stat_unchecked(&fscache_n_stores_ok);
51990
51991 /* the work queue now carries its own ref on the object */
51992 fscache_put_operation(&op->op);
51993 @@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51994 return 0;
51995
51996 already_queued:
51997 - fscache_stat(&fscache_n_stores_again);
51998 + fscache_stat_unchecked(&fscache_n_stores_again);
51999 already_pending:
52000 spin_unlock(&cookie->stores_lock);
52001 spin_unlock(&object->lock);
52002 spin_unlock(&cookie->lock);
52003 radix_tree_preload_end();
52004 kfree(op);
52005 - fscache_stat(&fscache_n_stores_ok);
52006 + fscache_stat_unchecked(&fscache_n_stores_ok);
52007 _leave(" = 0");
52008 return 0;
52009
52010 @@ -959,14 +959,14 @@ nobufs:
52011 spin_unlock(&cookie->lock);
52012 radix_tree_preload_end();
52013 kfree(op);
52014 - fscache_stat(&fscache_n_stores_nobufs);
52015 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
52016 _leave(" = -ENOBUFS");
52017 return -ENOBUFS;
52018
52019 nomem_free:
52020 kfree(op);
52021 nomem:
52022 - fscache_stat(&fscache_n_stores_oom);
52023 + fscache_stat_unchecked(&fscache_n_stores_oom);
52024 _leave(" = -ENOMEM");
52025 return -ENOMEM;
52026 }
52027 @@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52028 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52029 ASSERTCMP(page, !=, NULL);
52030
52031 - fscache_stat(&fscache_n_uncaches);
52032 + fscache_stat_unchecked(&fscache_n_uncaches);
52033
52034 /* cache withdrawal may beat us to it */
52035 if (!PageFsCache(page))
52036 @@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52037 struct fscache_cookie *cookie = op->op.object->cookie;
52038
52039 #ifdef CONFIG_FSCACHE_STATS
52040 - atomic_inc(&fscache_n_marks);
52041 + atomic_inc_unchecked(&fscache_n_marks);
52042 #endif
52043
52044 _debug("- mark %p{%lx}", page, page->index);
52045 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52046 index 40d13c7..ddf52b9 100644
52047 --- a/fs/fscache/stats.c
52048 +++ b/fs/fscache/stats.c
52049 @@ -18,99 +18,99 @@
52050 /*
52051 * operation counters
52052 */
52053 -atomic_t fscache_n_op_pend;
52054 -atomic_t fscache_n_op_run;
52055 -atomic_t fscache_n_op_enqueue;
52056 -atomic_t fscache_n_op_requeue;
52057 -atomic_t fscache_n_op_deferred_release;
52058 -atomic_t fscache_n_op_release;
52059 -atomic_t fscache_n_op_gc;
52060 -atomic_t fscache_n_op_cancelled;
52061 -atomic_t fscache_n_op_rejected;
52062 +atomic_unchecked_t fscache_n_op_pend;
52063 +atomic_unchecked_t fscache_n_op_run;
52064 +atomic_unchecked_t fscache_n_op_enqueue;
52065 +atomic_unchecked_t fscache_n_op_requeue;
52066 +atomic_unchecked_t fscache_n_op_deferred_release;
52067 +atomic_unchecked_t fscache_n_op_release;
52068 +atomic_unchecked_t fscache_n_op_gc;
52069 +atomic_unchecked_t fscache_n_op_cancelled;
52070 +atomic_unchecked_t fscache_n_op_rejected;
52071
52072 -atomic_t fscache_n_attr_changed;
52073 -atomic_t fscache_n_attr_changed_ok;
52074 -atomic_t fscache_n_attr_changed_nobufs;
52075 -atomic_t fscache_n_attr_changed_nomem;
52076 -atomic_t fscache_n_attr_changed_calls;
52077 +atomic_unchecked_t fscache_n_attr_changed;
52078 +atomic_unchecked_t fscache_n_attr_changed_ok;
52079 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
52080 +atomic_unchecked_t fscache_n_attr_changed_nomem;
52081 +atomic_unchecked_t fscache_n_attr_changed_calls;
52082
52083 -atomic_t fscache_n_allocs;
52084 -atomic_t fscache_n_allocs_ok;
52085 -atomic_t fscache_n_allocs_wait;
52086 -atomic_t fscache_n_allocs_nobufs;
52087 -atomic_t fscache_n_allocs_intr;
52088 -atomic_t fscache_n_allocs_object_dead;
52089 -atomic_t fscache_n_alloc_ops;
52090 -atomic_t fscache_n_alloc_op_waits;
52091 +atomic_unchecked_t fscache_n_allocs;
52092 +atomic_unchecked_t fscache_n_allocs_ok;
52093 +atomic_unchecked_t fscache_n_allocs_wait;
52094 +atomic_unchecked_t fscache_n_allocs_nobufs;
52095 +atomic_unchecked_t fscache_n_allocs_intr;
52096 +atomic_unchecked_t fscache_n_allocs_object_dead;
52097 +atomic_unchecked_t fscache_n_alloc_ops;
52098 +atomic_unchecked_t fscache_n_alloc_op_waits;
52099
52100 -atomic_t fscache_n_retrievals;
52101 -atomic_t fscache_n_retrievals_ok;
52102 -atomic_t fscache_n_retrievals_wait;
52103 -atomic_t fscache_n_retrievals_nodata;
52104 -atomic_t fscache_n_retrievals_nobufs;
52105 -atomic_t fscache_n_retrievals_intr;
52106 -atomic_t fscache_n_retrievals_nomem;
52107 -atomic_t fscache_n_retrievals_object_dead;
52108 -atomic_t fscache_n_retrieval_ops;
52109 -atomic_t fscache_n_retrieval_op_waits;
52110 +atomic_unchecked_t fscache_n_retrievals;
52111 +atomic_unchecked_t fscache_n_retrievals_ok;
52112 +atomic_unchecked_t fscache_n_retrievals_wait;
52113 +atomic_unchecked_t fscache_n_retrievals_nodata;
52114 +atomic_unchecked_t fscache_n_retrievals_nobufs;
52115 +atomic_unchecked_t fscache_n_retrievals_intr;
52116 +atomic_unchecked_t fscache_n_retrievals_nomem;
52117 +atomic_unchecked_t fscache_n_retrievals_object_dead;
52118 +atomic_unchecked_t fscache_n_retrieval_ops;
52119 +atomic_unchecked_t fscache_n_retrieval_op_waits;
52120
52121 -atomic_t fscache_n_stores;
52122 -atomic_t fscache_n_stores_ok;
52123 -atomic_t fscache_n_stores_again;
52124 -atomic_t fscache_n_stores_nobufs;
52125 -atomic_t fscache_n_stores_oom;
52126 -atomic_t fscache_n_store_ops;
52127 -atomic_t fscache_n_store_calls;
52128 -atomic_t fscache_n_store_pages;
52129 -atomic_t fscache_n_store_radix_deletes;
52130 -atomic_t fscache_n_store_pages_over_limit;
52131 +atomic_unchecked_t fscache_n_stores;
52132 +atomic_unchecked_t fscache_n_stores_ok;
52133 +atomic_unchecked_t fscache_n_stores_again;
52134 +atomic_unchecked_t fscache_n_stores_nobufs;
52135 +atomic_unchecked_t fscache_n_stores_oom;
52136 +atomic_unchecked_t fscache_n_store_ops;
52137 +atomic_unchecked_t fscache_n_store_calls;
52138 +atomic_unchecked_t fscache_n_store_pages;
52139 +atomic_unchecked_t fscache_n_store_radix_deletes;
52140 +atomic_unchecked_t fscache_n_store_pages_over_limit;
52141
52142 -atomic_t fscache_n_store_vmscan_not_storing;
52143 -atomic_t fscache_n_store_vmscan_gone;
52144 -atomic_t fscache_n_store_vmscan_busy;
52145 -atomic_t fscache_n_store_vmscan_cancelled;
52146 -atomic_t fscache_n_store_vmscan_wait;
52147 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52148 +atomic_unchecked_t fscache_n_store_vmscan_gone;
52149 +atomic_unchecked_t fscache_n_store_vmscan_busy;
52150 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52151 +atomic_unchecked_t fscache_n_store_vmscan_wait;
52152
52153 -atomic_t fscache_n_marks;
52154 -atomic_t fscache_n_uncaches;
52155 +atomic_unchecked_t fscache_n_marks;
52156 +atomic_unchecked_t fscache_n_uncaches;
52157
52158 -atomic_t fscache_n_acquires;
52159 -atomic_t fscache_n_acquires_null;
52160 -atomic_t fscache_n_acquires_no_cache;
52161 -atomic_t fscache_n_acquires_ok;
52162 -atomic_t fscache_n_acquires_nobufs;
52163 -atomic_t fscache_n_acquires_oom;
52164 +atomic_unchecked_t fscache_n_acquires;
52165 +atomic_unchecked_t fscache_n_acquires_null;
52166 +atomic_unchecked_t fscache_n_acquires_no_cache;
52167 +atomic_unchecked_t fscache_n_acquires_ok;
52168 +atomic_unchecked_t fscache_n_acquires_nobufs;
52169 +atomic_unchecked_t fscache_n_acquires_oom;
52170
52171 -atomic_t fscache_n_invalidates;
52172 -atomic_t fscache_n_invalidates_run;
52173 +atomic_unchecked_t fscache_n_invalidates;
52174 +atomic_unchecked_t fscache_n_invalidates_run;
52175
52176 -atomic_t fscache_n_updates;
52177 -atomic_t fscache_n_updates_null;
52178 -atomic_t fscache_n_updates_run;
52179 +atomic_unchecked_t fscache_n_updates;
52180 +atomic_unchecked_t fscache_n_updates_null;
52181 +atomic_unchecked_t fscache_n_updates_run;
52182
52183 -atomic_t fscache_n_relinquishes;
52184 -atomic_t fscache_n_relinquishes_null;
52185 -atomic_t fscache_n_relinquishes_waitcrt;
52186 -atomic_t fscache_n_relinquishes_retire;
52187 +atomic_unchecked_t fscache_n_relinquishes;
52188 +atomic_unchecked_t fscache_n_relinquishes_null;
52189 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52190 +atomic_unchecked_t fscache_n_relinquishes_retire;
52191
52192 -atomic_t fscache_n_cookie_index;
52193 -atomic_t fscache_n_cookie_data;
52194 -atomic_t fscache_n_cookie_special;
52195 +atomic_unchecked_t fscache_n_cookie_index;
52196 +atomic_unchecked_t fscache_n_cookie_data;
52197 +atomic_unchecked_t fscache_n_cookie_special;
52198
52199 -atomic_t fscache_n_object_alloc;
52200 -atomic_t fscache_n_object_no_alloc;
52201 -atomic_t fscache_n_object_lookups;
52202 -atomic_t fscache_n_object_lookups_negative;
52203 -atomic_t fscache_n_object_lookups_positive;
52204 -atomic_t fscache_n_object_lookups_timed_out;
52205 -atomic_t fscache_n_object_created;
52206 -atomic_t fscache_n_object_avail;
52207 -atomic_t fscache_n_object_dead;
52208 +atomic_unchecked_t fscache_n_object_alloc;
52209 +atomic_unchecked_t fscache_n_object_no_alloc;
52210 +atomic_unchecked_t fscache_n_object_lookups;
52211 +atomic_unchecked_t fscache_n_object_lookups_negative;
52212 +atomic_unchecked_t fscache_n_object_lookups_positive;
52213 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
52214 +atomic_unchecked_t fscache_n_object_created;
52215 +atomic_unchecked_t fscache_n_object_avail;
52216 +atomic_unchecked_t fscache_n_object_dead;
52217
52218 -atomic_t fscache_n_checkaux_none;
52219 -atomic_t fscache_n_checkaux_okay;
52220 -atomic_t fscache_n_checkaux_update;
52221 -atomic_t fscache_n_checkaux_obsolete;
52222 +atomic_unchecked_t fscache_n_checkaux_none;
52223 +atomic_unchecked_t fscache_n_checkaux_okay;
52224 +atomic_unchecked_t fscache_n_checkaux_update;
52225 +atomic_unchecked_t fscache_n_checkaux_obsolete;
52226
52227 atomic_t fscache_n_cop_alloc_object;
52228 atomic_t fscache_n_cop_lookup_object;
52229 @@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
52230 seq_puts(m, "FS-Cache statistics\n");
52231
52232 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
52233 - atomic_read(&fscache_n_cookie_index),
52234 - atomic_read(&fscache_n_cookie_data),
52235 - atomic_read(&fscache_n_cookie_special));
52236 + atomic_read_unchecked(&fscache_n_cookie_index),
52237 + atomic_read_unchecked(&fscache_n_cookie_data),
52238 + atomic_read_unchecked(&fscache_n_cookie_special));
52239
52240 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
52241 - atomic_read(&fscache_n_object_alloc),
52242 - atomic_read(&fscache_n_object_no_alloc),
52243 - atomic_read(&fscache_n_object_avail),
52244 - atomic_read(&fscache_n_object_dead));
52245 + atomic_read_unchecked(&fscache_n_object_alloc),
52246 + atomic_read_unchecked(&fscache_n_object_no_alloc),
52247 + atomic_read_unchecked(&fscache_n_object_avail),
52248 + atomic_read_unchecked(&fscache_n_object_dead));
52249 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
52250 - atomic_read(&fscache_n_checkaux_none),
52251 - atomic_read(&fscache_n_checkaux_okay),
52252 - atomic_read(&fscache_n_checkaux_update),
52253 - atomic_read(&fscache_n_checkaux_obsolete));
52254 + atomic_read_unchecked(&fscache_n_checkaux_none),
52255 + atomic_read_unchecked(&fscache_n_checkaux_okay),
52256 + atomic_read_unchecked(&fscache_n_checkaux_update),
52257 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
52258
52259 seq_printf(m, "Pages : mrk=%u unc=%u\n",
52260 - atomic_read(&fscache_n_marks),
52261 - atomic_read(&fscache_n_uncaches));
52262 + atomic_read_unchecked(&fscache_n_marks),
52263 + atomic_read_unchecked(&fscache_n_uncaches));
52264
52265 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
52266 " oom=%u\n",
52267 - atomic_read(&fscache_n_acquires),
52268 - atomic_read(&fscache_n_acquires_null),
52269 - atomic_read(&fscache_n_acquires_no_cache),
52270 - atomic_read(&fscache_n_acquires_ok),
52271 - atomic_read(&fscache_n_acquires_nobufs),
52272 - atomic_read(&fscache_n_acquires_oom));
52273 + atomic_read_unchecked(&fscache_n_acquires),
52274 + atomic_read_unchecked(&fscache_n_acquires_null),
52275 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
52276 + atomic_read_unchecked(&fscache_n_acquires_ok),
52277 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
52278 + atomic_read_unchecked(&fscache_n_acquires_oom));
52279
52280 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
52281 - atomic_read(&fscache_n_object_lookups),
52282 - atomic_read(&fscache_n_object_lookups_negative),
52283 - atomic_read(&fscache_n_object_lookups_positive),
52284 - atomic_read(&fscache_n_object_created),
52285 - atomic_read(&fscache_n_object_lookups_timed_out));
52286 + atomic_read_unchecked(&fscache_n_object_lookups),
52287 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
52288 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
52289 + atomic_read_unchecked(&fscache_n_object_created),
52290 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
52291
52292 seq_printf(m, "Invals : n=%u run=%u\n",
52293 - atomic_read(&fscache_n_invalidates),
52294 - atomic_read(&fscache_n_invalidates_run));
52295 + atomic_read_unchecked(&fscache_n_invalidates),
52296 + atomic_read_unchecked(&fscache_n_invalidates_run));
52297
52298 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
52299 - atomic_read(&fscache_n_updates),
52300 - atomic_read(&fscache_n_updates_null),
52301 - atomic_read(&fscache_n_updates_run));
52302 + atomic_read_unchecked(&fscache_n_updates),
52303 + atomic_read_unchecked(&fscache_n_updates_null),
52304 + atomic_read_unchecked(&fscache_n_updates_run));
52305
52306 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
52307 - atomic_read(&fscache_n_relinquishes),
52308 - atomic_read(&fscache_n_relinquishes_null),
52309 - atomic_read(&fscache_n_relinquishes_waitcrt),
52310 - atomic_read(&fscache_n_relinquishes_retire));
52311 + atomic_read_unchecked(&fscache_n_relinquishes),
52312 + atomic_read_unchecked(&fscache_n_relinquishes_null),
52313 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
52314 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
52315
52316 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
52317 - atomic_read(&fscache_n_attr_changed),
52318 - atomic_read(&fscache_n_attr_changed_ok),
52319 - atomic_read(&fscache_n_attr_changed_nobufs),
52320 - atomic_read(&fscache_n_attr_changed_nomem),
52321 - atomic_read(&fscache_n_attr_changed_calls));
52322 + atomic_read_unchecked(&fscache_n_attr_changed),
52323 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
52324 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
52325 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
52326 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
52327
52328 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
52329 - atomic_read(&fscache_n_allocs),
52330 - atomic_read(&fscache_n_allocs_ok),
52331 - atomic_read(&fscache_n_allocs_wait),
52332 - atomic_read(&fscache_n_allocs_nobufs),
52333 - atomic_read(&fscache_n_allocs_intr));
52334 + atomic_read_unchecked(&fscache_n_allocs),
52335 + atomic_read_unchecked(&fscache_n_allocs_ok),
52336 + atomic_read_unchecked(&fscache_n_allocs_wait),
52337 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
52338 + atomic_read_unchecked(&fscache_n_allocs_intr));
52339 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
52340 - atomic_read(&fscache_n_alloc_ops),
52341 - atomic_read(&fscache_n_alloc_op_waits),
52342 - atomic_read(&fscache_n_allocs_object_dead));
52343 + atomic_read_unchecked(&fscache_n_alloc_ops),
52344 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
52345 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
52346
52347 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
52348 " int=%u oom=%u\n",
52349 - atomic_read(&fscache_n_retrievals),
52350 - atomic_read(&fscache_n_retrievals_ok),
52351 - atomic_read(&fscache_n_retrievals_wait),
52352 - atomic_read(&fscache_n_retrievals_nodata),
52353 - atomic_read(&fscache_n_retrievals_nobufs),
52354 - atomic_read(&fscache_n_retrievals_intr),
52355 - atomic_read(&fscache_n_retrievals_nomem));
52356 + atomic_read_unchecked(&fscache_n_retrievals),
52357 + atomic_read_unchecked(&fscache_n_retrievals_ok),
52358 + atomic_read_unchecked(&fscache_n_retrievals_wait),
52359 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
52360 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
52361 + atomic_read_unchecked(&fscache_n_retrievals_intr),
52362 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
52363 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
52364 - atomic_read(&fscache_n_retrieval_ops),
52365 - atomic_read(&fscache_n_retrieval_op_waits),
52366 - atomic_read(&fscache_n_retrievals_object_dead));
52367 + atomic_read_unchecked(&fscache_n_retrieval_ops),
52368 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
52369 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
52370
52371 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
52372 - atomic_read(&fscache_n_stores),
52373 - atomic_read(&fscache_n_stores_ok),
52374 - atomic_read(&fscache_n_stores_again),
52375 - atomic_read(&fscache_n_stores_nobufs),
52376 - atomic_read(&fscache_n_stores_oom));
52377 + atomic_read_unchecked(&fscache_n_stores),
52378 + atomic_read_unchecked(&fscache_n_stores_ok),
52379 + atomic_read_unchecked(&fscache_n_stores_again),
52380 + atomic_read_unchecked(&fscache_n_stores_nobufs),
52381 + atomic_read_unchecked(&fscache_n_stores_oom));
52382 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
52383 - atomic_read(&fscache_n_store_ops),
52384 - atomic_read(&fscache_n_store_calls),
52385 - atomic_read(&fscache_n_store_pages),
52386 - atomic_read(&fscache_n_store_radix_deletes),
52387 - atomic_read(&fscache_n_store_pages_over_limit));
52388 + atomic_read_unchecked(&fscache_n_store_ops),
52389 + atomic_read_unchecked(&fscache_n_store_calls),
52390 + atomic_read_unchecked(&fscache_n_store_pages),
52391 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
52392 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
52393
52394 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
52395 - atomic_read(&fscache_n_store_vmscan_not_storing),
52396 - atomic_read(&fscache_n_store_vmscan_gone),
52397 - atomic_read(&fscache_n_store_vmscan_busy),
52398 - atomic_read(&fscache_n_store_vmscan_cancelled),
52399 - atomic_read(&fscache_n_store_vmscan_wait));
52400 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
52401 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
52402 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
52403 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
52404 + atomic_read_unchecked(&fscache_n_store_vmscan_wait));
52405
52406 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
52407 - atomic_read(&fscache_n_op_pend),
52408 - atomic_read(&fscache_n_op_run),
52409 - atomic_read(&fscache_n_op_enqueue),
52410 - atomic_read(&fscache_n_op_cancelled),
52411 - atomic_read(&fscache_n_op_rejected));
52412 + atomic_read_unchecked(&fscache_n_op_pend),
52413 + atomic_read_unchecked(&fscache_n_op_run),
52414 + atomic_read_unchecked(&fscache_n_op_enqueue),
52415 + atomic_read_unchecked(&fscache_n_op_cancelled),
52416 + atomic_read_unchecked(&fscache_n_op_rejected));
52417 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
52418 - atomic_read(&fscache_n_op_deferred_release),
52419 - atomic_read(&fscache_n_op_release),
52420 - atomic_read(&fscache_n_op_gc));
52421 + atomic_read_unchecked(&fscache_n_op_deferred_release),
52422 + atomic_read_unchecked(&fscache_n_op_release),
52423 + atomic_read_unchecked(&fscache_n_op_gc));
52424
52425 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
52426 atomic_read(&fscache_n_cop_alloc_object),
52427 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
52428 index 6f96a8d..6019bb9 100644
52429 --- a/fs/fuse/cuse.c
52430 +++ b/fs/fuse/cuse.c
52431 @@ -597,10 +597,12 @@ static int __init cuse_init(void)
52432 INIT_LIST_HEAD(&cuse_conntbl[i]);
52433
52434 /* inherit and extend fuse_dev_operations */
52435 - cuse_channel_fops = fuse_dev_operations;
52436 - cuse_channel_fops.owner = THIS_MODULE;
52437 - cuse_channel_fops.open = cuse_channel_open;
52438 - cuse_channel_fops.release = cuse_channel_release;
52439 + pax_open_kernel();
52440 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
52441 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
52442 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
52443 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
52444 + pax_close_kernel();
52445
52446 cuse_class = class_create(THIS_MODULE, "cuse");
52447 if (IS_ERR(cuse_class))
52448 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
52449 index 11dfa0c..6f64416 100644
52450 --- a/fs/fuse/dev.c
52451 +++ b/fs/fuse/dev.c
52452 @@ -1294,7 +1294,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
52453 ret = 0;
52454 pipe_lock(pipe);
52455
52456 - if (!pipe->readers) {
52457 + if (!atomic_read(&pipe->readers)) {
52458 send_sig(SIGPIPE, current, 0);
52459 if (!ret)
52460 ret = -EPIPE;
52461 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
52462 index ff15522..092a0f6 100644
52463 --- a/fs/fuse/dir.c
52464 +++ b/fs/fuse/dir.c
52465 @@ -1409,7 +1409,7 @@ static char *read_link(struct dentry *dentry)
52466 return link;
52467 }
52468
52469 -static void free_link(char *link)
52470 +static void free_link(const char *link)
52471 {
52472 if (!IS_ERR(link))
52473 free_page((unsigned long) link);
52474 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
52475 index cc00bd1..3edb692 100644
52476 --- a/fs/gfs2/inode.c
52477 +++ b/fs/gfs2/inode.c
52478 @@ -1500,7 +1500,7 @@ out:
52479
52480 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
52481 {
52482 - char *s = nd_get_link(nd);
52483 + const char *s = nd_get_link(nd);
52484 if (!IS_ERR(s))
52485 kfree(s);
52486 }
52487 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
52488 index a3f868a..bb308ae 100644
52489 --- a/fs/hugetlbfs/inode.c
52490 +++ b/fs/hugetlbfs/inode.c
52491 @@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52492 struct mm_struct *mm = current->mm;
52493 struct vm_area_struct *vma;
52494 struct hstate *h = hstate_file(file);
52495 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
52496 struct vm_unmapped_area_info info;
52497
52498 if (len & ~huge_page_mask(h))
52499 @@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52500 return addr;
52501 }
52502
52503 +#ifdef CONFIG_PAX_RANDMMAP
52504 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
52505 +#endif
52506 +
52507 if (addr) {
52508 addr = ALIGN(addr, huge_page_size(h));
52509 vma = find_vma(mm, addr);
52510 - if (TASK_SIZE - len >= addr &&
52511 - (!vma || addr + len <= vma->vm_start))
52512 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
52513 return addr;
52514 }
52515
52516 info.flags = 0;
52517 info.length = len;
52518 info.low_limit = TASK_UNMAPPED_BASE;
52519 +
52520 +#ifdef CONFIG_PAX_RANDMMAP
52521 + if (mm->pax_flags & MF_PAX_RANDMMAP)
52522 + info.low_limit += mm->delta_mmap;
52523 +#endif
52524 +
52525 info.high_limit = TASK_SIZE;
52526 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
52527 info.align_offset = 0;
52528 @@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
52529 };
52530 MODULE_ALIAS_FS("hugetlbfs");
52531
52532 -static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52533 +struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52534
52535 static int can_do_hugetlb_shm(void)
52536 {
52537 diff --git a/fs/inode.c b/fs/inode.c
52538 index a898b3d..9b5a214 100644
52539 --- a/fs/inode.c
52540 +++ b/fs/inode.c
52541 @@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
52542
52543 #ifdef CONFIG_SMP
52544 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
52545 - static atomic_t shared_last_ino;
52546 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
52547 + static atomic_unchecked_t shared_last_ino;
52548 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
52549
52550 res = next - LAST_INO_BATCH;
52551 }
52552 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
52553 index 4a6cf28..d3a29d3 100644
52554 --- a/fs/jffs2/erase.c
52555 +++ b/fs/jffs2/erase.c
52556 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
52557 struct jffs2_unknown_node marker = {
52558 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
52559 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52560 - .totlen = cpu_to_je32(c->cleanmarker_size)
52561 + .totlen = cpu_to_je32(c->cleanmarker_size),
52562 + .hdr_crc = cpu_to_je32(0)
52563 };
52564
52565 jffs2_prealloc_raw_node_refs(c, jeb, 1);
52566 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
52567 index a6597d6..41b30ec 100644
52568 --- a/fs/jffs2/wbuf.c
52569 +++ b/fs/jffs2/wbuf.c
52570 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
52571 {
52572 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
52573 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52574 - .totlen = constant_cpu_to_je32(8)
52575 + .totlen = constant_cpu_to_je32(8),
52576 + .hdr_crc = constant_cpu_to_je32(0)
52577 };
52578
52579 /*
52580 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
52581 index 2003e83..40db287 100644
52582 --- a/fs/jfs/super.c
52583 +++ b/fs/jfs/super.c
52584 @@ -856,7 +856,7 @@ static int __init init_jfs_fs(void)
52585
52586 jfs_inode_cachep =
52587 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
52588 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
52589 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
52590 init_once);
52591 if (jfs_inode_cachep == NULL)
52592 return -ENOMEM;
52593 diff --git a/fs/libfs.c b/fs/libfs.c
52594 index 916da8c..1588998 100644
52595 --- a/fs/libfs.c
52596 +++ b/fs/libfs.c
52597 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52598
52599 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
52600 struct dentry *next;
52601 + char d_name[sizeof(next->d_iname)];
52602 + const unsigned char *name;
52603 +
52604 next = list_entry(p, struct dentry, d_u.d_child);
52605 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
52606 if (!simple_positive(next)) {
52607 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52608
52609 spin_unlock(&next->d_lock);
52610 spin_unlock(&dentry->d_lock);
52611 - if (filldir(dirent, next->d_name.name,
52612 + name = next->d_name.name;
52613 + if (name == next->d_iname) {
52614 + memcpy(d_name, name, next->d_name.len);
52615 + name = d_name;
52616 + }
52617 + if (filldir(dirent, name,
52618 next->d_name.len, filp->f_pos,
52619 next->d_inode->i_ino,
52620 dt_type(next->d_inode)) < 0)
52621 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
52622 index 9760ecb..9b838ef 100644
52623 --- a/fs/lockd/clntproc.c
52624 +++ b/fs/lockd/clntproc.c
52625 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
52626 /*
52627 * Cookie counter for NLM requests
52628 */
52629 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
52630 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
52631
52632 void nlmclnt_next_cookie(struct nlm_cookie *c)
52633 {
52634 - u32 cookie = atomic_inc_return(&nlm_cookie);
52635 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
52636
52637 memcpy(c->data, &cookie, 4);
52638 c->len=4;
52639 diff --git a/fs/locks.c b/fs/locks.c
52640 index cb424a4..850e4dd 100644
52641 --- a/fs/locks.c
52642 +++ b/fs/locks.c
52643 @@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
52644 return;
52645
52646 if (filp->f_op && filp->f_op->flock) {
52647 - struct file_lock fl = {
52648 + struct file_lock flock = {
52649 .fl_pid = current->tgid,
52650 .fl_file = filp,
52651 .fl_flags = FL_FLOCK,
52652 .fl_type = F_UNLCK,
52653 .fl_end = OFFSET_MAX,
52654 };
52655 - filp->f_op->flock(filp, F_SETLKW, &fl);
52656 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
52657 - fl.fl_ops->fl_release_private(&fl);
52658 + filp->f_op->flock(filp, F_SETLKW, &flock);
52659 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
52660 + flock.fl_ops->fl_release_private(&flock);
52661 }
52662
52663 lock_flocks();
52664 diff --git a/fs/namei.c b/fs/namei.c
52665 index 57ae9c8..b018eba 100644
52666 --- a/fs/namei.c
52667 +++ b/fs/namei.c
52668 @@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
52669 if (ret != -EACCES)
52670 return ret;
52671
52672 +#ifdef CONFIG_GRKERNSEC
52673 + /* we'll block if we have to log due to a denied capability use */
52674 + if (mask & MAY_NOT_BLOCK)
52675 + return -ECHILD;
52676 +#endif
52677 +
52678 if (S_ISDIR(inode->i_mode)) {
52679 /* DACs are overridable for directories */
52680 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
52681 - return 0;
52682 if (!(mask & MAY_WRITE))
52683 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
52684 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
52685 + inode_capable(inode, CAP_DAC_READ_SEARCH))
52686 return 0;
52687 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
52688 + return 0;
52689 return -EACCES;
52690 }
52691 /*
52692 + * Searching includes executable on directories, else just read.
52693 + */
52694 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52695 + if (mask == MAY_READ)
52696 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
52697 + inode_capable(inode, CAP_DAC_READ_SEARCH))
52698 + return 0;
52699 +
52700 + /*
52701 * Read/write DACs are always overridable.
52702 * Executable DACs are overridable when there is
52703 * at least one exec bit set.
52704 @@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
52705 if (inode_capable(inode, CAP_DAC_OVERRIDE))
52706 return 0;
52707
52708 - /*
52709 - * Searching includes executable on directories, else just read.
52710 - */
52711 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52712 - if (mask == MAY_READ)
52713 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
52714 - return 0;
52715 -
52716 return -EACCES;
52717 }
52718
52719 @@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
52720 {
52721 struct dentry *dentry = link->dentry;
52722 int error;
52723 - char *s;
52724 + const char *s;
52725
52726 BUG_ON(nd->flags & LOOKUP_RCU);
52727
52728 @@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
52729 if (error)
52730 goto out_put_nd_path;
52731
52732 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
52733 + dentry->d_inode, dentry, nd->path.mnt)) {
52734 + error = -EACCES;
52735 + goto out_put_nd_path;
52736 + }
52737 +
52738 nd->last_type = LAST_BIND;
52739 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
52740 error = PTR_ERR(*p);
52741 @@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
52742 if (res)
52743 break;
52744 res = walk_component(nd, path, LOOKUP_FOLLOW);
52745 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
52746 + res = -EACCES;
52747 put_link(nd, &link, cookie);
52748 } while (res > 0);
52749
52750 @@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
52751 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
52752 {
52753 unsigned long a, b, adata, bdata, mask, hash, len;
52754 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
52755 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
52756
52757 hash = a = 0;
52758 len = -sizeof(unsigned long);
52759 @@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
52760 if (err)
52761 break;
52762 err = lookup_last(nd, &path);
52763 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
52764 + err = -EACCES;
52765 put_link(nd, &link, cookie);
52766 }
52767 }
52768 @@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
52769 if (!err)
52770 err = complete_walk(nd);
52771
52772 + if (!err && !(nd->flags & LOOKUP_PARENT)) {
52773 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52774 + path_put(&nd->path);
52775 + err = -ENOENT;
52776 + }
52777 + }
52778 +
52779 if (!err && nd->flags & LOOKUP_DIRECTORY) {
52780 if (!nd->inode->i_op->lookup) {
52781 path_put(&nd->path);
52782 @@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
52783 retval = path_lookupat(dfd, name->name,
52784 flags | LOOKUP_REVAL, nd);
52785
52786 - if (likely(!retval))
52787 + if (likely(!retval)) {
52788 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
52789 + if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
52790 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
52791 + path_put(&nd->path);
52792 + return -ENOENT;
52793 + }
52794 + }
52795 + }
52796 return retval;
52797 }
52798
52799 @@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
52800 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
52801 return -EPERM;
52802
52803 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
52804 + return -EPERM;
52805 + if (gr_handle_rawio(inode))
52806 + return -EPERM;
52807 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
52808 + return -EACCES;
52809 +
52810 return 0;
52811 }
52812
52813 @@ -2602,7 +2641,7 @@ looked_up:
52814 * cleared otherwise prior to returning.
52815 */
52816 static int lookup_open(struct nameidata *nd, struct path *path,
52817 - struct file *file,
52818 + struct path *link, struct file *file,
52819 const struct open_flags *op,
52820 bool got_write, int *opened)
52821 {
52822 @@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
52823 /* Negative dentry, just create the file */
52824 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
52825 umode_t mode = op->mode;
52826 +
52827 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
52828 + error = -EACCES;
52829 + goto out_dput;
52830 + }
52831 +
52832 + if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
52833 + error = -EACCES;
52834 + goto out_dput;
52835 + }
52836 +
52837 if (!IS_POSIXACL(dir->d_inode))
52838 mode &= ~current_umask();
52839 /*
52840 @@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
52841 nd->flags & LOOKUP_EXCL);
52842 if (error)
52843 goto out_dput;
52844 + else
52845 + gr_handle_create(dentry, nd->path.mnt);
52846 }
52847 out_no_open:
52848 path->dentry = dentry;
52849 @@ -2672,7 +2724,7 @@ out_dput:
52850 /*
52851 * Handle the last step of open()
52852 */
52853 -static int do_last(struct nameidata *nd, struct path *path,
52854 +static int do_last(struct nameidata *nd, struct path *path, struct path *link,
52855 struct file *file, const struct open_flags *op,
52856 int *opened, struct filename *name)
52857 {
52858 @@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
52859 error = complete_walk(nd);
52860 if (error)
52861 return error;
52862 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52863 + error = -ENOENT;
52864 + goto out;
52865 + }
52866 audit_inode(name, nd->path.dentry, 0);
52867 if (open_flag & O_CREAT) {
52868 error = -EISDIR;
52869 goto out;
52870 }
52871 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
52872 + error = -EACCES;
52873 + goto out;
52874 + }
52875 goto finish_open;
52876 case LAST_BIND:
52877 error = complete_walk(nd);
52878 if (error)
52879 return error;
52880 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
52881 + error = -ENOENT;
52882 + goto out;
52883 + }
52884 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
52885 + error = -EACCES;
52886 + goto out;
52887 + }
52888 audit_inode(name, dir, 0);
52889 goto finish_open;
52890 }
52891 @@ -2759,7 +2827,7 @@ retry_lookup:
52892 */
52893 }
52894 mutex_lock(&dir->d_inode->i_mutex);
52895 - error = lookup_open(nd, path, file, op, got_write, opened);
52896 + error = lookup_open(nd, path, link, file, op, got_write, opened);
52897 mutex_unlock(&dir->d_inode->i_mutex);
52898
52899 if (error <= 0) {
52900 @@ -2783,11 +2851,28 @@ retry_lookup:
52901 goto finish_open_created;
52902 }
52903
52904 + if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
52905 + error = -ENOENT;
52906 + goto exit_dput;
52907 + }
52908 + if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
52909 + error = -EACCES;
52910 + goto exit_dput;
52911 + }
52912 +
52913 /*
52914 * create/update audit record if it already exists.
52915 */
52916 - if (path->dentry->d_inode)
52917 + if (path->dentry->d_inode) {
52918 + /* only check if O_CREAT is specified, all other checks need to go
52919 + into may_open */
52920 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
52921 + error = -EACCES;
52922 + goto exit_dput;
52923 + }
52924 +
52925 audit_inode(name, path->dentry, 0);
52926 + }
52927
52928 /*
52929 * If atomic_open() acquired write access it is dropped now due to
52930 @@ -2828,6 +2913,11 @@ finish_lookup:
52931 }
52932 }
52933 BUG_ON(inode != path->dentry->d_inode);
52934 + /* if we're resolving a symlink to another symlink */
52935 + if (link && gr_handle_symlink_owner(link, inode)) {
52936 + error = -EACCES;
52937 + goto out;
52938 + }
52939 return 1;
52940 }
52941
52942 @@ -2837,7 +2927,6 @@ finish_lookup:
52943 save_parent.dentry = nd->path.dentry;
52944 save_parent.mnt = mntget(path->mnt);
52945 nd->path.dentry = path->dentry;
52946 -
52947 }
52948 nd->inode = inode;
52949 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
52950 @@ -2846,6 +2935,16 @@ finish_lookup:
52951 path_put(&save_parent);
52952 return error;
52953 }
52954 +
52955 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52956 + error = -ENOENT;
52957 + goto out;
52958 + }
52959 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
52960 + error = -EACCES;
52961 + goto out;
52962 + }
52963 +
52964 error = -EISDIR;
52965 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
52966 goto out;
52967 @@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
52968 if (unlikely(error))
52969 goto out;
52970
52971 - error = do_last(nd, &path, file, op, &opened, pathname);
52972 + error = do_last(nd, &path, NULL, file, op, &opened, pathname);
52973 while (unlikely(error > 0)) { /* trailing symlink */
52974 struct path link = path;
52975 void *cookie;
52976 @@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
52977 error = follow_link(&link, nd, &cookie);
52978 if (unlikely(error))
52979 break;
52980 - error = do_last(nd, &path, file, op, &opened, pathname);
52981 + error = do_last(nd, &path, &link, file, op, &opened, pathname);
52982 put_link(nd, &link, cookie);
52983 }
52984 out:
52985 @@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
52986 goto unlock;
52987
52988 error = -EEXIST;
52989 - if (dentry->d_inode)
52990 + if (dentry->d_inode) {
52991 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
52992 + error = -ENOENT;
52993 + }
52994 goto fail;
52995 + }
52996 /*
52997 * Special case - lookup gave negative, but... we had foo/bar/
52998 * From the vfs_mknod() POV we just have a negative dentry -
52999 @@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53000 }
53001 EXPORT_SYMBOL(user_path_create);
53002
53003 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53004 +{
53005 + struct filename *tmp = getname(pathname);
53006 + struct dentry *res;
53007 + if (IS_ERR(tmp))
53008 + return ERR_CAST(tmp);
53009 + res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53010 + if (IS_ERR(res))
53011 + putname(tmp);
53012 + else
53013 + *to = tmp;
53014 + return res;
53015 +}
53016 +
53017 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53018 {
53019 int error = may_create(dir, dentry);
53020 @@ -3177,6 +3294,17 @@ retry:
53021
53022 if (!IS_POSIXACL(path.dentry->d_inode))
53023 mode &= ~current_umask();
53024 +
53025 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53026 + error = -EPERM;
53027 + goto out;
53028 + }
53029 +
53030 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53031 + error = -EACCES;
53032 + goto out;
53033 + }
53034 +
53035 error = security_path_mknod(&path, dentry, mode, dev);
53036 if (error)
53037 goto out;
53038 @@ -3193,6 +3321,8 @@ retry:
53039 break;
53040 }
53041 out:
53042 + if (!error)
53043 + gr_handle_create(dentry, path.mnt);
53044 done_path_create(&path, dentry);
53045 if (retry_estale(error, lookup_flags)) {
53046 lookup_flags |= LOOKUP_REVAL;
53047 @@ -3245,9 +3375,16 @@ retry:
53048
53049 if (!IS_POSIXACL(path.dentry->d_inode))
53050 mode &= ~current_umask();
53051 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53052 + error = -EACCES;
53053 + goto out;
53054 + }
53055 error = security_path_mkdir(&path, dentry, mode);
53056 if (!error)
53057 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53058 + if (!error)
53059 + gr_handle_create(dentry, path.mnt);
53060 +out:
53061 done_path_create(&path, dentry);
53062 if (retry_estale(error, lookup_flags)) {
53063 lookup_flags |= LOOKUP_REVAL;
53064 @@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53065 struct filename *name;
53066 struct dentry *dentry;
53067 struct nameidata nd;
53068 + ino_t saved_ino = 0;
53069 + dev_t saved_dev = 0;
53070 unsigned int lookup_flags = 0;
53071 retry:
53072 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53073 @@ -3360,10 +3499,21 @@ retry:
53074 error = -ENOENT;
53075 goto exit3;
53076 }
53077 +
53078 + saved_ino = dentry->d_inode->i_ino;
53079 + saved_dev = gr_get_dev_from_dentry(dentry);
53080 +
53081 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53082 + error = -EACCES;
53083 + goto exit3;
53084 + }
53085 +
53086 error = security_path_rmdir(&nd.path, dentry);
53087 if (error)
53088 goto exit3;
53089 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53090 + if (!error && (saved_dev || saved_ino))
53091 + gr_handle_delete(saved_ino, saved_dev);
53092 exit3:
53093 dput(dentry);
53094 exit2:
53095 @@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53096 struct dentry *dentry;
53097 struct nameidata nd;
53098 struct inode *inode = NULL;
53099 + ino_t saved_ino = 0;
53100 + dev_t saved_dev = 0;
53101 unsigned int lookup_flags = 0;
53102 retry:
53103 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53104 @@ -3455,10 +3607,22 @@ retry:
53105 if (!inode)
53106 goto slashes;
53107 ihold(inode);
53108 +
53109 + if (inode->i_nlink <= 1) {
53110 + saved_ino = inode->i_ino;
53111 + saved_dev = gr_get_dev_from_dentry(dentry);
53112 + }
53113 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53114 + error = -EACCES;
53115 + goto exit2;
53116 + }
53117 +
53118 error = security_path_unlink(&nd.path, dentry);
53119 if (error)
53120 goto exit2;
53121 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53122 + if (!error && (saved_ino || saved_dev))
53123 + gr_handle_delete(saved_ino, saved_dev);
53124 exit2:
53125 dput(dentry);
53126 }
53127 @@ -3536,9 +3700,17 @@ retry:
53128 if (IS_ERR(dentry))
53129 goto out_putname;
53130
53131 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53132 + error = -EACCES;
53133 + goto out;
53134 + }
53135 +
53136 error = security_path_symlink(&path, dentry, from->name);
53137 if (!error)
53138 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53139 + if (!error)
53140 + gr_handle_create(dentry, path.mnt);
53141 +out:
53142 done_path_create(&path, dentry);
53143 if (retry_estale(error, lookup_flags)) {
53144 lookup_flags |= LOOKUP_REVAL;
53145 @@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
53146 {
53147 struct dentry *new_dentry;
53148 struct path old_path, new_path;
53149 + struct filename *to = NULL;
53150 int how = 0;
53151 int error;
53152
53153 @@ -3635,7 +3808,7 @@ retry:
53154 if (error)
53155 return error;
53156
53157 - new_dentry = user_path_create(newdfd, newname, &new_path,
53158 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
53159 (how & LOOKUP_REVAL));
53160 error = PTR_ERR(new_dentry);
53161 if (IS_ERR(new_dentry))
53162 @@ -3647,11 +3820,28 @@ retry:
53163 error = may_linkat(&old_path);
53164 if (unlikely(error))
53165 goto out_dput;
53166 +
53167 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
53168 + old_path.dentry->d_inode,
53169 + old_path.dentry->d_inode->i_mode, to)) {
53170 + error = -EACCES;
53171 + goto out_dput;
53172 + }
53173 +
53174 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
53175 + old_path.dentry, old_path.mnt, to)) {
53176 + error = -EACCES;
53177 + goto out_dput;
53178 + }
53179 +
53180 error = security_path_link(old_path.dentry, &new_path, new_dentry);
53181 if (error)
53182 goto out_dput;
53183 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
53184 + if (!error)
53185 + gr_handle_create(new_dentry, new_path.mnt);
53186 out_dput:
53187 + putname(to);
53188 done_path_create(&new_path, new_dentry);
53189 if (retry_estale(error, how)) {
53190 how |= LOOKUP_REVAL;
53191 @@ -3897,12 +4087,21 @@ retry:
53192 if (new_dentry == trap)
53193 goto exit5;
53194
53195 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
53196 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
53197 + to);
53198 + if (error)
53199 + goto exit5;
53200 +
53201 error = security_path_rename(&oldnd.path, old_dentry,
53202 &newnd.path, new_dentry);
53203 if (error)
53204 goto exit5;
53205 error = vfs_rename(old_dir->d_inode, old_dentry,
53206 new_dir->d_inode, new_dentry);
53207 + if (!error)
53208 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
53209 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
53210 exit5:
53211 dput(new_dentry);
53212 exit4:
53213 @@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
53214
53215 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
53216 {
53217 + char tmpbuf[64];
53218 + const char *newlink;
53219 int len;
53220
53221 len = PTR_ERR(link);
53222 @@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
53223 len = strlen(link);
53224 if (len > (unsigned) buflen)
53225 len = buflen;
53226 - if (copy_to_user(buffer, link, len))
53227 +
53228 + if (len < sizeof(tmpbuf)) {
53229 + memcpy(tmpbuf, link, len);
53230 + newlink = tmpbuf;
53231 + } else
53232 + newlink = link;
53233 +
53234 + if (copy_to_user(buffer, newlink, len))
53235 len = -EFAULT;
53236 out:
53237 return len;
53238 diff --git a/fs/namespace.c b/fs/namespace.c
53239 index e945b81..1dd8104 100644
53240 --- a/fs/namespace.c
53241 +++ b/fs/namespace.c
53242 @@ -1219,6 +1219,9 @@ static int do_umount(struct mount *mnt, int flags)
53243 if (!(sb->s_flags & MS_RDONLY))
53244 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
53245 up_write(&sb->s_umount);
53246 +
53247 + gr_log_remount(mnt->mnt_devname, retval);
53248 +
53249 return retval;
53250 }
53251
53252 @@ -1238,6 +1241,9 @@ static int do_umount(struct mount *mnt, int flags)
53253 br_write_unlock(&vfsmount_lock);
53254 up_write(&namespace_sem);
53255 release_mounts(&umount_list);
53256 +
53257 + gr_log_unmount(mnt->mnt_devname, retval);
53258 +
53259 return retval;
53260 }
53261
53262 @@ -2267,6 +2273,16 @@ long do_mount(const char *dev_name, const char *dir_name,
53263 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
53264 MS_STRICTATIME);
53265
53266 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
53267 + retval = -EPERM;
53268 + goto dput_out;
53269 + }
53270 +
53271 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
53272 + retval = -EPERM;
53273 + goto dput_out;
53274 + }
53275 +
53276 if (flags & MS_REMOUNT)
53277 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
53278 data_page);
53279 @@ -2281,6 +2297,9 @@ long do_mount(const char *dev_name, const char *dir_name,
53280 dev_name, data_page);
53281 dput_out:
53282 path_put(&path);
53283 +
53284 + gr_log_mount(dev_name, dir_name, retval);
53285 +
53286 return retval;
53287 }
53288
53289 @@ -2567,6 +2586,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
53290 if (error)
53291 goto out2;
53292
53293 + if (gr_handle_chroot_pivot()) {
53294 + error = -EPERM;
53295 + goto out2;
53296 + }
53297 +
53298 get_fs_root(current->fs, &root);
53299 error = lock_mount(&old);
53300 if (error)
53301 @@ -2815,7 +2839,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
53302 !nsown_capable(CAP_SYS_ADMIN))
53303 return -EPERM;
53304
53305 - if (fs->users != 1)
53306 + if (atomic_read(&fs->users) != 1)
53307 return -EINVAL;
53308
53309 get_mnt_ns(mnt_ns);
53310 diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
53311 index 59461c9..b17c57e 100644
53312 --- a/fs/nfs/callback_xdr.c
53313 +++ b/fs/nfs/callback_xdr.c
53314 @@ -51,7 +51,7 @@ struct callback_op {
53315 callback_decode_arg_t decode_args;
53316 callback_encode_res_t encode_res;
53317 long res_maxsize;
53318 -};
53319 +} __do_const;
53320
53321 static struct callback_op callback_ops[];
53322
53323 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
53324 index 1f94167..79c4ce4 100644
53325 --- a/fs/nfs/inode.c
53326 +++ b/fs/nfs/inode.c
53327 @@ -1041,16 +1041,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53328 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53329 }
53330
53331 -static atomic_long_t nfs_attr_generation_counter;
53332 +static atomic_long_unchecked_t nfs_attr_generation_counter;
53333
53334 static unsigned long nfs_read_attr_generation_counter(void)
53335 {
53336 - return atomic_long_read(&nfs_attr_generation_counter);
53337 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53338 }
53339
53340 unsigned long nfs_inc_attr_generation_counter(void)
53341 {
53342 - return atomic_long_inc_return(&nfs_attr_generation_counter);
53343 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
53344 }
53345
53346 void nfs_fattr_init(struct nfs_fattr *fattr)
53347 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
53348 index 8288b08..4a140d4 100644
53349 --- a/fs/nfsd/nfs4proc.c
53350 +++ b/fs/nfsd/nfs4proc.c
53351 @@ -1098,7 +1098,7 @@ struct nfsd4_operation {
53352 nfsd4op_rsize op_rsize_bop;
53353 stateid_getter op_get_currentstateid;
53354 stateid_setter op_set_currentstateid;
53355 -};
53356 +} __do_const;
53357
53358 static struct nfsd4_operation nfsd4_ops[];
53359
53360 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
53361 index 6eb0dc5..29067a9 100644
53362 --- a/fs/nfsd/nfs4xdr.c
53363 +++ b/fs/nfsd/nfs4xdr.c
53364 @@ -1457,7 +1457,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
53365
53366 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
53367
53368 -static nfsd4_dec nfsd4_dec_ops[] = {
53369 +static const nfsd4_dec nfsd4_dec_ops[] = {
53370 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53371 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53372 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53373 @@ -1497,7 +1497,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
53374 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
53375 };
53376
53377 -static nfsd4_dec nfsd41_dec_ops[] = {
53378 +static const nfsd4_dec nfsd41_dec_ops[] = {
53379 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53380 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53381 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53382 @@ -1559,7 +1559,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
53383 };
53384
53385 struct nfsd4_minorversion_ops {
53386 - nfsd4_dec *decoders;
53387 + const nfsd4_dec *decoders;
53388 int nops;
53389 };
53390
53391 diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
53392 index ca05f6d..411a576 100644
53393 --- a/fs/nfsd/nfscache.c
53394 +++ b/fs/nfsd/nfscache.c
53395 @@ -461,13 +461,15 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
53396 {
53397 struct svc_cacherep *rp = rqstp->rq_cacherep;
53398 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
53399 - int len;
53400 + long len;
53401
53402 if (!rp)
53403 return;
53404
53405 - len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
53406 - len >>= 2;
53407 + if (statp) {
53408 + len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
53409 + len >>= 2;
53410 + }
53411
53412 /* Don't cache excessive amounts of data and XDR failures */
53413 if (!statp || len > (256 >> 2)) {
53414 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
53415 index 2b2e239..c915b48 100644
53416 --- a/fs/nfsd/vfs.c
53417 +++ b/fs/nfsd/vfs.c
53418 @@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53419 } else {
53420 oldfs = get_fs();
53421 set_fs(KERNEL_DS);
53422 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
53423 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
53424 set_fs(oldfs);
53425 }
53426
53427 @@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53428
53429 /* Write the data. */
53430 oldfs = get_fs(); set_fs(KERNEL_DS);
53431 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
53432 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
53433 set_fs(oldfs);
53434 if (host_err < 0)
53435 goto out_nfserr;
53436 @@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
53437 */
53438
53439 oldfs = get_fs(); set_fs(KERNEL_DS);
53440 - host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
53441 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
53442 set_fs(oldfs);
53443
53444 if (host_err < 0)
53445 diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
53446 index fea6bd5..8ee9d81 100644
53447 --- a/fs/nls/nls_base.c
53448 +++ b/fs/nls/nls_base.c
53449 @@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
53450
53451 int register_nls(struct nls_table * nls)
53452 {
53453 - struct nls_table ** tmp = &tables;
53454 + struct nls_table *tmp = tables;
53455
53456 if (nls->next)
53457 return -EBUSY;
53458
53459 spin_lock(&nls_lock);
53460 - while (*tmp) {
53461 - if (nls == *tmp) {
53462 + while (tmp) {
53463 + if (nls == tmp) {
53464 spin_unlock(&nls_lock);
53465 return -EBUSY;
53466 }
53467 - tmp = &(*tmp)->next;
53468 + tmp = tmp->next;
53469 }
53470 - nls->next = tables;
53471 + pax_open_kernel();
53472 + *(struct nls_table **)&nls->next = tables;
53473 + pax_close_kernel();
53474 tables = nls;
53475 spin_unlock(&nls_lock);
53476 return 0;
53477 @@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
53478
53479 int unregister_nls(struct nls_table * nls)
53480 {
53481 - struct nls_table ** tmp = &tables;
53482 + struct nls_table * const * tmp = &tables;
53483
53484 spin_lock(&nls_lock);
53485 while (*tmp) {
53486 if (nls == *tmp) {
53487 - *tmp = nls->next;
53488 + pax_open_kernel();
53489 + *(struct nls_table **)tmp = nls->next;
53490 + pax_close_kernel();
53491 spin_unlock(&nls_lock);
53492 return 0;
53493 }
53494 diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
53495 index 7424929..35f6be5 100644
53496 --- a/fs/nls/nls_euc-jp.c
53497 +++ b/fs/nls/nls_euc-jp.c
53498 @@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
53499 p_nls = load_nls("cp932");
53500
53501 if (p_nls) {
53502 - table.charset2upper = p_nls->charset2upper;
53503 - table.charset2lower = p_nls->charset2lower;
53504 + pax_open_kernel();
53505 + *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53506 + *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53507 + pax_close_kernel();
53508 return register_nls(&table);
53509 }
53510
53511 diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
53512 index e7bc1d7..06bd4bb 100644
53513 --- a/fs/nls/nls_koi8-ru.c
53514 +++ b/fs/nls/nls_koi8-ru.c
53515 @@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
53516 p_nls = load_nls("koi8-u");
53517
53518 if (p_nls) {
53519 - table.charset2upper = p_nls->charset2upper;
53520 - table.charset2lower = p_nls->charset2lower;
53521 + pax_open_kernel();
53522 + *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53523 + *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53524 + pax_close_kernel();
53525 return register_nls(&table);
53526 }
53527
53528 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
53529 index 5d84442..bf24453 100644
53530 --- a/fs/notify/fanotify/fanotify_user.c
53531 +++ b/fs/notify/fanotify/fanotify_user.c
53532 @@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
53533
53534 fd = fanotify_event_metadata.fd;
53535 ret = -EFAULT;
53536 - if (copy_to_user(buf, &fanotify_event_metadata,
53537 - fanotify_event_metadata.event_len))
53538 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
53539 + copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
53540 goto out_close_fd;
53541
53542 ret = prepare_for_access_response(group, event, fd);
53543 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
53544 index 7b51b05..5ea5ef6 100644
53545 --- a/fs/notify/notification.c
53546 +++ b/fs/notify/notification.c
53547 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
53548 * get set to 0 so it will never get 'freed'
53549 */
53550 static struct fsnotify_event *q_overflow_event;
53551 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53552 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53553
53554 /**
53555 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
53556 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53557 */
53558 u32 fsnotify_get_cookie(void)
53559 {
53560 - return atomic_inc_return(&fsnotify_sync_cookie);
53561 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
53562 }
53563 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
53564
53565 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
53566 index aa411c3..c260a84 100644
53567 --- a/fs/ntfs/dir.c
53568 +++ b/fs/ntfs/dir.c
53569 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
53570 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
53571 ~(s64)(ndir->itype.index.block_size - 1)));
53572 /* Bounds checks. */
53573 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53574 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53575 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
53576 "inode 0x%lx or driver bug.", vdir->i_ino);
53577 goto err_out;
53578 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
53579 index 5b2d4f0..c6de396 100644
53580 --- a/fs/ntfs/file.c
53581 +++ b/fs/ntfs/file.c
53582 @@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
53583 #endif /* NTFS_RW */
53584 };
53585
53586 -const struct file_operations ntfs_empty_file_ops = {};
53587 +const struct file_operations ntfs_empty_file_ops __read_only;
53588
53589 -const struct inode_operations ntfs_empty_inode_ops = {};
53590 +const struct inode_operations ntfs_empty_inode_ops __read_only;
53591 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
53592 index aebeacd..0dcdd26 100644
53593 --- a/fs/ocfs2/localalloc.c
53594 +++ b/fs/ocfs2/localalloc.c
53595 @@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
53596 goto bail;
53597 }
53598
53599 - atomic_inc(&osb->alloc_stats.moves);
53600 + atomic_inc_unchecked(&osb->alloc_stats.moves);
53601
53602 bail:
53603 if (handle)
53604 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
53605 index d355e6e..578d905 100644
53606 --- a/fs/ocfs2/ocfs2.h
53607 +++ b/fs/ocfs2/ocfs2.h
53608 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
53609
53610 struct ocfs2_alloc_stats
53611 {
53612 - atomic_t moves;
53613 - atomic_t local_data;
53614 - atomic_t bitmap_data;
53615 - atomic_t bg_allocs;
53616 - atomic_t bg_extends;
53617 + atomic_unchecked_t moves;
53618 + atomic_unchecked_t local_data;
53619 + atomic_unchecked_t bitmap_data;
53620 + atomic_unchecked_t bg_allocs;
53621 + atomic_unchecked_t bg_extends;
53622 };
53623
53624 enum ocfs2_local_alloc_state
53625 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
53626 index b7e74b5..19c6536 100644
53627 --- a/fs/ocfs2/suballoc.c
53628 +++ b/fs/ocfs2/suballoc.c
53629 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
53630 mlog_errno(status);
53631 goto bail;
53632 }
53633 - atomic_inc(&osb->alloc_stats.bg_extends);
53634 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
53635
53636 /* You should never ask for this much metadata */
53637 BUG_ON(bits_wanted >
53638 @@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
53639 mlog_errno(status);
53640 goto bail;
53641 }
53642 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53643 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53644
53645 *suballoc_loc = res.sr_bg_blkno;
53646 *suballoc_bit_start = res.sr_bit_offset;
53647 @@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
53648 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
53649 res->sr_bits);
53650
53651 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53652 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53653
53654 BUG_ON(res->sr_bits != 1);
53655
53656 @@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
53657 mlog_errno(status);
53658 goto bail;
53659 }
53660 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53661 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53662
53663 BUG_ON(res.sr_bits != 1);
53664
53665 @@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53666 cluster_start,
53667 num_clusters);
53668 if (!status)
53669 - atomic_inc(&osb->alloc_stats.local_data);
53670 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
53671 } else {
53672 if (min_clusters > (osb->bitmap_cpg - 1)) {
53673 /* The only paths asking for contiguousness
53674 @@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53675 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
53676 res.sr_bg_blkno,
53677 res.sr_bit_offset);
53678 - atomic_inc(&osb->alloc_stats.bitmap_data);
53679 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
53680 *num_clusters = res.sr_bits;
53681 }
53682 }
53683 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
53684 index 01b8516..579c4df 100644
53685 --- a/fs/ocfs2/super.c
53686 +++ b/fs/ocfs2/super.c
53687 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
53688 "%10s => GlobalAllocs: %d LocalAllocs: %d "
53689 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
53690 "Stats",
53691 - atomic_read(&osb->alloc_stats.bitmap_data),
53692 - atomic_read(&osb->alloc_stats.local_data),
53693 - atomic_read(&osb->alloc_stats.bg_allocs),
53694 - atomic_read(&osb->alloc_stats.moves),
53695 - atomic_read(&osb->alloc_stats.bg_extends));
53696 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
53697 + atomic_read_unchecked(&osb->alloc_stats.local_data),
53698 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
53699 + atomic_read_unchecked(&osb->alloc_stats.moves),
53700 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
53701
53702 out += snprintf(buf + out, len - out,
53703 "%10s => State: %u Descriptor: %llu Size: %u bits "
53704 @@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
53705 spin_lock_init(&osb->osb_xattr_lock);
53706 ocfs2_init_steal_slots(osb);
53707
53708 - atomic_set(&osb->alloc_stats.moves, 0);
53709 - atomic_set(&osb->alloc_stats.local_data, 0);
53710 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
53711 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
53712 - atomic_set(&osb->alloc_stats.bg_extends, 0);
53713 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
53714 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
53715 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
53716 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
53717 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
53718
53719 /* Copy the blockcheck stats from the superblock probe */
53720 osb->osb_ecc_stats = *stats;
53721 diff --git a/fs/open.c b/fs/open.c
53722 index 6835446..eadf09f 100644
53723 --- a/fs/open.c
53724 +++ b/fs/open.c
53725 @@ -32,6 +32,8 @@
53726 #include <linux/dnotify.h>
53727 #include <linux/compat.h>
53728
53729 +#define CREATE_TRACE_POINTS
53730 +#include <trace/events/fs.h>
53731 #include "internal.h"
53732
53733 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
53734 @@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
53735 error = locks_verify_truncate(inode, NULL, length);
53736 if (!error)
53737 error = security_path_truncate(path);
53738 + if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
53739 + error = -EACCES;
53740 if (!error)
53741 error = do_truncate(path->dentry, length, 0, NULL);
53742
53743 @@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
53744 error = locks_verify_truncate(inode, f.file, length);
53745 if (!error)
53746 error = security_path_truncate(&f.file->f_path);
53747 + if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
53748 + error = -EACCES;
53749 if (!error)
53750 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
53751 sb_end_write(inode->i_sb);
53752 @@ -388,6 +394,9 @@ retry:
53753 if (__mnt_is_readonly(path.mnt))
53754 res = -EROFS;
53755
53756 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
53757 + res = -EACCES;
53758 +
53759 out_path_release:
53760 path_put(&path);
53761 if (retry_estale(res, lookup_flags)) {
53762 @@ -419,6 +428,8 @@ retry:
53763 if (error)
53764 goto dput_and_out;
53765
53766 + gr_log_chdir(path.dentry, path.mnt);
53767 +
53768 set_fs_pwd(current->fs, &path);
53769
53770 dput_and_out:
53771 @@ -448,6 +459,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
53772 goto out_putf;
53773
53774 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
53775 +
53776 + if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
53777 + error = -EPERM;
53778 +
53779 + if (!error)
53780 + gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
53781 +
53782 if (!error)
53783 set_fs_pwd(current->fs, &f.file->f_path);
53784 out_putf:
53785 @@ -477,7 +495,13 @@ retry:
53786 if (error)
53787 goto dput_and_out;
53788
53789 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
53790 + goto dput_and_out;
53791 +
53792 set_fs_root(current->fs, &path);
53793 +
53794 + gr_handle_chroot_chdir(&path);
53795 +
53796 error = 0;
53797 dput_and_out:
53798 path_put(&path);
53799 @@ -499,6 +523,16 @@ static int chmod_common(struct path *path, umode_t mode)
53800 if (error)
53801 return error;
53802 mutex_lock(&inode->i_mutex);
53803 +
53804 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
53805 + error = -EACCES;
53806 + goto out_unlock;
53807 + }
53808 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
53809 + error = -EACCES;
53810 + goto out_unlock;
53811 + }
53812 +
53813 error = security_path_chmod(path, mode);
53814 if (error)
53815 goto out_unlock;
53816 @@ -559,6 +593,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
53817 uid = make_kuid(current_user_ns(), user);
53818 gid = make_kgid(current_user_ns(), group);
53819
53820 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
53821 + return -EACCES;
53822 +
53823 newattrs.ia_valid = ATTR_CTIME;
53824 if (user != (uid_t) -1) {
53825 if (!uid_valid(uid))
53826 @@ -974,6 +1011,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
53827 } else {
53828 fsnotify_open(f);
53829 fd_install(fd, f);
53830 + trace_do_sys_open(tmp->name, flags, mode);
53831 }
53832 }
53833 putname(tmp);
53834 diff --git a/fs/pipe.c b/fs/pipe.c
53835 index 2234f3f..f9083a1 100644
53836 --- a/fs/pipe.c
53837 +++ b/fs/pipe.c
53838 @@ -438,9 +438,9 @@ redo:
53839 }
53840 if (bufs) /* More to do? */
53841 continue;
53842 - if (!pipe->writers)
53843 + if (!atomic_read(&pipe->writers))
53844 break;
53845 - if (!pipe->waiting_writers) {
53846 + if (!atomic_read(&pipe->waiting_writers)) {
53847 /* syscall merging: Usually we must not sleep
53848 * if O_NONBLOCK is set, or if we got some data.
53849 * But if a writer sleeps in kernel space, then
53850 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53851 mutex_lock(&inode->i_mutex);
53852 pipe = inode->i_pipe;
53853
53854 - if (!pipe->readers) {
53855 + if (!atomic_read(&pipe->readers)) {
53856 send_sig(SIGPIPE, current, 0);
53857 ret = -EPIPE;
53858 goto out;
53859 @@ -553,7 +553,7 @@ redo1:
53860 for (;;) {
53861 int bufs;
53862
53863 - if (!pipe->readers) {
53864 + if (!atomic_read(&pipe->readers)) {
53865 send_sig(SIGPIPE, current, 0);
53866 if (!ret)
53867 ret = -EPIPE;
53868 @@ -644,9 +644,9 @@ redo2:
53869 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53870 do_wakeup = 0;
53871 }
53872 - pipe->waiting_writers++;
53873 + atomic_inc(&pipe->waiting_writers);
53874 pipe_wait(pipe);
53875 - pipe->waiting_writers--;
53876 + atomic_dec(&pipe->waiting_writers);
53877 }
53878 out:
53879 mutex_unlock(&inode->i_mutex);
53880 @@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53881 mask = 0;
53882 if (filp->f_mode & FMODE_READ) {
53883 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53884 - if (!pipe->writers && filp->f_version != pipe->w_counter)
53885 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53886 mask |= POLLHUP;
53887 }
53888
53889 @@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53890 * Most Unices do not set POLLERR for FIFOs but on Linux they
53891 * behave exactly like pipes for poll().
53892 */
53893 - if (!pipe->readers)
53894 + if (!atomic_read(&pipe->readers))
53895 mask |= POLLERR;
53896 }
53897
53898 @@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53899
53900 mutex_lock(&inode->i_mutex);
53901 pipe = inode->i_pipe;
53902 - pipe->readers -= decr;
53903 - pipe->writers -= decw;
53904 + atomic_sub(decr, &pipe->readers);
53905 + atomic_sub(decw, &pipe->writers);
53906
53907 - if (!pipe->readers && !pipe->writers) {
53908 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53909 free_pipe_info(inode);
53910 } else {
53911 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
53912 @@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53913
53914 if (inode->i_pipe) {
53915 ret = 0;
53916 - inode->i_pipe->readers++;
53917 + atomic_inc(&inode->i_pipe->readers);
53918 }
53919
53920 mutex_unlock(&inode->i_mutex);
53921 @@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53922
53923 if (inode->i_pipe) {
53924 ret = 0;
53925 - inode->i_pipe->writers++;
53926 + atomic_inc(&inode->i_pipe->writers);
53927 }
53928
53929 mutex_unlock(&inode->i_mutex);
53930 @@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53931 if (inode->i_pipe) {
53932 ret = 0;
53933 if (filp->f_mode & FMODE_READ)
53934 - inode->i_pipe->readers++;
53935 + atomic_inc(&inode->i_pipe->readers);
53936 if (filp->f_mode & FMODE_WRITE)
53937 - inode->i_pipe->writers++;
53938 + atomic_inc(&inode->i_pipe->writers);
53939 }
53940
53941 mutex_unlock(&inode->i_mutex);
53942 @@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
53943 inode->i_pipe = NULL;
53944 }
53945
53946 -static struct vfsmount *pipe_mnt __read_mostly;
53947 +struct vfsmount *pipe_mnt __read_mostly;
53948
53949 /*
53950 * pipefs_dname() is called from d_path().
53951 @@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
53952 goto fail_iput;
53953 inode->i_pipe = pipe;
53954
53955 - pipe->readers = pipe->writers = 1;
53956 + atomic_set(&pipe->readers, 1);
53957 + atomic_set(&pipe->writers, 1);
53958 inode->i_fop = &rdwr_pipefifo_fops;
53959
53960 /*
53961 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53962 index 15af622..0e9f4467 100644
53963 --- a/fs/proc/Kconfig
53964 +++ b/fs/proc/Kconfig
53965 @@ -30,12 +30,12 @@ config PROC_FS
53966
53967 config PROC_KCORE
53968 bool "/proc/kcore support" if !ARM
53969 - depends on PROC_FS && MMU
53970 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53971
53972 config PROC_VMCORE
53973 bool "/proc/vmcore support"
53974 - depends on PROC_FS && CRASH_DUMP
53975 - default y
53976 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53977 + default n
53978 help
53979 Exports the dump image of crashed kernel in ELF format.
53980
53981 @@ -59,8 +59,8 @@ config PROC_SYSCTL
53982 limited in memory.
53983
53984 config PROC_PAGE_MONITOR
53985 - default y
53986 - depends on PROC_FS && MMU
53987 + default n
53988 + depends on PROC_FS && MMU && !GRKERNSEC
53989 bool "Enable /proc page monitoring" if EXPERT
53990 help
53991 Various /proc files exist to monitor process memory utilization:
53992 diff --git a/fs/proc/array.c b/fs/proc/array.c
53993 index cbd0f1b..adec3f0 100644
53994 --- a/fs/proc/array.c
53995 +++ b/fs/proc/array.c
53996 @@ -60,6 +60,7 @@
53997 #include <linux/tty.h>
53998 #include <linux/string.h>
53999 #include <linux/mman.h>
54000 +#include <linux/grsecurity.h>
54001 #include <linux/proc_fs.h>
54002 #include <linux/ioport.h>
54003 #include <linux/uaccess.h>
54004 @@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
54005 seq_putc(m, '\n');
54006 }
54007
54008 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54009 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
54010 +{
54011 + if (p->mm)
54012 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
54013 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
54014 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
54015 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
54016 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
54017 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
54018 + else
54019 + seq_printf(m, "PaX:\t-----\n");
54020 +}
54021 +#endif
54022 +
54023 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54024 struct pid *pid, struct task_struct *task)
54025 {
54026 @@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54027 task_cpus_allowed(m, task);
54028 cpuset_task_status_allowed(m, task);
54029 task_context_switch_counts(m, task);
54030 +
54031 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54032 + task_pax(m, task);
54033 +#endif
54034 +
54035 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
54036 + task_grsec_rbac(m, task);
54037 +#endif
54038 +
54039 return 0;
54040 }
54041
54042 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54043 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54044 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54045 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54046 +#endif
54047 +
54048 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54049 struct pid *pid, struct task_struct *task, int whole)
54050 {
54051 @@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54052 char tcomm[sizeof(task->comm)];
54053 unsigned long flags;
54054
54055 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54056 + if (current->exec_id != m->exec_id) {
54057 + gr_log_badprocpid("stat");
54058 + return 0;
54059 + }
54060 +#endif
54061 +
54062 state = *get_task_state(task);
54063 vsize = eip = esp = 0;
54064 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54065 @@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54066 gtime = task_gtime(task);
54067 }
54068
54069 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54070 + if (PAX_RAND_FLAGS(mm)) {
54071 + eip = 0;
54072 + esp = 0;
54073 + wchan = 0;
54074 + }
54075 +#endif
54076 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54077 + wchan = 0;
54078 + eip =0;
54079 + esp =0;
54080 +#endif
54081 +
54082 /* scale priority and nice values from timeslices to -20..20 */
54083 /* to make it look like a "normal" Unix priority/nice value */
54084 priority = task_prio(task);
54085 @@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54086 seq_put_decimal_ull(m, ' ', vsize);
54087 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
54088 seq_put_decimal_ull(m, ' ', rsslim);
54089 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54090 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
54091 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
54092 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
54093 +#else
54094 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
54095 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
54096 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
54097 +#endif
54098 seq_put_decimal_ull(m, ' ', esp);
54099 seq_put_decimal_ull(m, ' ', eip);
54100 /* The signal information here is obsolete.
54101 @@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54102 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
54103 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
54104
54105 - if (mm && permitted) {
54106 + if (mm && permitted
54107 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54108 + && !PAX_RAND_FLAGS(mm)
54109 +#endif
54110 + ) {
54111 seq_put_decimal_ull(m, ' ', mm->start_data);
54112 seq_put_decimal_ull(m, ' ', mm->end_data);
54113 seq_put_decimal_ull(m, ' ', mm->start_brk);
54114 @@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54115 struct pid *pid, struct task_struct *task)
54116 {
54117 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
54118 - struct mm_struct *mm = get_task_mm(task);
54119 + struct mm_struct *mm;
54120
54121 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54122 + if (current->exec_id != m->exec_id) {
54123 + gr_log_badprocpid("statm");
54124 + return 0;
54125 + }
54126 +#endif
54127 + mm = get_task_mm(task);
54128 if (mm) {
54129 size = task_statm(mm, &shared, &text, &data, &resident);
54130 mmput(mm);
54131 @@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54132 return 0;
54133 }
54134
54135 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54136 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
54137 +{
54138 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
54139 +}
54140 +#endif
54141 +
54142 #ifdef CONFIG_CHECKPOINT_RESTORE
54143 static struct pid *
54144 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
54145 diff --git a/fs/proc/base.c b/fs/proc/base.c
54146 index 69078c7..3e12a75 100644
54147 --- a/fs/proc/base.c
54148 +++ b/fs/proc/base.c
54149 @@ -112,6 +112,14 @@ struct pid_entry {
54150 union proc_op op;
54151 };
54152
54153 +struct getdents_callback {
54154 + struct linux_dirent __user * current_dir;
54155 + struct linux_dirent __user * previous;
54156 + struct file * file;
54157 + int count;
54158 + int error;
54159 +};
54160 +
54161 #define NOD(NAME, MODE, IOP, FOP, OP) { \
54162 .name = (NAME), \
54163 .len = sizeof(NAME) - 1, \
54164 @@ -209,6 +217,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
54165 if (!mm->arg_end)
54166 goto out_mm; /* Shh! No looking before we're done */
54167
54168 + if (gr_acl_handle_procpidmem(task))
54169 + goto out_mm;
54170 +
54171 len = mm->arg_end - mm->arg_start;
54172
54173 if (len > PAGE_SIZE)
54174 @@ -236,12 +247,28 @@ out:
54175 return res;
54176 }
54177
54178 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54179 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54180 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
54181 + _mm->pax_flags & MF_PAX_SEGMEXEC))
54182 +#endif
54183 +
54184 static int proc_pid_auxv(struct task_struct *task, char *buffer)
54185 {
54186 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
54187 int res = PTR_ERR(mm);
54188 if (mm && !IS_ERR(mm)) {
54189 unsigned int nwords = 0;
54190 +
54191 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54192 + /* allow if we're currently ptracing this task */
54193 + if (PAX_RAND_FLAGS(mm) &&
54194 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
54195 + mmput(mm);
54196 + return 0;
54197 + }
54198 +#endif
54199 +
54200 do {
54201 nwords += 2;
54202 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
54203 @@ -255,7 +282,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
54204 }
54205
54206
54207 -#ifdef CONFIG_KALLSYMS
54208 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54209 /*
54210 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
54211 * Returns the resolved symbol. If that fails, simply return the address.
54212 @@ -294,7 +321,7 @@ static void unlock_trace(struct task_struct *task)
54213 mutex_unlock(&task->signal->cred_guard_mutex);
54214 }
54215
54216 -#ifdef CONFIG_STACKTRACE
54217 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54218
54219 #define MAX_STACK_TRACE_DEPTH 64
54220
54221 @@ -486,7 +513,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
54222 return count;
54223 }
54224
54225 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54226 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54227 static int proc_pid_syscall(struct task_struct *task, char *buffer)
54228 {
54229 long nr;
54230 @@ -515,7 +542,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
54231 /************************************************************************/
54232
54233 /* permission checks */
54234 -static int proc_fd_access_allowed(struct inode *inode)
54235 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
54236 {
54237 struct task_struct *task;
54238 int allowed = 0;
54239 @@ -525,7 +552,10 @@ static int proc_fd_access_allowed(struct inode *inode)
54240 */
54241 task = get_proc_task(inode);
54242 if (task) {
54243 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54244 + if (log)
54245 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54246 + else
54247 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54248 put_task_struct(task);
54249 }
54250 return allowed;
54251 @@ -556,10 +586,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
54252 struct task_struct *task,
54253 int hide_pid_min)
54254 {
54255 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54256 + return false;
54257 +
54258 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54259 + rcu_read_lock();
54260 + {
54261 + const struct cred *tmpcred = current_cred();
54262 + const struct cred *cred = __task_cred(task);
54263 +
54264 + if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
54265 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54266 + || in_group_p(grsec_proc_gid)
54267 +#endif
54268 + ) {
54269 + rcu_read_unlock();
54270 + return true;
54271 + }
54272 + }
54273 + rcu_read_unlock();
54274 +
54275 + if (!pid->hide_pid)
54276 + return false;
54277 +#endif
54278 +
54279 if (pid->hide_pid < hide_pid_min)
54280 return true;
54281 if (in_group_p(pid->pid_gid))
54282 return true;
54283 +
54284 return ptrace_may_access(task, PTRACE_MODE_READ);
54285 }
54286
54287 @@ -577,7 +632,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
54288 put_task_struct(task);
54289
54290 if (!has_perms) {
54291 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54292 + {
54293 +#else
54294 if (pid->hide_pid == 2) {
54295 +#endif
54296 /*
54297 * Let's make getdents(), stat(), and open()
54298 * consistent with each other. If a process
54299 @@ -675,6 +734,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54300 if (!task)
54301 return -ESRCH;
54302
54303 + if (gr_acl_handle_procpidmem(task)) {
54304 + put_task_struct(task);
54305 + return -EPERM;
54306 + }
54307 +
54308 mm = mm_access(task, mode);
54309 put_task_struct(task);
54310
54311 @@ -690,6 +754,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54312
54313 file->private_data = mm;
54314
54315 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54316 + file->f_version = current->exec_id;
54317 +#endif
54318 +
54319 return 0;
54320 }
54321
54322 @@ -711,6 +779,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54323 ssize_t copied;
54324 char *page;
54325
54326 +#ifdef CONFIG_GRKERNSEC
54327 + if (write)
54328 + return -EPERM;
54329 +#endif
54330 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54331 + if (file->f_version != current->exec_id) {
54332 + gr_log_badprocpid("mem");
54333 + return 0;
54334 + }
54335 +#endif
54336 +
54337 if (!mm)
54338 return 0;
54339
54340 @@ -723,7 +802,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54341 goto free;
54342
54343 while (count > 0) {
54344 - int this_len = min_t(int, count, PAGE_SIZE);
54345 + ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
54346
54347 if (write && copy_from_user(page, buf, this_len)) {
54348 copied = -EFAULT;
54349 @@ -815,6 +894,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54350 if (!mm)
54351 return 0;
54352
54353 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54354 + if (file->f_version != current->exec_id) {
54355 + gr_log_badprocpid("environ");
54356 + return 0;
54357 + }
54358 +#endif
54359 +
54360 page = (char *)__get_free_page(GFP_TEMPORARY);
54361 if (!page)
54362 return -ENOMEM;
54363 @@ -824,7 +910,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54364 goto free;
54365 while (count > 0) {
54366 size_t this_len, max_len;
54367 - int retval;
54368 + ssize_t retval;
54369
54370 if (src >= (mm->env_end - mm->env_start))
54371 break;
54372 @@ -1430,7 +1516,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
54373 int error = -EACCES;
54374
54375 /* Are we allowed to snoop on the tasks file descriptors? */
54376 - if (!proc_fd_access_allowed(inode))
54377 + if (!proc_fd_access_allowed(inode, 0))
54378 goto out;
54379
54380 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54381 @@ -1474,8 +1560,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
54382 struct path path;
54383
54384 /* Are we allowed to snoop on the tasks file descriptors? */
54385 - if (!proc_fd_access_allowed(inode))
54386 - goto out;
54387 + /* logging this is needed for learning on chromium to work properly,
54388 + but we don't want to flood the logs from 'ps' which does a readlink
54389 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
54390 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
54391 + */
54392 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
54393 + if (!proc_fd_access_allowed(inode,0))
54394 + goto out;
54395 + } else {
54396 + if (!proc_fd_access_allowed(inode,1))
54397 + goto out;
54398 + }
54399
54400 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54401 if (error)
54402 @@ -1525,7 +1621,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
54403 rcu_read_lock();
54404 cred = __task_cred(task);
54405 inode->i_uid = cred->euid;
54406 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54407 + inode->i_gid = grsec_proc_gid;
54408 +#else
54409 inode->i_gid = cred->egid;
54410 +#endif
54411 rcu_read_unlock();
54412 }
54413 security_task_to_inode(task, inode);
54414 @@ -1561,10 +1661,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
54415 return -ENOENT;
54416 }
54417 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54418 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54419 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54420 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54421 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54422 +#endif
54423 task_dumpable(task)) {
54424 cred = __task_cred(task);
54425 stat->uid = cred->euid;
54426 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54427 + stat->gid = grsec_proc_gid;
54428 +#else
54429 stat->gid = cred->egid;
54430 +#endif
54431 }
54432 }
54433 rcu_read_unlock();
54434 @@ -1602,11 +1711,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
54435
54436 if (task) {
54437 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54438 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54439 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54440 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54441 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54442 +#endif
54443 task_dumpable(task)) {
54444 rcu_read_lock();
54445 cred = __task_cred(task);
54446 inode->i_uid = cred->euid;
54447 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54448 + inode->i_gid = grsec_proc_gid;
54449 +#else
54450 inode->i_gid = cred->egid;
54451 +#endif
54452 rcu_read_unlock();
54453 } else {
54454 inode->i_uid = GLOBAL_ROOT_UID;
54455 @@ -2059,6 +2177,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
54456 if (!task)
54457 goto out_no_task;
54458
54459 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54460 + goto out;
54461 +
54462 /*
54463 * Yes, it does not scale. And it should not. Don't add
54464 * new entries into /proc/<tgid>/ without very good reasons.
54465 @@ -2103,6 +2224,9 @@ static int proc_pident_readdir(struct file *filp,
54466 if (!task)
54467 goto out_no_task;
54468
54469 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54470 + goto out;
54471 +
54472 ret = 0;
54473 i = filp->f_pos;
54474 switch (i) {
54475 @@ -2516,7 +2640,7 @@ static const struct pid_entry tgid_base_stuff[] = {
54476 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
54477 #endif
54478 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54479 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54480 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54481 INF("syscall", S_IRUGO, proc_pid_syscall),
54482 #endif
54483 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54484 @@ -2541,10 +2665,10 @@ static const struct pid_entry tgid_base_stuff[] = {
54485 #ifdef CONFIG_SECURITY
54486 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54487 #endif
54488 -#ifdef CONFIG_KALLSYMS
54489 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54490 INF("wchan", S_IRUGO, proc_pid_wchan),
54491 #endif
54492 -#ifdef CONFIG_STACKTRACE
54493 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54494 ONE("stack", S_IRUGO, proc_pid_stack),
54495 #endif
54496 #ifdef CONFIG_SCHEDSTATS
54497 @@ -2578,6 +2702,9 @@ static const struct pid_entry tgid_base_stuff[] = {
54498 #ifdef CONFIG_HARDWALL
54499 INF("hardwall", S_IRUGO, proc_pid_hardwall),
54500 #endif
54501 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54502 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
54503 +#endif
54504 #ifdef CONFIG_USER_NS
54505 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
54506 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
54507 @@ -2707,7 +2834,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
54508 if (!inode)
54509 goto out;
54510
54511 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54512 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
54513 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54514 + inode->i_gid = grsec_proc_gid;
54515 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
54516 +#else
54517 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
54518 +#endif
54519 inode->i_op = &proc_tgid_base_inode_operations;
54520 inode->i_fop = &proc_tgid_base_operations;
54521 inode->i_flags|=S_IMMUTABLE;
54522 @@ -2745,7 +2879,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
54523 if (!task)
54524 goto out;
54525
54526 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54527 + goto out_put_task;
54528 +
54529 result = proc_pid_instantiate(dir, dentry, task, NULL);
54530 +out_put_task:
54531 put_task_struct(task);
54532 out:
54533 return result;
54534 @@ -2808,6 +2946,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
54535 static int fake_filldir(void *buf, const char *name, int namelen,
54536 loff_t offset, u64 ino, unsigned d_type)
54537 {
54538 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
54539 + __buf->error = -EINVAL;
54540 return 0;
54541 }
54542
54543 @@ -2859,7 +2999,7 @@ static const struct pid_entry tid_base_stuff[] = {
54544 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
54545 #endif
54546 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54547 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54548 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54549 INF("syscall", S_IRUGO, proc_pid_syscall),
54550 #endif
54551 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54552 @@ -2886,10 +3026,10 @@ static const struct pid_entry tid_base_stuff[] = {
54553 #ifdef CONFIG_SECURITY
54554 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54555 #endif
54556 -#ifdef CONFIG_KALLSYMS
54557 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54558 INF("wchan", S_IRUGO, proc_pid_wchan),
54559 #endif
54560 -#ifdef CONFIG_STACKTRACE
54561 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54562 ONE("stack", S_IRUGO, proc_pid_stack),
54563 #endif
54564 #ifdef CONFIG_SCHEDSTATS
54565 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
54566 index 82676e3..5f8518a 100644
54567 --- a/fs/proc/cmdline.c
54568 +++ b/fs/proc/cmdline.c
54569 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
54570
54571 static int __init proc_cmdline_init(void)
54572 {
54573 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54574 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
54575 +#else
54576 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
54577 +#endif
54578 return 0;
54579 }
54580 module_init(proc_cmdline_init);
54581 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
54582 index b143471..bb105e5 100644
54583 --- a/fs/proc/devices.c
54584 +++ b/fs/proc/devices.c
54585 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
54586
54587 static int __init proc_devices_init(void)
54588 {
54589 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
54590 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
54591 +#else
54592 proc_create("devices", 0, NULL, &proc_devinfo_operations);
54593 +#endif
54594 return 0;
54595 }
54596 module_init(proc_devices_init);
54597 diff --git a/fs/proc/fd.c b/fs/proc/fd.c
54598 index d7a4a28..0201742 100644
54599 --- a/fs/proc/fd.c
54600 +++ b/fs/proc/fd.c
54601 @@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
54602 if (!task)
54603 return -ENOENT;
54604
54605 - files = get_files_struct(task);
54606 + if (!gr_acl_handle_procpidmem(task))
54607 + files = get_files_struct(task);
54608 put_task_struct(task);
54609
54610 if (files) {
54611 @@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
54612 */
54613 int proc_fd_permission(struct inode *inode, int mask)
54614 {
54615 + struct task_struct *task;
54616 int rv = generic_permission(inode, mask);
54617 - if (rv == 0)
54618 - return 0;
54619 +
54620 if (task_pid(current) == proc_pid(inode))
54621 rv = 0;
54622 +
54623 + task = get_proc_task(inode);
54624 + if (task == NULL)
54625 + return rv;
54626 +
54627 + if (gr_acl_handle_procpidmem(task))
54628 + rv = -EACCES;
54629 +
54630 + put_task_struct(task);
54631 +
54632 return rv;
54633 }
54634
54635 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
54636 index 869116c..820cb27 100644
54637 --- a/fs/proc/inode.c
54638 +++ b/fs/proc/inode.c
54639 @@ -22,11 +22,17 @@
54640 #include <linux/seq_file.h>
54641 #include <linux/slab.h>
54642 #include <linux/mount.h>
54643 +#include <linux/grsecurity.h>
54644
54645 #include <asm/uaccess.h>
54646
54647 #include "internal.h"
54648
54649 +#ifdef CONFIG_PROC_SYSCTL
54650 +extern const struct inode_operations proc_sys_inode_operations;
54651 +extern const struct inode_operations proc_sys_dir_operations;
54652 +#endif
54653 +
54654 static void proc_evict_inode(struct inode *inode)
54655 {
54656 struct proc_dir_entry *de;
54657 @@ -54,6 +60,13 @@ static void proc_evict_inode(struct inode *inode)
54658 ns = PROC_I(inode)->ns;
54659 if (ns_ops && ns)
54660 ns_ops->put(ns);
54661 +
54662 +#ifdef CONFIG_PROC_SYSCTL
54663 + if (inode->i_op == &proc_sys_inode_operations ||
54664 + inode->i_op == &proc_sys_dir_operations)
54665 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
54666 +#endif
54667 +
54668 }
54669
54670 static struct kmem_cache * proc_inode_cachep;
54671 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
54672 if (de->mode) {
54673 inode->i_mode = de->mode;
54674 inode->i_uid = de->uid;
54675 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54676 + inode->i_gid = grsec_proc_gid;
54677 +#else
54678 inode->i_gid = de->gid;
54679 +#endif
54680 }
54681 if (de->size)
54682 inode->i_size = de->size;
54683 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
54684 index 85ff3a4..a512bd8 100644
54685 --- a/fs/proc/internal.h
54686 +++ b/fs/proc/internal.h
54687 @@ -56,6 +56,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54688 struct pid *pid, struct task_struct *task);
54689 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54690 struct pid *pid, struct task_struct *task);
54691 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54692 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
54693 +#endif
54694 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
54695
54696 extern const struct file_operations proc_tid_children_operations;
54697 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
54698 index eda6f01..006ae24 100644
54699 --- a/fs/proc/kcore.c
54700 +++ b/fs/proc/kcore.c
54701 @@ -481,9 +481,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54702 * the addresses in the elf_phdr on our list.
54703 */
54704 start = kc_offset_to_vaddr(*fpos - elf_buflen);
54705 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
54706 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
54707 + if (tsz > buflen)
54708 tsz = buflen;
54709 -
54710 +
54711 while (buflen) {
54712 struct kcore_list *m;
54713
54714 @@ -512,20 +513,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54715 kfree(elf_buf);
54716 } else {
54717 if (kern_addr_valid(start)) {
54718 - unsigned long n;
54719 + char *elf_buf;
54720 + mm_segment_t oldfs;
54721
54722 - n = copy_to_user(buffer, (char *)start, tsz);
54723 - /*
54724 - * We cannot distinguish between fault on source
54725 - * and fault on destination. When this happens
54726 - * we clear too and hope it will trigger the
54727 - * EFAULT again.
54728 - */
54729 - if (n) {
54730 - if (clear_user(buffer + tsz - n,
54731 - n))
54732 + elf_buf = kmalloc(tsz, GFP_KERNEL);
54733 + if (!elf_buf)
54734 + return -ENOMEM;
54735 + oldfs = get_fs();
54736 + set_fs(KERNEL_DS);
54737 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
54738 + set_fs(oldfs);
54739 + if (copy_to_user(buffer, elf_buf, tsz)) {
54740 + kfree(elf_buf);
54741 return -EFAULT;
54742 + }
54743 }
54744 + set_fs(oldfs);
54745 + kfree(elf_buf);
54746 } else {
54747 if (clear_user(buffer, tsz))
54748 return -EFAULT;
54749 @@ -545,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54750
54751 static int open_kcore(struct inode *inode, struct file *filp)
54752 {
54753 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
54754 + return -EPERM;
54755 +#endif
54756 if (!capable(CAP_SYS_RAWIO))
54757 return -EPERM;
54758 if (kcore_need_update)
54759 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54760 index 1efaaa1..834e49a 100644
54761 --- a/fs/proc/meminfo.c
54762 +++ b/fs/proc/meminfo.c
54763 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54764 vmi.used >> 10,
54765 vmi.largest_chunk >> 10
54766 #ifdef CONFIG_MEMORY_FAILURE
54767 - ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
54768 + ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
54769 #endif
54770 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
54771 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
54772 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54773 index ccfd99b..1b7e255 100644
54774 --- a/fs/proc/nommu.c
54775 +++ b/fs/proc/nommu.c
54776 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54777 if (len < 1)
54778 len = 1;
54779 seq_printf(m, "%*c", len, ' ');
54780 - seq_path(m, &file->f_path, "");
54781 + seq_path(m, &file->f_path, "\n\\");
54782 }
54783
54784 seq_putc(m, '\n');
54785 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54786 index b4ac657..0842bd2 100644
54787 --- a/fs/proc/proc_net.c
54788 +++ b/fs/proc/proc_net.c
54789 @@ -23,6 +23,7 @@
54790 #include <linux/nsproxy.h>
54791 #include <net/net_namespace.h>
54792 #include <linux/seq_file.h>
54793 +#include <linux/grsecurity.h>
54794
54795 #include "internal.h"
54796
54797 @@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54798 struct task_struct *task;
54799 struct nsproxy *ns;
54800 struct net *net = NULL;
54801 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54802 + const struct cred *cred = current_cred();
54803 +#endif
54804 +
54805 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54806 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
54807 + return net;
54808 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54809 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
54810 + return net;
54811 +#endif
54812
54813 rcu_read_lock();
54814 task = pid_task(proc_pid(dir), PIDTYPE_PID);
54815 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54816 index ac05f33..1e6dc7e 100644
54817 --- a/fs/proc/proc_sysctl.c
54818 +++ b/fs/proc/proc_sysctl.c
54819 @@ -13,11 +13,15 @@
54820 #include <linux/module.h>
54821 #include "internal.h"
54822
54823 +extern int gr_handle_chroot_sysctl(const int op);
54824 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
54825 + const int op);
54826 +
54827 static const struct dentry_operations proc_sys_dentry_operations;
54828 static const struct file_operations proc_sys_file_operations;
54829 -static const struct inode_operations proc_sys_inode_operations;
54830 +const struct inode_operations proc_sys_inode_operations;
54831 static const struct file_operations proc_sys_dir_file_operations;
54832 -static const struct inode_operations proc_sys_dir_operations;
54833 +const struct inode_operations proc_sys_dir_operations;
54834
54835 void proc_sys_poll_notify(struct ctl_table_poll *poll)
54836 {
54837 @@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54838
54839 err = NULL;
54840 d_set_d_op(dentry, &proc_sys_dentry_operations);
54841 +
54842 + gr_handle_proc_create(dentry, inode);
54843 +
54844 d_add(dentry, inode);
54845
54846 out:
54847 @@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54848 struct inode *inode = file_inode(filp);
54849 struct ctl_table_header *head = grab_header(inode);
54850 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
54851 + int op = write ? MAY_WRITE : MAY_READ;
54852 ssize_t error;
54853 size_t res;
54854
54855 @@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54856 * and won't be until we finish.
54857 */
54858 error = -EPERM;
54859 - if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
54860 + if (sysctl_perm(head, table, op))
54861 goto out;
54862
54863 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
54864 @@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54865 if (!table->proc_handler)
54866 goto out;
54867
54868 +#ifdef CONFIG_GRKERNSEC
54869 + error = -EPERM;
54870 + if (gr_handle_chroot_sysctl(op))
54871 + goto out;
54872 + dget(filp->f_path.dentry);
54873 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
54874 + dput(filp->f_path.dentry);
54875 + goto out;
54876 + }
54877 + dput(filp->f_path.dentry);
54878 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
54879 + goto out;
54880 + if (write && !capable(CAP_SYS_ADMIN))
54881 + goto out;
54882 +#endif
54883 +
54884 /* careful: calling conventions are nasty here */
54885 res = count;
54886 error = table->proc_handler(table, write, buf, &res, ppos);
54887 @@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54888 return -ENOMEM;
54889 } else {
54890 d_set_d_op(child, &proc_sys_dentry_operations);
54891 +
54892 + gr_handle_proc_create(child, inode);
54893 +
54894 d_add(child, inode);
54895 }
54896 } else {
54897 @@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54898 if ((*pos)++ < file->f_pos)
54899 return 0;
54900
54901 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
54902 + return 0;
54903 +
54904 if (unlikely(S_ISLNK(table->mode)))
54905 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
54906 else
54907 @@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54908 if (IS_ERR(head))
54909 return PTR_ERR(head);
54910
54911 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
54912 + return -ENOENT;
54913 +
54914 generic_fillattr(inode, stat);
54915 if (table)
54916 stat->mode = (stat->mode & S_IFMT) | table->mode;
54917 @@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
54918 .llseek = generic_file_llseek,
54919 };
54920
54921 -static const struct inode_operations proc_sys_inode_operations = {
54922 +const struct inode_operations proc_sys_inode_operations = {
54923 .permission = proc_sys_permission,
54924 .setattr = proc_sys_setattr,
54925 .getattr = proc_sys_getattr,
54926 };
54927
54928 -static const struct inode_operations proc_sys_dir_operations = {
54929 +const struct inode_operations proc_sys_dir_operations = {
54930 .lookup = proc_sys_lookup,
54931 .permission = proc_sys_permission,
54932 .setattr = proc_sys_setattr,
54933 @@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
54934 static struct ctl_dir *new_dir(struct ctl_table_set *set,
54935 const char *name, int namelen)
54936 {
54937 - struct ctl_table *table;
54938 + ctl_table_no_const *table;
54939 struct ctl_dir *new;
54940 struct ctl_node *node;
54941 char *new_name;
54942 @@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
54943 return NULL;
54944
54945 node = (struct ctl_node *)(new + 1);
54946 - table = (struct ctl_table *)(node + 1);
54947 + table = (ctl_table_no_const *)(node + 1);
54948 new_name = (char *)(table + 2);
54949 memcpy(new_name, name, namelen);
54950 new_name[namelen] = '\0';
54951 @@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
54952 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
54953 struct ctl_table_root *link_root)
54954 {
54955 - struct ctl_table *link_table, *entry, *link;
54956 + ctl_table_no_const *link_table, *link;
54957 + struct ctl_table *entry;
54958 struct ctl_table_header *links;
54959 struct ctl_node *node;
54960 char *link_name;
54961 @@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
54962 return NULL;
54963
54964 node = (struct ctl_node *)(links + 1);
54965 - link_table = (struct ctl_table *)(node + nr_entries);
54966 + link_table = (ctl_table_no_const *)(node + nr_entries);
54967 link_name = (char *)&link_table[nr_entries + 1];
54968
54969 for (link = link_table, entry = table; entry->procname; link++, entry++) {
54970 @@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54971 struct ctl_table_header ***subheader, struct ctl_table_set *set,
54972 struct ctl_table *table)
54973 {
54974 - struct ctl_table *ctl_table_arg = NULL;
54975 - struct ctl_table *entry, *files;
54976 + ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
54977 + struct ctl_table *entry;
54978 int nr_files = 0;
54979 int nr_dirs = 0;
54980 int err = -ENOMEM;
54981 @@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54982 nr_files++;
54983 }
54984
54985 - files = table;
54986 /* If there are mixed files and directories we need a new table */
54987 if (nr_dirs && nr_files) {
54988 - struct ctl_table *new;
54989 + ctl_table_no_const *new;
54990 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
54991 GFP_KERNEL);
54992 if (!files)
54993 @@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54994 /* Register everything except a directory full of subdirectories */
54995 if (nr_files || !nr_dirs) {
54996 struct ctl_table_header *header;
54997 - header = __register_sysctl_table(set, path, files);
54998 + header = __register_sysctl_table(set, path, files ? files : table);
54999 if (!header) {
55000 kfree(ctl_table_arg);
55001 goto out;
55002 diff --git a/fs/proc/root.c b/fs/proc/root.c
55003 index 9c7fab1..ed1c8e0 100644
55004 --- a/fs/proc/root.c
55005 +++ b/fs/proc/root.c
55006 @@ -180,7 +180,15 @@ void __init proc_root_init(void)
55007 #ifdef CONFIG_PROC_DEVICETREE
55008 proc_device_tree_init();
55009 #endif
55010 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
55011 +#ifdef CONFIG_GRKERNSEC_PROC_USER
55012 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
55013 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55014 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
55015 +#endif
55016 +#else
55017 proc_mkdir("bus", NULL);
55018 +#endif
55019 proc_sys_init();
55020 }
55021
55022 diff --git a/fs/proc/self.c b/fs/proc/self.c
55023 index aa5cc3b..c91a5d0 100644
55024 --- a/fs/proc/self.c
55025 +++ b/fs/proc/self.c
55026 @@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
55027 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
55028 void *cookie)
55029 {
55030 - char *s = nd_get_link(nd);
55031 + const char *s = nd_get_link(nd);
55032 if (!IS_ERR(s))
55033 kfree(s);
55034 }
55035 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
55036 index 3e636d8..83e3b71 100644
55037 --- a/fs/proc/task_mmu.c
55038 +++ b/fs/proc/task_mmu.c
55039 @@ -11,12 +11,19 @@
55040 #include <linux/rmap.h>
55041 #include <linux/swap.h>
55042 #include <linux/swapops.h>
55043 +#include <linux/grsecurity.h>
55044
55045 #include <asm/elf.h>
55046 #include <asm/uaccess.h>
55047 #include <asm/tlbflush.h>
55048 #include "internal.h"
55049
55050 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55051 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55052 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
55053 + _mm->pax_flags & MF_PAX_SEGMEXEC))
55054 +#endif
55055 +
55056 void task_mem(struct seq_file *m, struct mm_struct *mm)
55057 {
55058 unsigned long data, text, lib, swap;
55059 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55060 "VmExe:\t%8lu kB\n"
55061 "VmLib:\t%8lu kB\n"
55062 "VmPTE:\t%8lu kB\n"
55063 - "VmSwap:\t%8lu kB\n",
55064 - hiwater_vm << (PAGE_SHIFT-10),
55065 + "VmSwap:\t%8lu kB\n"
55066 +
55067 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55068 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
55069 +#endif
55070 +
55071 + ,hiwater_vm << (PAGE_SHIFT-10),
55072 total_vm << (PAGE_SHIFT-10),
55073 mm->locked_vm << (PAGE_SHIFT-10),
55074 mm->pinned_vm << (PAGE_SHIFT-10),
55075 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55076 data << (PAGE_SHIFT-10),
55077 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55078 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
55079 - swap << (PAGE_SHIFT-10));
55080 + swap << (PAGE_SHIFT-10)
55081 +
55082 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55083 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55084 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
55085 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
55086 +#else
55087 + , mm->context.user_cs_base
55088 + , mm->context.user_cs_limit
55089 +#endif
55090 +#endif
55091 +
55092 + );
55093 }
55094
55095 unsigned long task_vsize(struct mm_struct *mm)
55096 @@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55097 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
55098 }
55099
55100 - /* We don't show the stack guard page in /proc/maps */
55101 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55102 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
55103 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
55104 +#else
55105 start = vma->vm_start;
55106 - if (stack_guard_page_start(vma, start))
55107 - start += PAGE_SIZE;
55108 end = vma->vm_end;
55109 - if (stack_guard_page_end(vma, end))
55110 - end -= PAGE_SIZE;
55111 +#endif
55112
55113 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
55114 start,
55115 @@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55116 flags & VM_WRITE ? 'w' : '-',
55117 flags & VM_EXEC ? 'x' : '-',
55118 flags & VM_MAYSHARE ? 's' : 'p',
55119 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55120 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
55121 +#else
55122 pgoff,
55123 +#endif
55124 MAJOR(dev), MINOR(dev), ino, &len);
55125
55126 /*
55127 @@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55128 */
55129 if (file) {
55130 pad_len_spaces(m, len);
55131 - seq_path(m, &file->f_path, "\n");
55132 + seq_path(m, &file->f_path, "\n\\");
55133 goto done;
55134 }
55135
55136 @@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55137 * Thread stack in /proc/PID/task/TID/maps or
55138 * the main process stack.
55139 */
55140 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
55141 - vma->vm_end >= mm->start_stack)) {
55142 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
55143 + (vma->vm_start <= mm->start_stack &&
55144 + vma->vm_end >= mm->start_stack)) {
55145 name = "[stack]";
55146 } else {
55147 /* Thread stack in /proc/PID/maps */
55148 @@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
55149 struct proc_maps_private *priv = m->private;
55150 struct task_struct *task = priv->task;
55151
55152 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55153 + if (current->exec_id != m->exec_id) {
55154 + gr_log_badprocpid("maps");
55155 + return 0;
55156 + }
55157 +#endif
55158 +
55159 show_map_vma(m, vma, is_pid);
55160
55161 if (m->count < m->size) /* vma is copied successfully */
55162 @@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55163 .private = &mss,
55164 };
55165
55166 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55167 + if (current->exec_id != m->exec_id) {
55168 + gr_log_badprocpid("smaps");
55169 + return 0;
55170 + }
55171 +#endif
55172 memset(&mss, 0, sizeof mss);
55173 - mss.vma = vma;
55174 - /* mmap_sem is held in m_start */
55175 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55176 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55177 -
55178 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55179 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
55180 +#endif
55181 + mss.vma = vma;
55182 + /* mmap_sem is held in m_start */
55183 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55184 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55185 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55186 + }
55187 +#endif
55188 show_map_vma(m, vma, is_pid);
55189
55190 seq_printf(m,
55191 @@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55192 "KernelPageSize: %8lu kB\n"
55193 "MMUPageSize: %8lu kB\n"
55194 "Locked: %8lu kB\n",
55195 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55196 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
55197 +#else
55198 (vma->vm_end - vma->vm_start) >> 10,
55199 +#endif
55200 mss.resident >> 10,
55201 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
55202 mss.shared_clean >> 10,
55203 @@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55204 int n;
55205 char buffer[50];
55206
55207 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55208 + if (current->exec_id != m->exec_id) {
55209 + gr_log_badprocpid("numa_maps");
55210 + return 0;
55211 + }
55212 +#endif
55213 +
55214 if (!mm)
55215 return 0;
55216
55217 @@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55218 mpol_to_str(buffer, sizeof(buffer), pol);
55219 mpol_cond_put(pol);
55220
55221 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55222 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
55223 +#else
55224 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
55225 +#endif
55226
55227 if (file) {
55228 seq_printf(m, " file=");
55229 - seq_path(m, &file->f_path, "\n\t= ");
55230 + seq_path(m, &file->f_path, "\n\t\\= ");
55231 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
55232 seq_printf(m, " heap");
55233 } else {
55234 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
55235 index 56123a6..5a2f6ec 100644
55236 --- a/fs/proc/task_nommu.c
55237 +++ b/fs/proc/task_nommu.c
55238 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55239 else
55240 bytes += kobjsize(mm);
55241
55242 - if (current->fs && current->fs->users > 1)
55243 + if (current->fs && atomic_read(&current->fs->users) > 1)
55244 sbytes += kobjsize(current->fs);
55245 else
55246 bytes += kobjsize(current->fs);
55247 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
55248
55249 if (file) {
55250 pad_len_spaces(m, len);
55251 - seq_path(m, &file->f_path, "");
55252 + seq_path(m, &file->f_path, "\n\\");
55253 } else if (mm) {
55254 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
55255
55256 diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
55257 index b00fcc9..e0c6381 100644
55258 --- a/fs/qnx6/qnx6.h
55259 +++ b/fs/qnx6/qnx6.h
55260 @@ -74,7 +74,7 @@ enum {
55261 BYTESEX_BE,
55262 };
55263
55264 -static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55265 +static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55266 {
55267 if (sbi->s_bytesex == BYTESEX_LE)
55268 return le64_to_cpu((__force __le64)n);
55269 @@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
55270 return (__force __fs64)cpu_to_be64(n);
55271 }
55272
55273 -static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55274 +static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55275 {
55276 if (sbi->s_bytesex == BYTESEX_LE)
55277 return le32_to_cpu((__force __le32)n);
55278 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
55279 index 16e8abb..2dcf914 100644
55280 --- a/fs/quota/netlink.c
55281 +++ b/fs/quota/netlink.c
55282 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
55283 void quota_send_warning(struct kqid qid, dev_t dev,
55284 const char warntype)
55285 {
55286 - static atomic_t seq;
55287 + static atomic_unchecked_t seq;
55288 struct sk_buff *skb;
55289 void *msg_head;
55290 int ret;
55291 @@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
55292 "VFS: Not enough memory to send quota warning.\n");
55293 return;
55294 }
55295 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
55296 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
55297 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
55298 if (!msg_head) {
55299 printk(KERN_ERR
55300 diff --git a/fs/readdir.c b/fs/readdir.c
55301 index fee38e0..12fdf47 100644
55302 --- a/fs/readdir.c
55303 +++ b/fs/readdir.c
55304 @@ -17,6 +17,7 @@
55305 #include <linux/security.h>
55306 #include <linux/syscalls.h>
55307 #include <linux/unistd.h>
55308 +#include <linux/namei.h>
55309
55310 #include <asm/uaccess.h>
55311
55312 @@ -67,6 +68,7 @@ struct old_linux_dirent {
55313
55314 struct readdir_callback {
55315 struct old_linux_dirent __user * dirent;
55316 + struct file * file;
55317 int result;
55318 };
55319
55320 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
55321 buf->result = -EOVERFLOW;
55322 return -EOVERFLOW;
55323 }
55324 +
55325 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55326 + return 0;
55327 +
55328 buf->result++;
55329 dirent = buf->dirent;
55330 if (!access_ok(VERIFY_WRITE, dirent,
55331 @@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
55332
55333 buf.result = 0;
55334 buf.dirent = dirent;
55335 + buf.file = f.file;
55336
55337 error = vfs_readdir(f.file, fillonedir, &buf);
55338 if (buf.result)
55339 @@ -139,6 +146,7 @@ struct linux_dirent {
55340 struct getdents_callback {
55341 struct linux_dirent __user * current_dir;
55342 struct linux_dirent __user * previous;
55343 + struct file * file;
55344 int count;
55345 int error;
55346 };
55347 @@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
55348 buf->error = -EOVERFLOW;
55349 return -EOVERFLOW;
55350 }
55351 +
55352 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55353 + return 0;
55354 +
55355 dirent = buf->previous;
55356 if (dirent) {
55357 if (__put_user(offset, &dirent->d_off))
55358 @@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55359 buf.previous = NULL;
55360 buf.count = count;
55361 buf.error = 0;
55362 + buf.file = f.file;
55363
55364 error = vfs_readdir(f.file, filldir, &buf);
55365 if (error >= 0)
55366 @@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55367 struct getdents_callback64 {
55368 struct linux_dirent64 __user * current_dir;
55369 struct linux_dirent64 __user * previous;
55370 + struct file *file;
55371 int count;
55372 int error;
55373 };
55374 @@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
55375 buf->error = -EINVAL; /* only used if we fail.. */
55376 if (reclen > buf->count)
55377 return -EINVAL;
55378 +
55379 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55380 + return 0;
55381 +
55382 dirent = buf->previous;
55383 if (dirent) {
55384 if (__put_user(offset, &dirent->d_off))
55385 @@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55386
55387 buf.current_dir = dirent;
55388 buf.previous = NULL;
55389 + buf.file = f.file;
55390 buf.count = count;
55391 buf.error = 0;
55392
55393 @@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55394 error = buf.error;
55395 lastdirent = buf.previous;
55396 if (lastdirent) {
55397 - typeof(lastdirent->d_off) d_off = f.file->f_pos;
55398 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
55399 if (__put_user(d_off, &lastdirent->d_off))
55400 error = -EFAULT;
55401 else
55402 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
55403 index 2b7882b..1c5ef48 100644
55404 --- a/fs/reiserfs/do_balan.c
55405 +++ b/fs/reiserfs/do_balan.c
55406 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
55407 return;
55408 }
55409
55410 - atomic_inc(&(fs_generation(tb->tb_sb)));
55411 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
55412 do_balance_starts(tb);
55413
55414 /* balance leaf returns 0 except if combining L R and S into
55415 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
55416 index 9cc0740a..46bf953 100644
55417 --- a/fs/reiserfs/procfs.c
55418 +++ b/fs/reiserfs/procfs.c
55419 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
55420 "SMALL_TAILS " : "NO_TAILS ",
55421 replay_only(sb) ? "REPLAY_ONLY " : "",
55422 convert_reiserfs(sb) ? "CONV " : "",
55423 - atomic_read(&r->s_generation_counter),
55424 + atomic_read_unchecked(&r->s_generation_counter),
55425 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
55426 SF(s_do_balance), SF(s_unneeded_left_neighbor),
55427 SF(s_good_search_by_key_reada), SF(s_bmaps),
55428 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
55429 index 157e474..65a6114 100644
55430 --- a/fs/reiserfs/reiserfs.h
55431 +++ b/fs/reiserfs/reiserfs.h
55432 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
55433 /* Comment? -Hans */
55434 wait_queue_head_t s_wait;
55435 /* To be obsoleted soon by per buffer seals.. -Hans */
55436 - atomic_t s_generation_counter; // increased by one every time the
55437 + atomic_unchecked_t s_generation_counter; // increased by one every time the
55438 // tree gets re-balanced
55439 unsigned long s_properties; /* File system properties. Currently holds
55440 on-disk FS format */
55441 @@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
55442 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
55443
55444 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
55445 -#define get_generation(s) atomic_read (&fs_generation(s))
55446 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
55447 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
55448 #define __fs_changed(gen,s) (gen != get_generation (s))
55449 #define fs_changed(gen,s) \
55450 diff --git a/fs/select.c b/fs/select.c
55451 index 8c1c96c..a0f9b6d 100644
55452 --- a/fs/select.c
55453 +++ b/fs/select.c
55454 @@ -20,6 +20,7 @@
55455 #include <linux/export.h>
55456 #include <linux/slab.h>
55457 #include <linux/poll.h>
55458 +#include <linux/security.h>
55459 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
55460 #include <linux/file.h>
55461 #include <linux/fdtable.h>
55462 @@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
55463 struct poll_list *walk = head;
55464 unsigned long todo = nfds;
55465
55466 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
55467 if (nfds > rlimit(RLIMIT_NOFILE))
55468 return -EINVAL;
55469
55470 diff --git a/fs/seq_file.c b/fs/seq_file.c
55471 index 38bb59f..a304f9d 100644
55472 --- a/fs/seq_file.c
55473 +++ b/fs/seq_file.c
55474 @@ -10,6 +10,7 @@
55475 #include <linux/seq_file.h>
55476 #include <linux/slab.h>
55477 #include <linux/cred.h>
55478 +#include <linux/sched.h>
55479
55480 #include <asm/uaccess.h>
55481 #include <asm/page.h>
55482 @@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
55483 #ifdef CONFIG_USER_NS
55484 p->user_ns = file->f_cred->user_ns;
55485 #endif
55486 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55487 + p->exec_id = current->exec_id;
55488 +#endif
55489
55490 /*
55491 * Wrappers around seq_open(e.g. swaps_open) need to be
55492 @@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55493 return 0;
55494 }
55495 if (!m->buf) {
55496 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55497 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55498 if (!m->buf)
55499 return -ENOMEM;
55500 }
55501 @@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55502 Eoverflow:
55503 m->op->stop(m, p);
55504 kfree(m->buf);
55505 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55506 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55507 return !m->buf ? -ENOMEM : -EAGAIN;
55508 }
55509
55510 @@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55511
55512 /* grab buffer if we didn't have one */
55513 if (!m->buf) {
55514 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55515 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55516 if (!m->buf)
55517 goto Enomem;
55518 }
55519 @@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55520 goto Fill;
55521 m->op->stop(m, p);
55522 kfree(m->buf);
55523 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55524 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55525 if (!m->buf)
55526 goto Enomem;
55527 m->count = 0;
55528 @@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
55529 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
55530 void *data)
55531 {
55532 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
55533 + seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
55534 int res = -ENOMEM;
55535
55536 if (op) {
55537 diff --git a/fs/splice.c b/fs/splice.c
55538 index 29e394e..b13c247 100644
55539 --- a/fs/splice.c
55540 +++ b/fs/splice.c
55541 @@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55542 pipe_lock(pipe);
55543
55544 for (;;) {
55545 - if (!pipe->readers) {
55546 + if (!atomic_read(&pipe->readers)) {
55547 send_sig(SIGPIPE, current, 0);
55548 if (!ret)
55549 ret = -EPIPE;
55550 @@ -249,9 +249,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55551 do_wakeup = 0;
55552 }
55553
55554 - pipe->waiting_writers++;
55555 + atomic_inc(&pipe->waiting_writers);
55556 pipe_wait(pipe);
55557 - pipe->waiting_writers--;
55558 + atomic_dec(&pipe->waiting_writers);
55559 }
55560
55561 pipe_unlock(pipe);
55562 @@ -564,7 +564,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
55563 old_fs = get_fs();
55564 set_fs(get_ds());
55565 /* The cast to a user pointer is valid due to the set_fs() */
55566 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
55567 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
55568 set_fs(old_fs);
55569
55570 return res;
55571 @@ -579,7 +579,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
55572 old_fs = get_fs();
55573 set_fs(get_ds());
55574 /* The cast to a user pointer is valid due to the set_fs() */
55575 - res = vfs_write(file, (__force const char __user *)buf, count, &pos);
55576 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
55577 set_fs(old_fs);
55578
55579 return res;
55580 @@ -632,7 +632,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55581 goto err;
55582
55583 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
55584 - vec[i].iov_base = (void __user *) page_address(page);
55585 + vec[i].iov_base = (void __force_user *) page_address(page);
55586 vec[i].iov_len = this_len;
55587 spd.pages[i] = page;
55588 spd.nr_pages++;
55589 @@ -853,10 +853,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
55590 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
55591 {
55592 while (!pipe->nrbufs) {
55593 - if (!pipe->writers)
55594 + if (!atomic_read(&pipe->writers))
55595 return 0;
55596
55597 - if (!pipe->waiting_writers && sd->num_spliced)
55598 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
55599 return 0;
55600
55601 if (sd->flags & SPLICE_F_NONBLOCK)
55602 @@ -1192,7 +1192,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
55603 * out of the pipe right after the splice_to_pipe(). So set
55604 * PIPE_READERS appropriately.
55605 */
55606 - pipe->readers = 1;
55607 + atomic_set(&pipe->readers, 1);
55608
55609 current->splice_pipe = pipe;
55610 }
55611 @@ -1741,9 +1741,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55612 ret = -ERESTARTSYS;
55613 break;
55614 }
55615 - if (!pipe->writers)
55616 + if (!atomic_read(&pipe->writers))
55617 break;
55618 - if (!pipe->waiting_writers) {
55619 + if (!atomic_read(&pipe->waiting_writers)) {
55620 if (flags & SPLICE_F_NONBLOCK) {
55621 ret = -EAGAIN;
55622 break;
55623 @@ -1775,7 +1775,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55624 pipe_lock(pipe);
55625
55626 while (pipe->nrbufs >= pipe->buffers) {
55627 - if (!pipe->readers) {
55628 + if (!atomic_read(&pipe->readers)) {
55629 send_sig(SIGPIPE, current, 0);
55630 ret = -EPIPE;
55631 break;
55632 @@ -1788,9 +1788,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55633 ret = -ERESTARTSYS;
55634 break;
55635 }
55636 - pipe->waiting_writers++;
55637 + atomic_inc(&pipe->waiting_writers);
55638 pipe_wait(pipe);
55639 - pipe->waiting_writers--;
55640 + atomic_dec(&pipe->waiting_writers);
55641 }
55642
55643 pipe_unlock(pipe);
55644 @@ -1826,14 +1826,14 @@ retry:
55645 pipe_double_lock(ipipe, opipe);
55646
55647 do {
55648 - if (!opipe->readers) {
55649 + if (!atomic_read(&opipe->readers)) {
55650 send_sig(SIGPIPE, current, 0);
55651 if (!ret)
55652 ret = -EPIPE;
55653 break;
55654 }
55655
55656 - if (!ipipe->nrbufs && !ipipe->writers)
55657 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55658 break;
55659
55660 /*
55661 @@ -1930,7 +1930,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55662 pipe_double_lock(ipipe, opipe);
55663
55664 do {
55665 - if (!opipe->readers) {
55666 + if (!atomic_read(&opipe->readers)) {
55667 send_sig(SIGPIPE, current, 0);
55668 if (!ret)
55669 ret = -EPIPE;
55670 @@ -1975,7 +1975,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55671 * return EAGAIN if we have the potential of some data in the
55672 * future, otherwise just return 0
55673 */
55674 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55675 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55676 ret = -EAGAIN;
55677
55678 pipe_unlock(ipipe);
55679 diff --git a/fs/stat.c b/fs/stat.c
55680 index 04ce1ac..a13dd1e 100644
55681 --- a/fs/stat.c
55682 +++ b/fs/stat.c
55683 @@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
55684 stat->gid = inode->i_gid;
55685 stat->rdev = inode->i_rdev;
55686 stat->size = i_size_read(inode);
55687 - stat->atime = inode->i_atime;
55688 - stat->mtime = inode->i_mtime;
55689 + if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
55690 + stat->atime = inode->i_ctime;
55691 + stat->mtime = inode->i_ctime;
55692 + } else {
55693 + stat->atime = inode->i_atime;
55694 + stat->mtime = inode->i_mtime;
55695 + }
55696 stat->ctime = inode->i_ctime;
55697 stat->blksize = (1 << inode->i_blkbits);
55698 stat->blocks = inode->i_blocks;
55699 @@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
55700 if (retval)
55701 return retval;
55702
55703 - if (inode->i_op->getattr)
55704 - return inode->i_op->getattr(path->mnt, path->dentry, stat);
55705 + if (inode->i_op->getattr) {
55706 + retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
55707 + if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
55708 + stat->atime = stat->ctime;
55709 + stat->mtime = stat->ctime;
55710 + }
55711 + return retval;
55712 + }
55713
55714 generic_fillattr(inode, stat);
55715 return 0;
55716 diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
55717 index 15c68f9..36a8b3e 100644
55718 --- a/fs/sysfs/bin.c
55719 +++ b/fs/sysfs/bin.c
55720 @@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
55721 return ret;
55722 }
55723
55724 -static int bin_access(struct vm_area_struct *vma, unsigned long addr,
55725 - void *buf, int len, int write)
55726 +static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
55727 + void *buf, size_t len, int write)
55728 {
55729 struct file *file = vma->vm_file;
55730 struct bin_buffer *bb = file->private_data;
55731 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
55732 - int ret;
55733 + ssize_t ret;
55734
55735 if (!bb->vm_ops)
55736 return -EINVAL;
55737 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55738 index 6f31590..3c87c8a 100644
55739 --- a/fs/sysfs/dir.c
55740 +++ b/fs/sysfs/dir.c
55741 @@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
55742 *
55743 * Returns 31 bit hash of ns + name (so it fits in an off_t )
55744 */
55745 -static unsigned int sysfs_name_hash(const void *ns, const char *name)
55746 +static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
55747 {
55748 unsigned long hash = init_name_hash();
55749 unsigned int len = strlen(name);
55750 @@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55751 struct sysfs_dirent *sd;
55752 int rc;
55753
55754 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55755 + const char *parent_name = parent_sd->s_name;
55756 +
55757 + mode = S_IFDIR | S_IRWXU;
55758 +
55759 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55760 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55761 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55762 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55763 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55764 +#endif
55765 +
55766 /* allocate */
55767 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55768 if (!sd)
55769 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55770 index 602f56d..6853db8 100644
55771 --- a/fs/sysfs/file.c
55772 +++ b/fs/sysfs/file.c
55773 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55774
55775 struct sysfs_open_dirent {
55776 atomic_t refcnt;
55777 - atomic_t event;
55778 + atomic_unchecked_t event;
55779 wait_queue_head_t poll;
55780 struct list_head buffers; /* goes through sysfs_buffer.list */
55781 };
55782 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55783 if (!sysfs_get_active(attr_sd))
55784 return -ENODEV;
55785
55786 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55787 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55788 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55789
55790 sysfs_put_active(attr_sd);
55791 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55792 return -ENOMEM;
55793
55794 atomic_set(&new_od->refcnt, 0);
55795 - atomic_set(&new_od->event, 1);
55796 + atomic_set_unchecked(&new_od->event, 1);
55797 init_waitqueue_head(&new_od->poll);
55798 INIT_LIST_HEAD(&new_od->buffers);
55799 goto retry;
55800 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55801
55802 sysfs_put_active(attr_sd);
55803
55804 - if (buffer->event != atomic_read(&od->event))
55805 + if (buffer->event != atomic_read_unchecked(&od->event))
55806 goto trigger;
55807
55808 return DEFAULT_POLLMASK;
55809 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55810
55811 od = sd->s_attr.open;
55812 if (od) {
55813 - atomic_inc(&od->event);
55814 + atomic_inc_unchecked(&od->event);
55815 wake_up_interruptible(&od->poll);
55816 }
55817
55818 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55819 index 8c940df..25b733e 100644
55820 --- a/fs/sysfs/symlink.c
55821 +++ b/fs/sysfs/symlink.c
55822 @@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55823
55824 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55825 {
55826 - char *page = nd_get_link(nd);
55827 + const char *page = nd_get_link(nd);
55828 if (!IS_ERR(page))
55829 free_page((unsigned long)page);
55830 }
55831 diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
55832 index 69d4889..a810bd4 100644
55833 --- a/fs/sysv/sysv.h
55834 +++ b/fs/sysv/sysv.h
55835 @@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
55836 #endif
55837 }
55838
55839 -static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
55840 +static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
55841 {
55842 if (sbi->s_bytesex == BYTESEX_PDP)
55843 return PDP_swab((__force __u32)n);
55844 diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
55845 index e18b988..f1d4ad0f 100644
55846 --- a/fs/ubifs/io.c
55847 +++ b/fs/ubifs/io.c
55848 @@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
55849 return err;
55850 }
55851
55852 -int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
55853 +int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
55854 {
55855 int err;
55856
55857 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55858 index c175b4d..8f36a16 100644
55859 --- a/fs/udf/misc.c
55860 +++ b/fs/udf/misc.c
55861 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55862
55863 u8 udf_tag_checksum(const struct tag *t)
55864 {
55865 - u8 *data = (u8 *)t;
55866 + const u8 *data = (const u8 *)t;
55867 u8 checksum = 0;
55868 int i;
55869 for (i = 0; i < sizeof(struct tag); ++i)
55870 diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
55871 index 8d974c4..b82f6ec 100644
55872 --- a/fs/ufs/swab.h
55873 +++ b/fs/ufs/swab.h
55874 @@ -22,7 +22,7 @@ enum {
55875 BYTESEX_BE
55876 };
55877
55878 -static inline u64
55879 +static inline u64 __intentional_overflow(-1)
55880 fs64_to_cpu(struct super_block *sbp, __fs64 n)
55881 {
55882 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
55883 @@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
55884 return (__force __fs64)cpu_to_be64(n);
55885 }
55886
55887 -static inline u32
55888 +static inline u32 __intentional_overflow(-1)
55889 fs32_to_cpu(struct super_block *sbp, __fs32 n)
55890 {
55891 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
55892 diff --git a/fs/utimes.c b/fs/utimes.c
55893 index f4fb7ec..3fe03c0 100644
55894 --- a/fs/utimes.c
55895 +++ b/fs/utimes.c
55896 @@ -1,6 +1,7 @@
55897 #include <linux/compiler.h>
55898 #include <linux/file.h>
55899 #include <linux/fs.h>
55900 +#include <linux/security.h>
55901 #include <linux/linkage.h>
55902 #include <linux/mount.h>
55903 #include <linux/namei.h>
55904 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55905 goto mnt_drop_write_and_out;
55906 }
55907 }
55908 +
55909 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55910 + error = -EACCES;
55911 + goto mnt_drop_write_and_out;
55912 + }
55913 +
55914 mutex_lock(&inode->i_mutex);
55915 error = notify_change(path->dentry, &newattrs);
55916 mutex_unlock(&inode->i_mutex);
55917 diff --git a/fs/xattr.c b/fs/xattr.c
55918 index 3377dff..4d074d9 100644
55919 --- a/fs/xattr.c
55920 +++ b/fs/xattr.c
55921 @@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
55922 return rc;
55923 }
55924
55925 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
55926 +ssize_t
55927 +pax_getxattr(struct dentry *dentry, void *value, size_t size)
55928 +{
55929 + struct inode *inode = dentry->d_inode;
55930 + ssize_t error;
55931 +
55932 + error = inode_permission(inode, MAY_EXEC);
55933 + if (error)
55934 + return error;
55935 +
55936 + if (inode->i_op->getxattr)
55937 + error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
55938 + else
55939 + error = -EOPNOTSUPP;
55940 +
55941 + return error;
55942 +}
55943 +EXPORT_SYMBOL(pax_getxattr);
55944 +#endif
55945 +
55946 ssize_t
55947 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
55948 {
55949 @@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55950 * Extended attribute SET operations
55951 */
55952 static long
55953 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
55954 +setxattr(struct path *path, const char __user *name, const void __user *value,
55955 size_t size, int flags)
55956 {
55957 int error;
55958 @@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55959 posix_acl_fix_xattr_from_user(kvalue, size);
55960 }
55961
55962 - error = vfs_setxattr(d, kname, kvalue, size, flags);
55963 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55964 + error = -EACCES;
55965 + goto out;
55966 + }
55967 +
55968 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55969 out:
55970 if (vvalue)
55971 vfree(vvalue);
55972 @@ -377,7 +403,7 @@ retry:
55973 return error;
55974 error = mnt_want_write(path.mnt);
55975 if (!error) {
55976 - error = setxattr(path.dentry, name, value, size, flags);
55977 + error = setxattr(&path, name, value, size, flags);
55978 mnt_drop_write(path.mnt);
55979 }
55980 path_put(&path);
55981 @@ -401,7 +427,7 @@ retry:
55982 return error;
55983 error = mnt_want_write(path.mnt);
55984 if (!error) {
55985 - error = setxattr(path.dentry, name, value, size, flags);
55986 + error = setxattr(&path, name, value, size, flags);
55987 mnt_drop_write(path.mnt);
55988 }
55989 path_put(&path);
55990 @@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55991 const void __user *,value, size_t, size, int, flags)
55992 {
55993 struct fd f = fdget(fd);
55994 - struct dentry *dentry;
55995 int error = -EBADF;
55996
55997 if (!f.file)
55998 return error;
55999 - dentry = f.file->f_path.dentry;
56000 - audit_inode(NULL, dentry, 0);
56001 + audit_inode(NULL, f.file->f_path.dentry, 0);
56002 error = mnt_want_write_file(f.file);
56003 if (!error) {
56004 - error = setxattr(dentry, name, value, size, flags);
56005 + error = setxattr(&f.file->f_path, name, value, size, flags);
56006 mnt_drop_write_file(f.file);
56007 }
56008 fdput(f);
56009 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
56010 index 9fbea87..6b19972 100644
56011 --- a/fs/xattr_acl.c
56012 +++ b/fs/xattr_acl.c
56013 @@ -76,8 +76,8 @@ struct posix_acl *
56014 posix_acl_from_xattr(struct user_namespace *user_ns,
56015 const void *value, size_t size)
56016 {
56017 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
56018 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
56019 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
56020 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
56021 int count;
56022 struct posix_acl *acl;
56023 struct posix_acl_entry *acl_e;
56024 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
56025 index b44af92..06073da 100644
56026 --- a/fs/xfs/xfs_bmap.c
56027 +++ b/fs/xfs/xfs_bmap.c
56028 @@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
56029 int nmap,
56030 int ret_nmap);
56031 #else
56032 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
56033 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
56034 #endif /* DEBUG */
56035
56036 STATIC int
56037 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
56038 index 1b9fc3e..e1bdde0 100644
56039 --- a/fs/xfs/xfs_dir2_sf.c
56040 +++ b/fs/xfs/xfs_dir2_sf.c
56041 @@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
56042 }
56043
56044 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
56045 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56046 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
56047 + char name[sfep->namelen];
56048 + memcpy(name, sfep->name, sfep->namelen);
56049 + if (filldir(dirent, name, sfep->namelen,
56050 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
56051 + *offset = off & 0x7fffffff;
56052 + return 0;
56053 + }
56054 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56055 off & 0x7fffffff, ino, DT_UNKNOWN)) {
56056 *offset = off & 0x7fffffff;
56057 return 0;
56058 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
56059 index d681e34..2a3f5ab 100644
56060 --- a/fs/xfs/xfs_ioctl.c
56061 +++ b/fs/xfs/xfs_ioctl.c
56062 @@ -127,7 +127,7 @@ xfs_find_handle(
56063 }
56064
56065 error = -EFAULT;
56066 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
56067 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
56068 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
56069 goto out_put;
56070
56071 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
56072 index d82efaa..0904a8e 100644
56073 --- a/fs/xfs/xfs_iops.c
56074 +++ b/fs/xfs/xfs_iops.c
56075 @@ -395,7 +395,7 @@ xfs_vn_put_link(
56076 struct nameidata *nd,
56077 void *p)
56078 {
56079 - char *s = nd_get_link(nd);
56080 + const char *s = nd_get_link(nd);
56081
56082 if (!IS_ERR(s))
56083 kfree(s);
56084 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
56085 new file mode 100644
56086 index 0000000..7174794
56087 --- /dev/null
56088 +++ b/grsecurity/Kconfig
56089 @@ -0,0 +1,1031 @@
56090 +#
56091 +# grsecurity configuration
56092 +#
56093 +menu "Memory Protections"
56094 +depends on GRKERNSEC
56095 +
56096 +config GRKERNSEC_KMEM
56097 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56098 + default y if GRKERNSEC_CONFIG_AUTO
56099 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56100 + help
56101 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56102 + be written to or read from to modify or leak the contents of the running
56103 + kernel. /dev/port will also not be allowed to be opened and support
56104 + for /dev/cpu/*/msr will be removed. If you have module
56105 + support disabled, enabling this will close up five ways that are
56106 + currently used to insert malicious code into the running kernel.
56107 +
56108 + Even with all these features enabled, we still highly recommend that
56109 + you use the RBAC system, as it is still possible for an attacker to
56110 + modify the running kernel through privileged I/O granted by ioperm/iopl.
56111 +
56112 + If you are not using XFree86, you may be able to stop this additional
56113 + case by enabling the 'Disable privileged I/O' option. Though nothing
56114 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56115 + but only to video memory, which is the only writing we allow in this
56116 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56117 + not be allowed to mprotect it with PROT_WRITE later.
56118 + Enabling this feature will prevent the "cpupower" and "powertop" tools
56119 + from working.
56120 +
56121 + It is highly recommended that you say Y here if you meet all the
56122 + conditions above.
56123 +
56124 +config GRKERNSEC_VM86
56125 + bool "Restrict VM86 mode"
56126 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56127 + depends on X86_32
56128 +
56129 + help
56130 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56131 + make use of a special execution mode on 32bit x86 processors called
56132 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56133 + video cards and will still work with this option enabled. The purpose
56134 + of the option is to prevent exploitation of emulation errors in
56135 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
56136 + Nearly all users should be able to enable this option.
56137 +
56138 +config GRKERNSEC_IO
56139 + bool "Disable privileged I/O"
56140 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56141 + depends on X86
56142 + select RTC_CLASS
56143 + select RTC_INTF_DEV
56144 + select RTC_DRV_CMOS
56145 +
56146 + help
56147 + If you say Y here, all ioperm and iopl calls will return an error.
56148 + Ioperm and iopl can be used to modify the running kernel.
56149 + Unfortunately, some programs need this access to operate properly,
56150 + the most notable of which are XFree86 and hwclock. hwclock can be
56151 + remedied by having RTC support in the kernel, so real-time
56152 + clock support is enabled if this option is enabled, to ensure
56153 + that hwclock operates correctly. XFree86 still will not
56154 + operate correctly with this option enabled, so DO NOT CHOOSE Y
56155 + IF YOU USE XFree86. If you use XFree86 and you still want to
56156 + protect your kernel against modification, use the RBAC system.
56157 +
56158 +config GRKERNSEC_JIT_HARDEN
56159 + bool "Harden BPF JIT against spray attacks"
56160 + default y if GRKERNSEC_CONFIG_AUTO
56161 + depends on BPF_JIT
56162 + help
56163 + If you say Y here, the native code generated by the kernel's Berkeley
56164 + Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
56165 + attacks that attempt to fit attacker-beneficial instructions in
56166 + 32bit immediate fields of JIT-generated native instructions. The
56167 + attacker will generally aim to cause an unintended instruction sequence
56168 + of JIT-generated native code to execute by jumping into the middle of
56169 + a generated instruction. This feature effectively randomizes the 32bit
56170 + immediate constants present in the generated code to thwart such attacks.
56171 +
56172 + If you're using KERNEXEC, it's recommended that you enable this option
56173 + to supplement the hardening of the kernel.
56174 +
56175 +config GRKERNSEC_RAND_THREADSTACK
56176 + bool "Insert random gaps between thread stacks"
56177 + default y if GRKERNSEC_CONFIG_AUTO
56178 + depends on PAX_RANDMMAP && !PPC && BROKEN
56179 + help
56180 + If you say Y here, a random-sized gap will be enforced between allocated
56181 + thread stacks. Glibc's NPTL and other threading libraries that
56182 + pass MAP_STACK to the kernel for thread stack allocation are supported.
56183 + The implementation currently provides 8 bits of entropy for the gap.
56184 +
56185 + Many distributions do not compile threaded remote services with the
56186 + -fstack-check argument to GCC, causing the variable-sized stack-based
56187 + allocator, alloca(), to not probe the stack on allocation. This
56188 + permits an unbounded alloca() to skip over any guard page and potentially
56189 + modify another thread's stack reliably. An enforced random gap
56190 + reduces the reliability of such an attack and increases the chance
56191 + that such a read/write to another thread's stack instead lands in
56192 + an unmapped area, causing a crash and triggering grsecurity's
56193 + anti-bruteforcing logic.
56194 +
56195 +config GRKERNSEC_PROC_MEMMAP
56196 + bool "Harden ASLR against information leaks and entropy reduction"
56197 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
56198 + depends on PAX_NOEXEC || PAX_ASLR
56199 + help
56200 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56201 + give no information about the addresses of its mappings if
56202 + PaX features that rely on random addresses are enabled on the task.
56203 + In addition to sanitizing this information and disabling other
56204 + dangerous sources of information, this option restricts reads of sensitive
56205 + /proc/<pid> entries where the file descriptor was opened in a different
56206 + task than the one performing the read. Such attempts are logged.
56207 + This option also limits argv/env strings for suid/sgid binaries
56208 + to 512KB to prevent a complete exhaustion of the stack entropy provided
56209 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
56210 + binaries to prevent alternative mmap layouts from being abused.
56211 +
56212 + If you use PaX it is essential that you say Y here as it closes up
56213 + several holes that make full ASLR useless locally.
56214 +
56215 +config GRKERNSEC_BRUTE
56216 + bool "Deter exploit bruteforcing"
56217 + default y if GRKERNSEC_CONFIG_AUTO
56218 + help
56219 + If you say Y here, attempts to bruteforce exploits against forking
56220 + daemons such as apache or sshd, as well as against suid/sgid binaries
56221 + will be deterred. When a child of a forking daemon is killed by PaX
56222 + or crashes due to an illegal instruction or other suspicious signal,
56223 + the parent process will be delayed 30 seconds upon every subsequent
56224 + fork until the administrator is able to assess the situation and
56225 + restart the daemon.
56226 + In the suid/sgid case, the attempt is logged, the user has all their
56227 + processes terminated, and they are prevented from executing any further
56228 + processes for 15 minutes.
56229 + It is recommended that you also enable signal logging in the auditing
56230 + section so that logs are generated when a process triggers a suspicious
56231 + signal.
56232 + If the sysctl option is enabled, a sysctl option with name
56233 + "deter_bruteforce" is created.
56234 +
56235 +
56236 +config GRKERNSEC_MODHARDEN
56237 + bool "Harden module auto-loading"
56238 + default y if GRKERNSEC_CONFIG_AUTO
56239 + depends on MODULES
56240 + help
56241 + If you say Y here, module auto-loading in response to use of some
56242 + feature implemented by an unloaded module will be restricted to
56243 + root users. Enabling this option helps defend against attacks
56244 + by unprivileged users who abuse the auto-loading behavior to
56245 + cause a vulnerable module to load that is then exploited.
56246 +
56247 + If this option prevents a legitimate use of auto-loading for a
56248 + non-root user, the administrator can execute modprobe manually
56249 + with the exact name of the module mentioned in the alert log.
56250 + Alternatively, the administrator can add the module to the list
56251 + of modules loaded at boot by modifying init scripts.
56252 +
56253 + Modification of init scripts will most likely be needed on
56254 + Ubuntu servers with encrypted home directory support enabled,
56255 + as the first non-root user logging in will cause the ecb(aes),
56256 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56257 +
56258 +config GRKERNSEC_HIDESYM
56259 + bool "Hide kernel symbols"
56260 + default y if GRKERNSEC_CONFIG_AUTO
56261 + select PAX_USERCOPY_SLABS
56262 + help
56263 + If you say Y here, getting information on loaded modules, and
56264 + displaying all kernel symbols through a syscall will be restricted
56265 + to users with CAP_SYS_MODULE. For software compatibility reasons,
56266 + /proc/kallsyms will be restricted to the root user. The RBAC
56267 + system can hide that entry even from root.
56268 +
56269 + This option also prevents leaking of kernel addresses through
56270 + several /proc entries.
56271 +
56272 + Note that this option is only effective provided the following
56273 + conditions are met:
56274 + 1) The kernel using grsecurity is not precompiled by some distribution
56275 + 2) You have also enabled GRKERNSEC_DMESG
56276 + 3) You are using the RBAC system and hiding other files such as your
56277 + kernel image and System.map. Alternatively, enabling this option
56278 + causes the permissions on /boot, /lib/modules, and the kernel
56279 + source directory to change at compile time to prevent
56280 + reading by non-root users.
56281 + If the above conditions are met, this option will aid in providing a
56282 + useful protection against local kernel exploitation of overflows
56283 + and arbitrary read/write vulnerabilities.
56284 +
56285 +config GRKERNSEC_KERN_LOCKOUT
56286 + bool "Active kernel exploit response"
56287 + default y if GRKERNSEC_CONFIG_AUTO
56288 + depends on X86 || ARM || PPC || SPARC
56289 + help
56290 + If you say Y here, when a PaX alert is triggered due to suspicious
56291 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56292 + or an OOPS occurs due to bad memory accesses, instead of just
56293 + terminating the offending process (and potentially allowing
56294 + a subsequent exploit from the same user), we will take one of two
56295 + actions:
56296 + If the user was root, we will panic the system
56297 + If the user was non-root, we will log the attempt, terminate
56298 + all processes owned by the user, then prevent them from creating
56299 + any new processes until the system is restarted
56300 + This deters repeated kernel exploitation/bruteforcing attempts
56301 + and is useful for later forensics.
56302 +
56303 +endmenu
56304 +menu "Role Based Access Control Options"
56305 +depends on GRKERNSEC
56306 +
56307 +config GRKERNSEC_RBAC_DEBUG
56308 + bool
56309 +
56310 +config GRKERNSEC_NO_RBAC
56311 + bool "Disable RBAC system"
56312 + help
56313 + If you say Y here, the /dev/grsec device will be removed from the kernel,
56314 + preventing the RBAC system from being enabled. You should only say Y
56315 + here if you have no intention of using the RBAC system, so as to prevent
56316 + an attacker with root access from misusing the RBAC system to hide files
56317 + and processes when loadable module support and /dev/[k]mem have been
56318 + locked down.
56319 +
56320 +config GRKERNSEC_ACL_HIDEKERN
56321 + bool "Hide kernel processes"
56322 + help
56323 + If you say Y here, all kernel threads will be hidden to all
56324 + processes but those whose subject has the "view hidden processes"
56325 + flag.
56326 +
56327 +config GRKERNSEC_ACL_MAXTRIES
56328 + int "Maximum tries before password lockout"
56329 + default 3
56330 + help
56331 + This option enforces the maximum number of times a user can attempt
56332 + to authorize themselves with the grsecurity RBAC system before being
56333 + denied the ability to attempt authorization again for a specified time.
56334 + The lower the number, the harder it will be to brute-force a password.
56335 +
56336 +config GRKERNSEC_ACL_TIMEOUT
56337 + int "Time to wait after max password tries, in seconds"
56338 + default 30
56339 + help
56340 + This option specifies the time the user must wait after attempting to
56341 + authorize to the RBAC system with the maximum number of invalid
56342 + passwords. The higher the number, the harder it will be to brute-force
56343 + a password.
56344 +
56345 +endmenu
56346 +menu "Filesystem Protections"
56347 +depends on GRKERNSEC
56348 +
56349 +config GRKERNSEC_PROC
56350 + bool "Proc restrictions"
56351 + default y if GRKERNSEC_CONFIG_AUTO
56352 + help
56353 + If you say Y here, the permissions of the /proc filesystem
56354 + will be altered to enhance system security and privacy. You MUST
56355 + choose either a user only restriction or a user and group restriction.
56356 + Depending upon the option you choose, you can either restrict users to
56357 + see only the processes they themselves run, or choose a group that can
56358 + view all processes and files normally restricted to root if you choose
56359 + the "restrict to user only" option. NOTE: If you're running identd or
56360 + ntpd as a non-root user, you will have to run it as the group you
56361 + specify here.
56362 +
56363 +config GRKERNSEC_PROC_USER
56364 + bool "Restrict /proc to user only"
56365 + depends on GRKERNSEC_PROC
56366 + help
56367 + If you say Y here, non-root users will only be able to view their own
56368 + processes, and restricts them from viewing network-related information,
56369 + and viewing kernel symbol and module information.
56370 +
56371 +config GRKERNSEC_PROC_USERGROUP
56372 + bool "Allow special group"
56373 + default y if GRKERNSEC_CONFIG_AUTO
56374 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56375 + help
56376 + If you say Y here, you will be able to select a group that will be
56377 + able to view all processes and network-related information. If you've
56378 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56379 + remain hidden. This option is useful if you want to run identd as
56380 + a non-root user. The group you select may also be chosen at boot time
56381 + via "grsec_proc_gid=" on the kernel commandline.
56382 +
56383 +config GRKERNSEC_PROC_GID
56384 + int "GID for special group"
56385 + depends on GRKERNSEC_PROC_USERGROUP
56386 + default 1001
56387 +
56388 +config GRKERNSEC_PROC_ADD
56389 + bool "Additional restrictions"
56390 + default y if GRKERNSEC_CONFIG_AUTO
56391 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56392 + help
56393 + If you say Y here, additional restrictions will be placed on
56394 + /proc that keep normal users from viewing device information and
56395 + slabinfo information that could be useful for exploits.
56396 +
56397 +config GRKERNSEC_LINK
56398 + bool "Linking restrictions"
56399 + default y if GRKERNSEC_CONFIG_AUTO
56400 + help
56401 + If you say Y here, /tmp race exploits will be prevented, since users
56402 + will no longer be able to follow symlinks owned by other users in
56403 + world-writable +t directories (e.g. /tmp), unless the owner of the
56404 + symlink is the owner of the directory. Users will also not be
56405 + able to hardlink to files they do not own. If the sysctl option is
56406 + enabled, a sysctl option with name "linking_restrictions" is created.
56407 +
56408 +config GRKERNSEC_SYMLINKOWN
56409 + bool "Kernel-enforced SymlinksIfOwnerMatch"
56410 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
56411 + help
56412 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
56413 + that prevents it from being used as a security feature. As Apache
56414 + verifies the symlink by performing a stat() against the target of
56415 + the symlink before it is followed, an attacker can setup a symlink
56416 + to point to a same-owned file, then replace the symlink with one
56417 + that targets another user's file just after Apache "validates" the
56418 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
56419 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
56420 + will be in place for the group you specify. If the sysctl option
56421 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
56422 + created.
56423 +
56424 +config GRKERNSEC_SYMLINKOWN_GID
56425 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
56426 + depends on GRKERNSEC_SYMLINKOWN
56427 + default 1006
56428 + help
56429 + Setting this GID determines what group kernel-enforced
56430 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
56431 + is enabled, a sysctl option with name "symlinkown_gid" is created.
56432 +
56433 +config GRKERNSEC_FIFO
56434 + bool "FIFO restrictions"
56435 + default y if GRKERNSEC_CONFIG_AUTO
56436 + help
56437 + If you say Y here, users will not be able to write to FIFOs they don't
56438 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56439 + the FIFO is the same owner of the directory it's held in. If the sysctl
56440 + option is enabled, a sysctl option with name "fifo_restrictions" is
56441 + created.
56442 +
56443 +config GRKERNSEC_SYSFS_RESTRICT
56444 + bool "Sysfs/debugfs restriction"
56445 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56446 + depends on SYSFS
56447 + help
56448 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56449 + any filesystem normally mounted under it (e.g. debugfs) will be
56450 + mostly accessible only by root. These filesystems generally provide access
56451 + to hardware and debug information that isn't appropriate for unprivileged
56452 + users of the system. Sysfs and debugfs have also become a large source
56453 + of new vulnerabilities, ranging from infoleaks to local compromise.
56454 + There has been very little oversight with an eye toward security involved
56455 + in adding new exporters of information to these filesystems, so their
56456 + use is discouraged.
56457 + For reasons of compatibility, a few directories have been whitelisted
56458 + for access by non-root users:
56459 + /sys/fs/selinux
56460 + /sys/fs/fuse
56461 + /sys/devices/system/cpu
56462 +
56463 +config GRKERNSEC_ROFS
56464 + bool "Runtime read-only mount protection"
56465 + help
56466 + If you say Y here, a sysctl option with name "romount_protect" will
56467 + be created. By setting this option to 1 at runtime, filesystems
56468 + will be protected in the following ways:
56469 + * No new writable mounts will be allowed
56470 + * Existing read-only mounts won't be able to be remounted read/write
56471 + * Write operations will be denied on all block devices
56472 + This option acts independently of grsec_lock: once it is set to 1,
56473 + it cannot be turned off. Therefore, please be mindful of the resulting
56474 + behavior if this option is enabled in an init script on a read-only
56475 + filesystem. This feature is mainly intended for secure embedded systems.
56476 +
56477 +config GRKERNSEC_DEVICE_SIDECHANNEL
56478 + bool "Eliminate stat/notify-based device sidechannels"
56479 + default y if GRKERNSEC_CONFIG_AUTO
56480 + help
56481 + If you say Y here, timing analyses on block or character
56482 + devices like /dev/ptmx using stat or inotify/dnotify/fanotify
56483 + will be thwarted for unprivileged users. If a process without
56484 + CAP_MKNOD stats such a device, the last access and last modify times
56485 + will match the device's create time. No access or modify events
56486 + will be triggered through inotify/dnotify/fanotify for such devices.
56487 + This feature will prevent attacks that may at a minimum
56488 + allow an attacker to determine the administrator's password length.
56489 +
56490 +config GRKERNSEC_CHROOT
56491 + bool "Chroot jail restrictions"
56492 + default y if GRKERNSEC_CONFIG_AUTO
56493 + help
56494 + If you say Y here, you will be able to choose several options that will
56495 + make breaking out of a chrooted jail much more difficult. If you
56496 + encounter no software incompatibilities with the following options, it
56497 + is recommended that you enable each one.
56498 +
56499 +config GRKERNSEC_CHROOT_MOUNT
56500 + bool "Deny mounts"
56501 + default y if GRKERNSEC_CONFIG_AUTO
56502 + depends on GRKERNSEC_CHROOT
56503 + help
56504 + If you say Y here, processes inside a chroot will not be able to
56505 + mount or remount filesystems. If the sysctl option is enabled, a
56506 + sysctl option with name "chroot_deny_mount" is created.
56507 +
56508 +config GRKERNSEC_CHROOT_DOUBLE
56509 + bool "Deny double-chroots"
56510 + default y if GRKERNSEC_CONFIG_AUTO
56511 + depends on GRKERNSEC_CHROOT
56512 + help
56513 + If you say Y here, processes inside a chroot will not be able to chroot
56514 + again outside the chroot. This is a widely used method of breaking
56515 + out of a chroot jail and should not be allowed. If the sysctl
56516 + option is enabled, a sysctl option with name
56517 + "chroot_deny_chroot" is created.
56518 +
56519 +config GRKERNSEC_CHROOT_PIVOT
56520 + bool "Deny pivot_root in chroot"
56521 + default y if GRKERNSEC_CONFIG_AUTO
56522 + depends on GRKERNSEC_CHROOT
56523 + help
56524 + If you say Y here, processes inside a chroot will not be able to use
56525 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56526 + works similar to chroot in that it changes the root filesystem. This
56527 + function could be misused in a chrooted process to attempt to break out
56528 + of the chroot, and therefore should not be allowed. If the sysctl
56529 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56530 + created.
56531 +
56532 +config GRKERNSEC_CHROOT_CHDIR
56533 + bool "Enforce chdir(\"/\") on all chroots"
56534 + default y if GRKERNSEC_CONFIG_AUTO
56535 + depends on GRKERNSEC_CHROOT
56536 + help
56537 + If you say Y here, the current working directory of all newly-chrooted
56538 + applications will be set to the root directory of the chroot.
56539 + The man page on chroot(2) states:
56540 + Note that this call does not change the current working
56541 + directory, so that `.' can be outside the tree rooted at
56542 + `/'. In particular, the super-user can escape from a
56543 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56544 +
56545 + It is recommended that you say Y here, since it's not known to break
56546 + any software. If the sysctl option is enabled, a sysctl option with
56547 + name "chroot_enforce_chdir" is created.
56548 +
56549 +config GRKERNSEC_CHROOT_CHMOD
56550 + bool "Deny (f)chmod +s"
56551 + default y if GRKERNSEC_CONFIG_AUTO
56552 + depends on GRKERNSEC_CHROOT
56553 + help
56554 + If you say Y here, processes inside a chroot will not be able to chmod
56555 + or fchmod files to make them have suid or sgid bits. This protects
56556 + against another published method of breaking a chroot. If the sysctl
56557 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56558 + created.
56559 +
56560 +config GRKERNSEC_CHROOT_FCHDIR
56561 + bool "Deny fchdir out of chroot"
56562 + default y if GRKERNSEC_CONFIG_AUTO
56563 + depends on GRKERNSEC_CHROOT
56564 + help
56565 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56566 + to a file descriptor of the chrooting process that points to a directory
56567 + outside the filesystem will be stopped. If the sysctl option
56568 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56569 +
56570 +config GRKERNSEC_CHROOT_MKNOD
56571 + bool "Deny mknod"
56572 + default y if GRKERNSEC_CONFIG_AUTO
56573 + depends on GRKERNSEC_CHROOT
56574 + help
56575 + If you say Y here, processes inside a chroot will not be allowed to
56576 + mknod. The problem with using mknod inside a chroot is that it
56577 + would allow an attacker to create a device entry that is the same
56578 + as one on the physical root of your system, which could range from
56579 + anything from the console device to a device for your harddrive (which
56580 + they could then use to wipe the drive or steal data). It is recommended
56581 + that you say Y here, unless you run into software incompatibilities.
56582 + If the sysctl option is enabled, a sysctl option with name
56583 + "chroot_deny_mknod" is created.
56584 +
56585 +config GRKERNSEC_CHROOT_SHMAT
56586 + bool "Deny shmat() out of chroot"
56587 + default y if GRKERNSEC_CONFIG_AUTO
56588 + depends on GRKERNSEC_CHROOT
56589 + help
56590 + If you say Y here, processes inside a chroot will not be able to attach
56591 + to shared memory segments that were created outside of the chroot jail.
56592 + It is recommended that you say Y here. If the sysctl option is enabled,
56593 + a sysctl option with name "chroot_deny_shmat" is created.
56594 +
56595 +config GRKERNSEC_CHROOT_UNIX
56596 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56597 + default y if GRKERNSEC_CONFIG_AUTO
56598 + depends on GRKERNSEC_CHROOT
56599 + help
56600 + If you say Y here, processes inside a chroot will not be able to
56601 + connect to abstract (meaning not belonging to a filesystem) Unix
56602 + domain sockets that were bound outside of a chroot. It is recommended
56603 + that you say Y here. If the sysctl option is enabled, a sysctl option
56604 + with name "chroot_deny_unix" is created.
56605 +
56606 +config GRKERNSEC_CHROOT_FINDTASK
56607 + bool "Protect outside processes"
56608 + default y if GRKERNSEC_CONFIG_AUTO
56609 + depends on GRKERNSEC_CHROOT
56610 + help
56611 + If you say Y here, processes inside a chroot will not be able to
56612 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56613 + getsid, or view any process outside of the chroot. If the sysctl
56614 + option is enabled, a sysctl option with name "chroot_findtask" is
56615 + created.
56616 +
56617 +config GRKERNSEC_CHROOT_NICE
56618 + bool "Restrict priority changes"
56619 + default y if GRKERNSEC_CONFIG_AUTO
56620 + depends on GRKERNSEC_CHROOT
56621 + help
56622 + If you say Y here, processes inside a chroot will not be able to raise
56623 + the priority of processes in the chroot, or alter the priority of
56624 + processes outside the chroot. This provides more security than simply
56625 + removing CAP_SYS_NICE from the process' capability set. If the
56626 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56627 + is created.
56628 +
56629 +config GRKERNSEC_CHROOT_SYSCTL
56630 + bool "Deny sysctl writes"
56631 + default y if GRKERNSEC_CONFIG_AUTO
56632 + depends on GRKERNSEC_CHROOT
56633 + help
56634 + If you say Y here, an attacker in a chroot will not be able to
56635 + write to sysctl entries, either by sysctl(2) or through a /proc
56636 + interface. It is strongly recommended that you say Y here. If the
56637 + sysctl option is enabled, a sysctl option with name
56638 + "chroot_deny_sysctl" is created.
56639 +
56640 +config GRKERNSEC_CHROOT_CAPS
56641 + bool "Capability restrictions"
56642 + default y if GRKERNSEC_CONFIG_AUTO
56643 + depends on GRKERNSEC_CHROOT
56644 + help
56645 + If you say Y here, the capabilities on all processes within a
56646 + chroot jail will be lowered to stop module insertion, raw i/o,
56647 + system and net admin tasks, rebooting the system, modifying immutable
56648 + files, modifying IPC owned by another, and changing the system time.
56649 + This is left an option because it can break some apps. Disable this
56650 + if your chrooted apps are having problems performing those kinds of
56651 + tasks. If the sysctl option is enabled, a sysctl option with
56652 + name "chroot_caps" is created.
56653 +
56654 +config GRKERNSEC_CHROOT_INITRD
56655 + bool "Exempt initrd tasks from restrictions"
56656 + default y if GRKERNSEC_CONFIG_AUTO
56657 + depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
56658 + help
56659 + If you say Y here, tasks started prior to init will be exempted from
56660 + grsecurity's chroot restrictions. This option is mainly meant to
56661 + resolve Plymouth's performing privileged operations unnecessarily
56662 + in a chroot.
56663 +
56664 +endmenu
56665 +menu "Kernel Auditing"
56666 +depends on GRKERNSEC
56667 +
56668 +config GRKERNSEC_AUDIT_GROUP
56669 + bool "Single group for auditing"
56670 + help
56671 + If you say Y here, the exec and chdir logging features will only operate
56672 + on a group you specify. This option is recommended if you only want to
56673 + watch certain users instead of having a large amount of logs from the
56674 + entire system. If the sysctl option is enabled, a sysctl option with
56675 + name "audit_group" is created.
56676 +
56677 +config GRKERNSEC_AUDIT_GID
56678 + int "GID for auditing"
56679 + depends on GRKERNSEC_AUDIT_GROUP
56680 + default 1007
56681 +
56682 +config GRKERNSEC_EXECLOG
56683 + bool "Exec logging"
56684 + help
56685 + If you say Y here, all execve() calls will be logged (since the
56686 + other exec*() calls are frontends to execve(), all execution
56687 + will be logged). Useful for shell-servers that like to keep track
56688 + of their users. If the sysctl option is enabled, a sysctl option with
56689 + name "exec_logging" is created.
56690 + WARNING: This option when enabled will produce a LOT of logs, especially
56691 + on an active system.
56692 +
56693 +config GRKERNSEC_RESLOG
56694 + bool "Resource logging"
56695 + default y if GRKERNSEC_CONFIG_AUTO
56696 + help
56697 + If you say Y here, all attempts to overstep resource limits will
56698 + be logged with the resource name, the requested size, and the current
56699 + limit. It is highly recommended that you say Y here. If the sysctl
56700 + option is enabled, a sysctl option with name "resource_logging" is
56701 + created. If the RBAC system is enabled, the sysctl value is ignored.
56702 +
56703 +config GRKERNSEC_CHROOT_EXECLOG
56704 + bool "Log execs within chroot"
56705 + help
56706 + If you say Y here, all executions inside a chroot jail will be logged
56707 + to syslog. This can cause a large amount of logs if certain
56708 + applications (eg. djb's daemontools) are installed on the system, and
56709 + is therefore left as an option. If the sysctl option is enabled, a
56710 + sysctl option with name "chroot_execlog" is created.
56711 +
56712 +config GRKERNSEC_AUDIT_PTRACE
56713 + bool "Ptrace logging"
56714 + help
56715 + If you say Y here, all attempts to attach to a process via ptrace
56716 + will be logged. If the sysctl option is enabled, a sysctl option
56717 + with name "audit_ptrace" is created.
56718 +
56719 +config GRKERNSEC_AUDIT_CHDIR
56720 + bool "Chdir logging"
56721 + help
56722 + If you say Y here, all chdir() calls will be logged. If the sysctl
56723 + option is enabled, a sysctl option with name "audit_chdir" is created.
56724 +
56725 +config GRKERNSEC_AUDIT_MOUNT
56726 + bool "(Un)Mount logging"
56727 + help
56728 + If you say Y here, all mounts and unmounts will be logged. If the
56729 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56730 + created.
56731 +
56732 +config GRKERNSEC_SIGNAL
56733 + bool "Signal logging"
56734 + default y if GRKERNSEC_CONFIG_AUTO
56735 + help
56736 + If you say Y here, certain important signals will be logged, such as
56737 + SIGSEGV, which will as a result inform you of when a error in a program
56738 + occurred, which in some cases could mean a possible exploit attempt.
56739 + If the sysctl option is enabled, a sysctl option with name
56740 + "signal_logging" is created.
56741 +
56742 +config GRKERNSEC_FORKFAIL
56743 + bool "Fork failure logging"
56744 + help
56745 + If you say Y here, all failed fork() attempts will be logged.
56746 + This could suggest a fork bomb, or someone attempting to overstep
56747 + their process limit. If the sysctl option is enabled, a sysctl option
56748 + with name "forkfail_logging" is created.
56749 +
56750 +config GRKERNSEC_TIME
56751 + bool "Time change logging"
56752 + default y if GRKERNSEC_CONFIG_AUTO
56753 + help
56754 + If you say Y here, any changes of the system clock will be logged.
56755 + If the sysctl option is enabled, a sysctl option with name
56756 + "timechange_logging" is created.
56757 +
56758 +config GRKERNSEC_PROC_IPADDR
56759 + bool "/proc/<pid>/ipaddr support"
56760 + default y if GRKERNSEC_CONFIG_AUTO
56761 + help
56762 + If you say Y here, a new entry will be added to each /proc/<pid>
56763 + directory that contains the IP address of the person using the task.
56764 + The IP is carried across local TCP and AF_UNIX stream sockets.
56765 + This information can be useful for IDS/IPSes to perform remote response
56766 + to a local attack. The entry is readable by only the owner of the
56767 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56768 + the RBAC system), and thus does not create privacy concerns.
56769 +
56770 +config GRKERNSEC_RWXMAP_LOG
56771 + bool 'Denied RWX mmap/mprotect logging'
56772 + default y if GRKERNSEC_CONFIG_AUTO
56773 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56774 + help
56775 + If you say Y here, calls to mmap() and mprotect() with explicit
56776 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56777 + denied by the PAX_MPROTECT feature. If the sysctl option is
56778 + enabled, a sysctl option with name "rwxmap_logging" is created.
56779 +
56780 +config GRKERNSEC_AUDIT_TEXTREL
56781 + bool 'ELF text relocations logging (READ HELP)'
56782 + depends on PAX_MPROTECT
56783 + help
56784 + If you say Y here, text relocations will be logged with the filename
56785 + of the offending library or binary. The purpose of the feature is
56786 + to help Linux distribution developers get rid of libraries and
56787 + binaries that need text relocations which hinder the future progress
56788 + of PaX. Only Linux distribution developers should say Y here, and
56789 + never on a production machine, as this option creates an information
56790 + leak that could aid an attacker in defeating the randomization of
56791 + a single memory region. If the sysctl option is enabled, a sysctl
56792 + option with name "audit_textrel" is created.
56793 +
56794 +endmenu
56795 +
56796 +menu "Executable Protections"
56797 +depends on GRKERNSEC
56798 +
56799 +config GRKERNSEC_DMESG
56800 + bool "Dmesg(8) restriction"
56801 + default y if GRKERNSEC_CONFIG_AUTO
56802 + help
56803 + If you say Y here, non-root users will not be able to use dmesg(8)
56804 + to view the contents of the kernel's circular log buffer.
56805 + The kernel's log buffer often contains kernel addresses and other
56806 + identifying information useful to an attacker in fingerprinting a
56807 + system for a targeted exploit.
56808 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56809 + created.
56810 +
56811 +config GRKERNSEC_HARDEN_PTRACE
56812 + bool "Deter ptrace-based process snooping"
56813 + default y if GRKERNSEC_CONFIG_AUTO
56814 + help
56815 + If you say Y here, TTY sniffers and other malicious monitoring
56816 + programs implemented through ptrace will be defeated. If you
56817 + have been using the RBAC system, this option has already been
56818 + enabled for several years for all users, with the ability to make
56819 + fine-grained exceptions.
56820 +
56821 + This option only affects the ability of non-root users to ptrace
56822 + processes that are not a descendent of the ptracing process.
56823 + This means that strace ./binary and gdb ./binary will still work,
56824 + but attaching to arbitrary processes will not. If the sysctl
56825 + option is enabled, a sysctl option with name "harden_ptrace" is
56826 + created.
56827 +
56828 +config GRKERNSEC_PTRACE_READEXEC
56829 + bool "Require read access to ptrace sensitive binaries"
56830 + default y if GRKERNSEC_CONFIG_AUTO
56831 + help
56832 + If you say Y here, unprivileged users will not be able to ptrace unreadable
56833 + binaries. This option is useful in environments that
56834 + remove the read bits (e.g. file mode 4711) from suid binaries to
56835 + prevent infoleaking of their contents. This option adds
56836 + consistency to the use of that file mode, as the binary could normally
56837 + be read out when run without privileges while ptracing.
56838 +
56839 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56840 + is created.
56841 +
56842 +config GRKERNSEC_SETXID
56843 + bool "Enforce consistent multithreaded privileges"
56844 + default y if GRKERNSEC_CONFIG_AUTO
56845 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
56846 + help
56847 + If you say Y here, a change from a root uid to a non-root uid
56848 + in a multithreaded application will cause the resulting uids,
56849 + gids, supplementary groups, and capabilities in that thread
56850 + to be propagated to the other threads of the process. In most
56851 + cases this is unnecessary, as glibc will emulate this behavior
56852 + on behalf of the application. Other libcs do not act in the
56853 + same way, allowing the other threads of the process to continue
56854 + running with root privileges. If the sysctl option is enabled,
56855 + a sysctl option with name "consistent_setxid" is created.
56856 +
56857 +config GRKERNSEC_TPE
56858 + bool "Trusted Path Execution (TPE)"
56859 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
56860 + help
56861 + If you say Y here, you will be able to choose a gid to add to the
56862 + supplementary groups of users you want to mark as "untrusted."
56863 + These users will not be able to execute any files that are not in
56864 + root-owned directories writable only by root. If the sysctl option
56865 + is enabled, a sysctl option with name "tpe" is created.
56866 +
56867 +config GRKERNSEC_TPE_ALL
56868 + bool "Partially restrict all non-root users"
56869 + depends on GRKERNSEC_TPE
56870 + help
56871 + If you say Y here, all non-root users will be covered under
56872 + a weaker TPE restriction. This is separate from, and in addition to,
56873 + the main TPE options that you have selected elsewhere. Thus, if a
56874 + "trusted" GID is chosen, this restriction applies to even that GID.
56875 + Under this restriction, all non-root users will only be allowed to
56876 + execute files in directories they own that are not group or
56877 + world-writable, or in directories owned by root and writable only by
56878 + root. If the sysctl option is enabled, a sysctl option with name
56879 + "tpe_restrict_all" is created.
56880 +
56881 +config GRKERNSEC_TPE_INVERT
56882 + bool "Invert GID option"
56883 + depends on GRKERNSEC_TPE
56884 + help
56885 + If you say Y here, the group you specify in the TPE configuration will
56886 + decide what group TPE restrictions will be *disabled* for. This
56887 + option is useful if you want TPE restrictions to be applied to most
56888 + users on the system. If the sysctl option is enabled, a sysctl option
56889 + with name "tpe_invert" is created. Unlike other sysctl options, this
56890 + entry will default to on for backward-compatibility.
56891 +
56892 +config GRKERNSEC_TPE_GID
56893 + int
56894 + default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
56895 + default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
56896 +
56897 +config GRKERNSEC_TPE_UNTRUSTED_GID
56898 + int "GID for TPE-untrusted users"
56899 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56900 + default 1005
56901 + help
56902 + Setting this GID determines what group TPE restrictions will be
56903 + *enabled* for. If the sysctl option is enabled, a sysctl option
56904 + with name "tpe_gid" is created.
56905 +
56906 +config GRKERNSEC_TPE_TRUSTED_GID
56907 + int "GID for TPE-trusted users"
56908 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56909 + default 1005
56910 + help
56911 + Setting this GID determines what group TPE restrictions will be
56912 + *disabled* for. If the sysctl option is enabled, a sysctl option
56913 + with name "tpe_gid" is created.
56914 +
56915 +endmenu
56916 +menu "Network Protections"
56917 +depends on GRKERNSEC
56918 +
56919 +config GRKERNSEC_RANDNET
56920 + bool "Larger entropy pools"
56921 + default y if GRKERNSEC_CONFIG_AUTO
56922 + help
56923 + If you say Y here, the entropy pools used for many features of Linux
56924 + and grsecurity will be doubled in size. Since several grsecurity
56925 + features use additional randomness, it is recommended that you say Y
56926 + here. Saying Y here has a similar effect as modifying
56927 + /proc/sys/kernel/random/poolsize.
56928 +
56929 +config GRKERNSEC_BLACKHOLE
56930 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56931 + default y if GRKERNSEC_CONFIG_AUTO
56932 + depends on NET
56933 + help
56934 + If you say Y here, neither TCP resets nor ICMP
56935 + destination-unreachable packets will be sent in response to packets
56936 + sent to ports for which no associated listening process exists.
56937 + This feature supports both IPV4 and IPV6 and exempts the
56938 + loopback interface from blackholing. Enabling this feature
56939 + makes a host more resilient to DoS attacks and reduces network
56940 + visibility against scanners.
56941 +
56942 + The blackhole feature as-implemented is equivalent to the FreeBSD
56943 + blackhole feature, as it prevents RST responses to all packets, not
56944 + just SYNs. Under most application behavior this causes no
56945 + problems, but applications (like haproxy) may not close certain
56946 + connections in a way that cleanly terminates them on the remote
56947 + end, leaving the remote host in LAST_ACK state. Because of this
56948 + side-effect and to prevent intentional LAST_ACK DoSes, this
56949 + feature also adds automatic mitigation against such attacks.
56950 + The mitigation drastically reduces the amount of time a socket
56951 + can spend in LAST_ACK state. If you're using haproxy and not
56952 + all servers it connects to have this option enabled, consider
56953 + disabling this feature on the haproxy host.
56954 +
56955 + If the sysctl option is enabled, two sysctl options with names
56956 + "ip_blackhole" and "lastack_retries" will be created.
56957 + While "ip_blackhole" takes the standard zero/non-zero on/off
56958 + toggle, "lastack_retries" uses the same kinds of values as
56959 + "tcp_retries1" and "tcp_retries2". The default value of 4
56960 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56961 + state.
56962 +
56963 +config GRKERNSEC_NO_SIMULT_CONNECT
56964 + bool "Disable TCP Simultaneous Connect"
56965 + default y if GRKERNSEC_CONFIG_AUTO
56966 + depends on NET
56967 + help
56968 + If you say Y here, a feature by Willy Tarreau will be enabled that
56969 + removes a weakness in Linux's strict implementation of TCP that
56970 + allows two clients to connect to each other without either entering
56971 + a listening state. The weakness allows an attacker to easily prevent
56972 + a client from connecting to a known server provided the source port
56973 + for the connection is guessed correctly.
56974 +
56975 + As the weakness could be used to prevent an antivirus or IPS from
56976 + fetching updates, or prevent an SSL gateway from fetching a CRL,
56977 + it should be eliminated by enabling this option. Though Linux is
56978 + one of few operating systems supporting simultaneous connect, it
56979 + has no legitimate use in practice and is rarely supported by firewalls.
56980 +
56981 +config GRKERNSEC_SOCKET
56982 + bool "Socket restrictions"
56983 + depends on NET
56984 + help
56985 + If you say Y here, you will be able to choose from several options.
56986 + If you assign a GID on your system and add it to the supplementary
56987 + groups of users you want to restrict socket access to, this patch
56988 + will perform up to three things, based on the option(s) you choose.
56989 +
56990 +config GRKERNSEC_SOCKET_ALL
56991 + bool "Deny any sockets to group"
56992 + depends on GRKERNSEC_SOCKET
56993 + help
56994 + If you say Y here, you will be able to choose a GID of whose users will
56995 + be unable to connect to other hosts from your machine or run server
56996 + applications from your machine. If the sysctl option is enabled, a
56997 + sysctl option with name "socket_all" is created.
56998 +
56999 +config GRKERNSEC_SOCKET_ALL_GID
57000 + int "GID to deny all sockets for"
57001 + depends on GRKERNSEC_SOCKET_ALL
57002 + default 1004
57003 + help
57004 + Here you can choose the GID to disable socket access for. Remember to
57005 + add the users you want socket access disabled for to the GID
57006 + specified here. If the sysctl option is enabled, a sysctl option
57007 + with name "socket_all_gid" is created.
57008 +
57009 +config GRKERNSEC_SOCKET_CLIENT
57010 + bool "Deny client sockets to group"
57011 + depends on GRKERNSEC_SOCKET
57012 + help
57013 + If you say Y here, you will be able to choose a GID of whose users will
57014 + be unable to connect to other hosts from your machine, but will be
57015 + able to run servers. If this option is enabled, all users in the group
57016 + you specify will have to use passive mode when initiating ftp transfers
57017 + from the shell on your machine. If the sysctl option is enabled, a
57018 + sysctl option with name "socket_client" is created.
57019 +
57020 +config GRKERNSEC_SOCKET_CLIENT_GID
57021 + int "GID to deny client sockets for"
57022 + depends on GRKERNSEC_SOCKET_CLIENT
57023 + default 1003
57024 + help
57025 + Here you can choose the GID to disable client socket access for.
57026 + Remember to add the users you want client socket access disabled for to
57027 + the GID specified here. If the sysctl option is enabled, a sysctl
57028 + option with name "socket_client_gid" is created.
57029 +
57030 +config GRKERNSEC_SOCKET_SERVER
57031 + bool "Deny server sockets to group"
57032 + depends on GRKERNSEC_SOCKET
57033 + help
57034 + If you say Y here, you will be able to choose a GID of whose users will
57035 + be unable to run server applications from your machine. If the sysctl
57036 + option is enabled, a sysctl option with name "socket_server" is created.
57037 +
57038 +config GRKERNSEC_SOCKET_SERVER_GID
57039 + int "GID to deny server sockets for"
57040 + depends on GRKERNSEC_SOCKET_SERVER
57041 + default 1002
57042 + help
57043 + Here you can choose the GID to disable server socket access for.
57044 + Remember to add the users you want server socket access disabled for to
57045 + the GID specified here. If the sysctl option is enabled, a sysctl
57046 + option with name "socket_server_gid" is created.
57047 +
57048 +endmenu
57049 +menu "Sysctl Support"
57050 +depends on GRKERNSEC && SYSCTL
57051 +
57052 +config GRKERNSEC_SYSCTL
57053 + bool "Sysctl support"
57054 + default y if GRKERNSEC_CONFIG_AUTO
57055 + help
57056 + If you say Y here, you will be able to change the options that
57057 + grsecurity runs with at bootup, without having to recompile your
57058 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57059 + to enable (1) or disable (0) various features. All the sysctl entries
57060 + are mutable until the "grsec_lock" entry is set to a non-zero value.
57061 + All features enabled in the kernel configuration are disabled at boot
57062 + if you do not say Y to the "Turn on features by default" option.
57063 + All options should be set at startup, and the grsec_lock entry should
57064 + be set to a non-zero value after all the options are set.
57065 + *THIS IS EXTREMELY IMPORTANT*
57066 +
57067 +config GRKERNSEC_SYSCTL_DISTRO
57068 + bool "Extra sysctl support for distro makers (READ HELP)"
57069 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57070 + help
57071 + If you say Y here, additional sysctl options will be created
57072 + for features that affect processes running as root. Therefore,
57073 + it is critical when using this option that the grsec_lock entry be
57074 + enabled after boot. Only distros with prebuilt kernel packages
57075 + with this option enabled that can ensure grsec_lock is enabled
57076 + after boot should use this option.
57077 + *Failure to set grsec_lock after boot makes all grsec features
57078 + this option covers useless*
57079 +
57080 + Currently this option creates the following sysctl entries:
57081 + "Disable Privileged I/O": "disable_priv_io"
57082 +
57083 +config GRKERNSEC_SYSCTL_ON
57084 + bool "Turn on features by default"
57085 + default y if GRKERNSEC_CONFIG_AUTO
57086 + depends on GRKERNSEC_SYSCTL
57087 + help
57088 + If you say Y here, instead of having all features enabled in the
57089 + kernel configuration disabled at boot time, the features will be
57090 + enabled at boot time. It is recommended you say Y here unless
57091 + there is some reason you would want all sysctl-tunable features to
57092 + be disabled by default. As mentioned elsewhere, it is important
57093 + to enable the grsec_lock entry once you have finished modifying
57094 + the sysctl entries.
57095 +
57096 +endmenu
57097 +menu "Logging Options"
57098 +depends on GRKERNSEC
57099 +
57100 +config GRKERNSEC_FLOODTIME
57101 + int "Seconds in between log messages (minimum)"
57102 + default 10
57103 + help
57104 + This option allows you to enforce the number of seconds between
57105 + grsecurity log messages. The default should be suitable for most
57106 + people, however, if you choose to change it, choose a value small enough
57107 + to allow informative logs to be produced, but large enough to
57108 + prevent flooding.
57109 +
57110 +config GRKERNSEC_FLOODBURST
57111 + int "Number of messages in a burst (maximum)"
57112 + default 6
57113 + help
57114 + This option allows you to choose the maximum number of messages allowed
57115 + within the flood time interval you chose in a separate option. The
57116 + default should be suitable for most people, however if you find that
57117 + many of your logs are being interpreted as flooding, you may want to
57118 + raise this value.
57119 +
57120 +endmenu
57121 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
57122 new file mode 100644
57123 index 0000000..1b9afa9
57124 --- /dev/null
57125 +++ b/grsecurity/Makefile
57126 @@ -0,0 +1,38 @@
57127 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57128 +# during 2001-2009 it has been completely redesigned by Brad Spengler
57129 +# into an RBAC system
57130 +#
57131 +# All code in this directory and various hooks inserted throughout the kernel
57132 +# are copyright Brad Spengler - Open Source Security, Inc., and released
57133 +# under the GPL v2 or higher
57134 +
57135 +KBUILD_CFLAGS += -Werror
57136 +
57137 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57138 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
57139 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57140 +
57141 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57142 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57143 + gracl_learn.o grsec_log.o
57144 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57145 +
57146 +ifdef CONFIG_NET
57147 +obj-y += grsec_sock.o
57148 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57149 +endif
57150 +
57151 +ifndef CONFIG_GRKERNSEC
57152 +obj-y += grsec_disabled.o
57153 +endif
57154 +
57155 +ifdef CONFIG_GRKERNSEC_HIDESYM
57156 +extra-y := grsec_hidesym.o
57157 +$(obj)/grsec_hidesym.o:
57158 + @-chmod -f 500 /boot
57159 + @-chmod -f 500 /lib/modules
57160 + @-chmod -f 500 /lib64/modules
57161 + @-chmod -f 500 /lib32/modules
57162 + @-chmod -f 700 .
57163 + @echo ' grsec: protected kernel image paths'
57164 +endif
57165 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
57166 new file mode 100644
57167 index 0000000..1248ee0
57168 --- /dev/null
57169 +++ b/grsecurity/gracl.c
57170 @@ -0,0 +1,4073 @@
57171 +#include <linux/kernel.h>
57172 +#include <linux/module.h>
57173 +#include <linux/sched.h>
57174 +#include <linux/mm.h>
57175 +#include <linux/file.h>
57176 +#include <linux/fs.h>
57177 +#include <linux/namei.h>
57178 +#include <linux/mount.h>
57179 +#include <linux/tty.h>
57180 +#include <linux/proc_fs.h>
57181 +#include <linux/lglock.h>
57182 +#include <linux/slab.h>
57183 +#include <linux/vmalloc.h>
57184 +#include <linux/types.h>
57185 +#include <linux/sysctl.h>
57186 +#include <linux/netdevice.h>
57187 +#include <linux/ptrace.h>
57188 +#include <linux/gracl.h>
57189 +#include <linux/gralloc.h>
57190 +#include <linux/security.h>
57191 +#include <linux/grinternal.h>
57192 +#include <linux/pid_namespace.h>
57193 +#include <linux/stop_machine.h>
57194 +#include <linux/fdtable.h>
57195 +#include <linux/percpu.h>
57196 +#include <linux/lglock.h>
57197 +#include <linux/hugetlb.h>
57198 +#include <linux/posix-timers.h>
57199 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
57200 +#include <linux/magic.h>
57201 +#include <linux/pagemap.h>
57202 +#include "../fs/btrfs/async-thread.h"
57203 +#include "../fs/btrfs/ctree.h"
57204 +#include "../fs/btrfs/btrfs_inode.h"
57205 +#endif
57206 +#include "../fs/mount.h"
57207 +
57208 +#include <asm/uaccess.h>
57209 +#include <asm/errno.h>
57210 +#include <asm/mman.h>
57211 +
57212 +extern struct lglock vfsmount_lock;
57213 +
57214 +static struct acl_role_db acl_role_set;
57215 +static struct name_db name_set;
57216 +static struct inodev_db inodev_set;
57217 +
57218 +/* for keeping track of userspace pointers used for subjects, so we
57219 + can share references in the kernel as well
57220 +*/
57221 +
57222 +static struct path real_root;
57223 +
57224 +static struct acl_subj_map_db subj_map_set;
57225 +
57226 +static struct acl_role_label *default_role;
57227 +
57228 +static struct acl_role_label *role_list;
57229 +
57230 +static u16 acl_sp_role_value;
57231 +
57232 +extern char *gr_shared_page[4];
57233 +static DEFINE_MUTEX(gr_dev_mutex);
57234 +DEFINE_RWLOCK(gr_inode_lock);
57235 +
57236 +struct gr_arg *gr_usermode;
57237 +
57238 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
57239 +
57240 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
57241 +extern void gr_clear_learn_entries(void);
57242 +
57243 +unsigned char *gr_system_salt;
57244 +unsigned char *gr_system_sum;
57245 +
57246 +static struct sprole_pw **acl_special_roles = NULL;
57247 +static __u16 num_sprole_pws = 0;
57248 +
57249 +static struct acl_role_label *kernel_role = NULL;
57250 +
57251 +static unsigned int gr_auth_attempts = 0;
57252 +static unsigned long gr_auth_expires = 0UL;
57253 +
57254 +#ifdef CONFIG_NET
57255 +extern struct vfsmount *sock_mnt;
57256 +#endif
57257 +
57258 +extern struct vfsmount *pipe_mnt;
57259 +extern struct vfsmount *shm_mnt;
57260 +
57261 +#ifdef CONFIG_HUGETLBFS
57262 +extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
57263 +#endif
57264 +
57265 +static struct acl_object_label *fakefs_obj_rw;
57266 +static struct acl_object_label *fakefs_obj_rwx;
57267 +
57268 +extern int gr_init_uidset(void);
57269 +extern void gr_free_uidset(void);
57270 +extern void gr_remove_uid(uid_t uid);
57271 +extern int gr_find_uid(uid_t uid);
57272 +
57273 +__inline__ int
57274 +gr_acl_is_enabled(void)
57275 +{
57276 + return (gr_status & GR_READY);
57277 +}
57278 +
57279 +static inline dev_t __get_dev(const struct dentry *dentry)
57280 +{
57281 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
57282 + if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
57283 + return BTRFS_I(dentry->d_inode)->root->anon_dev;
57284 + else
57285 +#endif
57286 + return dentry->d_sb->s_dev;
57287 +}
57288 +
57289 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57290 +{
57291 + return __get_dev(dentry);
57292 +}
57293 +
57294 +static char gr_task_roletype_to_char(struct task_struct *task)
57295 +{
57296 + switch (task->role->roletype &
57297 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
57298 + GR_ROLE_SPECIAL)) {
57299 + case GR_ROLE_DEFAULT:
57300 + return 'D';
57301 + case GR_ROLE_USER:
57302 + return 'U';
57303 + case GR_ROLE_GROUP:
57304 + return 'G';
57305 + case GR_ROLE_SPECIAL:
57306 + return 'S';
57307 + }
57308 +
57309 + return 'X';
57310 +}
57311 +
57312 +char gr_roletype_to_char(void)
57313 +{
57314 + return gr_task_roletype_to_char(current);
57315 +}
57316 +
57317 +__inline__ int
57318 +gr_acl_tpe_check(void)
57319 +{
57320 + if (unlikely(!(gr_status & GR_READY)))
57321 + return 0;
57322 + if (current->role->roletype & GR_ROLE_TPE)
57323 + return 1;
57324 + else
57325 + return 0;
57326 +}
57327 +
57328 +int
57329 +gr_handle_rawio(const struct inode *inode)
57330 +{
57331 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57332 + if (inode && S_ISBLK(inode->i_mode) &&
57333 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57334 + !capable(CAP_SYS_RAWIO))
57335 + return 1;
57336 +#endif
57337 + return 0;
57338 +}
57339 +
57340 +static int
57341 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
57342 +{
57343 + if (likely(lena != lenb))
57344 + return 0;
57345 +
57346 + return !memcmp(a, b, lena);
57347 +}
57348 +
57349 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
57350 +{
57351 + *buflen -= namelen;
57352 + if (*buflen < 0)
57353 + return -ENAMETOOLONG;
57354 + *buffer -= namelen;
57355 + memcpy(*buffer, str, namelen);
57356 + return 0;
57357 +}
57358 +
57359 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
57360 +{
57361 + return prepend(buffer, buflen, name->name, name->len);
57362 +}
57363 +
57364 +static int prepend_path(const struct path *path, struct path *root,
57365 + char **buffer, int *buflen)
57366 +{
57367 + struct dentry *dentry = path->dentry;
57368 + struct vfsmount *vfsmnt = path->mnt;
57369 + struct mount *mnt = real_mount(vfsmnt);
57370 + bool slash = false;
57371 + int error = 0;
57372 +
57373 + while (dentry != root->dentry || vfsmnt != root->mnt) {
57374 + struct dentry * parent;
57375 +
57376 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
57377 + /* Global root? */
57378 + if (!mnt_has_parent(mnt)) {
57379 + goto out;
57380 + }
57381 + dentry = mnt->mnt_mountpoint;
57382 + mnt = mnt->mnt_parent;
57383 + vfsmnt = &mnt->mnt;
57384 + continue;
57385 + }
57386 + parent = dentry->d_parent;
57387 + prefetch(parent);
57388 + spin_lock(&dentry->d_lock);
57389 + error = prepend_name(buffer, buflen, &dentry->d_name);
57390 + spin_unlock(&dentry->d_lock);
57391 + if (!error)
57392 + error = prepend(buffer, buflen, "/", 1);
57393 + if (error)
57394 + break;
57395 +
57396 + slash = true;
57397 + dentry = parent;
57398 + }
57399 +
57400 +out:
57401 + if (!error && !slash)
57402 + error = prepend(buffer, buflen, "/", 1);
57403 +
57404 + return error;
57405 +}
57406 +
57407 +/* this must be called with vfsmount_lock and rename_lock held */
57408 +
57409 +static char *__our_d_path(const struct path *path, struct path *root,
57410 + char *buf, int buflen)
57411 +{
57412 + char *res = buf + buflen;
57413 + int error;
57414 +
57415 + prepend(&res, &buflen, "\0", 1);
57416 + error = prepend_path(path, root, &res, &buflen);
57417 + if (error)
57418 + return ERR_PTR(error);
57419 +
57420 + return res;
57421 +}
57422 +
57423 +static char *
57424 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
57425 +{
57426 + char *retval;
57427 +
57428 + retval = __our_d_path(path, root, buf, buflen);
57429 + if (unlikely(IS_ERR(retval)))
57430 + retval = strcpy(buf, "<path too long>");
57431 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
57432 + retval[1] = '\0';
57433 +
57434 + return retval;
57435 +}
57436 +
57437 +static char *
57438 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57439 + char *buf, int buflen)
57440 +{
57441 + struct path path;
57442 + char *res;
57443 +
57444 + path.dentry = (struct dentry *)dentry;
57445 + path.mnt = (struct vfsmount *)vfsmnt;
57446 +
57447 + /* we can use real_root.dentry, real_root.mnt, because this is only called
57448 + by the RBAC system */
57449 + res = gen_full_path(&path, &real_root, buf, buflen);
57450 +
57451 + return res;
57452 +}
57453 +
57454 +static char *
57455 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57456 + char *buf, int buflen)
57457 +{
57458 + char *res;
57459 + struct path path;
57460 + struct path root;
57461 + struct task_struct *reaper = init_pid_ns.child_reaper;
57462 +
57463 + path.dentry = (struct dentry *)dentry;
57464 + path.mnt = (struct vfsmount *)vfsmnt;
57465 +
57466 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
57467 + get_fs_root(reaper->fs, &root);
57468 +
57469 + br_read_lock(&vfsmount_lock);
57470 + write_seqlock(&rename_lock);
57471 + res = gen_full_path(&path, &root, buf, buflen);
57472 + write_sequnlock(&rename_lock);
57473 + br_read_unlock(&vfsmount_lock);
57474 +
57475 + path_put(&root);
57476 + return res;
57477 +}
57478 +
57479 +static char *
57480 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57481 +{
57482 + char *ret;
57483 + br_read_lock(&vfsmount_lock);
57484 + write_seqlock(&rename_lock);
57485 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57486 + PAGE_SIZE);
57487 + write_sequnlock(&rename_lock);
57488 + br_read_unlock(&vfsmount_lock);
57489 + return ret;
57490 +}
57491 +
57492 +static char *
57493 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57494 +{
57495 + char *ret;
57496 + char *buf;
57497 + int buflen;
57498 +
57499 + br_read_lock(&vfsmount_lock);
57500 + write_seqlock(&rename_lock);
57501 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
57502 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
57503 + buflen = (int)(ret - buf);
57504 + if (buflen >= 5)
57505 + prepend(&ret, &buflen, "/proc", 5);
57506 + else
57507 + ret = strcpy(buf, "<path too long>");
57508 + write_sequnlock(&rename_lock);
57509 + br_read_unlock(&vfsmount_lock);
57510 + return ret;
57511 +}
57512 +
57513 +char *
57514 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
57515 +{
57516 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57517 + PAGE_SIZE);
57518 +}
57519 +
57520 +char *
57521 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
57522 +{
57523 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57524 + PAGE_SIZE);
57525 +}
57526 +
57527 +char *
57528 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
57529 +{
57530 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
57531 + PAGE_SIZE);
57532 +}
57533 +
57534 +char *
57535 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
57536 +{
57537 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
57538 + PAGE_SIZE);
57539 +}
57540 +
57541 +char *
57542 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
57543 +{
57544 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
57545 + PAGE_SIZE);
57546 +}
57547 +
57548 +__inline__ __u32
57549 +to_gr_audit(const __u32 reqmode)
57550 +{
57551 + /* masks off auditable permission flags, then shifts them to create
57552 + auditing flags, and adds the special case of append auditing if
57553 + we're requesting write */
57554 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
57555 +}
57556 +
57557 +struct acl_subject_label *
57558 +lookup_subject_map(const struct acl_subject_label *userp)
57559 +{
57560 + unsigned int index = gr_shash(userp, subj_map_set.s_size);
57561 + struct subject_map *match;
57562 +
57563 + match = subj_map_set.s_hash[index];
57564 +
57565 + while (match && match->user != userp)
57566 + match = match->next;
57567 +
57568 + if (match != NULL)
57569 + return match->kernel;
57570 + else
57571 + return NULL;
57572 +}
57573 +
57574 +static void
57575 +insert_subj_map_entry(struct subject_map *subjmap)
57576 +{
57577 + unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
57578 + struct subject_map **curr;
57579 +
57580 + subjmap->prev = NULL;
57581 +
57582 + curr = &subj_map_set.s_hash[index];
57583 + if (*curr != NULL)
57584 + (*curr)->prev = subjmap;
57585 +
57586 + subjmap->next = *curr;
57587 + *curr = subjmap;
57588 +
57589 + return;
57590 +}
57591 +
57592 +static struct acl_role_label *
57593 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57594 + const gid_t gid)
57595 +{
57596 + unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57597 + struct acl_role_label *match;
57598 + struct role_allowed_ip *ipp;
57599 + unsigned int x;
57600 + u32 curr_ip = task->signal->curr_ip;
57601 +
57602 + task->signal->saved_ip = curr_ip;
57603 +
57604 + match = acl_role_set.r_hash[index];
57605 +
57606 + while (match) {
57607 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57608 + for (x = 0; x < match->domain_child_num; x++) {
57609 + if (match->domain_children[x] == uid)
57610 + goto found;
57611 + }
57612 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57613 + break;
57614 + match = match->next;
57615 + }
57616 +found:
57617 + if (match == NULL) {
57618 + try_group:
57619 + index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57620 + match = acl_role_set.r_hash[index];
57621 +
57622 + while (match) {
57623 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57624 + for (x = 0; x < match->domain_child_num; x++) {
57625 + if (match->domain_children[x] == gid)
57626 + goto found2;
57627 + }
57628 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57629 + break;
57630 + match = match->next;
57631 + }
57632 +found2:
57633 + if (match == NULL)
57634 + match = default_role;
57635 + if (match->allowed_ips == NULL)
57636 + return match;
57637 + else {
57638 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57639 + if (likely
57640 + ((ntohl(curr_ip) & ipp->netmask) ==
57641 + (ntohl(ipp->addr) & ipp->netmask)))
57642 + return match;
57643 + }
57644 + match = default_role;
57645 + }
57646 + } else if (match->allowed_ips == NULL) {
57647 + return match;
57648 + } else {
57649 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57650 + if (likely
57651 + ((ntohl(curr_ip) & ipp->netmask) ==
57652 + (ntohl(ipp->addr) & ipp->netmask)))
57653 + return match;
57654 + }
57655 + goto try_group;
57656 + }
57657 +
57658 + return match;
57659 +}
57660 +
57661 +struct acl_subject_label *
57662 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57663 + const struct acl_role_label *role)
57664 +{
57665 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57666 + struct acl_subject_label *match;
57667 +
57668 + match = role->subj_hash[index];
57669 +
57670 + while (match && (match->inode != ino || match->device != dev ||
57671 + (match->mode & GR_DELETED))) {
57672 + match = match->next;
57673 + }
57674 +
57675 + if (match && !(match->mode & GR_DELETED))
57676 + return match;
57677 + else
57678 + return NULL;
57679 +}
57680 +
57681 +struct acl_subject_label *
57682 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57683 + const struct acl_role_label *role)
57684 +{
57685 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57686 + struct acl_subject_label *match;
57687 +
57688 + match = role->subj_hash[index];
57689 +
57690 + while (match && (match->inode != ino || match->device != dev ||
57691 + !(match->mode & GR_DELETED))) {
57692 + match = match->next;
57693 + }
57694 +
57695 + if (match && (match->mode & GR_DELETED))
57696 + return match;
57697 + else
57698 + return NULL;
57699 +}
57700 +
57701 +static struct acl_object_label *
57702 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57703 + const struct acl_subject_label *subj)
57704 +{
57705 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57706 + struct acl_object_label *match;
57707 +
57708 + match = subj->obj_hash[index];
57709 +
57710 + while (match && (match->inode != ino || match->device != dev ||
57711 + (match->mode & GR_DELETED))) {
57712 + match = match->next;
57713 + }
57714 +
57715 + if (match && !(match->mode & GR_DELETED))
57716 + return match;
57717 + else
57718 + return NULL;
57719 +}
57720 +
57721 +static struct acl_object_label *
57722 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57723 + const struct acl_subject_label *subj)
57724 +{
57725 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57726 + struct acl_object_label *match;
57727 +
57728 + match = subj->obj_hash[index];
57729 +
57730 + while (match && (match->inode != ino || match->device != dev ||
57731 + !(match->mode & GR_DELETED))) {
57732 + match = match->next;
57733 + }
57734 +
57735 + if (match && (match->mode & GR_DELETED))
57736 + return match;
57737 +
57738 + match = subj->obj_hash[index];
57739 +
57740 + while (match && (match->inode != ino || match->device != dev ||
57741 + (match->mode & GR_DELETED))) {
57742 + match = match->next;
57743 + }
57744 +
57745 + if (match && !(match->mode & GR_DELETED))
57746 + return match;
57747 + else
57748 + return NULL;
57749 +}
57750 +
57751 +static struct name_entry *
57752 +lookup_name_entry(const char *name)
57753 +{
57754 + unsigned int len = strlen(name);
57755 + unsigned int key = full_name_hash(name, len);
57756 + unsigned int index = key % name_set.n_size;
57757 + struct name_entry *match;
57758 +
57759 + match = name_set.n_hash[index];
57760 +
57761 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57762 + match = match->next;
57763 +
57764 + return match;
57765 +}
57766 +
57767 +static struct name_entry *
57768 +lookup_name_entry_create(const char *name)
57769 +{
57770 + unsigned int len = strlen(name);
57771 + unsigned int key = full_name_hash(name, len);
57772 + unsigned int index = key % name_set.n_size;
57773 + struct name_entry *match;
57774 +
57775 + match = name_set.n_hash[index];
57776 +
57777 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57778 + !match->deleted))
57779 + match = match->next;
57780 +
57781 + if (match && match->deleted)
57782 + return match;
57783 +
57784 + match = name_set.n_hash[index];
57785 +
57786 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57787 + match->deleted))
57788 + match = match->next;
57789 +
57790 + if (match && !match->deleted)
57791 + return match;
57792 + else
57793 + return NULL;
57794 +}
57795 +
57796 +static struct inodev_entry *
57797 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
57798 +{
57799 + unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
57800 + struct inodev_entry *match;
57801 +
57802 + match = inodev_set.i_hash[index];
57803 +
57804 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57805 + match = match->next;
57806 +
57807 + return match;
57808 +}
57809 +
57810 +static void
57811 +insert_inodev_entry(struct inodev_entry *entry)
57812 +{
57813 + unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
57814 + inodev_set.i_size);
57815 + struct inodev_entry **curr;
57816 +
57817 + entry->prev = NULL;
57818 +
57819 + curr = &inodev_set.i_hash[index];
57820 + if (*curr != NULL)
57821 + (*curr)->prev = entry;
57822 +
57823 + entry->next = *curr;
57824 + *curr = entry;
57825 +
57826 + return;
57827 +}
57828 +
57829 +static void
57830 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57831 +{
57832 + unsigned int index =
57833 + gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57834 + struct acl_role_label **curr;
57835 + struct acl_role_label *tmp, *tmp2;
57836 +
57837 + curr = &acl_role_set.r_hash[index];
57838 +
57839 + /* simple case, slot is empty, just set it to our role */
57840 + if (*curr == NULL) {
57841 + *curr = role;
57842 + } else {
57843 + /* example:
57844 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
57845 + 2 -> 3
57846 + */
57847 + /* first check to see if we can already be reached via this slot */
57848 + tmp = *curr;
57849 + while (tmp && tmp != role)
57850 + tmp = tmp->next;
57851 + if (tmp == role) {
57852 + /* we don't need to add ourselves to this slot's chain */
57853 + return;
57854 + }
57855 + /* we need to add ourselves to this chain, two cases */
57856 + if (role->next == NULL) {
57857 + /* simple case, append the current chain to our role */
57858 + role->next = *curr;
57859 + *curr = role;
57860 + } else {
57861 + /* 1 -> 2 -> 3 -> 4
57862 + 2 -> 3 -> 4
57863 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
57864 + */
57865 + /* trickier case: walk our role's chain until we find
57866 + the role for the start of the current slot's chain */
57867 + tmp = role;
57868 + tmp2 = *curr;
57869 + while (tmp->next && tmp->next != tmp2)
57870 + tmp = tmp->next;
57871 + if (tmp->next == tmp2) {
57872 + /* from example above, we found 3, so just
57873 + replace this slot's chain with ours */
57874 + *curr = role;
57875 + } else {
57876 + /* we didn't find a subset of our role's chain
57877 + in the current slot's chain, so append their
57878 + chain to ours, and set us as the first role in
57879 + the slot's chain
57880 +
57881 + we could fold this case with the case above,
57882 + but making it explicit for clarity
57883 + */
57884 + tmp->next = tmp2;
57885 + *curr = role;
57886 + }
57887 + }
57888 + }
57889 +
57890 + return;
57891 +}
57892 +
57893 +static void
57894 +insert_acl_role_label(struct acl_role_label *role)
57895 +{
57896 + int i;
57897 +
57898 + if (role_list == NULL) {
57899 + role_list = role;
57900 + role->prev = NULL;
57901 + } else {
57902 + role->prev = role_list;
57903 + role_list = role;
57904 + }
57905 +
57906 + /* used for hash chains */
57907 + role->next = NULL;
57908 +
57909 + if (role->roletype & GR_ROLE_DOMAIN) {
57910 + for (i = 0; i < role->domain_child_num; i++)
57911 + __insert_acl_role_label(role, role->domain_children[i]);
57912 + } else
57913 + __insert_acl_role_label(role, role->uidgid);
57914 +}
57915 +
57916 +static int
57917 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57918 +{
57919 + struct name_entry **curr, *nentry;
57920 + struct inodev_entry *ientry;
57921 + unsigned int len = strlen(name);
57922 + unsigned int key = full_name_hash(name, len);
57923 + unsigned int index = key % name_set.n_size;
57924 +
57925 + curr = &name_set.n_hash[index];
57926 +
57927 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57928 + curr = &((*curr)->next);
57929 +
57930 + if (*curr != NULL)
57931 + return 1;
57932 +
57933 + nentry = acl_alloc(sizeof (struct name_entry));
57934 + if (nentry == NULL)
57935 + return 0;
57936 + ientry = acl_alloc(sizeof (struct inodev_entry));
57937 + if (ientry == NULL)
57938 + return 0;
57939 + ientry->nentry = nentry;
57940 +
57941 + nentry->key = key;
57942 + nentry->name = name;
57943 + nentry->inode = inode;
57944 + nentry->device = device;
57945 + nentry->len = len;
57946 + nentry->deleted = deleted;
57947 +
57948 + nentry->prev = NULL;
57949 + curr = &name_set.n_hash[index];
57950 + if (*curr != NULL)
57951 + (*curr)->prev = nentry;
57952 + nentry->next = *curr;
57953 + *curr = nentry;
57954 +
57955 + /* insert us into the table searchable by inode/dev */
57956 + insert_inodev_entry(ientry);
57957 +
57958 + return 1;
57959 +}
57960 +
57961 +static void
57962 +insert_acl_obj_label(struct acl_object_label *obj,
57963 + struct acl_subject_label *subj)
57964 +{
57965 + unsigned int index =
57966 + gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
57967 + struct acl_object_label **curr;
57968 +
57969 +
57970 + obj->prev = NULL;
57971 +
57972 + curr = &subj->obj_hash[index];
57973 + if (*curr != NULL)
57974 + (*curr)->prev = obj;
57975 +
57976 + obj->next = *curr;
57977 + *curr = obj;
57978 +
57979 + return;
57980 +}
57981 +
57982 +static void
57983 +insert_acl_subj_label(struct acl_subject_label *obj,
57984 + struct acl_role_label *role)
57985 +{
57986 + unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
57987 + struct acl_subject_label **curr;
57988 +
57989 + obj->prev = NULL;
57990 +
57991 + curr = &role->subj_hash[index];
57992 + if (*curr != NULL)
57993 + (*curr)->prev = obj;
57994 +
57995 + obj->next = *curr;
57996 + *curr = obj;
57997 +
57998 + return;
57999 +}
58000 +
58001 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
58002 +
58003 +static void *
58004 +create_table(__u32 * len, int elementsize)
58005 +{
58006 + unsigned int table_sizes[] = {
58007 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
58008 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
58009 + 4194301, 8388593, 16777213, 33554393, 67108859
58010 + };
58011 + void *newtable = NULL;
58012 + unsigned int pwr = 0;
58013 +
58014 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
58015 + table_sizes[pwr] <= *len)
58016 + pwr++;
58017 +
58018 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
58019 + return newtable;
58020 +
58021 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
58022 + newtable =
58023 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
58024 + else
58025 + newtable = vmalloc(table_sizes[pwr] * elementsize);
58026 +
58027 + *len = table_sizes[pwr];
58028 +
58029 + return newtable;
58030 +}
58031 +
58032 +static int
58033 +init_variables(const struct gr_arg *arg)
58034 +{
58035 + struct task_struct *reaper = init_pid_ns.child_reaper;
58036 + unsigned int stacksize;
58037 +
58038 + subj_map_set.s_size = arg->role_db.num_subjects;
58039 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
58040 + name_set.n_size = arg->role_db.num_objects;
58041 + inodev_set.i_size = arg->role_db.num_objects;
58042 +
58043 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
58044 + !name_set.n_size || !inodev_set.i_size)
58045 + return 1;
58046 +
58047 + if (!gr_init_uidset())
58048 + return 1;
58049 +
58050 + /* set up the stack that holds allocation info */
58051 +
58052 + stacksize = arg->role_db.num_pointers + 5;
58053 +
58054 + if (!acl_alloc_stack_init(stacksize))
58055 + return 1;
58056 +
58057 + /* grab reference for the real root dentry and vfsmount */
58058 + get_fs_root(reaper->fs, &real_root);
58059 +
58060 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58061 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
58062 +#endif
58063 +
58064 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
58065 + if (fakefs_obj_rw == NULL)
58066 + return 1;
58067 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
58068 +
58069 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
58070 + if (fakefs_obj_rwx == NULL)
58071 + return 1;
58072 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58073 +
58074 + subj_map_set.s_hash =
58075 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
58076 + acl_role_set.r_hash =
58077 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
58078 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
58079 + inodev_set.i_hash =
58080 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
58081 +
58082 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
58083 + !name_set.n_hash || !inodev_set.i_hash)
58084 + return 1;
58085 +
58086 + memset(subj_map_set.s_hash, 0,
58087 + sizeof(struct subject_map *) * subj_map_set.s_size);
58088 + memset(acl_role_set.r_hash, 0,
58089 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
58090 + memset(name_set.n_hash, 0,
58091 + sizeof (struct name_entry *) * name_set.n_size);
58092 + memset(inodev_set.i_hash, 0,
58093 + sizeof (struct inodev_entry *) * inodev_set.i_size);
58094 +
58095 + return 0;
58096 +}
58097 +
58098 +/* free information not needed after startup
58099 + currently contains user->kernel pointer mappings for subjects
58100 +*/
58101 +
58102 +static void
58103 +free_init_variables(void)
58104 +{
58105 + __u32 i;
58106 +
58107 + if (subj_map_set.s_hash) {
58108 + for (i = 0; i < subj_map_set.s_size; i++) {
58109 + if (subj_map_set.s_hash[i]) {
58110 + kfree(subj_map_set.s_hash[i]);
58111 + subj_map_set.s_hash[i] = NULL;
58112 + }
58113 + }
58114 +
58115 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
58116 + PAGE_SIZE)
58117 + kfree(subj_map_set.s_hash);
58118 + else
58119 + vfree(subj_map_set.s_hash);
58120 + }
58121 +
58122 + return;
58123 +}
58124 +
58125 +static void
58126 +free_variables(void)
58127 +{
58128 + struct acl_subject_label *s;
58129 + struct acl_role_label *r;
58130 + struct task_struct *task, *task2;
58131 + unsigned int x;
58132 +
58133 + gr_clear_learn_entries();
58134 +
58135 + read_lock(&tasklist_lock);
58136 + do_each_thread(task2, task) {
58137 + task->acl_sp_role = 0;
58138 + task->acl_role_id = 0;
58139 + task->acl = NULL;
58140 + task->role = NULL;
58141 + } while_each_thread(task2, task);
58142 + read_unlock(&tasklist_lock);
58143 +
58144 + /* release the reference to the real root dentry and vfsmount */
58145 + path_put(&real_root);
58146 + memset(&real_root, 0, sizeof(real_root));
58147 +
58148 + /* free all object hash tables */
58149 +
58150 + FOR_EACH_ROLE_START(r)
58151 + if (r->subj_hash == NULL)
58152 + goto next_role;
58153 + FOR_EACH_SUBJECT_START(r, s, x)
58154 + if (s->obj_hash == NULL)
58155 + break;
58156 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58157 + kfree(s->obj_hash);
58158 + else
58159 + vfree(s->obj_hash);
58160 + FOR_EACH_SUBJECT_END(s, x)
58161 + FOR_EACH_NESTED_SUBJECT_START(r, s)
58162 + if (s->obj_hash == NULL)
58163 + break;
58164 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58165 + kfree(s->obj_hash);
58166 + else
58167 + vfree(s->obj_hash);
58168 + FOR_EACH_NESTED_SUBJECT_END(s)
58169 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
58170 + kfree(r->subj_hash);
58171 + else
58172 + vfree(r->subj_hash);
58173 + r->subj_hash = NULL;
58174 +next_role:
58175 + FOR_EACH_ROLE_END(r)
58176 +
58177 + acl_free_all();
58178 +
58179 + if (acl_role_set.r_hash) {
58180 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
58181 + PAGE_SIZE)
58182 + kfree(acl_role_set.r_hash);
58183 + else
58184 + vfree(acl_role_set.r_hash);
58185 + }
58186 + if (name_set.n_hash) {
58187 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
58188 + PAGE_SIZE)
58189 + kfree(name_set.n_hash);
58190 + else
58191 + vfree(name_set.n_hash);
58192 + }
58193 +
58194 + if (inodev_set.i_hash) {
58195 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
58196 + PAGE_SIZE)
58197 + kfree(inodev_set.i_hash);
58198 + else
58199 + vfree(inodev_set.i_hash);
58200 + }
58201 +
58202 + gr_free_uidset();
58203 +
58204 + memset(&name_set, 0, sizeof (struct name_db));
58205 + memset(&inodev_set, 0, sizeof (struct inodev_db));
58206 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
58207 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
58208 +
58209 + default_role = NULL;
58210 + kernel_role = NULL;
58211 + role_list = NULL;
58212 +
58213 + return;
58214 +}
58215 +
58216 +static __u32
58217 +count_user_objs(struct acl_object_label *userp)
58218 +{
58219 + struct acl_object_label o_tmp;
58220 + __u32 num = 0;
58221 +
58222 + while (userp) {
58223 + if (copy_from_user(&o_tmp, userp,
58224 + sizeof (struct acl_object_label)))
58225 + break;
58226 +
58227 + userp = o_tmp.prev;
58228 + num++;
58229 + }
58230 +
58231 + return num;
58232 +}
58233 +
58234 +static struct acl_subject_label *
58235 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
58236 +
58237 +static int
58238 +copy_user_glob(struct acl_object_label *obj)
58239 +{
58240 + struct acl_object_label *g_tmp, **guser;
58241 + unsigned int len;
58242 + char *tmp;
58243 +
58244 + if (obj->globbed == NULL)
58245 + return 0;
58246 +
58247 + guser = &obj->globbed;
58248 + while (*guser) {
58249 + g_tmp = (struct acl_object_label *)
58250 + acl_alloc(sizeof (struct acl_object_label));
58251 + if (g_tmp == NULL)
58252 + return -ENOMEM;
58253 +
58254 + if (copy_from_user(g_tmp, *guser,
58255 + sizeof (struct acl_object_label)))
58256 + return -EFAULT;
58257 +
58258 + len = strnlen_user(g_tmp->filename, PATH_MAX);
58259 +
58260 + if (!len || len >= PATH_MAX)
58261 + return -EINVAL;
58262 +
58263 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58264 + return -ENOMEM;
58265 +
58266 + if (copy_from_user(tmp, g_tmp->filename, len))
58267 + return -EFAULT;
58268 + tmp[len-1] = '\0';
58269 + g_tmp->filename = tmp;
58270 +
58271 + *guser = g_tmp;
58272 + guser = &(g_tmp->next);
58273 + }
58274 +
58275 + return 0;
58276 +}
58277 +
58278 +static int
58279 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
58280 + struct acl_role_label *role)
58281 +{
58282 + struct acl_object_label *o_tmp;
58283 + unsigned int len;
58284 + int ret;
58285 + char *tmp;
58286 +
58287 + while (userp) {
58288 + if ((o_tmp = (struct acl_object_label *)
58289 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
58290 + return -ENOMEM;
58291 +
58292 + if (copy_from_user(o_tmp, userp,
58293 + sizeof (struct acl_object_label)))
58294 + return -EFAULT;
58295 +
58296 + userp = o_tmp->prev;
58297 +
58298 + len = strnlen_user(o_tmp->filename, PATH_MAX);
58299 +
58300 + if (!len || len >= PATH_MAX)
58301 + return -EINVAL;
58302 +
58303 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58304 + return -ENOMEM;
58305 +
58306 + if (copy_from_user(tmp, o_tmp->filename, len))
58307 + return -EFAULT;
58308 + tmp[len-1] = '\0';
58309 + o_tmp->filename = tmp;
58310 +
58311 + insert_acl_obj_label(o_tmp, subj);
58312 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
58313 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
58314 + return -ENOMEM;
58315 +
58316 + ret = copy_user_glob(o_tmp);
58317 + if (ret)
58318 + return ret;
58319 +
58320 + if (o_tmp->nested) {
58321 + int already_copied;
58322 +
58323 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
58324 + if (IS_ERR(o_tmp->nested))
58325 + return PTR_ERR(o_tmp->nested);
58326 +
58327 + /* insert into nested subject list if we haven't copied this one yet
58328 + to prevent duplicate entries */
58329 + if (!already_copied) {
58330 + o_tmp->nested->next = role->hash->first;
58331 + role->hash->first = o_tmp->nested;
58332 + }
58333 + }
58334 + }
58335 +
58336 + return 0;
58337 +}
58338 +
58339 +static __u32
58340 +count_user_subjs(struct acl_subject_label *userp)
58341 +{
58342 + struct acl_subject_label s_tmp;
58343 + __u32 num = 0;
58344 +
58345 + while (userp) {
58346 + if (copy_from_user(&s_tmp, userp,
58347 + sizeof (struct acl_subject_label)))
58348 + break;
58349 +
58350 + userp = s_tmp.prev;
58351 + }
58352 +
58353 + return num;
58354 +}
58355 +
58356 +static int
58357 +copy_user_allowedips(struct acl_role_label *rolep)
58358 +{
58359 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
58360 +
58361 + ruserip = rolep->allowed_ips;
58362 +
58363 + while (ruserip) {
58364 + rlast = rtmp;
58365 +
58366 + if ((rtmp = (struct role_allowed_ip *)
58367 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
58368 + return -ENOMEM;
58369 +
58370 + if (copy_from_user(rtmp, ruserip,
58371 + sizeof (struct role_allowed_ip)))
58372 + return -EFAULT;
58373 +
58374 + ruserip = rtmp->prev;
58375 +
58376 + if (!rlast) {
58377 + rtmp->prev = NULL;
58378 + rolep->allowed_ips = rtmp;
58379 + } else {
58380 + rlast->next = rtmp;
58381 + rtmp->prev = rlast;
58382 + }
58383 +
58384 + if (!ruserip)
58385 + rtmp->next = NULL;
58386 + }
58387 +
58388 + return 0;
58389 +}
58390 +
58391 +static int
58392 +copy_user_transitions(struct acl_role_label *rolep)
58393 +{
58394 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
58395 +
58396 + unsigned int len;
58397 + char *tmp;
58398 +
58399 + rusertp = rolep->transitions;
58400 +
58401 + while (rusertp) {
58402 + rlast = rtmp;
58403 +
58404 + if ((rtmp = (struct role_transition *)
58405 + acl_alloc(sizeof (struct role_transition))) == NULL)
58406 + return -ENOMEM;
58407 +
58408 + if (copy_from_user(rtmp, rusertp,
58409 + sizeof (struct role_transition)))
58410 + return -EFAULT;
58411 +
58412 + rusertp = rtmp->prev;
58413 +
58414 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
58415 +
58416 + if (!len || len >= GR_SPROLE_LEN)
58417 + return -EINVAL;
58418 +
58419 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58420 + return -ENOMEM;
58421 +
58422 + if (copy_from_user(tmp, rtmp->rolename, len))
58423 + return -EFAULT;
58424 + tmp[len-1] = '\0';
58425 + rtmp->rolename = tmp;
58426 +
58427 + if (!rlast) {
58428 + rtmp->prev = NULL;
58429 + rolep->transitions = rtmp;
58430 + } else {
58431 + rlast->next = rtmp;
58432 + rtmp->prev = rlast;
58433 + }
58434 +
58435 + if (!rusertp)
58436 + rtmp->next = NULL;
58437 + }
58438 +
58439 + return 0;
58440 +}
58441 +
58442 +static struct acl_subject_label *
58443 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
58444 +{
58445 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
58446 + unsigned int len;
58447 + char *tmp;
58448 + __u32 num_objs;
58449 + struct acl_ip_label **i_tmp, *i_utmp2;
58450 + struct gr_hash_struct ghash;
58451 + struct subject_map *subjmap;
58452 + unsigned int i_num;
58453 + int err;
58454 +
58455 + if (already_copied != NULL)
58456 + *already_copied = 0;
58457 +
58458 + s_tmp = lookup_subject_map(userp);
58459 +
58460 + /* we've already copied this subject into the kernel, just return
58461 + the reference to it, and don't copy it over again
58462 + */
58463 + if (s_tmp) {
58464 + if (already_copied != NULL)
58465 + *already_copied = 1;
58466 + return(s_tmp);
58467 + }
58468 +
58469 + if ((s_tmp = (struct acl_subject_label *)
58470 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
58471 + return ERR_PTR(-ENOMEM);
58472 +
58473 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
58474 + if (subjmap == NULL)
58475 + return ERR_PTR(-ENOMEM);
58476 +
58477 + subjmap->user = userp;
58478 + subjmap->kernel = s_tmp;
58479 + insert_subj_map_entry(subjmap);
58480 +
58481 + if (copy_from_user(s_tmp, userp,
58482 + sizeof (struct acl_subject_label)))
58483 + return ERR_PTR(-EFAULT);
58484 +
58485 + len = strnlen_user(s_tmp->filename, PATH_MAX);
58486 +
58487 + if (!len || len >= PATH_MAX)
58488 + return ERR_PTR(-EINVAL);
58489 +
58490 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58491 + return ERR_PTR(-ENOMEM);
58492 +
58493 + if (copy_from_user(tmp, s_tmp->filename, len))
58494 + return ERR_PTR(-EFAULT);
58495 + tmp[len-1] = '\0';
58496 + s_tmp->filename = tmp;
58497 +
58498 + if (!strcmp(s_tmp->filename, "/"))
58499 + role->root_label = s_tmp;
58500 +
58501 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
58502 + return ERR_PTR(-EFAULT);
58503 +
58504 + /* copy user and group transition tables */
58505 +
58506 + if (s_tmp->user_trans_num) {
58507 + uid_t *uidlist;
58508 +
58509 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
58510 + if (uidlist == NULL)
58511 + return ERR_PTR(-ENOMEM);
58512 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
58513 + return ERR_PTR(-EFAULT);
58514 +
58515 + s_tmp->user_transitions = uidlist;
58516 + }
58517 +
58518 + if (s_tmp->group_trans_num) {
58519 + gid_t *gidlist;
58520 +
58521 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
58522 + if (gidlist == NULL)
58523 + return ERR_PTR(-ENOMEM);
58524 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
58525 + return ERR_PTR(-EFAULT);
58526 +
58527 + s_tmp->group_transitions = gidlist;
58528 + }
58529 +
58530 + /* set up object hash table */
58531 + num_objs = count_user_objs(ghash.first);
58532 +
58533 + s_tmp->obj_hash_size = num_objs;
58534 + s_tmp->obj_hash =
58535 + (struct acl_object_label **)
58536 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
58537 +
58538 + if (!s_tmp->obj_hash)
58539 + return ERR_PTR(-ENOMEM);
58540 +
58541 + memset(s_tmp->obj_hash, 0,
58542 + s_tmp->obj_hash_size *
58543 + sizeof (struct acl_object_label *));
58544 +
58545 + /* add in objects */
58546 + err = copy_user_objs(ghash.first, s_tmp, role);
58547 +
58548 + if (err)
58549 + return ERR_PTR(err);
58550 +
58551 + /* set pointer for parent subject */
58552 + if (s_tmp->parent_subject) {
58553 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
58554 +
58555 + if (IS_ERR(s_tmp2))
58556 + return s_tmp2;
58557 +
58558 + s_tmp->parent_subject = s_tmp2;
58559 + }
58560 +
58561 + /* add in ip acls */
58562 +
58563 + if (!s_tmp->ip_num) {
58564 + s_tmp->ips = NULL;
58565 + goto insert;
58566 + }
58567 +
58568 + i_tmp =
58569 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
58570 + sizeof (struct acl_ip_label *));
58571 +
58572 + if (!i_tmp)
58573 + return ERR_PTR(-ENOMEM);
58574 +
58575 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
58576 + *(i_tmp + i_num) =
58577 + (struct acl_ip_label *)
58578 + acl_alloc(sizeof (struct acl_ip_label));
58579 + if (!*(i_tmp + i_num))
58580 + return ERR_PTR(-ENOMEM);
58581 +
58582 + if (copy_from_user
58583 + (&i_utmp2, s_tmp->ips + i_num,
58584 + sizeof (struct acl_ip_label *)))
58585 + return ERR_PTR(-EFAULT);
58586 +
58587 + if (copy_from_user
58588 + (*(i_tmp + i_num), i_utmp2,
58589 + sizeof (struct acl_ip_label)))
58590 + return ERR_PTR(-EFAULT);
58591 +
58592 + if ((*(i_tmp + i_num))->iface == NULL)
58593 + continue;
58594 +
58595 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
58596 + if (!len || len >= IFNAMSIZ)
58597 + return ERR_PTR(-EINVAL);
58598 + tmp = acl_alloc(len);
58599 + if (tmp == NULL)
58600 + return ERR_PTR(-ENOMEM);
58601 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
58602 + return ERR_PTR(-EFAULT);
58603 + (*(i_tmp + i_num))->iface = tmp;
58604 + }
58605 +
58606 + s_tmp->ips = i_tmp;
58607 +
58608 +insert:
58609 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
58610 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
58611 + return ERR_PTR(-ENOMEM);
58612 +
58613 + return s_tmp;
58614 +}
58615 +
58616 +static int
58617 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58618 +{
58619 + struct acl_subject_label s_pre;
58620 + struct acl_subject_label * ret;
58621 + int err;
58622 +
58623 + while (userp) {
58624 + if (copy_from_user(&s_pre, userp,
58625 + sizeof (struct acl_subject_label)))
58626 + return -EFAULT;
58627 +
58628 + ret = do_copy_user_subj(userp, role, NULL);
58629 +
58630 + err = PTR_ERR(ret);
58631 + if (IS_ERR(ret))
58632 + return err;
58633 +
58634 + insert_acl_subj_label(ret, role);
58635 +
58636 + userp = s_pre.prev;
58637 + }
58638 +
58639 + return 0;
58640 +}
58641 +
58642 +static int
58643 +copy_user_acl(struct gr_arg *arg)
58644 +{
58645 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58646 + struct acl_subject_label *subj_list;
58647 + struct sprole_pw *sptmp;
58648 + struct gr_hash_struct *ghash;
58649 + uid_t *domainlist;
58650 + unsigned int r_num;
58651 + unsigned int len;
58652 + char *tmp;
58653 + int err = 0;
58654 + __u16 i;
58655 + __u32 num_subjs;
58656 +
58657 + /* we need a default and kernel role */
58658 + if (arg->role_db.num_roles < 2)
58659 + return -EINVAL;
58660 +
58661 + /* copy special role authentication info from userspace */
58662 +
58663 + num_sprole_pws = arg->num_sprole_pws;
58664 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58665 +
58666 + if (!acl_special_roles && num_sprole_pws)
58667 + return -ENOMEM;
58668 +
58669 + for (i = 0; i < num_sprole_pws; i++) {
58670 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58671 + if (!sptmp)
58672 + return -ENOMEM;
58673 + if (copy_from_user(sptmp, arg->sprole_pws + i,
58674 + sizeof (struct sprole_pw)))
58675 + return -EFAULT;
58676 +
58677 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58678 +
58679 + if (!len || len >= GR_SPROLE_LEN)
58680 + return -EINVAL;
58681 +
58682 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58683 + return -ENOMEM;
58684 +
58685 + if (copy_from_user(tmp, sptmp->rolename, len))
58686 + return -EFAULT;
58687 +
58688 + tmp[len-1] = '\0';
58689 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58690 + printk(KERN_ALERT "Copying special role %s\n", tmp);
58691 +#endif
58692 + sptmp->rolename = tmp;
58693 + acl_special_roles[i] = sptmp;
58694 + }
58695 +
58696 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58697 +
58698 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58699 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
58700 +
58701 + if (!r_tmp)
58702 + return -ENOMEM;
58703 +
58704 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
58705 + sizeof (struct acl_role_label *)))
58706 + return -EFAULT;
58707 +
58708 + if (copy_from_user(r_tmp, r_utmp2,
58709 + sizeof (struct acl_role_label)))
58710 + return -EFAULT;
58711 +
58712 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58713 +
58714 + if (!len || len >= PATH_MAX)
58715 + return -EINVAL;
58716 +
58717 + if ((tmp = (char *) acl_alloc(len)) == NULL)
58718 + return -ENOMEM;
58719 +
58720 + if (copy_from_user(tmp, r_tmp->rolename, len))
58721 + return -EFAULT;
58722 +
58723 + tmp[len-1] = '\0';
58724 + r_tmp->rolename = tmp;
58725 +
58726 + if (!strcmp(r_tmp->rolename, "default")
58727 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58728 + default_role = r_tmp;
58729 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58730 + kernel_role = r_tmp;
58731 + }
58732 +
58733 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
58734 + return -ENOMEM;
58735 +
58736 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
58737 + return -EFAULT;
58738 +
58739 + r_tmp->hash = ghash;
58740 +
58741 + num_subjs = count_user_subjs(r_tmp->hash->first);
58742 +
58743 + r_tmp->subj_hash_size = num_subjs;
58744 + r_tmp->subj_hash =
58745 + (struct acl_subject_label **)
58746 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58747 +
58748 + if (!r_tmp->subj_hash)
58749 + return -ENOMEM;
58750 +
58751 + err = copy_user_allowedips(r_tmp);
58752 + if (err)
58753 + return err;
58754 +
58755 + /* copy domain info */
58756 + if (r_tmp->domain_children != NULL) {
58757 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58758 + if (domainlist == NULL)
58759 + return -ENOMEM;
58760 +
58761 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
58762 + return -EFAULT;
58763 +
58764 + r_tmp->domain_children = domainlist;
58765 + }
58766 +
58767 + err = copy_user_transitions(r_tmp);
58768 + if (err)
58769 + return err;
58770 +
58771 + memset(r_tmp->subj_hash, 0,
58772 + r_tmp->subj_hash_size *
58773 + sizeof (struct acl_subject_label *));
58774 +
58775 + /* acquire the list of subjects, then NULL out
58776 + the list prior to parsing the subjects for this role,
58777 + as during this parsing the list is replaced with a list
58778 + of *nested* subjects for the role
58779 + */
58780 + subj_list = r_tmp->hash->first;
58781 +
58782 + /* set nested subject list to null */
58783 + r_tmp->hash->first = NULL;
58784 +
58785 + err = copy_user_subjs(subj_list, r_tmp);
58786 +
58787 + if (err)
58788 + return err;
58789 +
58790 + insert_acl_role_label(r_tmp);
58791 + }
58792 +
58793 + if (default_role == NULL || kernel_role == NULL)
58794 + return -EINVAL;
58795 +
58796 + return err;
58797 +}
58798 +
58799 +static int
58800 +gracl_init(struct gr_arg *args)
58801 +{
58802 + int error = 0;
58803 +
58804 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58805 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58806 +
58807 + if (init_variables(args)) {
58808 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58809 + error = -ENOMEM;
58810 + free_variables();
58811 + goto out;
58812 + }
58813 +
58814 + error = copy_user_acl(args);
58815 + free_init_variables();
58816 + if (error) {
58817 + free_variables();
58818 + goto out;
58819 + }
58820 +
58821 + if ((error = gr_set_acls(0))) {
58822 + free_variables();
58823 + goto out;
58824 + }
58825 +
58826 + pax_open_kernel();
58827 + gr_status |= GR_READY;
58828 + pax_close_kernel();
58829 +
58830 + out:
58831 + return error;
58832 +}
58833 +
58834 +/* derived from glibc fnmatch() 0: match, 1: no match*/
58835 +
58836 +static int
58837 +glob_match(const char *p, const char *n)
58838 +{
58839 + char c;
58840 +
58841 + while ((c = *p++) != '\0') {
58842 + switch (c) {
58843 + case '?':
58844 + if (*n == '\0')
58845 + return 1;
58846 + else if (*n == '/')
58847 + return 1;
58848 + break;
58849 + case '\\':
58850 + if (*n != c)
58851 + return 1;
58852 + break;
58853 + case '*':
58854 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
58855 + if (*n == '/')
58856 + return 1;
58857 + else if (c == '?') {
58858 + if (*n == '\0')
58859 + return 1;
58860 + else
58861 + ++n;
58862 + }
58863 + }
58864 + if (c == '\0') {
58865 + return 0;
58866 + } else {
58867 + const char *endp;
58868 +
58869 + if ((endp = strchr(n, '/')) == NULL)
58870 + endp = n + strlen(n);
58871 +
58872 + if (c == '[') {
58873 + for (--p; n < endp; ++n)
58874 + if (!glob_match(p, n))
58875 + return 0;
58876 + } else if (c == '/') {
58877 + while (*n != '\0' && *n != '/')
58878 + ++n;
58879 + if (*n == '/' && !glob_match(p, n + 1))
58880 + return 0;
58881 + } else {
58882 + for (--p; n < endp; ++n)
58883 + if (*n == c && !glob_match(p, n))
58884 + return 0;
58885 + }
58886 +
58887 + return 1;
58888 + }
58889 + case '[':
58890 + {
58891 + int not;
58892 + char cold;
58893 +
58894 + if (*n == '\0' || *n == '/')
58895 + return 1;
58896 +
58897 + not = (*p == '!' || *p == '^');
58898 + if (not)
58899 + ++p;
58900 +
58901 + c = *p++;
58902 + for (;;) {
58903 + unsigned char fn = (unsigned char)*n;
58904 +
58905 + if (c == '\0')
58906 + return 1;
58907 + else {
58908 + if (c == fn)
58909 + goto matched;
58910 + cold = c;
58911 + c = *p++;
58912 +
58913 + if (c == '-' && *p != ']') {
58914 + unsigned char cend = *p++;
58915 +
58916 + if (cend == '\0')
58917 + return 1;
58918 +
58919 + if (cold <= fn && fn <= cend)
58920 + goto matched;
58921 +
58922 + c = *p++;
58923 + }
58924 + }
58925 +
58926 + if (c == ']')
58927 + break;
58928 + }
58929 + if (!not)
58930 + return 1;
58931 + break;
58932 + matched:
58933 + while (c != ']') {
58934 + if (c == '\0')
58935 + return 1;
58936 +
58937 + c = *p++;
58938 + }
58939 + if (not)
58940 + return 1;
58941 + }
58942 + break;
58943 + default:
58944 + if (c != *n)
58945 + return 1;
58946 + }
58947 +
58948 + ++n;
58949 + }
58950 +
58951 + if (*n == '\0')
58952 + return 0;
58953 +
58954 + if (*n == '/')
58955 + return 0;
58956 +
58957 + return 1;
58958 +}
58959 +
58960 +static struct acl_object_label *
58961 +chk_glob_label(struct acl_object_label *globbed,
58962 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58963 +{
58964 + struct acl_object_label *tmp;
58965 +
58966 + if (*path == NULL)
58967 + *path = gr_to_filename_nolock(dentry, mnt);
58968 +
58969 + tmp = globbed;
58970 +
58971 + while (tmp) {
58972 + if (!glob_match(tmp->filename, *path))
58973 + return tmp;
58974 + tmp = tmp->next;
58975 + }
58976 +
58977 + return NULL;
58978 +}
58979 +
58980 +static struct acl_object_label *
58981 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58982 + const ino_t curr_ino, const dev_t curr_dev,
58983 + const struct acl_subject_label *subj, char **path, const int checkglob)
58984 +{
58985 + struct acl_subject_label *tmpsubj;
58986 + struct acl_object_label *retval;
58987 + struct acl_object_label *retval2;
58988 +
58989 + tmpsubj = (struct acl_subject_label *) subj;
58990 + read_lock(&gr_inode_lock);
58991 + do {
58992 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58993 + if (retval) {
58994 + if (checkglob && retval->globbed) {
58995 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58996 + if (retval2)
58997 + retval = retval2;
58998 + }
58999 + break;
59000 + }
59001 + } while ((tmpsubj = tmpsubj->parent_subject));
59002 + read_unlock(&gr_inode_lock);
59003 +
59004 + return retval;
59005 +}
59006 +
59007 +static __inline__ struct acl_object_label *
59008 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59009 + struct dentry *curr_dentry,
59010 + const struct acl_subject_label *subj, char **path, const int checkglob)
59011 +{
59012 + int newglob = checkglob;
59013 + ino_t inode;
59014 + dev_t device;
59015 +
59016 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
59017 + as we don't want a / * rule to match instead of the / object
59018 + don't do this for create lookups that call this function though, since they're looking up
59019 + on the parent and thus need globbing checks on all paths
59020 + */
59021 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
59022 + newglob = GR_NO_GLOB;
59023 +
59024 + spin_lock(&curr_dentry->d_lock);
59025 + inode = curr_dentry->d_inode->i_ino;
59026 + device = __get_dev(curr_dentry);
59027 + spin_unlock(&curr_dentry->d_lock);
59028 +
59029 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
59030 +}
59031 +
59032 +#ifdef CONFIG_HUGETLBFS
59033 +static inline bool
59034 +is_hugetlbfs_mnt(const struct vfsmount *mnt)
59035 +{
59036 + int i;
59037 + for (i = 0; i < HUGE_MAX_HSTATE; i++) {
59038 + if (unlikely(hugetlbfs_vfsmount[i] == mnt))
59039 + return true;
59040 + }
59041 +
59042 + return false;
59043 +}
59044 +#endif
59045 +
59046 +static struct acl_object_label *
59047 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59048 + const struct acl_subject_label *subj, char *path, const int checkglob)
59049 +{
59050 + struct dentry *dentry = (struct dentry *) l_dentry;
59051 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59052 + struct mount *real_mnt = real_mount(mnt);
59053 + struct acl_object_label *retval;
59054 + struct dentry *parent;
59055 +
59056 + br_read_lock(&vfsmount_lock);
59057 + write_seqlock(&rename_lock);
59058 +
59059 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
59060 +#ifdef CONFIG_NET
59061 + mnt == sock_mnt ||
59062 +#endif
59063 +#ifdef CONFIG_HUGETLBFS
59064 + (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
59065 +#endif
59066 + /* ignore Eric Biederman */
59067 + IS_PRIVATE(l_dentry->d_inode))) {
59068 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
59069 + goto out;
59070 + }
59071 +
59072 + for (;;) {
59073 + if (dentry == real_root.dentry && mnt == real_root.mnt)
59074 + break;
59075 +
59076 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59077 + if (!mnt_has_parent(real_mnt))
59078 + break;
59079 +
59080 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59081 + if (retval != NULL)
59082 + goto out;
59083 +
59084 + dentry = real_mnt->mnt_mountpoint;
59085 + real_mnt = real_mnt->mnt_parent;
59086 + mnt = &real_mnt->mnt;
59087 + continue;
59088 + }
59089 +
59090 + parent = dentry->d_parent;
59091 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59092 + if (retval != NULL)
59093 + goto out;
59094 +
59095 + dentry = parent;
59096 + }
59097 +
59098 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59099 +
59100 + /* real_root is pinned so we don't have to hold a reference */
59101 + if (retval == NULL)
59102 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
59103 +out:
59104 + write_sequnlock(&rename_lock);
59105 + br_read_unlock(&vfsmount_lock);
59106 +
59107 + BUG_ON(retval == NULL);
59108 +
59109 + return retval;
59110 +}
59111 +
59112 +static __inline__ struct acl_object_label *
59113 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59114 + const struct acl_subject_label *subj)
59115 +{
59116 + char *path = NULL;
59117 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
59118 +}
59119 +
59120 +static __inline__ struct acl_object_label *
59121 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59122 + const struct acl_subject_label *subj)
59123 +{
59124 + char *path = NULL;
59125 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
59126 +}
59127 +
59128 +static __inline__ struct acl_object_label *
59129 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59130 + const struct acl_subject_label *subj, char *path)
59131 +{
59132 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
59133 +}
59134 +
59135 +static struct acl_subject_label *
59136 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59137 + const struct acl_role_label *role)
59138 +{
59139 + struct dentry *dentry = (struct dentry *) l_dentry;
59140 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59141 + struct mount *real_mnt = real_mount(mnt);
59142 + struct acl_subject_label *retval;
59143 + struct dentry *parent;
59144 +
59145 + br_read_lock(&vfsmount_lock);
59146 + write_seqlock(&rename_lock);
59147 +
59148 + for (;;) {
59149 + if (dentry == real_root.dentry && mnt == real_root.mnt)
59150 + break;
59151 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59152 + if (!mnt_has_parent(real_mnt))
59153 + break;
59154 +
59155 + spin_lock(&dentry->d_lock);
59156 + read_lock(&gr_inode_lock);
59157 + retval =
59158 + lookup_acl_subj_label(dentry->d_inode->i_ino,
59159 + __get_dev(dentry), role);
59160 + read_unlock(&gr_inode_lock);
59161 + spin_unlock(&dentry->d_lock);
59162 + if (retval != NULL)
59163 + goto out;
59164 +
59165 + dentry = real_mnt->mnt_mountpoint;
59166 + real_mnt = real_mnt->mnt_parent;
59167 + mnt = &real_mnt->mnt;
59168 + continue;
59169 + }
59170 +
59171 + spin_lock(&dentry->d_lock);
59172 + read_lock(&gr_inode_lock);
59173 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59174 + __get_dev(dentry), role);
59175 + read_unlock(&gr_inode_lock);
59176 + parent = dentry->d_parent;
59177 + spin_unlock(&dentry->d_lock);
59178 +
59179 + if (retval != NULL)
59180 + goto out;
59181 +
59182 + dentry = parent;
59183 + }
59184 +
59185 + spin_lock(&dentry->d_lock);
59186 + read_lock(&gr_inode_lock);
59187 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59188 + __get_dev(dentry), role);
59189 + read_unlock(&gr_inode_lock);
59190 + spin_unlock(&dentry->d_lock);
59191 +
59192 + if (unlikely(retval == NULL)) {
59193 + /* real_root is pinned, we don't need to hold a reference */
59194 + read_lock(&gr_inode_lock);
59195 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
59196 + __get_dev(real_root.dentry), role);
59197 + read_unlock(&gr_inode_lock);
59198 + }
59199 +out:
59200 + write_sequnlock(&rename_lock);
59201 + br_read_unlock(&vfsmount_lock);
59202 +
59203 + BUG_ON(retval == NULL);
59204 +
59205 + return retval;
59206 +}
59207 +
59208 +static void
59209 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
59210 +{
59211 + struct task_struct *task = current;
59212 + const struct cred *cred = current_cred();
59213 +
59214 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
59215 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59216 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59217 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
59218 +
59219 + return;
59220 +}
59221 +
59222 +static void
59223 +gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
59224 +{
59225 + struct task_struct *task = current;
59226 + const struct cred *cred = current_cred();
59227 +
59228 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59229 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59230 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59231 + 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
59232 +
59233 + return;
59234 +}
59235 +
59236 +static void
59237 +gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
59238 +{
59239 + struct task_struct *task = current;
59240 + const struct cred *cred = current_cred();
59241 +
59242 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59243 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59244 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59245 + 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
59246 +
59247 + return;
59248 +}
59249 +
59250 +__u32
59251 +gr_search_file(const struct dentry * dentry, const __u32 mode,
59252 + const struct vfsmount * mnt)
59253 +{
59254 + __u32 retval = mode;
59255 + struct acl_subject_label *curracl;
59256 + struct acl_object_label *currobj;
59257 +
59258 + if (unlikely(!(gr_status & GR_READY)))
59259 + return (mode & ~GR_AUDITS);
59260 +
59261 + curracl = current->acl;
59262 +
59263 + currobj = chk_obj_label(dentry, mnt, curracl);
59264 + retval = currobj->mode & mode;
59265 +
59266 + /* if we're opening a specified transfer file for writing
59267 + (e.g. /dev/initctl), then transfer our role to init
59268 + */
59269 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
59270 + current->role->roletype & GR_ROLE_PERSIST)) {
59271 + struct task_struct *task = init_pid_ns.child_reaper;
59272 +
59273 + if (task->role != current->role) {
59274 + task->acl_sp_role = 0;
59275 + task->acl_role_id = current->acl_role_id;
59276 + task->role = current->role;
59277 + rcu_read_lock();
59278 + read_lock(&grsec_exec_file_lock);
59279 + gr_apply_subject_to_task(task);
59280 + read_unlock(&grsec_exec_file_lock);
59281 + rcu_read_unlock();
59282 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
59283 + }
59284 + }
59285 +
59286 + if (unlikely
59287 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
59288 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
59289 + __u32 new_mode = mode;
59290 +
59291 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59292 +
59293 + retval = new_mode;
59294 +
59295 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
59296 + new_mode |= GR_INHERIT;
59297 +
59298 + if (!(mode & GR_NOLEARN))
59299 + gr_log_learn(dentry, mnt, new_mode);
59300 + }
59301 +
59302 + return retval;
59303 +}
59304 +
59305 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
59306 + const struct dentry *parent,
59307 + const struct vfsmount *mnt)
59308 +{
59309 + struct name_entry *match;
59310 + struct acl_object_label *matchpo;
59311 + struct acl_subject_label *curracl;
59312 + char *path;
59313 +
59314 + if (unlikely(!(gr_status & GR_READY)))
59315 + return NULL;
59316 +
59317 + preempt_disable();
59318 + path = gr_to_filename_rbac(new_dentry, mnt);
59319 + match = lookup_name_entry_create(path);
59320 +
59321 + curracl = current->acl;
59322 +
59323 + if (match) {
59324 + read_lock(&gr_inode_lock);
59325 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
59326 + read_unlock(&gr_inode_lock);
59327 +
59328 + if (matchpo) {
59329 + preempt_enable();
59330 + return matchpo;
59331 + }
59332 + }
59333 +
59334 + // lookup parent
59335 +
59336 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
59337 +
59338 + preempt_enable();
59339 + return matchpo;
59340 +}
59341 +
59342 +__u32
59343 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
59344 + const struct vfsmount * mnt, const __u32 mode)
59345 +{
59346 + struct acl_object_label *matchpo;
59347 + __u32 retval;
59348 +
59349 + if (unlikely(!(gr_status & GR_READY)))
59350 + return (mode & ~GR_AUDITS);
59351 +
59352 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
59353 +
59354 + retval = matchpo->mode & mode;
59355 +
59356 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
59357 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59358 + __u32 new_mode = mode;
59359 +
59360 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59361 +
59362 + gr_log_learn(new_dentry, mnt, new_mode);
59363 + return new_mode;
59364 + }
59365 +
59366 + return retval;
59367 +}
59368 +
59369 +__u32
59370 +gr_check_link(const struct dentry * new_dentry,
59371 + const struct dentry * parent_dentry,
59372 + const struct vfsmount * parent_mnt,
59373 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
59374 +{
59375 + struct acl_object_label *obj;
59376 + __u32 oldmode, newmode;
59377 + __u32 needmode;
59378 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
59379 + GR_DELETE | GR_INHERIT;
59380 +
59381 + if (unlikely(!(gr_status & GR_READY)))
59382 + return (GR_CREATE | GR_LINK);
59383 +
59384 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
59385 + oldmode = obj->mode;
59386 +
59387 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
59388 + newmode = obj->mode;
59389 +
59390 + needmode = newmode & checkmodes;
59391 +
59392 + // old name for hardlink must have at least the permissions of the new name
59393 + if ((oldmode & needmode) != needmode)
59394 + goto bad;
59395 +
59396 + // if old name had restrictions/auditing, make sure the new name does as well
59397 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
59398 +
59399 + // don't allow hardlinking of suid/sgid/fcapped files without permission
59400 + if (is_privileged_binary(old_dentry))
59401 + needmode |= GR_SETID;
59402 +
59403 + if ((newmode & needmode) != needmode)
59404 + goto bad;
59405 +
59406 + // enforce minimum permissions
59407 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
59408 + return newmode;
59409 +bad:
59410 + needmode = oldmode;
59411 + if (is_privileged_binary(old_dentry))
59412 + needmode |= GR_SETID;
59413 +
59414 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
59415 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
59416 + return (GR_CREATE | GR_LINK);
59417 + } else if (newmode & GR_SUPPRESS)
59418 + return GR_SUPPRESS;
59419 + else
59420 + return 0;
59421 +}
59422 +
59423 +int
59424 +gr_check_hidden_task(const struct task_struct *task)
59425 +{
59426 + if (unlikely(!(gr_status & GR_READY)))
59427 + return 0;
59428 +
59429 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
59430 + return 1;
59431 +
59432 + return 0;
59433 +}
59434 +
59435 +int
59436 +gr_check_protected_task(const struct task_struct *task)
59437 +{
59438 + if (unlikely(!(gr_status & GR_READY) || !task))
59439 + return 0;
59440 +
59441 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59442 + task->acl != current->acl)
59443 + return 1;
59444 +
59445 + return 0;
59446 +}
59447 +
59448 +int
59449 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59450 +{
59451 + struct task_struct *p;
59452 + int ret = 0;
59453 +
59454 + if (unlikely(!(gr_status & GR_READY) || !pid))
59455 + return ret;
59456 +
59457 + read_lock(&tasklist_lock);
59458 + do_each_pid_task(pid, type, p) {
59459 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59460 + p->acl != current->acl) {
59461 + ret = 1;
59462 + goto out;
59463 + }
59464 + } while_each_pid_task(pid, type, p);
59465 +out:
59466 + read_unlock(&tasklist_lock);
59467 +
59468 + return ret;
59469 +}
59470 +
59471 +void
59472 +gr_copy_label(struct task_struct *tsk)
59473 +{
59474 + tsk->signal->used_accept = 0;
59475 + tsk->acl_sp_role = 0;
59476 + tsk->acl_role_id = current->acl_role_id;
59477 + tsk->acl = current->acl;
59478 + tsk->role = current->role;
59479 + tsk->signal->curr_ip = current->signal->curr_ip;
59480 + tsk->signal->saved_ip = current->signal->saved_ip;
59481 + if (current->exec_file)
59482 + get_file(current->exec_file);
59483 + tsk->exec_file = current->exec_file;
59484 + tsk->is_writable = current->is_writable;
59485 + if (unlikely(current->signal->used_accept)) {
59486 + current->signal->curr_ip = 0;
59487 + current->signal->saved_ip = 0;
59488 + }
59489 +
59490 + return;
59491 +}
59492 +
59493 +static void
59494 +gr_set_proc_res(struct task_struct *task)
59495 +{
59496 + struct acl_subject_label *proc;
59497 + unsigned short i;
59498 +
59499 + proc = task->acl;
59500 +
59501 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
59502 + return;
59503 +
59504 + for (i = 0; i < RLIM_NLIMITS; i++) {
59505 + if (!(proc->resmask & (1U << i)))
59506 + continue;
59507 +
59508 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
59509 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
59510 +
59511 + if (i == RLIMIT_CPU)
59512 + update_rlimit_cpu(task, proc->res[i].rlim_cur);
59513 + }
59514 +
59515 + return;
59516 +}
59517 +
59518 +extern int __gr_process_user_ban(struct user_struct *user);
59519 +
59520 +int
59521 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
59522 +{
59523 + unsigned int i;
59524 + __u16 num;
59525 + uid_t *uidlist;
59526 + uid_t curuid;
59527 + int realok = 0;
59528 + int effectiveok = 0;
59529 + int fsok = 0;
59530 + uid_t globalreal, globaleffective, globalfs;
59531 +
59532 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59533 + struct user_struct *user;
59534 +
59535 + if (!uid_valid(real))
59536 + goto skipit;
59537 +
59538 + /* find user based on global namespace */
59539 +
59540 + globalreal = GR_GLOBAL_UID(real);
59541 +
59542 + user = find_user(make_kuid(&init_user_ns, globalreal));
59543 + if (user == NULL)
59544 + goto skipit;
59545 +
59546 + if (__gr_process_user_ban(user)) {
59547 + /* for find_user */
59548 + free_uid(user);
59549 + return 1;
59550 + }
59551 +
59552 + /* for find_user */
59553 + free_uid(user);
59554 +
59555 +skipit:
59556 +#endif
59557 +
59558 + if (unlikely(!(gr_status & GR_READY)))
59559 + return 0;
59560 +
59561 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59562 + gr_log_learn_uid_change(real, effective, fs);
59563 +
59564 + num = current->acl->user_trans_num;
59565 + uidlist = current->acl->user_transitions;
59566 +
59567 + if (uidlist == NULL)
59568 + return 0;
59569 +
59570 + if (!uid_valid(real)) {
59571 + realok = 1;
59572 + globalreal = (uid_t)-1;
59573 + } else {
59574 + globalreal = GR_GLOBAL_UID(real);
59575 + }
59576 + if (!uid_valid(effective)) {
59577 + effectiveok = 1;
59578 + globaleffective = (uid_t)-1;
59579 + } else {
59580 + globaleffective = GR_GLOBAL_UID(effective);
59581 + }
59582 + if (!uid_valid(fs)) {
59583 + fsok = 1;
59584 + globalfs = (uid_t)-1;
59585 + } else {
59586 + globalfs = GR_GLOBAL_UID(fs);
59587 + }
59588 +
59589 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
59590 + for (i = 0; i < num; i++) {
59591 + curuid = uidlist[i];
59592 + if (globalreal == curuid)
59593 + realok = 1;
59594 + if (globaleffective == curuid)
59595 + effectiveok = 1;
59596 + if (globalfs == curuid)
59597 + fsok = 1;
59598 + }
59599 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
59600 + for (i = 0; i < num; i++) {
59601 + curuid = uidlist[i];
59602 + if (globalreal == curuid)
59603 + break;
59604 + if (globaleffective == curuid)
59605 + break;
59606 + if (globalfs == curuid)
59607 + break;
59608 + }
59609 + /* not in deny list */
59610 + if (i == num) {
59611 + realok = 1;
59612 + effectiveok = 1;
59613 + fsok = 1;
59614 + }
59615 + }
59616 +
59617 + if (realok && effectiveok && fsok)
59618 + return 0;
59619 + else {
59620 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59621 + return 1;
59622 + }
59623 +}
59624 +
59625 +int
59626 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
59627 +{
59628 + unsigned int i;
59629 + __u16 num;
59630 + gid_t *gidlist;
59631 + gid_t curgid;
59632 + int realok = 0;
59633 + int effectiveok = 0;
59634 + int fsok = 0;
59635 + gid_t globalreal, globaleffective, globalfs;
59636 +
59637 + if (unlikely(!(gr_status & GR_READY)))
59638 + return 0;
59639 +
59640 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59641 + gr_log_learn_gid_change(real, effective, fs);
59642 +
59643 + num = current->acl->group_trans_num;
59644 + gidlist = current->acl->group_transitions;
59645 +
59646 + if (gidlist == NULL)
59647 + return 0;
59648 +
59649 + if (!gid_valid(real)) {
59650 + realok = 1;
59651 + globalreal = (gid_t)-1;
59652 + } else {
59653 + globalreal = GR_GLOBAL_GID(real);
59654 + }
59655 + if (!gid_valid(effective)) {
59656 + effectiveok = 1;
59657 + globaleffective = (gid_t)-1;
59658 + } else {
59659 + globaleffective = GR_GLOBAL_GID(effective);
59660 + }
59661 + if (!gid_valid(fs)) {
59662 + fsok = 1;
59663 + globalfs = (gid_t)-1;
59664 + } else {
59665 + globalfs = GR_GLOBAL_GID(fs);
59666 + }
59667 +
59668 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
59669 + for (i = 0; i < num; i++) {
59670 + curgid = gidlist[i];
59671 + if (globalreal == curgid)
59672 + realok = 1;
59673 + if (globaleffective == curgid)
59674 + effectiveok = 1;
59675 + if (globalfs == curgid)
59676 + fsok = 1;
59677 + }
59678 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
59679 + for (i = 0; i < num; i++) {
59680 + curgid = gidlist[i];
59681 + if (globalreal == curgid)
59682 + break;
59683 + if (globaleffective == curgid)
59684 + break;
59685 + if (globalfs == curgid)
59686 + break;
59687 + }
59688 + /* not in deny list */
59689 + if (i == num) {
59690 + realok = 1;
59691 + effectiveok = 1;
59692 + fsok = 1;
59693 + }
59694 + }
59695 +
59696 + if (realok && effectiveok && fsok)
59697 + return 0;
59698 + else {
59699 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59700 + return 1;
59701 + }
59702 +}
59703 +
59704 +extern int gr_acl_is_capable(const int cap);
59705 +
59706 +void
59707 +gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
59708 +{
59709 + struct acl_role_label *role = task->role;
59710 + struct acl_subject_label *subj = NULL;
59711 + struct acl_object_label *obj;
59712 + struct file *filp;
59713 + uid_t uid;
59714 + gid_t gid;
59715 +
59716 + if (unlikely(!(gr_status & GR_READY)))
59717 + return;
59718 +
59719 + uid = GR_GLOBAL_UID(kuid);
59720 + gid = GR_GLOBAL_GID(kgid);
59721 +
59722 + filp = task->exec_file;
59723 +
59724 + /* kernel process, we'll give them the kernel role */
59725 + if (unlikely(!filp)) {
59726 + task->role = kernel_role;
59727 + task->acl = kernel_role->root_label;
59728 + return;
59729 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59730 + role = lookup_acl_role_label(task, uid, gid);
59731 +
59732 + /* don't change the role if we're not a privileged process */
59733 + if (role && task->role != role &&
59734 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59735 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59736 + return;
59737 +
59738 + /* perform subject lookup in possibly new role
59739 + we can use this result below in the case where role == task->role
59740 + */
59741 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59742 +
59743 + /* if we changed uid/gid, but result in the same role
59744 + and are using inheritance, don't lose the inherited subject
59745 + if current subject is other than what normal lookup
59746 + would result in, we arrived via inheritance, don't
59747 + lose subject
59748 + */
59749 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59750 + (subj == task->acl)))
59751 + task->acl = subj;
59752 +
59753 + task->role = role;
59754 +
59755 + task->is_writable = 0;
59756 +
59757 + /* ignore additional mmap checks for processes that are writable
59758 + by the default ACL */
59759 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59760 + if (unlikely(obj->mode & GR_WRITE))
59761 + task->is_writable = 1;
59762 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59763 + if (unlikely(obj->mode & GR_WRITE))
59764 + task->is_writable = 1;
59765 +
59766 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59767 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
59768 +#endif
59769 +
59770 + gr_set_proc_res(task);
59771 +
59772 + return;
59773 +}
59774 +
59775 +int
59776 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59777 + const int unsafe_flags)
59778 +{
59779 + struct task_struct *task = current;
59780 + struct acl_subject_label *newacl;
59781 + struct acl_object_label *obj;
59782 + __u32 retmode;
59783 +
59784 + if (unlikely(!(gr_status & GR_READY)))
59785 + return 0;
59786 +
59787 + newacl = chk_subj_label(dentry, mnt, task->role);
59788 +
59789 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
59790 + did an exec
59791 + */
59792 + rcu_read_lock();
59793 + read_lock(&tasklist_lock);
59794 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
59795 + (task->parent->acl->mode & GR_POVERRIDE))) {
59796 + read_unlock(&tasklist_lock);
59797 + rcu_read_unlock();
59798 + goto skip_check;
59799 + }
59800 + read_unlock(&tasklist_lock);
59801 + rcu_read_unlock();
59802 +
59803 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59804 + !(task->role->roletype & GR_ROLE_GOD) &&
59805 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59806 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59807 + if (unsafe_flags & LSM_UNSAFE_SHARE)
59808 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59809 + else
59810 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59811 + return -EACCES;
59812 + }
59813 +
59814 +skip_check:
59815 +
59816 + obj = chk_obj_label(dentry, mnt, task->acl);
59817 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59818 +
59819 + if (!(task->acl->mode & GR_INHERITLEARN) &&
59820 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59821 + if (obj->nested)
59822 + task->acl = obj->nested;
59823 + else
59824 + task->acl = newacl;
59825 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59826 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59827 +
59828 + task->is_writable = 0;
59829 +
59830 + /* ignore additional mmap checks for processes that are writable
59831 + by the default ACL */
59832 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
59833 + if (unlikely(obj->mode & GR_WRITE))
59834 + task->is_writable = 1;
59835 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
59836 + if (unlikely(obj->mode & GR_WRITE))
59837 + task->is_writable = 1;
59838 +
59839 + gr_set_proc_res(task);
59840 +
59841 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59842 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
59843 +#endif
59844 + return 0;
59845 +}
59846 +
59847 +/* always called with valid inodev ptr */
59848 +static void
59849 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59850 +{
59851 + struct acl_object_label *matchpo;
59852 + struct acl_subject_label *matchps;
59853 + struct acl_subject_label *subj;
59854 + struct acl_role_label *role;
59855 + unsigned int x;
59856 +
59857 + FOR_EACH_ROLE_START(role)
59858 + FOR_EACH_SUBJECT_START(role, subj, x)
59859 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59860 + matchpo->mode |= GR_DELETED;
59861 + FOR_EACH_SUBJECT_END(subj,x)
59862 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
59863 + /* nested subjects aren't in the role's subj_hash table */
59864 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59865 + matchpo->mode |= GR_DELETED;
59866 + FOR_EACH_NESTED_SUBJECT_END(subj)
59867 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59868 + matchps->mode |= GR_DELETED;
59869 + FOR_EACH_ROLE_END(role)
59870 +
59871 + inodev->nentry->deleted = 1;
59872 +
59873 + return;
59874 +}
59875 +
59876 +void
59877 +gr_handle_delete(const ino_t ino, const dev_t dev)
59878 +{
59879 + struct inodev_entry *inodev;
59880 +
59881 + if (unlikely(!(gr_status & GR_READY)))
59882 + return;
59883 +
59884 + write_lock(&gr_inode_lock);
59885 + inodev = lookup_inodev_entry(ino, dev);
59886 + if (inodev != NULL)
59887 + do_handle_delete(inodev, ino, dev);
59888 + write_unlock(&gr_inode_lock);
59889 +
59890 + return;
59891 +}
59892 +
59893 +static void
59894 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59895 + const ino_t newinode, const dev_t newdevice,
59896 + struct acl_subject_label *subj)
59897 +{
59898 + unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
59899 + struct acl_object_label *match;
59900 +
59901 + match = subj->obj_hash[index];
59902 +
59903 + while (match && (match->inode != oldinode ||
59904 + match->device != olddevice ||
59905 + !(match->mode & GR_DELETED)))
59906 + match = match->next;
59907 +
59908 + if (match && (match->inode == oldinode)
59909 + && (match->device == olddevice)
59910 + && (match->mode & GR_DELETED)) {
59911 + if (match->prev == NULL) {
59912 + subj->obj_hash[index] = match->next;
59913 + if (match->next != NULL)
59914 + match->next->prev = NULL;
59915 + } else {
59916 + match->prev->next = match->next;
59917 + if (match->next != NULL)
59918 + match->next->prev = match->prev;
59919 + }
59920 + match->prev = NULL;
59921 + match->next = NULL;
59922 + match->inode = newinode;
59923 + match->device = newdevice;
59924 + match->mode &= ~GR_DELETED;
59925 +
59926 + insert_acl_obj_label(match, subj);
59927 + }
59928 +
59929 + return;
59930 +}
59931 +
59932 +static void
59933 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59934 + const ino_t newinode, const dev_t newdevice,
59935 + struct acl_role_label *role)
59936 +{
59937 + unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
59938 + struct acl_subject_label *match;
59939 +
59940 + match = role->subj_hash[index];
59941 +
59942 + while (match && (match->inode != oldinode ||
59943 + match->device != olddevice ||
59944 + !(match->mode & GR_DELETED)))
59945 + match = match->next;
59946 +
59947 + if (match && (match->inode == oldinode)
59948 + && (match->device == olddevice)
59949 + && (match->mode & GR_DELETED)) {
59950 + if (match->prev == NULL) {
59951 + role->subj_hash[index] = match->next;
59952 + if (match->next != NULL)
59953 + match->next->prev = NULL;
59954 + } else {
59955 + match->prev->next = match->next;
59956 + if (match->next != NULL)
59957 + match->next->prev = match->prev;
59958 + }
59959 + match->prev = NULL;
59960 + match->next = NULL;
59961 + match->inode = newinode;
59962 + match->device = newdevice;
59963 + match->mode &= ~GR_DELETED;
59964 +
59965 + insert_acl_subj_label(match, role);
59966 + }
59967 +
59968 + return;
59969 +}
59970 +
59971 +static void
59972 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59973 + const ino_t newinode, const dev_t newdevice)
59974 +{
59975 + unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
59976 + struct inodev_entry *match;
59977 +
59978 + match = inodev_set.i_hash[index];
59979 +
59980 + while (match && (match->nentry->inode != oldinode ||
59981 + match->nentry->device != olddevice || !match->nentry->deleted))
59982 + match = match->next;
59983 +
59984 + if (match && (match->nentry->inode == oldinode)
59985 + && (match->nentry->device == olddevice) &&
59986 + match->nentry->deleted) {
59987 + if (match->prev == NULL) {
59988 + inodev_set.i_hash[index] = match->next;
59989 + if (match->next != NULL)
59990 + match->next->prev = NULL;
59991 + } else {
59992 + match->prev->next = match->next;
59993 + if (match->next != NULL)
59994 + match->next->prev = match->prev;
59995 + }
59996 + match->prev = NULL;
59997 + match->next = NULL;
59998 + match->nentry->inode = newinode;
59999 + match->nentry->device = newdevice;
60000 + match->nentry->deleted = 0;
60001 +
60002 + insert_inodev_entry(match);
60003 + }
60004 +
60005 + return;
60006 +}
60007 +
60008 +static void
60009 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
60010 +{
60011 + struct acl_subject_label *subj;
60012 + struct acl_role_label *role;
60013 + unsigned int x;
60014 +
60015 + FOR_EACH_ROLE_START(role)
60016 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
60017 +
60018 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
60019 + if ((subj->inode == ino) && (subj->device == dev)) {
60020 + subj->inode = ino;
60021 + subj->device = dev;
60022 + }
60023 + /* nested subjects aren't in the role's subj_hash table */
60024 + update_acl_obj_label(matchn->inode, matchn->device,
60025 + ino, dev, subj);
60026 + FOR_EACH_NESTED_SUBJECT_END(subj)
60027 + FOR_EACH_SUBJECT_START(role, subj, x)
60028 + update_acl_obj_label(matchn->inode, matchn->device,
60029 + ino, dev, subj);
60030 + FOR_EACH_SUBJECT_END(subj,x)
60031 + FOR_EACH_ROLE_END(role)
60032 +
60033 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
60034 +
60035 + return;
60036 +}
60037 +
60038 +static void
60039 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
60040 + const struct vfsmount *mnt)
60041 +{
60042 + ino_t ino = dentry->d_inode->i_ino;
60043 + dev_t dev = __get_dev(dentry);
60044 +
60045 + __do_handle_create(matchn, ino, dev);
60046 +
60047 + return;
60048 +}
60049 +
60050 +void
60051 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
60052 +{
60053 + struct name_entry *matchn;
60054 +
60055 + if (unlikely(!(gr_status & GR_READY)))
60056 + return;
60057 +
60058 + preempt_disable();
60059 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
60060 +
60061 + if (unlikely((unsigned long)matchn)) {
60062 + write_lock(&gr_inode_lock);
60063 + do_handle_create(matchn, dentry, mnt);
60064 + write_unlock(&gr_inode_lock);
60065 + }
60066 + preempt_enable();
60067 +
60068 + return;
60069 +}
60070 +
60071 +void
60072 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
60073 +{
60074 + struct name_entry *matchn;
60075 +
60076 + if (unlikely(!(gr_status & GR_READY)))
60077 + return;
60078 +
60079 + preempt_disable();
60080 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
60081 +
60082 + if (unlikely((unsigned long)matchn)) {
60083 + write_lock(&gr_inode_lock);
60084 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
60085 + write_unlock(&gr_inode_lock);
60086 + }
60087 + preempt_enable();
60088 +
60089 + return;
60090 +}
60091 +
60092 +void
60093 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60094 + struct dentry *old_dentry,
60095 + struct dentry *new_dentry,
60096 + struct vfsmount *mnt, const __u8 replace)
60097 +{
60098 + struct name_entry *matchn;
60099 + struct inodev_entry *inodev;
60100 + struct inode *inode = new_dentry->d_inode;
60101 + ino_t old_ino = old_dentry->d_inode->i_ino;
60102 + dev_t old_dev = __get_dev(old_dentry);
60103 +
60104 + /* vfs_rename swaps the name and parent link for old_dentry and
60105 + new_dentry
60106 + at this point, old_dentry has the new name, parent link, and inode
60107 + for the renamed file
60108 + if a file is being replaced by a rename, new_dentry has the inode
60109 + and name for the replaced file
60110 + */
60111 +
60112 + if (unlikely(!(gr_status & GR_READY)))
60113 + return;
60114 +
60115 + preempt_disable();
60116 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
60117 +
60118 + /* we wouldn't have to check d_inode if it weren't for
60119 + NFS silly-renaming
60120 + */
60121 +
60122 + write_lock(&gr_inode_lock);
60123 + if (unlikely(replace && inode)) {
60124 + ino_t new_ino = inode->i_ino;
60125 + dev_t new_dev = __get_dev(new_dentry);
60126 +
60127 + inodev = lookup_inodev_entry(new_ino, new_dev);
60128 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
60129 + do_handle_delete(inodev, new_ino, new_dev);
60130 + }
60131 +
60132 + inodev = lookup_inodev_entry(old_ino, old_dev);
60133 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
60134 + do_handle_delete(inodev, old_ino, old_dev);
60135 +
60136 + if (unlikely((unsigned long)matchn))
60137 + do_handle_create(matchn, old_dentry, mnt);
60138 +
60139 + write_unlock(&gr_inode_lock);
60140 + preempt_enable();
60141 +
60142 + return;
60143 +}
60144 +
60145 +static int
60146 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
60147 + unsigned char **sum)
60148 +{
60149 + struct acl_role_label *r;
60150 + struct role_allowed_ip *ipp;
60151 + struct role_transition *trans;
60152 + unsigned int i;
60153 + int found = 0;
60154 + u32 curr_ip = current->signal->curr_ip;
60155 +
60156 + current->signal->saved_ip = curr_ip;
60157 +
60158 + /* check transition table */
60159 +
60160 + for (trans = current->role->transitions; trans; trans = trans->next) {
60161 + if (!strcmp(rolename, trans->rolename)) {
60162 + found = 1;
60163 + break;
60164 + }
60165 + }
60166 +
60167 + if (!found)
60168 + return 0;
60169 +
60170 + /* handle special roles that do not require authentication
60171 + and check ip */
60172 +
60173 + FOR_EACH_ROLE_START(r)
60174 + if (!strcmp(rolename, r->rolename) &&
60175 + (r->roletype & GR_ROLE_SPECIAL)) {
60176 + found = 0;
60177 + if (r->allowed_ips != NULL) {
60178 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
60179 + if ((ntohl(curr_ip) & ipp->netmask) ==
60180 + (ntohl(ipp->addr) & ipp->netmask))
60181 + found = 1;
60182 + }
60183 + } else
60184 + found = 2;
60185 + if (!found)
60186 + return 0;
60187 +
60188 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
60189 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
60190 + *salt = NULL;
60191 + *sum = NULL;
60192 + return 1;
60193 + }
60194 + }
60195 + FOR_EACH_ROLE_END(r)
60196 +
60197 + for (i = 0; i < num_sprole_pws; i++) {
60198 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
60199 + *salt = acl_special_roles[i]->salt;
60200 + *sum = acl_special_roles[i]->sum;
60201 + return 1;
60202 + }
60203 + }
60204 +
60205 + return 0;
60206 +}
60207 +
60208 +static void
60209 +assign_special_role(char *rolename)
60210 +{
60211 + struct acl_object_label *obj;
60212 + struct acl_role_label *r;
60213 + struct acl_role_label *assigned = NULL;
60214 + struct task_struct *tsk;
60215 + struct file *filp;
60216 +
60217 + FOR_EACH_ROLE_START(r)
60218 + if (!strcmp(rolename, r->rolename) &&
60219 + (r->roletype & GR_ROLE_SPECIAL)) {
60220 + assigned = r;
60221 + break;
60222 + }
60223 + FOR_EACH_ROLE_END(r)
60224 +
60225 + if (!assigned)
60226 + return;
60227 +
60228 + read_lock(&tasklist_lock);
60229 + read_lock(&grsec_exec_file_lock);
60230 +
60231 + tsk = current->real_parent;
60232 + if (tsk == NULL)
60233 + goto out_unlock;
60234 +
60235 + filp = tsk->exec_file;
60236 + if (filp == NULL)
60237 + goto out_unlock;
60238 +
60239 + tsk->is_writable = 0;
60240 +
60241 + tsk->acl_sp_role = 1;
60242 + tsk->acl_role_id = ++acl_sp_role_value;
60243 + tsk->role = assigned;
60244 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
60245 +
60246 + /* ignore additional mmap checks for processes that are writable
60247 + by the default ACL */
60248 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60249 + if (unlikely(obj->mode & GR_WRITE))
60250 + tsk->is_writable = 1;
60251 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
60252 + if (unlikely(obj->mode & GR_WRITE))
60253 + tsk->is_writable = 1;
60254 +
60255 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60256 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
60257 +#endif
60258 +
60259 +out_unlock:
60260 + read_unlock(&grsec_exec_file_lock);
60261 + read_unlock(&tasklist_lock);
60262 + return;
60263 +}
60264 +
60265 +int gr_check_secure_terminal(struct task_struct *task)
60266 +{
60267 + struct task_struct *p, *p2, *p3;
60268 + struct files_struct *files;
60269 + struct fdtable *fdt;
60270 + struct file *our_file = NULL, *file;
60271 + int i;
60272 +
60273 + if (task->signal->tty == NULL)
60274 + return 1;
60275 +
60276 + files = get_files_struct(task);
60277 + if (files != NULL) {
60278 + rcu_read_lock();
60279 + fdt = files_fdtable(files);
60280 + for (i=0; i < fdt->max_fds; i++) {
60281 + file = fcheck_files(files, i);
60282 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
60283 + get_file(file);
60284 + our_file = file;
60285 + }
60286 + }
60287 + rcu_read_unlock();
60288 + put_files_struct(files);
60289 + }
60290 +
60291 + if (our_file == NULL)
60292 + return 1;
60293 +
60294 + read_lock(&tasklist_lock);
60295 + do_each_thread(p2, p) {
60296 + files = get_files_struct(p);
60297 + if (files == NULL ||
60298 + (p->signal && p->signal->tty == task->signal->tty)) {
60299 + if (files != NULL)
60300 + put_files_struct(files);
60301 + continue;
60302 + }
60303 + rcu_read_lock();
60304 + fdt = files_fdtable(files);
60305 + for (i=0; i < fdt->max_fds; i++) {
60306 + file = fcheck_files(files, i);
60307 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
60308 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
60309 + p3 = task;
60310 + while (task_pid_nr(p3) > 0) {
60311 + if (p3 == p)
60312 + break;
60313 + p3 = p3->real_parent;
60314 + }
60315 + if (p3 == p)
60316 + break;
60317 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
60318 + gr_handle_alertkill(p);
60319 + rcu_read_unlock();
60320 + put_files_struct(files);
60321 + read_unlock(&tasklist_lock);
60322 + fput(our_file);
60323 + return 0;
60324 + }
60325 + }
60326 + rcu_read_unlock();
60327 + put_files_struct(files);
60328 + } while_each_thread(p2, p);
60329 + read_unlock(&tasklist_lock);
60330 +
60331 + fput(our_file);
60332 + return 1;
60333 +}
60334 +
60335 +static int gr_rbac_disable(void *unused)
60336 +{
60337 + pax_open_kernel();
60338 + gr_status &= ~GR_READY;
60339 + pax_close_kernel();
60340 +
60341 + return 0;
60342 +}
60343 +
60344 +ssize_t
60345 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
60346 +{
60347 + struct gr_arg_wrapper uwrap;
60348 + unsigned char *sprole_salt = NULL;
60349 + unsigned char *sprole_sum = NULL;
60350 + int error = sizeof (struct gr_arg_wrapper);
60351 + int error2 = 0;
60352 +
60353 + mutex_lock(&gr_dev_mutex);
60354 +
60355 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
60356 + error = -EPERM;
60357 + goto out;
60358 + }
60359 +
60360 + if (count != sizeof (struct gr_arg_wrapper)) {
60361 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
60362 + error = -EINVAL;
60363 + goto out;
60364 + }
60365 +
60366 +
60367 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
60368 + gr_auth_expires = 0;
60369 + gr_auth_attempts = 0;
60370 + }
60371 +
60372 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
60373 + error = -EFAULT;
60374 + goto out;
60375 + }
60376 +
60377 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
60378 + error = -EINVAL;
60379 + goto out;
60380 + }
60381 +
60382 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
60383 + error = -EFAULT;
60384 + goto out;
60385 + }
60386 +
60387 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60388 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60389 + time_after(gr_auth_expires, get_seconds())) {
60390 + error = -EBUSY;
60391 + goto out;
60392 + }
60393 +
60394 + /* if non-root trying to do anything other than use a special role,
60395 + do not attempt authentication, do not count towards authentication
60396 + locking
60397 + */
60398 +
60399 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
60400 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60401 + gr_is_global_nonroot(current_uid())) {
60402 + error = -EPERM;
60403 + goto out;
60404 + }
60405 +
60406 + /* ensure pw and special role name are null terminated */
60407 +
60408 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
60409 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
60410 +
60411 + /* Okay.
60412 + * We have our enough of the argument structure..(we have yet
60413 + * to copy_from_user the tables themselves) . Copy the tables
60414 + * only if we need them, i.e. for loading operations. */
60415 +
60416 + switch (gr_usermode->mode) {
60417 + case GR_STATUS:
60418 + if (gr_status & GR_READY) {
60419 + error = 1;
60420 + if (!gr_check_secure_terminal(current))
60421 + error = 3;
60422 + } else
60423 + error = 2;
60424 + goto out;
60425 + case GR_SHUTDOWN:
60426 + if ((gr_status & GR_READY)
60427 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60428 + stop_machine(gr_rbac_disable, NULL, NULL);
60429 + free_variables();
60430 + memset(gr_usermode, 0, sizeof (struct gr_arg));
60431 + memset(gr_system_salt, 0, GR_SALT_LEN);
60432 + memset(gr_system_sum, 0, GR_SHA_LEN);
60433 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
60434 + } else if (gr_status & GR_READY) {
60435 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
60436 + error = -EPERM;
60437 + } else {
60438 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
60439 + error = -EAGAIN;
60440 + }
60441 + break;
60442 + case GR_ENABLE:
60443 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
60444 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
60445 + else {
60446 + if (gr_status & GR_READY)
60447 + error = -EAGAIN;
60448 + else
60449 + error = error2;
60450 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
60451 + }
60452 + break;
60453 + case GR_RELOAD:
60454 + if (!(gr_status & GR_READY)) {
60455 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
60456 + error = -EAGAIN;
60457 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60458 + stop_machine(gr_rbac_disable, NULL, NULL);
60459 + free_variables();
60460 + error2 = gracl_init(gr_usermode);
60461 + if (!error2)
60462 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
60463 + else {
60464 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60465 + error = error2;
60466 + }
60467 + } else {
60468 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60469 + error = -EPERM;
60470 + }
60471 + break;
60472 + case GR_SEGVMOD:
60473 + if (unlikely(!(gr_status & GR_READY))) {
60474 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
60475 + error = -EAGAIN;
60476 + break;
60477 + }
60478 +
60479 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60480 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
60481 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
60482 + struct acl_subject_label *segvacl;
60483 + segvacl =
60484 + lookup_acl_subj_label(gr_usermode->segv_inode,
60485 + gr_usermode->segv_device,
60486 + current->role);
60487 + if (segvacl) {
60488 + segvacl->crashes = 0;
60489 + segvacl->expires = 0;
60490 + }
60491 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
60492 + gr_remove_uid(gr_usermode->segv_uid);
60493 + }
60494 + } else {
60495 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
60496 + error = -EPERM;
60497 + }
60498 + break;
60499 + case GR_SPROLE:
60500 + case GR_SPROLEPAM:
60501 + if (unlikely(!(gr_status & GR_READY))) {
60502 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
60503 + error = -EAGAIN;
60504 + break;
60505 + }
60506 +
60507 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
60508 + current->role->expires = 0;
60509 + current->role->auth_attempts = 0;
60510 + }
60511 +
60512 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60513 + time_after(current->role->expires, get_seconds())) {
60514 + error = -EBUSY;
60515 + goto out;
60516 + }
60517 +
60518 + if (lookup_special_role_auth
60519 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
60520 + && ((!sprole_salt && !sprole_sum)
60521 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
60522 + char *p = "";
60523 + assign_special_role(gr_usermode->sp_role);
60524 + read_lock(&tasklist_lock);
60525 + if (current->real_parent)
60526 + p = current->real_parent->role->rolename;
60527 + read_unlock(&tasklist_lock);
60528 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
60529 + p, acl_sp_role_value);
60530 + } else {
60531 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
60532 + error = -EPERM;
60533 + if(!(current->role->auth_attempts++))
60534 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60535 +
60536 + goto out;
60537 + }
60538 + break;
60539 + case GR_UNSPROLE:
60540 + if (unlikely(!(gr_status & GR_READY))) {
60541 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
60542 + error = -EAGAIN;
60543 + break;
60544 + }
60545 +
60546 + if (current->role->roletype & GR_ROLE_SPECIAL) {
60547 + char *p = "";
60548 + int i = 0;
60549 +
60550 + read_lock(&tasklist_lock);
60551 + if (current->real_parent) {
60552 + p = current->real_parent->role->rolename;
60553 + i = current->real_parent->acl_role_id;
60554 + }
60555 + read_unlock(&tasklist_lock);
60556 +
60557 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
60558 + gr_set_acls(1);
60559 + } else {
60560 + error = -EPERM;
60561 + goto out;
60562 + }
60563 + break;
60564 + default:
60565 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
60566 + error = -EINVAL;
60567 + break;
60568 + }
60569 +
60570 + if (error != -EPERM)
60571 + goto out;
60572 +
60573 + if(!(gr_auth_attempts++))
60574 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60575 +
60576 + out:
60577 + mutex_unlock(&gr_dev_mutex);
60578 + return error;
60579 +}
60580 +
60581 +/* must be called with
60582 + rcu_read_lock();
60583 + read_lock(&tasklist_lock);
60584 + read_lock(&grsec_exec_file_lock);
60585 +*/
60586 +int gr_apply_subject_to_task(struct task_struct *task)
60587 +{
60588 + struct acl_object_label *obj;
60589 + char *tmpname;
60590 + struct acl_subject_label *tmpsubj;
60591 + struct file *filp;
60592 + struct name_entry *nmatch;
60593 +
60594 + filp = task->exec_file;
60595 + if (filp == NULL)
60596 + return 0;
60597 +
60598 + /* the following is to apply the correct subject
60599 + on binaries running when the RBAC system
60600 + is enabled, when the binaries have been
60601 + replaced or deleted since their execution
60602 + -----
60603 + when the RBAC system starts, the inode/dev
60604 + from exec_file will be one the RBAC system
60605 + is unaware of. It only knows the inode/dev
60606 + of the present file on disk, or the absence
60607 + of it.
60608 + */
60609 + preempt_disable();
60610 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
60611 +
60612 + nmatch = lookup_name_entry(tmpname);
60613 + preempt_enable();
60614 + tmpsubj = NULL;
60615 + if (nmatch) {
60616 + if (nmatch->deleted)
60617 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
60618 + else
60619 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
60620 + if (tmpsubj != NULL)
60621 + task->acl = tmpsubj;
60622 + }
60623 + if (tmpsubj == NULL)
60624 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
60625 + task->role);
60626 + if (task->acl) {
60627 + task->is_writable = 0;
60628 + /* ignore additional mmap checks for processes that are writable
60629 + by the default ACL */
60630 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60631 + if (unlikely(obj->mode & GR_WRITE))
60632 + task->is_writable = 1;
60633 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60634 + if (unlikely(obj->mode & GR_WRITE))
60635 + task->is_writable = 1;
60636 +
60637 + gr_set_proc_res(task);
60638 +
60639 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60640 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60641 +#endif
60642 + } else {
60643 + return 1;
60644 + }
60645 +
60646 + return 0;
60647 +}
60648 +
60649 +int
60650 +gr_set_acls(const int type)
60651 +{
60652 + struct task_struct *task, *task2;
60653 + struct acl_role_label *role = current->role;
60654 + __u16 acl_role_id = current->acl_role_id;
60655 + const struct cred *cred;
60656 + int ret;
60657 +
60658 + rcu_read_lock();
60659 + read_lock(&tasklist_lock);
60660 + read_lock(&grsec_exec_file_lock);
60661 + do_each_thread(task2, task) {
60662 + /* check to see if we're called from the exit handler,
60663 + if so, only replace ACLs that have inherited the admin
60664 + ACL */
60665 +
60666 + if (type && (task->role != role ||
60667 + task->acl_role_id != acl_role_id))
60668 + continue;
60669 +
60670 + task->acl_role_id = 0;
60671 + task->acl_sp_role = 0;
60672 +
60673 + if (task->exec_file) {
60674 + cred = __task_cred(task);
60675 + task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
60676 + ret = gr_apply_subject_to_task(task);
60677 + if (ret) {
60678 + read_unlock(&grsec_exec_file_lock);
60679 + read_unlock(&tasklist_lock);
60680 + rcu_read_unlock();
60681 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
60682 + return ret;
60683 + }
60684 + } else {
60685 + // it's a kernel process
60686 + task->role = kernel_role;
60687 + task->acl = kernel_role->root_label;
60688 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60689 + task->acl->mode &= ~GR_PROCFIND;
60690 +#endif
60691 + }
60692 + } while_each_thread(task2, task);
60693 + read_unlock(&grsec_exec_file_lock);
60694 + read_unlock(&tasklist_lock);
60695 + rcu_read_unlock();
60696 +
60697 + return 0;
60698 +}
60699 +
60700 +#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
60701 +static const unsigned long res_learn_bumps[GR_NLIMITS] = {
60702 + [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
60703 + [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
60704 + [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
60705 + [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
60706 + [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
60707 + [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
60708 + [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
60709 + [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
60710 + [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
60711 + [RLIMIT_AS] = GR_RLIM_AS_BUMP,
60712 + [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
60713 + [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
60714 + [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
60715 + [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
60716 + [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
60717 + [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
60718 +};
60719 +
60720 +void
60721 +gr_learn_resource(const struct task_struct *task,
60722 + const int res, const unsigned long wanted, const int gt)
60723 +{
60724 + struct acl_subject_label *acl;
60725 + const struct cred *cred;
60726 +
60727 + if (unlikely((gr_status & GR_READY) &&
60728 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60729 + goto skip_reslog;
60730 +
60731 + gr_log_resource(task, res, wanted, gt);
60732 +skip_reslog:
60733 +
60734 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60735 + return;
60736 +
60737 + acl = task->acl;
60738 +
60739 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60740 + !(acl->resmask & (1U << (unsigned short) res))))
60741 + return;
60742 +
60743 + if (wanted >= acl->res[res].rlim_cur) {
60744 + unsigned long res_add;
60745 +
60746 + res_add = wanted + res_learn_bumps[res];
60747 +
60748 + acl->res[res].rlim_cur = res_add;
60749 +
60750 + if (wanted > acl->res[res].rlim_max)
60751 + acl->res[res].rlim_max = res_add;
60752 +
60753 + /* only log the subject filename, since resource logging is supported for
60754 + single-subject learning only */
60755 + rcu_read_lock();
60756 + cred = __task_cred(task);
60757 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60758 + task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
60759 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60760 + "", (unsigned long) res, &task->signal->saved_ip);
60761 + rcu_read_unlock();
60762 + }
60763 +
60764 + return;
60765 +}
60766 +EXPORT_SYMBOL(gr_learn_resource);
60767 +#endif
60768 +
60769 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60770 +void
60771 +pax_set_initial_flags(struct linux_binprm *bprm)
60772 +{
60773 + struct task_struct *task = current;
60774 + struct acl_subject_label *proc;
60775 + unsigned long flags;
60776 +
60777 + if (unlikely(!(gr_status & GR_READY)))
60778 + return;
60779 +
60780 + flags = pax_get_flags(task);
60781 +
60782 + proc = task->acl;
60783 +
60784 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60785 + flags &= ~MF_PAX_PAGEEXEC;
60786 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60787 + flags &= ~MF_PAX_SEGMEXEC;
60788 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60789 + flags &= ~MF_PAX_RANDMMAP;
60790 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60791 + flags &= ~MF_PAX_EMUTRAMP;
60792 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60793 + flags &= ~MF_PAX_MPROTECT;
60794 +
60795 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60796 + flags |= MF_PAX_PAGEEXEC;
60797 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60798 + flags |= MF_PAX_SEGMEXEC;
60799 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60800 + flags |= MF_PAX_RANDMMAP;
60801 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60802 + flags |= MF_PAX_EMUTRAMP;
60803 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60804 + flags |= MF_PAX_MPROTECT;
60805 +
60806 + pax_set_flags(task, flags);
60807 +
60808 + return;
60809 +}
60810 +#endif
60811 +
60812 +int
60813 +gr_handle_proc_ptrace(struct task_struct *task)
60814 +{
60815 + struct file *filp;
60816 + struct task_struct *tmp = task;
60817 + struct task_struct *curtemp = current;
60818 + __u32 retmode;
60819 +
60820 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60821 + if (unlikely(!(gr_status & GR_READY)))
60822 + return 0;
60823 +#endif
60824 +
60825 + read_lock(&tasklist_lock);
60826 + read_lock(&grsec_exec_file_lock);
60827 + filp = task->exec_file;
60828 +
60829 + while (task_pid_nr(tmp) > 0) {
60830 + if (tmp == curtemp)
60831 + break;
60832 + tmp = tmp->real_parent;
60833 + }
60834 +
60835 + if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
60836 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60837 + read_unlock(&grsec_exec_file_lock);
60838 + read_unlock(&tasklist_lock);
60839 + return 1;
60840 + }
60841 +
60842 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60843 + if (!(gr_status & GR_READY)) {
60844 + read_unlock(&grsec_exec_file_lock);
60845 + read_unlock(&tasklist_lock);
60846 + return 0;
60847 + }
60848 +#endif
60849 +
60850 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60851 + read_unlock(&grsec_exec_file_lock);
60852 + read_unlock(&tasklist_lock);
60853 +
60854 + if (retmode & GR_NOPTRACE)
60855 + return 1;
60856 +
60857 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60858 + && (current->acl != task->acl || (current->acl != current->role->root_label
60859 + && task_pid_nr(current) != task_pid_nr(task))))
60860 + return 1;
60861 +
60862 + return 0;
60863 +}
60864 +
60865 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60866 +{
60867 + if (unlikely(!(gr_status & GR_READY)))
60868 + return;
60869 +
60870 + if (!(current->role->roletype & GR_ROLE_GOD))
60871 + return;
60872 +
60873 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60874 + p->role->rolename, gr_task_roletype_to_char(p),
60875 + p->acl->filename);
60876 +}
60877 +
60878 +int
60879 +gr_handle_ptrace(struct task_struct *task, const long request)
60880 +{
60881 + struct task_struct *tmp = task;
60882 + struct task_struct *curtemp = current;
60883 + __u32 retmode;
60884 +
60885 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60886 + if (unlikely(!(gr_status & GR_READY)))
60887 + return 0;
60888 +#endif
60889 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
60890 + read_lock(&tasklist_lock);
60891 + while (task_pid_nr(tmp) > 0) {
60892 + if (tmp == curtemp)
60893 + break;
60894 + tmp = tmp->real_parent;
60895 + }
60896 +
60897 + if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
60898 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60899 + read_unlock(&tasklist_lock);
60900 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60901 + return 1;
60902 + }
60903 + read_unlock(&tasklist_lock);
60904 + }
60905 +
60906 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60907 + if (!(gr_status & GR_READY))
60908 + return 0;
60909 +#endif
60910 +
60911 + read_lock(&grsec_exec_file_lock);
60912 + if (unlikely(!task->exec_file)) {
60913 + read_unlock(&grsec_exec_file_lock);
60914 + return 0;
60915 + }
60916 +
60917 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60918 + read_unlock(&grsec_exec_file_lock);
60919 +
60920 + if (retmode & GR_NOPTRACE) {
60921 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60922 + return 1;
60923 + }
60924 +
60925 + if (retmode & GR_PTRACERD) {
60926 + switch (request) {
60927 + case PTRACE_SEIZE:
60928 + case PTRACE_POKETEXT:
60929 + case PTRACE_POKEDATA:
60930 + case PTRACE_POKEUSR:
60931 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60932 + case PTRACE_SETREGS:
60933 + case PTRACE_SETFPREGS:
60934 +#endif
60935 +#ifdef CONFIG_X86
60936 + case PTRACE_SETFPXREGS:
60937 +#endif
60938 +#ifdef CONFIG_ALTIVEC
60939 + case PTRACE_SETVRREGS:
60940 +#endif
60941 + return 1;
60942 + default:
60943 + return 0;
60944 + }
60945 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
60946 + !(current->role->roletype & GR_ROLE_GOD) &&
60947 + (current->acl != task->acl)) {
60948 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60949 + return 1;
60950 + }
60951 +
60952 + return 0;
60953 +}
60954 +
60955 +static int is_writable_mmap(const struct file *filp)
60956 +{
60957 + struct task_struct *task = current;
60958 + struct acl_object_label *obj, *obj2;
60959 +
60960 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60961 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60962 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60963 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60964 + task->role->root_label);
60965 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60966 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60967 + return 1;
60968 + }
60969 + }
60970 + return 0;
60971 +}
60972 +
60973 +int
60974 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60975 +{
60976 + __u32 mode;
60977 +
60978 + if (unlikely(!file || !(prot & PROT_EXEC)))
60979 + return 1;
60980 +
60981 + if (is_writable_mmap(file))
60982 + return 0;
60983 +
60984 + mode =
60985 + gr_search_file(file->f_path.dentry,
60986 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60987 + file->f_path.mnt);
60988 +
60989 + if (!gr_tpe_allow(file))
60990 + return 0;
60991 +
60992 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60993 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60994 + return 0;
60995 + } else if (unlikely(!(mode & GR_EXEC))) {
60996 + return 0;
60997 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60998 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60999 + return 1;
61000 + }
61001 +
61002 + return 1;
61003 +}
61004 +
61005 +int
61006 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61007 +{
61008 + __u32 mode;
61009 +
61010 + if (unlikely(!file || !(prot & PROT_EXEC)))
61011 + return 1;
61012 +
61013 + if (is_writable_mmap(file))
61014 + return 0;
61015 +
61016 + mode =
61017 + gr_search_file(file->f_path.dentry,
61018 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61019 + file->f_path.mnt);
61020 +
61021 + if (!gr_tpe_allow(file))
61022 + return 0;
61023 +
61024 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61025 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61026 + return 0;
61027 + } else if (unlikely(!(mode & GR_EXEC))) {
61028 + return 0;
61029 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61030 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61031 + return 1;
61032 + }
61033 +
61034 + return 1;
61035 +}
61036 +
61037 +void
61038 +gr_acl_handle_psacct(struct task_struct *task, const long code)
61039 +{
61040 + unsigned long runtime;
61041 + unsigned long cputime;
61042 + unsigned int wday, cday;
61043 + __u8 whr, chr;
61044 + __u8 wmin, cmin;
61045 + __u8 wsec, csec;
61046 + struct timespec timeval;
61047 +
61048 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
61049 + !(task->acl->mode & GR_PROCACCT)))
61050 + return;
61051 +
61052 + do_posix_clock_monotonic_gettime(&timeval);
61053 + runtime = timeval.tv_sec - task->start_time.tv_sec;
61054 + wday = runtime / (3600 * 24);
61055 + runtime -= wday * (3600 * 24);
61056 + whr = runtime / 3600;
61057 + runtime -= whr * 3600;
61058 + wmin = runtime / 60;
61059 + runtime -= wmin * 60;
61060 + wsec = runtime;
61061 +
61062 + cputime = (task->utime + task->stime) / HZ;
61063 + cday = cputime / (3600 * 24);
61064 + cputime -= cday * (3600 * 24);
61065 + chr = cputime / 3600;
61066 + cputime -= chr * 3600;
61067 + cmin = cputime / 60;
61068 + cputime -= cmin * 60;
61069 + csec = cputime;
61070 +
61071 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
61072 +
61073 + return;
61074 +}
61075 +
61076 +void gr_set_kernel_label(struct task_struct *task)
61077 +{
61078 + if (gr_status & GR_READY) {
61079 + task->role = kernel_role;
61080 + task->acl = kernel_role->root_label;
61081 + }
61082 + return;
61083 +}
61084 +
61085 +#ifdef CONFIG_TASKSTATS
61086 +int gr_is_taskstats_denied(int pid)
61087 +{
61088 + struct task_struct *task;
61089 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61090 + const struct cred *cred;
61091 +#endif
61092 + int ret = 0;
61093 +
61094 + /* restrict taskstats viewing to un-chrooted root users
61095 + who have the 'view' subject flag if the RBAC system is enabled
61096 + */
61097 +
61098 + rcu_read_lock();
61099 + read_lock(&tasklist_lock);
61100 + task = find_task_by_vpid(pid);
61101 + if (task) {
61102 +#ifdef CONFIG_GRKERNSEC_CHROOT
61103 + if (proc_is_chrooted(task))
61104 + ret = -EACCES;
61105 +#endif
61106 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61107 + cred = __task_cred(task);
61108 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61109 + if (gr_is_global_nonroot(cred->uid))
61110 + ret = -EACCES;
61111 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61112 + if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
61113 + ret = -EACCES;
61114 +#endif
61115 +#endif
61116 + if (gr_status & GR_READY) {
61117 + if (!(task->acl->mode & GR_VIEW))
61118 + ret = -EACCES;
61119 + }
61120 + } else
61121 + ret = -ENOENT;
61122 +
61123 + read_unlock(&tasklist_lock);
61124 + rcu_read_unlock();
61125 +
61126 + return ret;
61127 +}
61128 +#endif
61129 +
61130 +/* AUXV entries are filled via a descendant of search_binary_handler
61131 + after we've already applied the subject for the target
61132 +*/
61133 +int gr_acl_enable_at_secure(void)
61134 +{
61135 + if (unlikely(!(gr_status & GR_READY)))
61136 + return 0;
61137 +
61138 + if (current->acl->mode & GR_ATSECURE)
61139 + return 1;
61140 +
61141 + return 0;
61142 +}
61143 +
61144 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
61145 +{
61146 + struct task_struct *task = current;
61147 + struct dentry *dentry = file->f_path.dentry;
61148 + struct vfsmount *mnt = file->f_path.mnt;
61149 + struct acl_object_label *obj, *tmp;
61150 + struct acl_subject_label *subj;
61151 + unsigned int bufsize;
61152 + int is_not_root;
61153 + char *path;
61154 + dev_t dev = __get_dev(dentry);
61155 +
61156 + if (unlikely(!(gr_status & GR_READY)))
61157 + return 1;
61158 +
61159 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61160 + return 1;
61161 +
61162 + /* ignore Eric Biederman */
61163 + if (IS_PRIVATE(dentry->d_inode))
61164 + return 1;
61165 +
61166 + subj = task->acl;
61167 + read_lock(&gr_inode_lock);
61168 + do {
61169 + obj = lookup_acl_obj_label(ino, dev, subj);
61170 + if (obj != NULL) {
61171 + read_unlock(&gr_inode_lock);
61172 + return (obj->mode & GR_FIND) ? 1 : 0;
61173 + }
61174 + } while ((subj = subj->parent_subject));
61175 + read_unlock(&gr_inode_lock);
61176 +
61177 + /* this is purely an optimization since we're looking for an object
61178 + for the directory we're doing a readdir on
61179 + if it's possible for any globbed object to match the entry we're
61180 + filling into the directory, then the object we find here will be
61181 + an anchor point with attached globbed objects
61182 + */
61183 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
61184 + if (obj->globbed == NULL)
61185 + return (obj->mode & GR_FIND) ? 1 : 0;
61186 +
61187 + is_not_root = ((obj->filename[0] == '/') &&
61188 + (obj->filename[1] == '\0')) ? 0 : 1;
61189 + bufsize = PAGE_SIZE - namelen - is_not_root;
61190 +
61191 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
61192 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
61193 + return 1;
61194 +
61195 + preempt_disable();
61196 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61197 + bufsize);
61198 +
61199 + bufsize = strlen(path);
61200 +
61201 + /* if base is "/", don't append an additional slash */
61202 + if (is_not_root)
61203 + *(path + bufsize) = '/';
61204 + memcpy(path + bufsize + is_not_root, name, namelen);
61205 + *(path + bufsize + namelen + is_not_root) = '\0';
61206 +
61207 + tmp = obj->globbed;
61208 + while (tmp) {
61209 + if (!glob_match(tmp->filename, path)) {
61210 + preempt_enable();
61211 + return (tmp->mode & GR_FIND) ? 1 : 0;
61212 + }
61213 + tmp = tmp->next;
61214 + }
61215 + preempt_enable();
61216 + return (obj->mode & GR_FIND) ? 1 : 0;
61217 +}
61218 +
61219 +void gr_put_exec_file(struct task_struct *task)
61220 +{
61221 + struct file *filp;
61222 +
61223 + write_lock(&grsec_exec_file_lock);
61224 + filp = task->exec_file;
61225 + task->exec_file = NULL;
61226 + write_unlock(&grsec_exec_file_lock);
61227 +
61228 + if (filp)
61229 + fput(filp);
61230 +
61231 + return;
61232 +}
61233 +
61234 +
61235 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
61236 +EXPORT_SYMBOL(gr_acl_is_enabled);
61237 +#endif
61238 +EXPORT_SYMBOL(gr_set_kernel_label);
61239 +#ifdef CONFIG_SECURITY
61240 +EXPORT_SYMBOL(gr_check_user_change);
61241 +EXPORT_SYMBOL(gr_check_group_change);
61242 +#endif
61243 +
61244 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
61245 new file mode 100644
61246 index 0000000..34fefda
61247 --- /dev/null
61248 +++ b/grsecurity/gracl_alloc.c
61249 @@ -0,0 +1,105 @@
61250 +#include <linux/kernel.h>
61251 +#include <linux/mm.h>
61252 +#include <linux/slab.h>
61253 +#include <linux/vmalloc.h>
61254 +#include <linux/gracl.h>
61255 +#include <linux/grsecurity.h>
61256 +
61257 +static unsigned long alloc_stack_next = 1;
61258 +static unsigned long alloc_stack_size = 1;
61259 +static void **alloc_stack;
61260 +
61261 +static __inline__ int
61262 +alloc_pop(void)
61263 +{
61264 + if (alloc_stack_next == 1)
61265 + return 0;
61266 +
61267 + kfree(alloc_stack[alloc_stack_next - 2]);
61268 +
61269 + alloc_stack_next--;
61270 +
61271 + return 1;
61272 +}
61273 +
61274 +static __inline__ int
61275 +alloc_push(void *buf)
61276 +{
61277 + if (alloc_stack_next >= alloc_stack_size)
61278 + return 1;
61279 +
61280 + alloc_stack[alloc_stack_next - 1] = buf;
61281 +
61282 + alloc_stack_next++;
61283 +
61284 + return 0;
61285 +}
61286 +
61287 +void *
61288 +acl_alloc(unsigned long len)
61289 +{
61290 + void *ret = NULL;
61291 +
61292 + if (!len || len > PAGE_SIZE)
61293 + goto out;
61294 +
61295 + ret = kmalloc(len, GFP_KERNEL);
61296 +
61297 + if (ret) {
61298 + if (alloc_push(ret)) {
61299 + kfree(ret);
61300 + ret = NULL;
61301 + }
61302 + }
61303 +
61304 +out:
61305 + return ret;
61306 +}
61307 +
61308 +void *
61309 +acl_alloc_num(unsigned long num, unsigned long len)
61310 +{
61311 + if (!len || (num > (PAGE_SIZE / len)))
61312 + return NULL;
61313 +
61314 + return acl_alloc(num * len);
61315 +}
61316 +
61317 +void
61318 +acl_free_all(void)
61319 +{
61320 + if (gr_acl_is_enabled() || !alloc_stack)
61321 + return;
61322 +
61323 + while (alloc_pop()) ;
61324 +
61325 + if (alloc_stack) {
61326 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
61327 + kfree(alloc_stack);
61328 + else
61329 + vfree(alloc_stack);
61330 + }
61331 +
61332 + alloc_stack = NULL;
61333 + alloc_stack_size = 1;
61334 + alloc_stack_next = 1;
61335 +
61336 + return;
61337 +}
61338 +
61339 +int
61340 +acl_alloc_stack_init(unsigned long size)
61341 +{
61342 + if ((size * sizeof (void *)) <= PAGE_SIZE)
61343 + alloc_stack =
61344 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
61345 + else
61346 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
61347 +
61348 + alloc_stack_size = size;
61349 +
61350 + if (!alloc_stack)
61351 + return 0;
61352 + else
61353 + return 1;
61354 +}
61355 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
61356 new file mode 100644
61357 index 0000000..bdd51ea
61358 --- /dev/null
61359 +++ b/grsecurity/gracl_cap.c
61360 @@ -0,0 +1,110 @@
61361 +#include <linux/kernel.h>
61362 +#include <linux/module.h>
61363 +#include <linux/sched.h>
61364 +#include <linux/gracl.h>
61365 +#include <linux/grsecurity.h>
61366 +#include <linux/grinternal.h>
61367 +
61368 +extern const char *captab_log[];
61369 +extern int captab_log_entries;
61370 +
61371 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
61372 +{
61373 + struct acl_subject_label *curracl;
61374 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61375 + kernel_cap_t cap_audit = __cap_empty_set;
61376 +
61377 + if (!gr_acl_is_enabled())
61378 + return 1;
61379 +
61380 + curracl = task->acl;
61381 +
61382 + cap_drop = curracl->cap_lower;
61383 + cap_mask = curracl->cap_mask;
61384 + cap_audit = curracl->cap_invert_audit;
61385 +
61386 + while ((curracl = curracl->parent_subject)) {
61387 + /* if the cap isn't specified in the current computed mask but is specified in the
61388 + current level subject, and is lowered in the current level subject, then add
61389 + it to the set of dropped capabilities
61390 + otherwise, add the current level subject's mask to the current computed mask
61391 + */
61392 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61393 + cap_raise(cap_mask, cap);
61394 + if (cap_raised(curracl->cap_lower, cap))
61395 + cap_raise(cap_drop, cap);
61396 + if (cap_raised(curracl->cap_invert_audit, cap))
61397 + cap_raise(cap_audit, cap);
61398 + }
61399 + }
61400 +
61401 + if (!cap_raised(cap_drop, cap)) {
61402 + if (cap_raised(cap_audit, cap))
61403 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
61404 + return 1;
61405 + }
61406 +
61407 + curracl = task->acl;
61408 +
61409 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
61410 + && cap_raised(cred->cap_effective, cap)) {
61411 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61412 + task->role->roletype, GR_GLOBAL_UID(cred->uid),
61413 + GR_GLOBAL_GID(cred->gid), task->exec_file ?
61414 + gr_to_filename(task->exec_file->f_path.dentry,
61415 + task->exec_file->f_path.mnt) : curracl->filename,
61416 + curracl->filename, 0UL,
61417 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
61418 + return 1;
61419 + }
61420 +
61421 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
61422 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
61423 +
61424 + return 0;
61425 +}
61426 +
61427 +int
61428 +gr_acl_is_capable(const int cap)
61429 +{
61430 + return gr_task_acl_is_capable(current, current_cred(), cap);
61431 +}
61432 +
61433 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
61434 +{
61435 + struct acl_subject_label *curracl;
61436 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61437 +
61438 + if (!gr_acl_is_enabled())
61439 + return 1;
61440 +
61441 + curracl = task->acl;
61442 +
61443 + cap_drop = curracl->cap_lower;
61444 + cap_mask = curracl->cap_mask;
61445 +
61446 + while ((curracl = curracl->parent_subject)) {
61447 + /* if the cap isn't specified in the current computed mask but is specified in the
61448 + current level subject, and is lowered in the current level subject, then add
61449 + it to the set of dropped capabilities
61450 + otherwise, add the current level subject's mask to the current computed mask
61451 + */
61452 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61453 + cap_raise(cap_mask, cap);
61454 + if (cap_raised(curracl->cap_lower, cap))
61455 + cap_raise(cap_drop, cap);
61456 + }
61457 + }
61458 +
61459 + if (!cap_raised(cap_drop, cap))
61460 + return 1;
61461 +
61462 + return 0;
61463 +}
61464 +
61465 +int
61466 +gr_acl_is_capable_nolog(const int cap)
61467 +{
61468 + return gr_task_acl_is_capable_nolog(current, cap);
61469 +}
61470 +
61471 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
61472 new file mode 100644
61473 index 0000000..a340c17
61474 --- /dev/null
61475 +++ b/grsecurity/gracl_fs.c
61476 @@ -0,0 +1,431 @@
61477 +#include <linux/kernel.h>
61478 +#include <linux/sched.h>
61479 +#include <linux/types.h>
61480 +#include <linux/fs.h>
61481 +#include <linux/file.h>
61482 +#include <linux/stat.h>
61483 +#include <linux/grsecurity.h>
61484 +#include <linux/grinternal.h>
61485 +#include <linux/gracl.h>
61486 +
61487 +umode_t
61488 +gr_acl_umask(void)
61489 +{
61490 + if (unlikely(!gr_acl_is_enabled()))
61491 + return 0;
61492 +
61493 + return current->role->umask;
61494 +}
61495 +
61496 +__u32
61497 +gr_acl_handle_hidden_file(const struct dentry * dentry,
61498 + const struct vfsmount * mnt)
61499 +{
61500 + __u32 mode;
61501 +
61502 + if (unlikely(!dentry->d_inode))
61503 + return GR_FIND;
61504 +
61505 + mode =
61506 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61507 +
61508 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61509 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61510 + return mode;
61511 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61512 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61513 + return 0;
61514 + } else if (unlikely(!(mode & GR_FIND)))
61515 + return 0;
61516 +
61517 + return GR_FIND;
61518 +}
61519 +
61520 +__u32
61521 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61522 + int acc_mode)
61523 +{
61524 + __u32 reqmode = GR_FIND;
61525 + __u32 mode;
61526 +
61527 + if (unlikely(!dentry->d_inode))
61528 + return reqmode;
61529 +
61530 + if (acc_mode & MAY_APPEND)
61531 + reqmode |= GR_APPEND;
61532 + else if (acc_mode & MAY_WRITE)
61533 + reqmode |= GR_WRITE;
61534 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61535 + reqmode |= GR_READ;
61536 +
61537 + mode =
61538 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61539 + mnt);
61540 +
61541 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61542 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61543 + reqmode & GR_READ ? " reading" : "",
61544 + reqmode & GR_WRITE ? " writing" : reqmode &
61545 + GR_APPEND ? " appending" : "");
61546 + return reqmode;
61547 + } else
61548 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61549 + {
61550 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61551 + reqmode & GR_READ ? " reading" : "",
61552 + reqmode & GR_WRITE ? " writing" : reqmode &
61553 + GR_APPEND ? " appending" : "");
61554 + return 0;
61555 + } else if (unlikely((mode & reqmode) != reqmode))
61556 + return 0;
61557 +
61558 + return reqmode;
61559 +}
61560 +
61561 +__u32
61562 +gr_acl_handle_creat(const struct dentry * dentry,
61563 + const struct dentry * p_dentry,
61564 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61565 + const int imode)
61566 +{
61567 + __u32 reqmode = GR_WRITE | GR_CREATE;
61568 + __u32 mode;
61569 +
61570 + if (acc_mode & MAY_APPEND)
61571 + reqmode |= GR_APPEND;
61572 + // if a directory was required or the directory already exists, then
61573 + // don't count this open as a read
61574 + if ((acc_mode & MAY_READ) &&
61575 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61576 + reqmode |= GR_READ;
61577 + if ((open_flags & O_CREAT) &&
61578 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61579 + reqmode |= GR_SETID;
61580 +
61581 + mode =
61582 + gr_check_create(dentry, p_dentry, p_mnt,
61583 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61584 +
61585 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61586 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61587 + reqmode & GR_READ ? " reading" : "",
61588 + reqmode & GR_WRITE ? " writing" : reqmode &
61589 + GR_APPEND ? " appending" : "");
61590 + return reqmode;
61591 + } else
61592 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61593 + {
61594 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61595 + reqmode & GR_READ ? " reading" : "",
61596 + reqmode & GR_WRITE ? " writing" : reqmode &
61597 + GR_APPEND ? " appending" : "");
61598 + return 0;
61599 + } else if (unlikely((mode & reqmode) != reqmode))
61600 + return 0;
61601 +
61602 + return reqmode;
61603 +}
61604 +
61605 +__u32
61606 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61607 + const int fmode)
61608 +{
61609 + __u32 mode, reqmode = GR_FIND;
61610 +
61611 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61612 + reqmode |= GR_EXEC;
61613 + if (fmode & S_IWOTH)
61614 + reqmode |= GR_WRITE;
61615 + if (fmode & S_IROTH)
61616 + reqmode |= GR_READ;
61617 +
61618 + mode =
61619 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61620 + mnt);
61621 +
61622 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61623 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61624 + reqmode & GR_READ ? " reading" : "",
61625 + reqmode & GR_WRITE ? " writing" : "",
61626 + reqmode & GR_EXEC ? " executing" : "");
61627 + return reqmode;
61628 + } else
61629 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61630 + {
61631 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61632 + reqmode & GR_READ ? " reading" : "",
61633 + reqmode & GR_WRITE ? " writing" : "",
61634 + reqmode & GR_EXEC ? " executing" : "");
61635 + return 0;
61636 + } else if (unlikely((mode & reqmode) != reqmode))
61637 + return 0;
61638 +
61639 + return reqmode;
61640 +}
61641 +
61642 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61643 +{
61644 + __u32 mode;
61645 +
61646 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61647 +
61648 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61649 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61650 + return mode;
61651 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61652 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61653 + return 0;
61654 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61655 + return 0;
61656 +
61657 + return (reqmode);
61658 +}
61659 +
61660 +__u32
61661 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61662 +{
61663 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61664 +}
61665 +
61666 +__u32
61667 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61668 +{
61669 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61670 +}
61671 +
61672 +__u32
61673 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61674 +{
61675 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61676 +}
61677 +
61678 +__u32
61679 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61680 +{
61681 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61682 +}
61683 +
61684 +__u32
61685 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61686 + umode_t *modeptr)
61687 +{
61688 + umode_t mode;
61689 +
61690 + *modeptr &= ~gr_acl_umask();
61691 + mode = *modeptr;
61692 +
61693 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61694 + return 1;
61695 +
61696 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
61697 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
61698 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61699 + GR_CHMOD_ACL_MSG);
61700 + } else {
61701 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61702 + }
61703 +}
61704 +
61705 +__u32
61706 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61707 +{
61708 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61709 +}
61710 +
61711 +__u32
61712 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61713 +{
61714 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61715 +}
61716 +
61717 +__u32
61718 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61719 +{
61720 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61721 +}
61722 +
61723 +__u32
61724 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61725 +{
61726 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61727 + GR_UNIXCONNECT_ACL_MSG);
61728 +}
61729 +
61730 +/* hardlinks require at minimum create and link permission,
61731 + any additional privilege required is based on the
61732 + privilege of the file being linked to
61733 +*/
61734 +__u32
61735 +gr_acl_handle_link(const struct dentry * new_dentry,
61736 + const struct dentry * parent_dentry,
61737 + const struct vfsmount * parent_mnt,
61738 + const struct dentry * old_dentry,
61739 + const struct vfsmount * old_mnt, const struct filename *to)
61740 +{
61741 + __u32 mode;
61742 + __u32 needmode = GR_CREATE | GR_LINK;
61743 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61744 +
61745 + mode =
61746 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61747 + old_mnt);
61748 +
61749 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61750 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61751 + return mode;
61752 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61753 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61754 + return 0;
61755 + } else if (unlikely((mode & needmode) != needmode))
61756 + return 0;
61757 +
61758 + return 1;
61759 +}
61760 +
61761 +__u32
61762 +gr_acl_handle_symlink(const struct dentry * new_dentry,
61763 + const struct dentry * parent_dentry,
61764 + const struct vfsmount * parent_mnt, const struct filename *from)
61765 +{
61766 + __u32 needmode = GR_WRITE | GR_CREATE;
61767 + __u32 mode;
61768 +
61769 + mode =
61770 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
61771 + GR_CREATE | GR_AUDIT_CREATE |
61772 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61773 +
61774 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61775 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
61776 + return mode;
61777 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61778 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
61779 + return 0;
61780 + } else if (unlikely((mode & needmode) != needmode))
61781 + return 0;
61782 +
61783 + return (GR_WRITE | GR_CREATE);
61784 +}
61785 +
61786 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61787 +{
61788 + __u32 mode;
61789 +
61790 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61791 +
61792 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61793 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61794 + return mode;
61795 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61796 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61797 + return 0;
61798 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
61799 + return 0;
61800 +
61801 + return (reqmode);
61802 +}
61803 +
61804 +__u32
61805 +gr_acl_handle_mknod(const struct dentry * new_dentry,
61806 + const struct dentry * parent_dentry,
61807 + const struct vfsmount * parent_mnt,
61808 + const int mode)
61809 +{
61810 + __u32 reqmode = GR_WRITE | GR_CREATE;
61811 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61812 + reqmode |= GR_SETID;
61813 +
61814 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61815 + reqmode, GR_MKNOD_ACL_MSG);
61816 +}
61817 +
61818 +__u32
61819 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
61820 + const struct dentry *parent_dentry,
61821 + const struct vfsmount *parent_mnt)
61822 +{
61823 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61824 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61825 +}
61826 +
61827 +#define RENAME_CHECK_SUCCESS(old, new) \
61828 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61829 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61830 +
61831 +int
61832 +gr_acl_handle_rename(struct dentry *new_dentry,
61833 + struct dentry *parent_dentry,
61834 + const struct vfsmount *parent_mnt,
61835 + struct dentry *old_dentry,
61836 + struct inode *old_parent_inode,
61837 + struct vfsmount *old_mnt, const struct filename *newname)
61838 +{
61839 + __u32 comp1, comp2;
61840 + int error = 0;
61841 +
61842 + if (unlikely(!gr_acl_is_enabled()))
61843 + return 0;
61844 +
61845 + if (!new_dentry->d_inode) {
61846 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61847 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61848 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61849 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61850 + GR_DELETE | GR_AUDIT_DELETE |
61851 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61852 + GR_SUPPRESS, old_mnt);
61853 + } else {
61854 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61855 + GR_CREATE | GR_DELETE |
61856 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61857 + GR_AUDIT_READ | GR_AUDIT_WRITE |
61858 + GR_SUPPRESS, parent_mnt);
61859 + comp2 =
61860 + gr_search_file(old_dentry,
61861 + GR_READ | GR_WRITE | GR_AUDIT_READ |
61862 + GR_DELETE | GR_AUDIT_DELETE |
61863 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61864 + }
61865 +
61866 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61867 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61868 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
61869 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61870 + && !(comp2 & GR_SUPPRESS)) {
61871 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
61872 + error = -EACCES;
61873 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61874 + error = -EACCES;
61875 +
61876 + return error;
61877 +}
61878 +
61879 +void
61880 +gr_acl_handle_exit(void)
61881 +{
61882 + u16 id;
61883 + char *rolename;
61884 +
61885 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61886 + !(current->role->roletype & GR_ROLE_PERSIST))) {
61887 + id = current->acl_role_id;
61888 + rolename = current->role->rolename;
61889 + gr_set_acls(1);
61890 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61891 + }
61892 +
61893 + gr_put_exec_file(current);
61894 + return;
61895 +}
61896 +
61897 +int
61898 +gr_acl_handle_procpidmem(const struct task_struct *task)
61899 +{
61900 + if (unlikely(!gr_acl_is_enabled()))
61901 + return 0;
61902 +
61903 + if (task != current && task->acl->mode & GR_PROTPROCFD)
61904 + return -EACCES;
61905 +
61906 + return 0;
61907 +}
61908 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61909 new file mode 100644
61910 index 0000000..8132048
61911 --- /dev/null
61912 +++ b/grsecurity/gracl_ip.c
61913 @@ -0,0 +1,387 @@
61914 +#include <linux/kernel.h>
61915 +#include <asm/uaccess.h>
61916 +#include <asm/errno.h>
61917 +#include <net/sock.h>
61918 +#include <linux/file.h>
61919 +#include <linux/fs.h>
61920 +#include <linux/net.h>
61921 +#include <linux/in.h>
61922 +#include <linux/skbuff.h>
61923 +#include <linux/ip.h>
61924 +#include <linux/udp.h>
61925 +#include <linux/types.h>
61926 +#include <linux/sched.h>
61927 +#include <linux/netdevice.h>
61928 +#include <linux/inetdevice.h>
61929 +#include <linux/gracl.h>
61930 +#include <linux/grsecurity.h>
61931 +#include <linux/grinternal.h>
61932 +
61933 +#define GR_BIND 0x01
61934 +#define GR_CONNECT 0x02
61935 +#define GR_INVERT 0x04
61936 +#define GR_BINDOVERRIDE 0x08
61937 +#define GR_CONNECTOVERRIDE 0x10
61938 +#define GR_SOCK_FAMILY 0x20
61939 +
61940 +static const char * gr_protocols[IPPROTO_MAX] = {
61941 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61942 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61943 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61944 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61945 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61946 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61947 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61948 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61949 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61950 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61951 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61952 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61953 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61954 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61955 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61956 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61957 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61958 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61959 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61960 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61961 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61962 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61963 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61964 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61965 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61966 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61967 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61968 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61969 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61970 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61971 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61972 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61973 + };
61974 +
61975 +static const char * gr_socktypes[SOCK_MAX] = {
61976 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61977 + "unknown:7", "unknown:8", "unknown:9", "packet"
61978 + };
61979 +
61980 +static const char * gr_sockfamilies[AF_MAX+1] = {
61981 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61982 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61983 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61984 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
61985 + };
61986 +
61987 +const char *
61988 +gr_proto_to_name(unsigned char proto)
61989 +{
61990 + return gr_protocols[proto];
61991 +}
61992 +
61993 +const char *
61994 +gr_socktype_to_name(unsigned char type)
61995 +{
61996 + return gr_socktypes[type];
61997 +}
61998 +
61999 +const char *
62000 +gr_sockfamily_to_name(unsigned char family)
62001 +{
62002 + return gr_sockfamilies[family];
62003 +}
62004 +
62005 +int
62006 +gr_search_socket(const int domain, const int type, const int protocol)
62007 +{
62008 + struct acl_subject_label *curr;
62009 + const struct cred *cred = current_cred();
62010 +
62011 + if (unlikely(!gr_acl_is_enabled()))
62012 + goto exit;
62013 +
62014 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
62015 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
62016 + goto exit; // let the kernel handle it
62017 +
62018 + curr = current->acl;
62019 +
62020 + if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
62021 + /* the family is allowed, if this is PF_INET allow it only if
62022 + the extra sock type/protocol checks pass */
62023 + if (domain == PF_INET)
62024 + goto inet_check;
62025 + goto exit;
62026 + } else {
62027 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62028 + __u32 fakeip = 0;
62029 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62030 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
62031 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
62032 + gr_to_filename(current->exec_file->f_path.dentry,
62033 + current->exec_file->f_path.mnt) :
62034 + curr->filename, curr->filename,
62035 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
62036 + &current->signal->saved_ip);
62037 + goto exit;
62038 + }
62039 + goto exit_fail;
62040 + }
62041 +
62042 +inet_check:
62043 + /* the rest of this checking is for IPv4 only */
62044 + if (!curr->ips)
62045 + goto exit;
62046 +
62047 + if ((curr->ip_type & (1U << type)) &&
62048 + (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
62049 + goto exit;
62050 +
62051 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62052 + /* we don't place acls on raw sockets , and sometimes
62053 + dgram/ip sockets are opened for ioctl and not
62054 + bind/connect, so we'll fake a bind learn log */
62055 + if (type == SOCK_RAW || type == SOCK_PACKET) {
62056 + __u32 fakeip = 0;
62057 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62058 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
62059 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
62060 + gr_to_filename(current->exec_file->f_path.dentry,
62061 + current->exec_file->f_path.mnt) :
62062 + curr->filename, curr->filename,
62063 + &fakeip, 0, type,
62064 + protocol, GR_CONNECT, &current->signal->saved_ip);
62065 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
62066 + __u32 fakeip = 0;
62067 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62068 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
62069 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
62070 + gr_to_filename(current->exec_file->f_path.dentry,
62071 + current->exec_file->f_path.mnt) :
62072 + curr->filename, curr->filename,
62073 + &fakeip, 0, type,
62074 + protocol, GR_BIND, &current->signal->saved_ip);
62075 + }
62076 + /* we'll log when they use connect or bind */
62077 + goto exit;
62078 + }
62079 +
62080 +exit_fail:
62081 + if (domain == PF_INET)
62082 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
62083 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
62084 + else
62085 +#ifndef CONFIG_IPV6
62086 + if (domain != PF_INET6)
62087 +#endif
62088 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
62089 + gr_socktype_to_name(type), protocol);
62090 +
62091 + return 0;
62092 +exit:
62093 + return 1;
62094 +}
62095 +
62096 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
62097 +{
62098 + if ((ip->mode & mode) &&
62099 + (ip_port >= ip->low) &&
62100 + (ip_port <= ip->high) &&
62101 + ((ntohl(ip_addr) & our_netmask) ==
62102 + (ntohl(our_addr) & our_netmask))
62103 + && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
62104 + && (ip->type & (1U << type))) {
62105 + if (ip->mode & GR_INVERT)
62106 + return 2; // specifically denied
62107 + else
62108 + return 1; // allowed
62109 + }
62110 +
62111 + return 0; // not specifically allowed, may continue parsing
62112 +}
62113 +
62114 +static int
62115 +gr_search_connectbind(const int full_mode, struct sock *sk,
62116 + struct sockaddr_in *addr, const int type)
62117 +{
62118 + char iface[IFNAMSIZ] = {0};
62119 + struct acl_subject_label *curr;
62120 + struct acl_ip_label *ip;
62121 + struct inet_sock *isk;
62122 + struct net_device *dev;
62123 + struct in_device *idev;
62124 + unsigned long i;
62125 + int ret;
62126 + int mode = full_mode & (GR_BIND | GR_CONNECT);
62127 + __u32 ip_addr = 0;
62128 + __u32 our_addr;
62129 + __u32 our_netmask;
62130 + char *p;
62131 + __u16 ip_port = 0;
62132 + const struct cred *cred = current_cred();
62133 +
62134 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
62135 + return 0;
62136 +
62137 + curr = current->acl;
62138 + isk = inet_sk(sk);
62139 +
62140 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
62141 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
62142 + addr->sin_addr.s_addr = curr->inaddr_any_override;
62143 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
62144 + struct sockaddr_in saddr;
62145 + int err;
62146 +
62147 + saddr.sin_family = AF_INET;
62148 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
62149 + saddr.sin_port = isk->inet_sport;
62150 +
62151 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62152 + if (err)
62153 + return err;
62154 +
62155 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62156 + if (err)
62157 + return err;
62158 + }
62159 +
62160 + if (!curr->ips)
62161 + return 0;
62162 +
62163 + ip_addr = addr->sin_addr.s_addr;
62164 + ip_port = ntohs(addr->sin_port);
62165 +
62166 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62167 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62168 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
62169 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
62170 + gr_to_filename(current->exec_file->f_path.dentry,
62171 + current->exec_file->f_path.mnt) :
62172 + curr->filename, curr->filename,
62173 + &ip_addr, ip_port, type,
62174 + sk->sk_protocol, mode, &current->signal->saved_ip);
62175 + return 0;
62176 + }
62177 +
62178 + for (i = 0; i < curr->ip_num; i++) {
62179 + ip = *(curr->ips + i);
62180 + if (ip->iface != NULL) {
62181 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
62182 + p = strchr(iface, ':');
62183 + if (p != NULL)
62184 + *p = '\0';
62185 + dev = dev_get_by_name(sock_net(sk), iface);
62186 + if (dev == NULL)
62187 + continue;
62188 + idev = in_dev_get(dev);
62189 + if (idev == NULL) {
62190 + dev_put(dev);
62191 + continue;
62192 + }
62193 + rcu_read_lock();
62194 + for_ifa(idev) {
62195 + if (!strcmp(ip->iface, ifa->ifa_label)) {
62196 + our_addr = ifa->ifa_address;
62197 + our_netmask = 0xffffffff;
62198 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62199 + if (ret == 1) {
62200 + rcu_read_unlock();
62201 + in_dev_put(idev);
62202 + dev_put(dev);
62203 + return 0;
62204 + } else if (ret == 2) {
62205 + rcu_read_unlock();
62206 + in_dev_put(idev);
62207 + dev_put(dev);
62208 + goto denied;
62209 + }
62210 + }
62211 + } endfor_ifa(idev);
62212 + rcu_read_unlock();
62213 + in_dev_put(idev);
62214 + dev_put(dev);
62215 + } else {
62216 + our_addr = ip->addr;
62217 + our_netmask = ip->netmask;
62218 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62219 + if (ret == 1)
62220 + return 0;
62221 + else if (ret == 2)
62222 + goto denied;
62223 + }
62224 + }
62225 +
62226 +denied:
62227 + if (mode == GR_BIND)
62228 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62229 + else if (mode == GR_CONNECT)
62230 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62231 +
62232 + return -EACCES;
62233 +}
62234 +
62235 +int
62236 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
62237 +{
62238 + /* always allow disconnection of dgram sockets with connect */
62239 + if (addr->sin_family == AF_UNSPEC)
62240 + return 0;
62241 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
62242 +}
62243 +
62244 +int
62245 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
62246 +{
62247 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
62248 +}
62249 +
62250 +int gr_search_listen(struct socket *sock)
62251 +{
62252 + struct sock *sk = sock->sk;
62253 + struct sockaddr_in addr;
62254 +
62255 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62256 + addr.sin_port = inet_sk(sk)->inet_sport;
62257 +
62258 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62259 +}
62260 +
62261 +int gr_search_accept(struct socket *sock)
62262 +{
62263 + struct sock *sk = sock->sk;
62264 + struct sockaddr_in addr;
62265 +
62266 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62267 + addr.sin_port = inet_sk(sk)->inet_sport;
62268 +
62269 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62270 +}
62271 +
62272 +int
62273 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
62274 +{
62275 + if (addr)
62276 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
62277 + else {
62278 + struct sockaddr_in sin;
62279 + const struct inet_sock *inet = inet_sk(sk);
62280 +
62281 + sin.sin_addr.s_addr = inet->inet_daddr;
62282 + sin.sin_port = inet->inet_dport;
62283 +
62284 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62285 + }
62286 +}
62287 +
62288 +int
62289 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
62290 +{
62291 + struct sockaddr_in sin;
62292 +
62293 + if (unlikely(skb->len < sizeof (struct udphdr)))
62294 + return 0; // skip this packet
62295 +
62296 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
62297 + sin.sin_port = udp_hdr(skb)->source;
62298 +
62299 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62300 +}
62301 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
62302 new file mode 100644
62303 index 0000000..25f54ef
62304 --- /dev/null
62305 +++ b/grsecurity/gracl_learn.c
62306 @@ -0,0 +1,207 @@
62307 +#include <linux/kernel.h>
62308 +#include <linux/mm.h>
62309 +#include <linux/sched.h>
62310 +#include <linux/poll.h>
62311 +#include <linux/string.h>
62312 +#include <linux/file.h>
62313 +#include <linux/types.h>
62314 +#include <linux/vmalloc.h>
62315 +#include <linux/grinternal.h>
62316 +
62317 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
62318 + size_t count, loff_t *ppos);
62319 +extern int gr_acl_is_enabled(void);
62320 +
62321 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
62322 +static int gr_learn_attached;
62323 +
62324 +/* use a 512k buffer */
62325 +#define LEARN_BUFFER_SIZE (512 * 1024)
62326 +
62327 +static DEFINE_SPINLOCK(gr_learn_lock);
62328 +static DEFINE_MUTEX(gr_learn_user_mutex);
62329 +
62330 +/* we need to maintain two buffers, so that the kernel context of grlearn
62331 + uses a semaphore around the userspace copying, and the other kernel contexts
62332 + use a spinlock when copying into the buffer, since they cannot sleep
62333 +*/
62334 +static char *learn_buffer;
62335 +static char *learn_buffer_user;
62336 +static int learn_buffer_len;
62337 +static int learn_buffer_user_len;
62338 +
62339 +static ssize_t
62340 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
62341 +{
62342 + DECLARE_WAITQUEUE(wait, current);
62343 + ssize_t retval = 0;
62344 +
62345 + add_wait_queue(&learn_wait, &wait);
62346 + set_current_state(TASK_INTERRUPTIBLE);
62347 + do {
62348 + mutex_lock(&gr_learn_user_mutex);
62349 + spin_lock(&gr_learn_lock);
62350 + if (learn_buffer_len)
62351 + break;
62352 + spin_unlock(&gr_learn_lock);
62353 + mutex_unlock(&gr_learn_user_mutex);
62354 + if (file->f_flags & O_NONBLOCK) {
62355 + retval = -EAGAIN;
62356 + goto out;
62357 + }
62358 + if (signal_pending(current)) {
62359 + retval = -ERESTARTSYS;
62360 + goto out;
62361 + }
62362 +
62363 + schedule();
62364 + } while (1);
62365 +
62366 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
62367 + learn_buffer_user_len = learn_buffer_len;
62368 + retval = learn_buffer_len;
62369 + learn_buffer_len = 0;
62370 +
62371 + spin_unlock(&gr_learn_lock);
62372 +
62373 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
62374 + retval = -EFAULT;
62375 +
62376 + mutex_unlock(&gr_learn_user_mutex);
62377 +out:
62378 + set_current_state(TASK_RUNNING);
62379 + remove_wait_queue(&learn_wait, &wait);
62380 + return retval;
62381 +}
62382 +
62383 +static unsigned int
62384 +poll_learn(struct file * file, poll_table * wait)
62385 +{
62386 + poll_wait(file, &learn_wait, wait);
62387 +
62388 + if (learn_buffer_len)
62389 + return (POLLIN | POLLRDNORM);
62390 +
62391 + return 0;
62392 +}
62393 +
62394 +void
62395 +gr_clear_learn_entries(void)
62396 +{
62397 + char *tmp;
62398 +
62399 + mutex_lock(&gr_learn_user_mutex);
62400 + spin_lock(&gr_learn_lock);
62401 + tmp = learn_buffer;
62402 + learn_buffer = NULL;
62403 + spin_unlock(&gr_learn_lock);
62404 + if (tmp)
62405 + vfree(tmp);
62406 + if (learn_buffer_user != NULL) {
62407 + vfree(learn_buffer_user);
62408 + learn_buffer_user = NULL;
62409 + }
62410 + learn_buffer_len = 0;
62411 + mutex_unlock(&gr_learn_user_mutex);
62412 +
62413 + return;
62414 +}
62415 +
62416 +void
62417 +gr_add_learn_entry(const char *fmt, ...)
62418 +{
62419 + va_list args;
62420 + unsigned int len;
62421 +
62422 + if (!gr_learn_attached)
62423 + return;
62424 +
62425 + spin_lock(&gr_learn_lock);
62426 +
62427 + /* leave a gap at the end so we know when it's "full" but don't have to
62428 + compute the exact length of the string we're trying to append
62429 + */
62430 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
62431 + spin_unlock(&gr_learn_lock);
62432 + wake_up_interruptible(&learn_wait);
62433 + return;
62434 + }
62435 + if (learn_buffer == NULL) {
62436 + spin_unlock(&gr_learn_lock);
62437 + return;
62438 + }
62439 +
62440 + va_start(args, fmt);
62441 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
62442 + va_end(args);
62443 +
62444 + learn_buffer_len += len + 1;
62445 +
62446 + spin_unlock(&gr_learn_lock);
62447 + wake_up_interruptible(&learn_wait);
62448 +
62449 + return;
62450 +}
62451 +
62452 +static int
62453 +open_learn(struct inode *inode, struct file *file)
62454 +{
62455 + if (file->f_mode & FMODE_READ && gr_learn_attached)
62456 + return -EBUSY;
62457 + if (file->f_mode & FMODE_READ) {
62458 + int retval = 0;
62459 + mutex_lock(&gr_learn_user_mutex);
62460 + if (learn_buffer == NULL)
62461 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
62462 + if (learn_buffer_user == NULL)
62463 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
62464 + if (learn_buffer == NULL) {
62465 + retval = -ENOMEM;
62466 + goto out_error;
62467 + }
62468 + if (learn_buffer_user == NULL) {
62469 + retval = -ENOMEM;
62470 + goto out_error;
62471 + }
62472 + learn_buffer_len = 0;
62473 + learn_buffer_user_len = 0;
62474 + gr_learn_attached = 1;
62475 +out_error:
62476 + mutex_unlock(&gr_learn_user_mutex);
62477 + return retval;
62478 + }
62479 + return 0;
62480 +}
62481 +
62482 +static int
62483 +close_learn(struct inode *inode, struct file *file)
62484 +{
62485 + if (file->f_mode & FMODE_READ) {
62486 + char *tmp = NULL;
62487 + mutex_lock(&gr_learn_user_mutex);
62488 + spin_lock(&gr_learn_lock);
62489 + tmp = learn_buffer;
62490 + learn_buffer = NULL;
62491 + spin_unlock(&gr_learn_lock);
62492 + if (tmp)
62493 + vfree(tmp);
62494 + if (learn_buffer_user != NULL) {
62495 + vfree(learn_buffer_user);
62496 + learn_buffer_user = NULL;
62497 + }
62498 + learn_buffer_len = 0;
62499 + learn_buffer_user_len = 0;
62500 + gr_learn_attached = 0;
62501 + mutex_unlock(&gr_learn_user_mutex);
62502 + }
62503 +
62504 + return 0;
62505 +}
62506 +
62507 +const struct file_operations grsec_fops = {
62508 + .read = read_learn,
62509 + .write = write_grsec_handler,
62510 + .open = open_learn,
62511 + .release = close_learn,
62512 + .poll = poll_learn,
62513 +};
62514 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62515 new file mode 100644
62516 index 0000000..39645c9
62517 --- /dev/null
62518 +++ b/grsecurity/gracl_res.c
62519 @@ -0,0 +1,68 @@
62520 +#include <linux/kernel.h>
62521 +#include <linux/sched.h>
62522 +#include <linux/gracl.h>
62523 +#include <linux/grinternal.h>
62524 +
62525 +static const char *restab_log[] = {
62526 + [RLIMIT_CPU] = "RLIMIT_CPU",
62527 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62528 + [RLIMIT_DATA] = "RLIMIT_DATA",
62529 + [RLIMIT_STACK] = "RLIMIT_STACK",
62530 + [RLIMIT_CORE] = "RLIMIT_CORE",
62531 + [RLIMIT_RSS] = "RLIMIT_RSS",
62532 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
62533 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62534 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62535 + [RLIMIT_AS] = "RLIMIT_AS",
62536 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62537 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62538 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62539 + [RLIMIT_NICE] = "RLIMIT_NICE",
62540 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62541 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62542 + [GR_CRASH_RES] = "RLIMIT_CRASH"
62543 +};
62544 +
62545 +void
62546 +gr_log_resource(const struct task_struct *task,
62547 + const int res, const unsigned long wanted, const int gt)
62548 +{
62549 + const struct cred *cred;
62550 + unsigned long rlim;
62551 +
62552 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
62553 + return;
62554 +
62555 + // not yet supported resource
62556 + if (unlikely(!restab_log[res]))
62557 + return;
62558 +
62559 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62560 + rlim = task_rlimit_max(task, res);
62561 + else
62562 + rlim = task_rlimit(task, res);
62563 +
62564 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62565 + return;
62566 +
62567 + rcu_read_lock();
62568 + cred = __task_cred(task);
62569 +
62570 + if (res == RLIMIT_NPROC &&
62571 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62572 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62573 + goto out_rcu_unlock;
62574 + else if (res == RLIMIT_MEMLOCK &&
62575 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62576 + goto out_rcu_unlock;
62577 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62578 + goto out_rcu_unlock;
62579 + rcu_read_unlock();
62580 +
62581 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62582 +
62583 + return;
62584 +out_rcu_unlock:
62585 + rcu_read_unlock();
62586 + return;
62587 +}
62588 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62589 new file mode 100644
62590 index 0000000..4dcc92a
62591 --- /dev/null
62592 +++ b/grsecurity/gracl_segv.c
62593 @@ -0,0 +1,305 @@
62594 +#include <linux/kernel.h>
62595 +#include <linux/mm.h>
62596 +#include <asm/uaccess.h>
62597 +#include <asm/errno.h>
62598 +#include <asm/mman.h>
62599 +#include <net/sock.h>
62600 +#include <linux/file.h>
62601 +#include <linux/fs.h>
62602 +#include <linux/net.h>
62603 +#include <linux/in.h>
62604 +#include <linux/slab.h>
62605 +#include <linux/types.h>
62606 +#include <linux/sched.h>
62607 +#include <linux/timer.h>
62608 +#include <linux/gracl.h>
62609 +#include <linux/grsecurity.h>
62610 +#include <linux/grinternal.h>
62611 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62612 +#include <linux/magic.h>
62613 +#include <linux/pagemap.h>
62614 +#include "../fs/btrfs/async-thread.h"
62615 +#include "../fs/btrfs/ctree.h"
62616 +#include "../fs/btrfs/btrfs_inode.h"
62617 +#endif
62618 +
62619 +static struct crash_uid *uid_set;
62620 +static unsigned short uid_used;
62621 +static DEFINE_SPINLOCK(gr_uid_lock);
62622 +extern rwlock_t gr_inode_lock;
62623 +extern struct acl_subject_label *
62624 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62625 + struct acl_role_label *role);
62626 +
62627 +static inline dev_t __get_dev(const struct dentry *dentry)
62628 +{
62629 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62630 + if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
62631 + return BTRFS_I(dentry->d_inode)->root->anon_dev;
62632 + else
62633 +#endif
62634 + return dentry->d_sb->s_dev;
62635 +}
62636 +
62637 +int
62638 +gr_init_uidset(void)
62639 +{
62640 + uid_set =
62641 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62642 + uid_used = 0;
62643 +
62644 + return uid_set ? 1 : 0;
62645 +}
62646 +
62647 +void
62648 +gr_free_uidset(void)
62649 +{
62650 + if (uid_set)
62651 + kfree(uid_set);
62652 +
62653 + return;
62654 +}
62655 +
62656 +int
62657 +gr_find_uid(const uid_t uid)
62658 +{
62659 + struct crash_uid *tmp = uid_set;
62660 + uid_t buid;
62661 + int low = 0, high = uid_used - 1, mid;
62662 +
62663 + while (high >= low) {
62664 + mid = (low + high) >> 1;
62665 + buid = tmp[mid].uid;
62666 + if (buid == uid)
62667 + return mid;
62668 + if (buid > uid)
62669 + high = mid - 1;
62670 + if (buid < uid)
62671 + low = mid + 1;
62672 + }
62673 +
62674 + return -1;
62675 +}
62676 +
62677 +static __inline__ void
62678 +gr_insertsort(void)
62679 +{
62680 + unsigned short i, j;
62681 + struct crash_uid index;
62682 +
62683 + for (i = 1; i < uid_used; i++) {
62684 + index = uid_set[i];
62685 + j = i;
62686 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62687 + uid_set[j] = uid_set[j - 1];
62688 + j--;
62689 + }
62690 + uid_set[j] = index;
62691 + }
62692 +
62693 + return;
62694 +}
62695 +
62696 +static __inline__ void
62697 +gr_insert_uid(const kuid_t kuid, const unsigned long expires)
62698 +{
62699 + int loc;
62700 + uid_t uid = GR_GLOBAL_UID(kuid);
62701 +
62702 + if (uid_used == GR_UIDTABLE_MAX)
62703 + return;
62704 +
62705 + loc = gr_find_uid(uid);
62706 +
62707 + if (loc >= 0) {
62708 + uid_set[loc].expires = expires;
62709 + return;
62710 + }
62711 +
62712 + uid_set[uid_used].uid = uid;
62713 + uid_set[uid_used].expires = expires;
62714 + uid_used++;
62715 +
62716 + gr_insertsort();
62717 +
62718 + return;
62719 +}
62720 +
62721 +void
62722 +gr_remove_uid(const unsigned short loc)
62723 +{
62724 + unsigned short i;
62725 +
62726 + for (i = loc + 1; i < uid_used; i++)
62727 + uid_set[i - 1] = uid_set[i];
62728 +
62729 + uid_used--;
62730 +
62731 + return;
62732 +}
62733 +
62734 +int
62735 +gr_check_crash_uid(const kuid_t kuid)
62736 +{
62737 + int loc;
62738 + int ret = 0;
62739 + uid_t uid;
62740 +
62741 + if (unlikely(!gr_acl_is_enabled()))
62742 + return 0;
62743 +
62744 + uid = GR_GLOBAL_UID(kuid);
62745 +
62746 + spin_lock(&gr_uid_lock);
62747 + loc = gr_find_uid(uid);
62748 +
62749 + if (loc < 0)
62750 + goto out_unlock;
62751 +
62752 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
62753 + gr_remove_uid(loc);
62754 + else
62755 + ret = 1;
62756 +
62757 +out_unlock:
62758 + spin_unlock(&gr_uid_lock);
62759 + return ret;
62760 +}
62761 +
62762 +static __inline__ int
62763 +proc_is_setxid(const struct cred *cred)
62764 +{
62765 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
62766 + !uid_eq(cred->uid, cred->fsuid))
62767 + return 1;
62768 + if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
62769 + !gid_eq(cred->gid, cred->fsgid))
62770 + return 1;
62771 +
62772 + return 0;
62773 +}
62774 +
62775 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
62776 +
62777 +void
62778 +gr_handle_crash(struct task_struct *task, const int sig)
62779 +{
62780 + struct acl_subject_label *curr;
62781 + struct task_struct *tsk, *tsk2;
62782 + const struct cred *cred;
62783 + const struct cred *cred2;
62784 +
62785 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62786 + return;
62787 +
62788 + if (unlikely(!gr_acl_is_enabled()))
62789 + return;
62790 +
62791 + curr = task->acl;
62792 +
62793 + if (!(curr->resmask & (1U << GR_CRASH_RES)))
62794 + return;
62795 +
62796 + if (time_before_eq(curr->expires, get_seconds())) {
62797 + curr->expires = 0;
62798 + curr->crashes = 0;
62799 + }
62800 +
62801 + curr->crashes++;
62802 +
62803 + if (!curr->expires)
62804 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62805 +
62806 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62807 + time_after(curr->expires, get_seconds())) {
62808 + rcu_read_lock();
62809 + cred = __task_cred(task);
62810 + if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
62811 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62812 + spin_lock(&gr_uid_lock);
62813 + gr_insert_uid(cred->uid, curr->expires);
62814 + spin_unlock(&gr_uid_lock);
62815 + curr->expires = 0;
62816 + curr->crashes = 0;
62817 + read_lock(&tasklist_lock);
62818 + do_each_thread(tsk2, tsk) {
62819 + cred2 = __task_cred(tsk);
62820 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
62821 + gr_fake_force_sig(SIGKILL, tsk);
62822 + } while_each_thread(tsk2, tsk);
62823 + read_unlock(&tasklist_lock);
62824 + } else {
62825 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62826 + read_lock(&tasklist_lock);
62827 + read_lock(&grsec_exec_file_lock);
62828 + do_each_thread(tsk2, tsk) {
62829 + if (likely(tsk != task)) {
62830 + // if this thread has the same subject as the one that triggered
62831 + // RES_CRASH and it's the same binary, kill it
62832 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62833 + gr_fake_force_sig(SIGKILL, tsk);
62834 + }
62835 + } while_each_thread(tsk2, tsk);
62836 + read_unlock(&grsec_exec_file_lock);
62837 + read_unlock(&tasklist_lock);
62838 + }
62839 + rcu_read_unlock();
62840 + }
62841 +
62842 + return;
62843 +}
62844 +
62845 +int
62846 +gr_check_crash_exec(const struct file *filp)
62847 +{
62848 + struct acl_subject_label *curr;
62849 +
62850 + if (unlikely(!gr_acl_is_enabled()))
62851 + return 0;
62852 +
62853 + read_lock(&gr_inode_lock);
62854 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62855 + __get_dev(filp->f_path.dentry),
62856 + current->role);
62857 + read_unlock(&gr_inode_lock);
62858 +
62859 + if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
62860 + (!curr->crashes && !curr->expires))
62861 + return 0;
62862 +
62863 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62864 + time_after(curr->expires, get_seconds()))
62865 + return 1;
62866 + else if (time_before_eq(curr->expires, get_seconds())) {
62867 + curr->crashes = 0;
62868 + curr->expires = 0;
62869 + }
62870 +
62871 + return 0;
62872 +}
62873 +
62874 +void
62875 +gr_handle_alertkill(struct task_struct *task)
62876 +{
62877 + struct acl_subject_label *curracl;
62878 + __u32 curr_ip;
62879 + struct task_struct *p, *p2;
62880 +
62881 + if (unlikely(!gr_acl_is_enabled()))
62882 + return;
62883 +
62884 + curracl = task->acl;
62885 + curr_ip = task->signal->curr_ip;
62886 +
62887 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62888 + read_lock(&tasklist_lock);
62889 + do_each_thread(p2, p) {
62890 + if (p->signal->curr_ip == curr_ip)
62891 + gr_fake_force_sig(SIGKILL, p);
62892 + } while_each_thread(p2, p);
62893 + read_unlock(&tasklist_lock);
62894 + } else if (curracl->mode & GR_KILLPROC)
62895 + gr_fake_force_sig(SIGKILL, task);
62896 +
62897 + return;
62898 +}
62899 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62900 new file mode 100644
62901 index 0000000..98011b0
62902 --- /dev/null
62903 +++ b/grsecurity/gracl_shm.c
62904 @@ -0,0 +1,40 @@
62905 +#include <linux/kernel.h>
62906 +#include <linux/mm.h>
62907 +#include <linux/sched.h>
62908 +#include <linux/file.h>
62909 +#include <linux/ipc.h>
62910 +#include <linux/gracl.h>
62911 +#include <linux/grsecurity.h>
62912 +#include <linux/grinternal.h>
62913 +
62914 +int
62915 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62916 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
62917 +{
62918 + struct task_struct *task;
62919 +
62920 + if (!gr_acl_is_enabled())
62921 + return 1;
62922 +
62923 + rcu_read_lock();
62924 + read_lock(&tasklist_lock);
62925 +
62926 + task = find_task_by_vpid(shm_cprid);
62927 +
62928 + if (unlikely(!task))
62929 + task = find_task_by_vpid(shm_lapid);
62930 +
62931 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62932 + (task_pid_nr(task) == shm_lapid)) &&
62933 + (task->acl->mode & GR_PROTSHM) &&
62934 + (task->acl != current->acl))) {
62935 + read_unlock(&tasklist_lock);
62936 + rcu_read_unlock();
62937 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
62938 + return 0;
62939 + }
62940 + read_unlock(&tasklist_lock);
62941 + rcu_read_unlock();
62942 +
62943 + return 1;
62944 +}
62945 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62946 new file mode 100644
62947 index 0000000..bc0be01
62948 --- /dev/null
62949 +++ b/grsecurity/grsec_chdir.c
62950 @@ -0,0 +1,19 @@
62951 +#include <linux/kernel.h>
62952 +#include <linux/sched.h>
62953 +#include <linux/fs.h>
62954 +#include <linux/file.h>
62955 +#include <linux/grsecurity.h>
62956 +#include <linux/grinternal.h>
62957 +
62958 +void
62959 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62960 +{
62961 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62962 + if ((grsec_enable_chdir && grsec_enable_group &&
62963 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62964 + !grsec_enable_group)) {
62965 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62966 + }
62967 +#endif
62968 + return;
62969 +}
62970 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62971 new file mode 100644
62972 index 0000000..bd6e105
62973 --- /dev/null
62974 +++ b/grsecurity/grsec_chroot.c
62975 @@ -0,0 +1,370 @@
62976 +#include <linux/kernel.h>
62977 +#include <linux/module.h>
62978 +#include <linux/sched.h>
62979 +#include <linux/file.h>
62980 +#include <linux/fs.h>
62981 +#include <linux/mount.h>
62982 +#include <linux/types.h>
62983 +#include "../fs/mount.h"
62984 +#include <linux/grsecurity.h>
62985 +#include <linux/grinternal.h>
62986 +
62987 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
62988 +static int gr_init_ran;
62989 +#endif
62990 +
62991 +void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
62992 +{
62993 +#ifdef CONFIG_GRKERNSEC
62994 + if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
62995 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
62996 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
62997 + && gr_init_ran
62998 +#endif
62999 + )
63000 + task->gr_is_chrooted = 1;
63001 + else {
63002 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
63003 + if (task_pid_nr(task) == 1 && !gr_init_ran)
63004 + gr_init_ran = 1;
63005 +#endif
63006 + task->gr_is_chrooted = 0;
63007 + }
63008 +
63009 + task->gr_chroot_dentry = path->dentry;
63010 +#endif
63011 + return;
63012 +}
63013 +
63014 +void gr_clear_chroot_entries(struct task_struct *task)
63015 +{
63016 +#ifdef CONFIG_GRKERNSEC
63017 + task->gr_is_chrooted = 0;
63018 + task->gr_chroot_dentry = NULL;
63019 +#endif
63020 + return;
63021 +}
63022 +
63023 +int
63024 +gr_handle_chroot_unix(const pid_t pid)
63025 +{
63026 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63027 + struct task_struct *p;
63028 +
63029 + if (unlikely(!grsec_enable_chroot_unix))
63030 + return 1;
63031 +
63032 + if (likely(!proc_is_chrooted(current)))
63033 + return 1;
63034 +
63035 + rcu_read_lock();
63036 + read_lock(&tasklist_lock);
63037 + p = find_task_by_vpid_unrestricted(pid);
63038 + if (unlikely(p && !have_same_root(current, p))) {
63039 + read_unlock(&tasklist_lock);
63040 + rcu_read_unlock();
63041 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
63042 + return 0;
63043 + }
63044 + read_unlock(&tasklist_lock);
63045 + rcu_read_unlock();
63046 +#endif
63047 + return 1;
63048 +}
63049 +
63050 +int
63051 +gr_handle_chroot_nice(void)
63052 +{
63053 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63054 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
63055 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
63056 + return -EPERM;
63057 + }
63058 +#endif
63059 + return 0;
63060 +}
63061 +
63062 +int
63063 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
63064 +{
63065 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63066 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
63067 + && proc_is_chrooted(current)) {
63068 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
63069 + return -EACCES;
63070 + }
63071 +#endif
63072 + return 0;
63073 +}
63074 +
63075 +int
63076 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
63077 +{
63078 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63079 + struct task_struct *p;
63080 + int ret = 0;
63081 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
63082 + return ret;
63083 +
63084 + read_lock(&tasklist_lock);
63085 + do_each_pid_task(pid, type, p) {
63086 + if (!have_same_root(current, p)) {
63087 + ret = 1;
63088 + goto out;
63089 + }
63090 + } while_each_pid_task(pid, type, p);
63091 +out:
63092 + read_unlock(&tasklist_lock);
63093 + return ret;
63094 +#endif
63095 + return 0;
63096 +}
63097 +
63098 +int
63099 +gr_pid_is_chrooted(struct task_struct *p)
63100 +{
63101 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63102 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
63103 + return 0;
63104 +
63105 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
63106 + !have_same_root(current, p)) {
63107 + return 1;
63108 + }
63109 +#endif
63110 + return 0;
63111 +}
63112 +
63113 +EXPORT_SYMBOL(gr_pid_is_chrooted);
63114 +
63115 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
63116 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
63117 +{
63118 + struct path path, currentroot;
63119 + int ret = 0;
63120 +
63121 + path.dentry = (struct dentry *)u_dentry;
63122 + path.mnt = (struct vfsmount *)u_mnt;
63123 + get_fs_root(current->fs, &currentroot);
63124 + if (path_is_under(&path, &currentroot))
63125 + ret = 1;
63126 + path_put(&currentroot);
63127 +
63128 + return ret;
63129 +}
63130 +#endif
63131 +
63132 +int
63133 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
63134 +{
63135 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63136 + if (!grsec_enable_chroot_fchdir)
63137 + return 1;
63138 +
63139 + if (!proc_is_chrooted(current))
63140 + return 1;
63141 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
63142 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
63143 + return 0;
63144 + }
63145 +#endif
63146 + return 1;
63147 +}
63148 +
63149 +int
63150 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63151 + const time_t shm_createtime)
63152 +{
63153 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63154 + struct task_struct *p;
63155 + time_t starttime;
63156 +
63157 + if (unlikely(!grsec_enable_chroot_shmat))
63158 + return 1;
63159 +
63160 + if (likely(!proc_is_chrooted(current)))
63161 + return 1;
63162 +
63163 + rcu_read_lock();
63164 + read_lock(&tasklist_lock);
63165 +
63166 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
63167 + starttime = p->start_time.tv_sec;
63168 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
63169 + if (have_same_root(current, p)) {
63170 + goto allow;
63171 + } else {
63172 + read_unlock(&tasklist_lock);
63173 + rcu_read_unlock();
63174 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63175 + return 0;
63176 + }
63177 + }
63178 + /* creator exited, pid reuse, fall through to next check */
63179 + }
63180 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
63181 + if (unlikely(!have_same_root(current, p))) {
63182 + read_unlock(&tasklist_lock);
63183 + rcu_read_unlock();
63184 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63185 + return 0;
63186 + }
63187 + }
63188 +
63189 +allow:
63190 + read_unlock(&tasklist_lock);
63191 + rcu_read_unlock();
63192 +#endif
63193 + return 1;
63194 +}
63195 +
63196 +void
63197 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
63198 +{
63199 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63200 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
63201 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
63202 +#endif
63203 + return;
63204 +}
63205 +
63206 +int
63207 +gr_handle_chroot_mknod(const struct dentry *dentry,
63208 + const struct vfsmount *mnt, const int mode)
63209 +{
63210 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63211 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
63212 + proc_is_chrooted(current)) {
63213 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
63214 + return -EPERM;
63215 + }
63216 +#endif
63217 + return 0;
63218 +}
63219 +
63220 +int
63221 +gr_handle_chroot_mount(const struct dentry *dentry,
63222 + const struct vfsmount *mnt, const char *dev_name)
63223 +{
63224 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63225 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
63226 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
63227 + return -EPERM;
63228 + }
63229 +#endif
63230 + return 0;
63231 +}
63232 +
63233 +int
63234 +gr_handle_chroot_pivot(void)
63235 +{
63236 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63237 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
63238 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
63239 + return -EPERM;
63240 + }
63241 +#endif
63242 + return 0;
63243 +}
63244 +
63245 +int
63246 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
63247 +{
63248 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63249 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
63250 + !gr_is_outside_chroot(dentry, mnt)) {
63251 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
63252 + return -EPERM;
63253 + }
63254 +#endif
63255 + return 0;
63256 +}
63257 +
63258 +extern const char *captab_log[];
63259 +extern int captab_log_entries;
63260 +
63261 +int
63262 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63263 +{
63264 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63265 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63266 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63267 + if (cap_raised(chroot_caps, cap)) {
63268 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
63269 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
63270 + }
63271 + return 0;
63272 + }
63273 + }
63274 +#endif
63275 + return 1;
63276 +}
63277 +
63278 +int
63279 +gr_chroot_is_capable(const int cap)
63280 +{
63281 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63282 + return gr_task_chroot_is_capable(current, current_cred(), cap);
63283 +#endif
63284 + return 1;
63285 +}
63286 +
63287 +int
63288 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
63289 +{
63290 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63291 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63292 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63293 + if (cap_raised(chroot_caps, cap)) {
63294 + return 0;
63295 + }
63296 + }
63297 +#endif
63298 + return 1;
63299 +}
63300 +
63301 +int
63302 +gr_chroot_is_capable_nolog(const int cap)
63303 +{
63304 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63305 + return gr_task_chroot_is_capable_nolog(current, cap);
63306 +#endif
63307 + return 1;
63308 +}
63309 +
63310 +int
63311 +gr_handle_chroot_sysctl(const int op)
63312 +{
63313 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63314 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
63315 + proc_is_chrooted(current))
63316 + return -EACCES;
63317 +#endif
63318 + return 0;
63319 +}
63320 +
63321 +void
63322 +gr_handle_chroot_chdir(const struct path *path)
63323 +{
63324 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63325 + if (grsec_enable_chroot_chdir)
63326 + set_fs_pwd(current->fs, path);
63327 +#endif
63328 + return;
63329 +}
63330 +
63331 +int
63332 +gr_handle_chroot_chmod(const struct dentry *dentry,
63333 + const struct vfsmount *mnt, const int mode)
63334 +{
63335 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63336 + /* allow chmod +s on directories, but not files */
63337 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
63338 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
63339 + proc_is_chrooted(current)) {
63340 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
63341 + return -EPERM;
63342 + }
63343 +#endif
63344 + return 0;
63345 +}
63346 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
63347 new file mode 100644
63348 index 0000000..ce65ceb
63349 --- /dev/null
63350 +++ b/grsecurity/grsec_disabled.c
63351 @@ -0,0 +1,434 @@
63352 +#include <linux/kernel.h>
63353 +#include <linux/module.h>
63354 +#include <linux/sched.h>
63355 +#include <linux/file.h>
63356 +#include <linux/fs.h>
63357 +#include <linux/kdev_t.h>
63358 +#include <linux/net.h>
63359 +#include <linux/in.h>
63360 +#include <linux/ip.h>
63361 +#include <linux/skbuff.h>
63362 +#include <linux/sysctl.h>
63363 +
63364 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63365 +void
63366 +pax_set_initial_flags(struct linux_binprm *bprm)
63367 +{
63368 + return;
63369 +}
63370 +#endif
63371 +
63372 +#ifdef CONFIG_SYSCTL
63373 +__u32
63374 +gr_handle_sysctl(const struct ctl_table * table, const int op)
63375 +{
63376 + return 0;
63377 +}
63378 +#endif
63379 +
63380 +#ifdef CONFIG_TASKSTATS
63381 +int gr_is_taskstats_denied(int pid)
63382 +{
63383 + return 0;
63384 +}
63385 +#endif
63386 +
63387 +int
63388 +gr_acl_is_enabled(void)
63389 +{
63390 + return 0;
63391 +}
63392 +
63393 +void
63394 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63395 +{
63396 + return;
63397 +}
63398 +
63399 +int
63400 +gr_handle_rawio(const struct inode *inode)
63401 +{
63402 + return 0;
63403 +}
63404 +
63405 +void
63406 +gr_acl_handle_psacct(struct task_struct *task, const long code)
63407 +{
63408 + return;
63409 +}
63410 +
63411 +int
63412 +gr_handle_ptrace(struct task_struct *task, const long request)
63413 +{
63414 + return 0;
63415 +}
63416 +
63417 +int
63418 +gr_handle_proc_ptrace(struct task_struct *task)
63419 +{
63420 + return 0;
63421 +}
63422 +
63423 +int
63424 +gr_set_acls(const int type)
63425 +{
63426 + return 0;
63427 +}
63428 +
63429 +int
63430 +gr_check_hidden_task(const struct task_struct *tsk)
63431 +{
63432 + return 0;
63433 +}
63434 +
63435 +int
63436 +gr_check_protected_task(const struct task_struct *task)
63437 +{
63438 + return 0;
63439 +}
63440 +
63441 +int
63442 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
63443 +{
63444 + return 0;
63445 +}
63446 +
63447 +void
63448 +gr_copy_label(struct task_struct *tsk)
63449 +{
63450 + return;
63451 +}
63452 +
63453 +void
63454 +gr_set_pax_flags(struct task_struct *task)
63455 +{
63456 + return;
63457 +}
63458 +
63459 +int
63460 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
63461 + const int unsafe_share)
63462 +{
63463 + return 0;
63464 +}
63465 +
63466 +void
63467 +gr_handle_delete(const ino_t ino, const dev_t dev)
63468 +{
63469 + return;
63470 +}
63471 +
63472 +void
63473 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
63474 +{
63475 + return;
63476 +}
63477 +
63478 +void
63479 +gr_handle_crash(struct task_struct *task, const int sig)
63480 +{
63481 + return;
63482 +}
63483 +
63484 +int
63485 +gr_check_crash_exec(const struct file *filp)
63486 +{
63487 + return 0;
63488 +}
63489 +
63490 +int
63491 +gr_check_crash_uid(const kuid_t uid)
63492 +{
63493 + return 0;
63494 +}
63495 +
63496 +void
63497 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63498 + struct dentry *old_dentry,
63499 + struct dentry *new_dentry,
63500 + struct vfsmount *mnt, const __u8 replace)
63501 +{
63502 + return;
63503 +}
63504 +
63505 +int
63506 +gr_search_socket(const int family, const int type, const int protocol)
63507 +{
63508 + return 1;
63509 +}
63510 +
63511 +int
63512 +gr_search_connectbind(const int mode, const struct socket *sock,
63513 + const struct sockaddr_in *addr)
63514 +{
63515 + return 0;
63516 +}
63517 +
63518 +void
63519 +gr_handle_alertkill(struct task_struct *task)
63520 +{
63521 + return;
63522 +}
63523 +
63524 +__u32
63525 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63526 +{
63527 + return 1;
63528 +}
63529 +
63530 +__u32
63531 +gr_acl_handle_hidden_file(const struct dentry * dentry,
63532 + const struct vfsmount * mnt)
63533 +{
63534 + return 1;
63535 +}
63536 +
63537 +__u32
63538 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63539 + int acc_mode)
63540 +{
63541 + return 1;
63542 +}
63543 +
63544 +__u32
63545 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63546 +{
63547 + return 1;
63548 +}
63549 +
63550 +__u32
63551 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63552 +{
63553 + return 1;
63554 +}
63555 +
63556 +int
63557 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63558 + unsigned int *vm_flags)
63559 +{
63560 + return 1;
63561 +}
63562 +
63563 +__u32
63564 +gr_acl_handle_truncate(const struct dentry * dentry,
63565 + const struct vfsmount * mnt)
63566 +{
63567 + return 1;
63568 +}
63569 +
63570 +__u32
63571 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63572 +{
63573 + return 1;
63574 +}
63575 +
63576 +__u32
63577 +gr_acl_handle_access(const struct dentry * dentry,
63578 + const struct vfsmount * mnt, const int fmode)
63579 +{
63580 + return 1;
63581 +}
63582 +
63583 +__u32
63584 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63585 + umode_t *mode)
63586 +{
63587 + return 1;
63588 +}
63589 +
63590 +__u32
63591 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63592 +{
63593 + return 1;
63594 +}
63595 +
63596 +__u32
63597 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63598 +{
63599 + return 1;
63600 +}
63601 +
63602 +void
63603 +grsecurity_init(void)
63604 +{
63605 + return;
63606 +}
63607 +
63608 +umode_t gr_acl_umask(void)
63609 +{
63610 + return 0;
63611 +}
63612 +
63613 +__u32
63614 +gr_acl_handle_mknod(const struct dentry * new_dentry,
63615 + const struct dentry * parent_dentry,
63616 + const struct vfsmount * parent_mnt,
63617 + const int mode)
63618 +{
63619 + return 1;
63620 +}
63621 +
63622 +__u32
63623 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
63624 + const struct dentry * parent_dentry,
63625 + const struct vfsmount * parent_mnt)
63626 +{
63627 + return 1;
63628 +}
63629 +
63630 +__u32
63631 +gr_acl_handle_symlink(const struct dentry * new_dentry,
63632 + const struct dentry * parent_dentry,
63633 + const struct vfsmount * parent_mnt, const struct filename *from)
63634 +{
63635 + return 1;
63636 +}
63637 +
63638 +__u32
63639 +gr_acl_handle_link(const struct dentry * new_dentry,
63640 + const struct dentry * parent_dentry,
63641 + const struct vfsmount * parent_mnt,
63642 + const struct dentry * old_dentry,
63643 + const struct vfsmount * old_mnt, const struct filename *to)
63644 +{
63645 + return 1;
63646 +}
63647 +
63648 +int
63649 +gr_acl_handle_rename(const struct dentry *new_dentry,
63650 + const struct dentry *parent_dentry,
63651 + const struct vfsmount *parent_mnt,
63652 + const struct dentry *old_dentry,
63653 + const struct inode *old_parent_inode,
63654 + const struct vfsmount *old_mnt, const struct filename *newname)
63655 +{
63656 + return 0;
63657 +}
63658 +
63659 +int
63660 +gr_acl_handle_filldir(const struct file *file, const char *name,
63661 + const int namelen, const ino_t ino)
63662 +{
63663 + return 1;
63664 +}
63665 +
63666 +int
63667 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63668 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
63669 +{
63670 + return 1;
63671 +}
63672 +
63673 +int
63674 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63675 +{
63676 + return 0;
63677 +}
63678 +
63679 +int
63680 +gr_search_accept(const struct socket *sock)
63681 +{
63682 + return 0;
63683 +}
63684 +
63685 +int
63686 +gr_search_listen(const struct socket *sock)
63687 +{
63688 + return 0;
63689 +}
63690 +
63691 +int
63692 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63693 +{
63694 + return 0;
63695 +}
63696 +
63697 +__u32
63698 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63699 +{
63700 + return 1;
63701 +}
63702 +
63703 +__u32
63704 +gr_acl_handle_creat(const struct dentry * dentry,
63705 + const struct dentry * p_dentry,
63706 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63707 + const int imode)
63708 +{
63709 + return 1;
63710 +}
63711 +
63712 +void
63713 +gr_acl_handle_exit(void)
63714 +{
63715 + return;
63716 +}
63717 +
63718 +int
63719 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63720 +{
63721 + return 1;
63722 +}
63723 +
63724 +void
63725 +gr_set_role_label(const kuid_t uid, const kgid_t gid)
63726 +{
63727 + return;
63728 +}
63729 +
63730 +int
63731 +gr_acl_handle_procpidmem(const struct task_struct *task)
63732 +{
63733 + return 0;
63734 +}
63735 +
63736 +int
63737 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63738 +{
63739 + return 0;
63740 +}
63741 +
63742 +int
63743 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63744 +{
63745 + return 0;
63746 +}
63747 +
63748 +void
63749 +gr_set_kernel_label(struct task_struct *task)
63750 +{
63751 + return;
63752 +}
63753 +
63754 +int
63755 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
63756 +{
63757 + return 0;
63758 +}
63759 +
63760 +int
63761 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
63762 +{
63763 + return 0;
63764 +}
63765 +
63766 +int gr_acl_enable_at_secure(void)
63767 +{
63768 + return 0;
63769 +}
63770 +
63771 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63772 +{
63773 + return dentry->d_sb->s_dev;
63774 +}
63775 +
63776 +void gr_put_exec_file(struct task_struct *task)
63777 +{
63778 + return;
63779 +}
63780 +
63781 +EXPORT_SYMBOL(gr_set_kernel_label);
63782 +#ifdef CONFIG_SECURITY
63783 +EXPORT_SYMBOL(gr_check_user_change);
63784 +EXPORT_SYMBOL(gr_check_group_change);
63785 +#endif
63786 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63787 new file mode 100644
63788 index 0000000..387032b
63789 --- /dev/null
63790 +++ b/grsecurity/grsec_exec.c
63791 @@ -0,0 +1,187 @@
63792 +#include <linux/kernel.h>
63793 +#include <linux/sched.h>
63794 +#include <linux/file.h>
63795 +#include <linux/binfmts.h>
63796 +#include <linux/fs.h>
63797 +#include <linux/types.h>
63798 +#include <linux/grdefs.h>
63799 +#include <linux/grsecurity.h>
63800 +#include <linux/grinternal.h>
63801 +#include <linux/capability.h>
63802 +#include <linux/module.h>
63803 +#include <linux/compat.h>
63804 +
63805 +#include <asm/uaccess.h>
63806 +
63807 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63808 +static char gr_exec_arg_buf[132];
63809 +static DEFINE_MUTEX(gr_exec_arg_mutex);
63810 +#endif
63811 +
63812 +struct user_arg_ptr {
63813 +#ifdef CONFIG_COMPAT
63814 + bool is_compat;
63815 +#endif
63816 + union {
63817 + const char __user *const __user *native;
63818 +#ifdef CONFIG_COMPAT
63819 + const compat_uptr_t __user *compat;
63820 +#endif
63821 + } ptr;
63822 +};
63823 +
63824 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
63825 +
63826 +void
63827 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
63828 +{
63829 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63830 + char *grarg = gr_exec_arg_buf;
63831 + unsigned int i, x, execlen = 0;
63832 + char c;
63833 +
63834 + if (!((grsec_enable_execlog && grsec_enable_group &&
63835 + in_group_p(grsec_audit_gid))
63836 + || (grsec_enable_execlog && !grsec_enable_group)))
63837 + return;
63838 +
63839 + mutex_lock(&gr_exec_arg_mutex);
63840 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
63841 +
63842 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
63843 + const char __user *p;
63844 + unsigned int len;
63845 +
63846 + p = get_user_arg_ptr(argv, i);
63847 + if (IS_ERR(p))
63848 + goto log;
63849 +
63850 + len = strnlen_user(p, 128 - execlen);
63851 + if (len > 128 - execlen)
63852 + len = 128 - execlen;
63853 + else if (len > 0)
63854 + len--;
63855 + if (copy_from_user(grarg + execlen, p, len))
63856 + goto log;
63857 +
63858 + /* rewrite unprintable characters */
63859 + for (x = 0; x < len; x++) {
63860 + c = *(grarg + execlen + x);
63861 + if (c < 32 || c > 126)
63862 + *(grarg + execlen + x) = ' ';
63863 + }
63864 +
63865 + execlen += len;
63866 + *(grarg + execlen) = ' ';
63867 + *(grarg + execlen + 1) = '\0';
63868 + execlen++;
63869 + }
63870 +
63871 + log:
63872 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63873 + bprm->file->f_path.mnt, grarg);
63874 + mutex_unlock(&gr_exec_arg_mutex);
63875 +#endif
63876 + return;
63877 +}
63878 +
63879 +#ifdef CONFIG_GRKERNSEC
63880 +extern int gr_acl_is_capable(const int cap);
63881 +extern int gr_acl_is_capable_nolog(const int cap);
63882 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
63883 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
63884 +extern int gr_chroot_is_capable(const int cap);
63885 +extern int gr_chroot_is_capable_nolog(const int cap);
63886 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
63887 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
63888 +#endif
63889 +
63890 +const char *captab_log[] = {
63891 + "CAP_CHOWN",
63892 + "CAP_DAC_OVERRIDE",
63893 + "CAP_DAC_READ_SEARCH",
63894 + "CAP_FOWNER",
63895 + "CAP_FSETID",
63896 + "CAP_KILL",
63897 + "CAP_SETGID",
63898 + "CAP_SETUID",
63899 + "CAP_SETPCAP",
63900 + "CAP_LINUX_IMMUTABLE",
63901 + "CAP_NET_BIND_SERVICE",
63902 + "CAP_NET_BROADCAST",
63903 + "CAP_NET_ADMIN",
63904 + "CAP_NET_RAW",
63905 + "CAP_IPC_LOCK",
63906 + "CAP_IPC_OWNER",
63907 + "CAP_SYS_MODULE",
63908 + "CAP_SYS_RAWIO",
63909 + "CAP_SYS_CHROOT",
63910 + "CAP_SYS_PTRACE",
63911 + "CAP_SYS_PACCT",
63912 + "CAP_SYS_ADMIN",
63913 + "CAP_SYS_BOOT",
63914 + "CAP_SYS_NICE",
63915 + "CAP_SYS_RESOURCE",
63916 + "CAP_SYS_TIME",
63917 + "CAP_SYS_TTY_CONFIG",
63918 + "CAP_MKNOD",
63919 + "CAP_LEASE",
63920 + "CAP_AUDIT_WRITE",
63921 + "CAP_AUDIT_CONTROL",
63922 + "CAP_SETFCAP",
63923 + "CAP_MAC_OVERRIDE",
63924 + "CAP_MAC_ADMIN",
63925 + "CAP_SYSLOG",
63926 + "CAP_WAKE_ALARM"
63927 +};
63928 +
63929 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63930 +
63931 +int gr_is_capable(const int cap)
63932 +{
63933 +#ifdef CONFIG_GRKERNSEC
63934 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63935 + return 1;
63936 + return 0;
63937 +#else
63938 + return 1;
63939 +#endif
63940 +}
63941 +
63942 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63943 +{
63944 +#ifdef CONFIG_GRKERNSEC
63945 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
63946 + return 1;
63947 + return 0;
63948 +#else
63949 + return 1;
63950 +#endif
63951 +}
63952 +
63953 +int gr_is_capable_nolog(const int cap)
63954 +{
63955 +#ifdef CONFIG_GRKERNSEC
63956 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63957 + return 1;
63958 + return 0;
63959 +#else
63960 + return 1;
63961 +#endif
63962 +}
63963 +
63964 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
63965 +{
63966 +#ifdef CONFIG_GRKERNSEC
63967 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
63968 + return 1;
63969 + return 0;
63970 +#else
63971 + return 1;
63972 +#endif
63973 +}
63974 +
63975 +EXPORT_SYMBOL(gr_is_capable);
63976 +EXPORT_SYMBOL(gr_is_capable_nolog);
63977 +EXPORT_SYMBOL(gr_task_is_capable);
63978 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
63979 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63980 new file mode 100644
63981 index 0000000..06cc6ea
63982 --- /dev/null
63983 +++ b/grsecurity/grsec_fifo.c
63984 @@ -0,0 +1,24 @@
63985 +#include <linux/kernel.h>
63986 +#include <linux/sched.h>
63987 +#include <linux/fs.h>
63988 +#include <linux/file.h>
63989 +#include <linux/grinternal.h>
63990 +
63991 +int
63992 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63993 + const struct dentry *dir, const int flag, const int acc_mode)
63994 +{
63995 +#ifdef CONFIG_GRKERNSEC_FIFO
63996 + const struct cred *cred = current_cred();
63997 +
63998 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63999 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64000 + !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
64001 + !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
64002 + if (!inode_permission(dentry->d_inode, acc_mode))
64003 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
64004 + return -EACCES;
64005 + }
64006 +#endif
64007 + return 0;
64008 +}
64009 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64010 new file mode 100644
64011 index 0000000..8ca18bf
64012 --- /dev/null
64013 +++ b/grsecurity/grsec_fork.c
64014 @@ -0,0 +1,23 @@
64015 +#include <linux/kernel.h>
64016 +#include <linux/sched.h>
64017 +#include <linux/grsecurity.h>
64018 +#include <linux/grinternal.h>
64019 +#include <linux/errno.h>
64020 +
64021 +void
64022 +gr_log_forkfail(const int retval)
64023 +{
64024 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64025 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
64026 + switch (retval) {
64027 + case -EAGAIN:
64028 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
64029 + break;
64030 + case -ENOMEM:
64031 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
64032 + break;
64033 + }
64034 + }
64035 +#endif
64036 + return;
64037 +}
64038 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
64039 new file mode 100644
64040 index 0000000..a862e9f
64041 --- /dev/null
64042 +++ b/grsecurity/grsec_init.c
64043 @@ -0,0 +1,283 @@
64044 +#include <linux/kernel.h>
64045 +#include <linux/sched.h>
64046 +#include <linux/mm.h>
64047 +#include <linux/gracl.h>
64048 +#include <linux/slab.h>
64049 +#include <linux/vmalloc.h>
64050 +#include <linux/percpu.h>
64051 +#include <linux/module.h>
64052 +
64053 +int grsec_enable_ptrace_readexec;
64054 +int grsec_enable_setxid;
64055 +int grsec_enable_symlinkown;
64056 +kgid_t grsec_symlinkown_gid;
64057 +int grsec_enable_brute;
64058 +int grsec_enable_link;
64059 +int grsec_enable_dmesg;
64060 +int grsec_enable_harden_ptrace;
64061 +int grsec_enable_fifo;
64062 +int grsec_enable_execlog;
64063 +int grsec_enable_signal;
64064 +int grsec_enable_forkfail;
64065 +int grsec_enable_audit_ptrace;
64066 +int grsec_enable_time;
64067 +int grsec_enable_audit_textrel;
64068 +int grsec_enable_group;
64069 +kgid_t grsec_audit_gid;
64070 +int grsec_enable_chdir;
64071 +int grsec_enable_mount;
64072 +int grsec_enable_rofs;
64073 +int grsec_enable_chroot_findtask;
64074 +int grsec_enable_chroot_mount;
64075 +int grsec_enable_chroot_shmat;
64076 +int grsec_enable_chroot_fchdir;
64077 +int grsec_enable_chroot_double;
64078 +int grsec_enable_chroot_pivot;
64079 +int grsec_enable_chroot_chdir;
64080 +int grsec_enable_chroot_chmod;
64081 +int grsec_enable_chroot_mknod;
64082 +int grsec_enable_chroot_nice;
64083 +int grsec_enable_chroot_execlog;
64084 +int grsec_enable_chroot_caps;
64085 +int grsec_enable_chroot_sysctl;
64086 +int grsec_enable_chroot_unix;
64087 +int grsec_enable_tpe;
64088 +kgid_t grsec_tpe_gid;
64089 +int grsec_enable_blackhole;
64090 +#ifdef CONFIG_IPV6_MODULE
64091 +EXPORT_SYMBOL(grsec_enable_blackhole);
64092 +#endif
64093 +int grsec_lastack_retries;
64094 +int grsec_enable_tpe_all;
64095 +int grsec_enable_tpe_invert;
64096 +int grsec_enable_socket_all;
64097 +kgid_t grsec_socket_all_gid;
64098 +int grsec_enable_socket_client;
64099 +kgid_t grsec_socket_client_gid;
64100 +int grsec_enable_socket_server;
64101 +kgid_t grsec_socket_server_gid;
64102 +int grsec_resource_logging;
64103 +int grsec_disable_privio;
64104 +int grsec_enable_log_rwxmaps;
64105 +int grsec_lock;
64106 +
64107 +DEFINE_SPINLOCK(grsec_alert_lock);
64108 +unsigned long grsec_alert_wtime = 0;
64109 +unsigned long grsec_alert_fyet = 0;
64110 +
64111 +DEFINE_SPINLOCK(grsec_audit_lock);
64112 +
64113 +DEFINE_RWLOCK(grsec_exec_file_lock);
64114 +
64115 +char *gr_shared_page[4];
64116 +
64117 +char *gr_alert_log_fmt;
64118 +char *gr_audit_log_fmt;
64119 +char *gr_alert_log_buf;
64120 +char *gr_audit_log_buf;
64121 +
64122 +extern struct gr_arg *gr_usermode;
64123 +extern unsigned char *gr_system_salt;
64124 +extern unsigned char *gr_system_sum;
64125 +
64126 +void __init
64127 +grsecurity_init(void)
64128 +{
64129 + int j;
64130 + /* create the per-cpu shared pages */
64131 +
64132 +#ifdef CONFIG_X86
64133 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
64134 +#endif
64135 +
64136 + for (j = 0; j < 4; j++) {
64137 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
64138 + if (gr_shared_page[j] == NULL) {
64139 + panic("Unable to allocate grsecurity shared page");
64140 + return;
64141 + }
64142 + }
64143 +
64144 + /* allocate log buffers */
64145 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
64146 + if (!gr_alert_log_fmt) {
64147 + panic("Unable to allocate grsecurity alert log format buffer");
64148 + return;
64149 + }
64150 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
64151 + if (!gr_audit_log_fmt) {
64152 + panic("Unable to allocate grsecurity audit log format buffer");
64153 + return;
64154 + }
64155 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64156 + if (!gr_alert_log_buf) {
64157 + panic("Unable to allocate grsecurity alert log buffer");
64158 + return;
64159 + }
64160 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64161 + if (!gr_audit_log_buf) {
64162 + panic("Unable to allocate grsecurity audit log buffer");
64163 + return;
64164 + }
64165 +
64166 + /* allocate memory for authentication structure */
64167 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
64168 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
64169 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
64170 +
64171 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
64172 + panic("Unable to allocate grsecurity authentication structure");
64173 + return;
64174 + }
64175 +
64176 +
64177 +#ifdef CONFIG_GRKERNSEC_IO
64178 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
64179 + grsec_disable_privio = 1;
64180 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64181 + grsec_disable_privio = 1;
64182 +#else
64183 + grsec_disable_privio = 0;
64184 +#endif
64185 +#endif
64186 +
64187 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64188 + /* for backward compatibility, tpe_invert always defaults to on if
64189 + enabled in the kernel
64190 + */
64191 + grsec_enable_tpe_invert = 1;
64192 +#endif
64193 +
64194 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64195 +#ifndef CONFIG_GRKERNSEC_SYSCTL
64196 + grsec_lock = 1;
64197 +#endif
64198 +
64199 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64200 + grsec_enable_audit_textrel = 1;
64201 +#endif
64202 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64203 + grsec_enable_log_rwxmaps = 1;
64204 +#endif
64205 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64206 + grsec_enable_group = 1;
64207 + grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
64208 +#endif
64209 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64210 + grsec_enable_ptrace_readexec = 1;
64211 +#endif
64212 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64213 + grsec_enable_chdir = 1;
64214 +#endif
64215 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64216 + grsec_enable_harden_ptrace = 1;
64217 +#endif
64218 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64219 + grsec_enable_mount = 1;
64220 +#endif
64221 +#ifdef CONFIG_GRKERNSEC_LINK
64222 + grsec_enable_link = 1;
64223 +#endif
64224 +#ifdef CONFIG_GRKERNSEC_BRUTE
64225 + grsec_enable_brute = 1;
64226 +#endif
64227 +#ifdef CONFIG_GRKERNSEC_DMESG
64228 + grsec_enable_dmesg = 1;
64229 +#endif
64230 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64231 + grsec_enable_blackhole = 1;
64232 + grsec_lastack_retries = 4;
64233 +#endif
64234 +#ifdef CONFIG_GRKERNSEC_FIFO
64235 + grsec_enable_fifo = 1;
64236 +#endif
64237 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64238 + grsec_enable_execlog = 1;
64239 +#endif
64240 +#ifdef CONFIG_GRKERNSEC_SETXID
64241 + grsec_enable_setxid = 1;
64242 +#endif
64243 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64244 + grsec_enable_signal = 1;
64245 +#endif
64246 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64247 + grsec_enable_forkfail = 1;
64248 +#endif
64249 +#ifdef CONFIG_GRKERNSEC_TIME
64250 + grsec_enable_time = 1;
64251 +#endif
64252 +#ifdef CONFIG_GRKERNSEC_RESLOG
64253 + grsec_resource_logging = 1;
64254 +#endif
64255 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64256 + grsec_enable_chroot_findtask = 1;
64257 +#endif
64258 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64259 + grsec_enable_chroot_unix = 1;
64260 +#endif
64261 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64262 + grsec_enable_chroot_mount = 1;
64263 +#endif
64264 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64265 + grsec_enable_chroot_fchdir = 1;
64266 +#endif
64267 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64268 + grsec_enable_chroot_shmat = 1;
64269 +#endif
64270 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64271 + grsec_enable_audit_ptrace = 1;
64272 +#endif
64273 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64274 + grsec_enable_chroot_double = 1;
64275 +#endif
64276 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64277 + grsec_enable_chroot_pivot = 1;
64278 +#endif
64279 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64280 + grsec_enable_chroot_chdir = 1;
64281 +#endif
64282 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64283 + grsec_enable_chroot_chmod = 1;
64284 +#endif
64285 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64286 + grsec_enable_chroot_mknod = 1;
64287 +#endif
64288 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64289 + grsec_enable_chroot_nice = 1;
64290 +#endif
64291 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64292 + grsec_enable_chroot_execlog = 1;
64293 +#endif
64294 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64295 + grsec_enable_chroot_caps = 1;
64296 +#endif
64297 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64298 + grsec_enable_chroot_sysctl = 1;
64299 +#endif
64300 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64301 + grsec_enable_symlinkown = 1;
64302 + grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
64303 +#endif
64304 +#ifdef CONFIG_GRKERNSEC_TPE
64305 + grsec_enable_tpe = 1;
64306 + grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
64307 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64308 + grsec_enable_tpe_all = 1;
64309 +#endif
64310 +#endif
64311 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64312 + grsec_enable_socket_all = 1;
64313 + grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
64314 +#endif
64315 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64316 + grsec_enable_socket_client = 1;
64317 + grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
64318 +#endif
64319 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64320 + grsec_enable_socket_server = 1;
64321 + grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
64322 +#endif
64323 +#endif
64324 +
64325 + return;
64326 +}
64327 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
64328 new file mode 100644
64329 index 0000000..5e05e20
64330 --- /dev/null
64331 +++ b/grsecurity/grsec_link.c
64332 @@ -0,0 +1,58 @@
64333 +#include <linux/kernel.h>
64334 +#include <linux/sched.h>
64335 +#include <linux/fs.h>
64336 +#include <linux/file.h>
64337 +#include <linux/grinternal.h>
64338 +
64339 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
64340 +{
64341 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64342 + const struct inode *link_inode = link->dentry->d_inode;
64343 +
64344 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
64345 + /* ignore root-owned links, e.g. /proc/self */
64346 + gr_is_global_nonroot(link_inode->i_uid) && target &&
64347 + !uid_eq(link_inode->i_uid, target->i_uid)) {
64348 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
64349 + return 1;
64350 + }
64351 +#endif
64352 + return 0;
64353 +}
64354 +
64355 +int
64356 +gr_handle_follow_link(const struct inode *parent,
64357 + const struct inode *inode,
64358 + const struct dentry *dentry, const struct vfsmount *mnt)
64359 +{
64360 +#ifdef CONFIG_GRKERNSEC_LINK
64361 + const struct cred *cred = current_cred();
64362 +
64363 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
64364 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
64365 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
64366 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
64367 + return -EACCES;
64368 + }
64369 +#endif
64370 + return 0;
64371 +}
64372 +
64373 +int
64374 +gr_handle_hardlink(const struct dentry *dentry,
64375 + const struct vfsmount *mnt,
64376 + struct inode *inode, const int mode, const struct filename *to)
64377 +{
64378 +#ifdef CONFIG_GRKERNSEC_LINK
64379 + const struct cred *cred = current_cred();
64380 +
64381 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
64382 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
64383 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
64384 + !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
64385 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
64386 + return -EPERM;
64387 + }
64388 +#endif
64389 + return 0;
64390 +}
64391 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
64392 new file mode 100644
64393 index 0000000..7c06085
64394 --- /dev/null
64395 +++ b/grsecurity/grsec_log.c
64396 @@ -0,0 +1,326 @@
64397 +#include <linux/kernel.h>
64398 +#include <linux/sched.h>
64399 +#include <linux/file.h>
64400 +#include <linux/tty.h>
64401 +#include <linux/fs.h>
64402 +#include <linux/grinternal.h>
64403 +
64404 +#ifdef CONFIG_TREE_PREEMPT_RCU
64405 +#define DISABLE_PREEMPT() preempt_disable()
64406 +#define ENABLE_PREEMPT() preempt_enable()
64407 +#else
64408 +#define DISABLE_PREEMPT()
64409 +#define ENABLE_PREEMPT()
64410 +#endif
64411 +
64412 +#define BEGIN_LOCKS(x) \
64413 + DISABLE_PREEMPT(); \
64414 + rcu_read_lock(); \
64415 + read_lock(&tasklist_lock); \
64416 + read_lock(&grsec_exec_file_lock); \
64417 + if (x != GR_DO_AUDIT) \
64418 + spin_lock(&grsec_alert_lock); \
64419 + else \
64420 + spin_lock(&grsec_audit_lock)
64421 +
64422 +#define END_LOCKS(x) \
64423 + if (x != GR_DO_AUDIT) \
64424 + spin_unlock(&grsec_alert_lock); \
64425 + else \
64426 + spin_unlock(&grsec_audit_lock); \
64427 + read_unlock(&grsec_exec_file_lock); \
64428 + read_unlock(&tasklist_lock); \
64429 + rcu_read_unlock(); \
64430 + ENABLE_PREEMPT(); \
64431 + if (x == GR_DONT_AUDIT) \
64432 + gr_handle_alertkill(current)
64433 +
64434 +enum {
64435 + FLOODING,
64436 + NO_FLOODING
64437 +};
64438 +
64439 +extern char *gr_alert_log_fmt;
64440 +extern char *gr_audit_log_fmt;
64441 +extern char *gr_alert_log_buf;
64442 +extern char *gr_audit_log_buf;
64443 +
64444 +static int gr_log_start(int audit)
64445 +{
64446 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
64447 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
64448 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64449 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
64450 + unsigned long curr_secs = get_seconds();
64451 +
64452 + if (audit == GR_DO_AUDIT)
64453 + goto set_fmt;
64454 +
64455 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
64456 + grsec_alert_wtime = curr_secs;
64457 + grsec_alert_fyet = 0;
64458 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
64459 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
64460 + grsec_alert_fyet++;
64461 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
64462 + grsec_alert_wtime = curr_secs;
64463 + grsec_alert_fyet++;
64464 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
64465 + return FLOODING;
64466 + }
64467 + else return FLOODING;
64468 +
64469 +set_fmt:
64470 +#endif
64471 + memset(buf, 0, PAGE_SIZE);
64472 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
64473 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
64474 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64475 + } else if (current->signal->curr_ip) {
64476 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
64477 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
64478 + } else if (gr_acl_is_enabled()) {
64479 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
64480 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64481 + } else {
64482 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
64483 + strcpy(buf, fmt);
64484 + }
64485 +
64486 + return NO_FLOODING;
64487 +}
64488 +
64489 +static void gr_log_middle(int audit, const char *msg, va_list ap)
64490 + __attribute__ ((format (printf, 2, 0)));
64491 +
64492 +static void gr_log_middle(int audit, const char *msg, va_list ap)
64493 +{
64494 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64495 + unsigned int len = strlen(buf);
64496 +
64497 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64498 +
64499 + return;
64500 +}
64501 +
64502 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
64503 + __attribute__ ((format (printf, 2, 3)));
64504 +
64505 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
64506 +{
64507 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64508 + unsigned int len = strlen(buf);
64509 + va_list ap;
64510 +
64511 + va_start(ap, msg);
64512 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64513 + va_end(ap);
64514 +
64515 + return;
64516 +}
64517 +
64518 +static void gr_log_end(int audit, int append_default)
64519 +{
64520 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64521 + if (append_default) {
64522 + struct task_struct *task = current;
64523 + struct task_struct *parent = task->real_parent;
64524 + const struct cred *cred = __task_cred(task);
64525 + const struct cred *pcred = __task_cred(parent);
64526 + unsigned int len = strlen(buf);
64527 +
64528 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64529 + }
64530 +
64531 + printk("%s\n", buf);
64532 +
64533 + return;
64534 +}
64535 +
64536 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64537 +{
64538 + int logtype;
64539 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64540 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64541 + void *voidptr = NULL;
64542 + int num1 = 0, num2 = 0;
64543 + unsigned long ulong1 = 0, ulong2 = 0;
64544 + struct dentry *dentry = NULL;
64545 + struct vfsmount *mnt = NULL;
64546 + struct file *file = NULL;
64547 + struct task_struct *task = NULL;
64548 + const struct cred *cred, *pcred;
64549 + va_list ap;
64550 +
64551 + BEGIN_LOCKS(audit);
64552 + logtype = gr_log_start(audit);
64553 + if (logtype == FLOODING) {
64554 + END_LOCKS(audit);
64555 + return;
64556 + }
64557 + va_start(ap, argtypes);
64558 + switch (argtypes) {
64559 + case GR_TTYSNIFF:
64560 + task = va_arg(ap, struct task_struct *);
64561 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
64562 + break;
64563 + case GR_SYSCTL_HIDDEN:
64564 + str1 = va_arg(ap, char *);
64565 + gr_log_middle_varargs(audit, msg, result, str1);
64566 + break;
64567 + case GR_RBAC:
64568 + dentry = va_arg(ap, struct dentry *);
64569 + mnt = va_arg(ap, struct vfsmount *);
64570 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64571 + break;
64572 + case GR_RBAC_STR:
64573 + dentry = va_arg(ap, struct dentry *);
64574 + mnt = va_arg(ap, struct vfsmount *);
64575 + str1 = va_arg(ap, char *);
64576 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64577 + break;
64578 + case GR_STR_RBAC:
64579 + str1 = va_arg(ap, char *);
64580 + dentry = va_arg(ap, struct dentry *);
64581 + mnt = va_arg(ap, struct vfsmount *);
64582 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64583 + break;
64584 + case GR_RBAC_MODE2:
64585 + dentry = va_arg(ap, struct dentry *);
64586 + mnt = va_arg(ap, struct vfsmount *);
64587 + str1 = va_arg(ap, char *);
64588 + str2 = va_arg(ap, char *);
64589 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64590 + break;
64591 + case GR_RBAC_MODE3:
64592 + dentry = va_arg(ap, struct dentry *);
64593 + mnt = va_arg(ap, struct vfsmount *);
64594 + str1 = va_arg(ap, char *);
64595 + str2 = va_arg(ap, char *);
64596 + str3 = va_arg(ap, char *);
64597 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64598 + break;
64599 + case GR_FILENAME:
64600 + dentry = va_arg(ap, struct dentry *);
64601 + mnt = va_arg(ap, struct vfsmount *);
64602 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64603 + break;
64604 + case GR_STR_FILENAME:
64605 + str1 = va_arg(ap, char *);
64606 + dentry = va_arg(ap, struct dentry *);
64607 + mnt = va_arg(ap, struct vfsmount *);
64608 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64609 + break;
64610 + case GR_FILENAME_STR:
64611 + dentry = va_arg(ap, struct dentry *);
64612 + mnt = va_arg(ap, struct vfsmount *);
64613 + str1 = va_arg(ap, char *);
64614 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64615 + break;
64616 + case GR_FILENAME_TWO_INT:
64617 + dentry = va_arg(ap, struct dentry *);
64618 + mnt = va_arg(ap, struct vfsmount *);
64619 + num1 = va_arg(ap, int);
64620 + num2 = va_arg(ap, int);
64621 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64622 + break;
64623 + case GR_FILENAME_TWO_INT_STR:
64624 + dentry = va_arg(ap, struct dentry *);
64625 + mnt = va_arg(ap, struct vfsmount *);
64626 + num1 = va_arg(ap, int);
64627 + num2 = va_arg(ap, int);
64628 + str1 = va_arg(ap, char *);
64629 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64630 + break;
64631 + case GR_TEXTREL:
64632 + file = va_arg(ap, struct file *);
64633 + ulong1 = va_arg(ap, unsigned long);
64634 + ulong2 = va_arg(ap, unsigned long);
64635 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64636 + break;
64637 + case GR_PTRACE:
64638 + task = va_arg(ap, struct task_struct *);
64639 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
64640 + break;
64641 + case GR_RESOURCE:
64642 + task = va_arg(ap, struct task_struct *);
64643 + cred = __task_cred(task);
64644 + pcred = __task_cred(task->real_parent);
64645 + ulong1 = va_arg(ap, unsigned long);
64646 + str1 = va_arg(ap, char *);
64647 + ulong2 = va_arg(ap, unsigned long);
64648 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64649 + break;
64650 + case GR_CAP:
64651 + task = va_arg(ap, struct task_struct *);
64652 + cred = __task_cred(task);
64653 + pcred = __task_cred(task->real_parent);
64654 + str1 = va_arg(ap, char *);
64655 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64656 + break;
64657 + case GR_SIG:
64658 + str1 = va_arg(ap, char *);
64659 + voidptr = va_arg(ap, void *);
64660 + gr_log_middle_varargs(audit, msg, str1, voidptr);
64661 + break;
64662 + case GR_SIG2:
64663 + task = va_arg(ap, struct task_struct *);
64664 + cred = __task_cred(task);
64665 + pcred = __task_cred(task->real_parent);
64666 + num1 = va_arg(ap, int);
64667 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64668 + break;
64669 + case GR_CRASH1:
64670 + task = va_arg(ap, struct task_struct *);
64671 + cred = __task_cred(task);
64672 + pcred = __task_cred(task->real_parent);
64673 + ulong1 = va_arg(ap, unsigned long);
64674 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
64675 + break;
64676 + case GR_CRASH2:
64677 + task = va_arg(ap, struct task_struct *);
64678 + cred = __task_cred(task);
64679 + pcred = __task_cred(task->real_parent);
64680 + ulong1 = va_arg(ap, unsigned long);
64681 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
64682 + break;
64683 + case GR_RWXMAP:
64684 + file = va_arg(ap, struct file *);
64685 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64686 + break;
64687 + case GR_PSACCT:
64688 + {
64689 + unsigned int wday, cday;
64690 + __u8 whr, chr;
64691 + __u8 wmin, cmin;
64692 + __u8 wsec, csec;
64693 + char cur_tty[64] = { 0 };
64694 + char parent_tty[64] = { 0 };
64695 +
64696 + task = va_arg(ap, struct task_struct *);
64697 + wday = va_arg(ap, unsigned int);
64698 + cday = va_arg(ap, unsigned int);
64699 + whr = va_arg(ap, int);
64700 + chr = va_arg(ap, int);
64701 + wmin = va_arg(ap, int);
64702 + cmin = va_arg(ap, int);
64703 + wsec = va_arg(ap, int);
64704 + csec = va_arg(ap, int);
64705 + ulong1 = va_arg(ap, unsigned long);
64706 + cred = __task_cred(task);
64707 + pcred = __task_cred(task->real_parent);
64708 +
64709 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64710 + }
64711 + break;
64712 + default:
64713 + gr_log_middle(audit, msg, ap);
64714 + }
64715 + va_end(ap);
64716 + // these don't need DEFAULTSECARGS printed on the end
64717 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64718 + gr_log_end(audit, 0);
64719 + else
64720 + gr_log_end(audit, 1);
64721 + END_LOCKS(audit);
64722 +}
64723 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64724 new file mode 100644
64725 index 0000000..f536303
64726 --- /dev/null
64727 +++ b/grsecurity/grsec_mem.c
64728 @@ -0,0 +1,40 @@
64729 +#include <linux/kernel.h>
64730 +#include <linux/sched.h>
64731 +#include <linux/mm.h>
64732 +#include <linux/mman.h>
64733 +#include <linux/grinternal.h>
64734 +
64735 +void
64736 +gr_handle_ioperm(void)
64737 +{
64738 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64739 + return;
64740 +}
64741 +
64742 +void
64743 +gr_handle_iopl(void)
64744 +{
64745 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64746 + return;
64747 +}
64748 +
64749 +void
64750 +gr_handle_mem_readwrite(u64 from, u64 to)
64751 +{
64752 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64753 + return;
64754 +}
64755 +
64756 +void
64757 +gr_handle_vm86(void)
64758 +{
64759 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64760 + return;
64761 +}
64762 +
64763 +void
64764 +gr_log_badprocpid(const char *entry)
64765 +{
64766 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64767 + return;
64768 +}
64769 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64770 new file mode 100644
64771 index 0000000..2131422
64772 --- /dev/null
64773 +++ b/grsecurity/grsec_mount.c
64774 @@ -0,0 +1,62 @@
64775 +#include <linux/kernel.h>
64776 +#include <linux/sched.h>
64777 +#include <linux/mount.h>
64778 +#include <linux/grsecurity.h>
64779 +#include <linux/grinternal.h>
64780 +
64781 +void
64782 +gr_log_remount(const char *devname, const int retval)
64783 +{
64784 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64785 + if (grsec_enable_mount && (retval >= 0))
64786 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64787 +#endif
64788 + return;
64789 +}
64790 +
64791 +void
64792 +gr_log_unmount(const char *devname, const int retval)
64793 +{
64794 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64795 + if (grsec_enable_mount && (retval >= 0))
64796 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64797 +#endif
64798 + return;
64799 +}
64800 +
64801 +void
64802 +gr_log_mount(const char *from, const char *to, const int retval)
64803 +{
64804 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64805 + if (grsec_enable_mount && (retval >= 0))
64806 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64807 +#endif
64808 + return;
64809 +}
64810 +
64811 +int
64812 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64813 +{
64814 +#ifdef CONFIG_GRKERNSEC_ROFS
64815 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64816 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64817 + return -EPERM;
64818 + } else
64819 + return 0;
64820 +#endif
64821 + return 0;
64822 +}
64823 +
64824 +int
64825 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64826 +{
64827 +#ifdef CONFIG_GRKERNSEC_ROFS
64828 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64829 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64830 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64831 + return -EPERM;
64832 + } else
64833 + return 0;
64834 +#endif
64835 + return 0;
64836 +}
64837 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64838 new file mode 100644
64839 index 0000000..a3b12a0
64840 --- /dev/null
64841 +++ b/grsecurity/grsec_pax.c
64842 @@ -0,0 +1,36 @@
64843 +#include <linux/kernel.h>
64844 +#include <linux/sched.h>
64845 +#include <linux/mm.h>
64846 +#include <linux/file.h>
64847 +#include <linux/grinternal.h>
64848 +#include <linux/grsecurity.h>
64849 +
64850 +void
64851 +gr_log_textrel(struct vm_area_struct * vma)
64852 +{
64853 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64854 + if (grsec_enable_audit_textrel)
64855 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64856 +#endif
64857 + return;
64858 +}
64859 +
64860 +void
64861 +gr_log_rwxmmap(struct file *file)
64862 +{
64863 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64864 + if (grsec_enable_log_rwxmaps)
64865 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64866 +#endif
64867 + return;
64868 +}
64869 +
64870 +void
64871 +gr_log_rwxmprotect(struct file *file)
64872 +{
64873 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64874 + if (grsec_enable_log_rwxmaps)
64875 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64876 +#endif
64877 + return;
64878 +}
64879 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64880 new file mode 100644
64881 index 0000000..f7f29aa
64882 --- /dev/null
64883 +++ b/grsecurity/grsec_ptrace.c
64884 @@ -0,0 +1,30 @@
64885 +#include <linux/kernel.h>
64886 +#include <linux/sched.h>
64887 +#include <linux/grinternal.h>
64888 +#include <linux/security.h>
64889 +
64890 +void
64891 +gr_audit_ptrace(struct task_struct *task)
64892 +{
64893 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64894 + if (grsec_enable_audit_ptrace)
64895 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64896 +#endif
64897 + return;
64898 +}
64899 +
64900 +int
64901 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
64902 +{
64903 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64904 + const struct dentry *dentry = file->f_path.dentry;
64905 + const struct vfsmount *mnt = file->f_path.mnt;
64906 +
64907 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64908 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64909 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64910 + return -EACCES;
64911 + }
64912 +#endif
64913 + return 0;
64914 +}
64915 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64916 new file mode 100644
64917 index 0000000..e09715a
64918 --- /dev/null
64919 +++ b/grsecurity/grsec_sig.c
64920 @@ -0,0 +1,222 @@
64921 +#include <linux/kernel.h>
64922 +#include <linux/sched.h>
64923 +#include <linux/delay.h>
64924 +#include <linux/grsecurity.h>
64925 +#include <linux/grinternal.h>
64926 +#include <linux/hardirq.h>
64927 +
64928 +char *signames[] = {
64929 + [SIGSEGV] = "Segmentation fault",
64930 + [SIGILL] = "Illegal instruction",
64931 + [SIGABRT] = "Abort",
64932 + [SIGBUS] = "Invalid alignment/Bus error"
64933 +};
64934 +
64935 +void
64936 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64937 +{
64938 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64939 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64940 + (sig == SIGABRT) || (sig == SIGBUS))) {
64941 + if (task_pid_nr(t) == task_pid_nr(current)) {
64942 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64943 + } else {
64944 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64945 + }
64946 + }
64947 +#endif
64948 + return;
64949 +}
64950 +
64951 +int
64952 +gr_handle_signal(const struct task_struct *p, const int sig)
64953 +{
64954 +#ifdef CONFIG_GRKERNSEC
64955 + /* ignore the 0 signal for protected task checks */
64956 + if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
64957 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64958 + return -EPERM;
64959 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64960 + return -EPERM;
64961 + }
64962 +#endif
64963 + return 0;
64964 +}
64965 +
64966 +#ifdef CONFIG_GRKERNSEC
64967 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64968 +
64969 +int gr_fake_force_sig(int sig, struct task_struct *t)
64970 +{
64971 + unsigned long int flags;
64972 + int ret, blocked, ignored;
64973 + struct k_sigaction *action;
64974 +
64975 + spin_lock_irqsave(&t->sighand->siglock, flags);
64976 + action = &t->sighand->action[sig-1];
64977 + ignored = action->sa.sa_handler == SIG_IGN;
64978 + blocked = sigismember(&t->blocked, sig);
64979 + if (blocked || ignored) {
64980 + action->sa.sa_handler = SIG_DFL;
64981 + if (blocked) {
64982 + sigdelset(&t->blocked, sig);
64983 + recalc_sigpending_and_wake(t);
64984 + }
64985 + }
64986 + if (action->sa.sa_handler == SIG_DFL)
64987 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
64988 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64989 +
64990 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
64991 +
64992 + return ret;
64993 +}
64994 +#endif
64995 +
64996 +#ifdef CONFIG_GRKERNSEC_BRUTE
64997 +#define GR_USER_BAN_TIME (15 * 60)
64998 +#define GR_DAEMON_BRUTE_TIME (30 * 60)
64999 +
65000 +static int __get_dumpable(unsigned long mm_flags)
65001 +{
65002 + int ret;
65003 +
65004 + ret = mm_flags & MMF_DUMPABLE_MASK;
65005 + return (ret >= 2) ? 2 : ret;
65006 +}
65007 +#endif
65008 +
65009 +void gr_handle_brute_attach(unsigned long mm_flags)
65010 +{
65011 +#ifdef CONFIG_GRKERNSEC_BRUTE
65012 + struct task_struct *p = current;
65013 + kuid_t uid = GLOBAL_ROOT_UID;
65014 + int daemon = 0;
65015 +
65016 + if (!grsec_enable_brute)
65017 + return;
65018 +
65019 + rcu_read_lock();
65020 + read_lock(&tasklist_lock);
65021 + read_lock(&grsec_exec_file_lock);
65022 + if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
65023 + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
65024 + p->real_parent->brute = 1;
65025 + daemon = 1;
65026 + } else {
65027 + const struct cred *cred = __task_cred(p), *cred2;
65028 + struct task_struct *tsk, *tsk2;
65029 +
65030 + if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
65031 + struct user_struct *user;
65032 +
65033 + uid = cred->uid;
65034 +
65035 + /* this is put upon execution past expiration */
65036 + user = find_user(uid);
65037 + if (user == NULL)
65038 + goto unlock;
65039 + user->banned = 1;
65040 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
65041 + if (user->ban_expires == ~0UL)
65042 + user->ban_expires--;
65043 +
65044 + do_each_thread(tsk2, tsk) {
65045 + cred2 = __task_cred(tsk);
65046 + if (tsk != p && uid_eq(cred2->uid, uid))
65047 + gr_fake_force_sig(SIGKILL, tsk);
65048 + } while_each_thread(tsk2, tsk);
65049 + }
65050 + }
65051 +unlock:
65052 + read_unlock(&grsec_exec_file_lock);
65053 + read_unlock(&tasklist_lock);
65054 + rcu_read_unlock();
65055 +
65056 + if (gr_is_global_nonroot(uid))
65057 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
65058 + GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
65059 + else if (daemon)
65060 + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
65061 +
65062 +#endif
65063 + return;
65064 +}
65065 +
65066 +void gr_handle_brute_check(void)
65067 +{
65068 +#ifdef CONFIG_GRKERNSEC_BRUTE
65069 + struct task_struct *p = current;
65070 +
65071 + if (unlikely(p->brute)) {
65072 + if (!grsec_enable_brute)
65073 + p->brute = 0;
65074 + else if (time_before(get_seconds(), p->brute_expires))
65075 + msleep(30 * 1000);
65076 + }
65077 +#endif
65078 + return;
65079 +}
65080 +
65081 +void gr_handle_kernel_exploit(void)
65082 +{
65083 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
65084 + const struct cred *cred;
65085 + struct task_struct *tsk, *tsk2;
65086 + struct user_struct *user;
65087 + kuid_t uid;
65088 +
65089 + if (in_irq() || in_serving_softirq() || in_nmi())
65090 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
65091 +
65092 + uid = current_uid();
65093 +
65094 + if (gr_is_global_root(uid))
65095 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
65096 + else {
65097 + /* kill all the processes of this user, hold a reference
65098 + to their creds struct, and prevent them from creating
65099 + another process until system reset
65100 + */
65101 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
65102 + GR_GLOBAL_UID(uid));
65103 + /* we intentionally leak this ref */
65104 + user = get_uid(current->cred->user);
65105 + if (user) {
65106 + user->banned = 1;
65107 + user->ban_expires = ~0UL;
65108 + }
65109 +
65110 + read_lock(&tasklist_lock);
65111 + do_each_thread(tsk2, tsk) {
65112 + cred = __task_cred(tsk);
65113 + if (uid_eq(cred->uid, uid))
65114 + gr_fake_force_sig(SIGKILL, tsk);
65115 + } while_each_thread(tsk2, tsk);
65116 + read_unlock(&tasklist_lock);
65117 + }
65118 +#endif
65119 +}
65120 +
65121 +int __gr_process_user_ban(struct user_struct *user)
65122 +{
65123 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65124 + if (unlikely(user->banned)) {
65125 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
65126 + user->banned = 0;
65127 + user->ban_expires = 0;
65128 + free_uid(user);
65129 + } else
65130 + return -EPERM;
65131 + }
65132 +#endif
65133 + return 0;
65134 +}
65135 +
65136 +int gr_process_user_ban(void)
65137 +{
65138 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65139 + return __gr_process_user_ban(current->cred->user);
65140 +#endif
65141 + return 0;
65142 +}
65143 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
65144 new file mode 100644
65145 index 0000000..4030d57
65146 --- /dev/null
65147 +++ b/grsecurity/grsec_sock.c
65148 @@ -0,0 +1,244 @@
65149 +#include <linux/kernel.h>
65150 +#include <linux/module.h>
65151 +#include <linux/sched.h>
65152 +#include <linux/file.h>
65153 +#include <linux/net.h>
65154 +#include <linux/in.h>
65155 +#include <linux/ip.h>
65156 +#include <net/sock.h>
65157 +#include <net/inet_sock.h>
65158 +#include <linux/grsecurity.h>
65159 +#include <linux/grinternal.h>
65160 +#include <linux/gracl.h>
65161 +
65162 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
65163 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
65164 +
65165 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
65166 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
65167 +
65168 +#ifdef CONFIG_UNIX_MODULE
65169 +EXPORT_SYMBOL(gr_acl_handle_unix);
65170 +EXPORT_SYMBOL(gr_acl_handle_mknod);
65171 +EXPORT_SYMBOL(gr_handle_chroot_unix);
65172 +EXPORT_SYMBOL(gr_handle_create);
65173 +#endif
65174 +
65175 +#ifdef CONFIG_GRKERNSEC
65176 +#define gr_conn_table_size 32749
65177 +struct conn_table_entry {
65178 + struct conn_table_entry *next;
65179 + struct signal_struct *sig;
65180 +};
65181 +
65182 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
65183 +DEFINE_SPINLOCK(gr_conn_table_lock);
65184 +
65185 +extern const char * gr_socktype_to_name(unsigned char type);
65186 +extern const char * gr_proto_to_name(unsigned char proto);
65187 +extern const char * gr_sockfamily_to_name(unsigned char family);
65188 +
65189 +static __inline__ int
65190 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
65191 +{
65192 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
65193 +}
65194 +
65195 +static __inline__ int
65196 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
65197 + __u16 sport, __u16 dport)
65198 +{
65199 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
65200 + sig->gr_sport == sport && sig->gr_dport == dport))
65201 + return 1;
65202 + else
65203 + return 0;
65204 +}
65205 +
65206 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
65207 +{
65208 + struct conn_table_entry **match;
65209 + unsigned int index;
65210 +
65211 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65212 + sig->gr_sport, sig->gr_dport,
65213 + gr_conn_table_size);
65214 +
65215 + newent->sig = sig;
65216 +
65217 + match = &gr_conn_table[index];
65218 + newent->next = *match;
65219 + *match = newent;
65220 +
65221 + return;
65222 +}
65223 +
65224 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
65225 +{
65226 + struct conn_table_entry *match, *last = NULL;
65227 + unsigned int index;
65228 +
65229 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65230 + sig->gr_sport, sig->gr_dport,
65231 + gr_conn_table_size);
65232 +
65233 + match = gr_conn_table[index];
65234 + while (match && !conn_match(match->sig,
65235 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
65236 + sig->gr_dport)) {
65237 + last = match;
65238 + match = match->next;
65239 + }
65240 +
65241 + if (match) {
65242 + if (last)
65243 + last->next = match->next;
65244 + else
65245 + gr_conn_table[index] = NULL;
65246 + kfree(match);
65247 + }
65248 +
65249 + return;
65250 +}
65251 +
65252 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
65253 + __u16 sport, __u16 dport)
65254 +{
65255 + struct conn_table_entry *match;
65256 + unsigned int index;
65257 +
65258 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
65259 +
65260 + match = gr_conn_table[index];
65261 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
65262 + match = match->next;
65263 +
65264 + if (match)
65265 + return match->sig;
65266 + else
65267 + return NULL;
65268 +}
65269 +
65270 +#endif
65271 +
65272 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
65273 +{
65274 +#ifdef CONFIG_GRKERNSEC
65275 + struct signal_struct *sig = task->signal;
65276 + struct conn_table_entry *newent;
65277 +
65278 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
65279 + if (newent == NULL)
65280 + return;
65281 + /* no bh lock needed since we are called with bh disabled */
65282 + spin_lock(&gr_conn_table_lock);
65283 + gr_del_task_from_ip_table_nolock(sig);
65284 + sig->gr_saddr = inet->inet_rcv_saddr;
65285 + sig->gr_daddr = inet->inet_daddr;
65286 + sig->gr_sport = inet->inet_sport;
65287 + sig->gr_dport = inet->inet_dport;
65288 + gr_add_to_task_ip_table_nolock(sig, newent);
65289 + spin_unlock(&gr_conn_table_lock);
65290 +#endif
65291 + return;
65292 +}
65293 +
65294 +void gr_del_task_from_ip_table(struct task_struct *task)
65295 +{
65296 +#ifdef CONFIG_GRKERNSEC
65297 + spin_lock_bh(&gr_conn_table_lock);
65298 + gr_del_task_from_ip_table_nolock(task->signal);
65299 + spin_unlock_bh(&gr_conn_table_lock);
65300 +#endif
65301 + return;
65302 +}
65303 +
65304 +void
65305 +gr_attach_curr_ip(const struct sock *sk)
65306 +{
65307 +#ifdef CONFIG_GRKERNSEC
65308 + struct signal_struct *p, *set;
65309 + const struct inet_sock *inet = inet_sk(sk);
65310 +
65311 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
65312 + return;
65313 +
65314 + set = current->signal;
65315 +
65316 + spin_lock_bh(&gr_conn_table_lock);
65317 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
65318 + inet->inet_dport, inet->inet_sport);
65319 + if (unlikely(p != NULL)) {
65320 + set->curr_ip = p->curr_ip;
65321 + set->used_accept = 1;
65322 + gr_del_task_from_ip_table_nolock(p);
65323 + spin_unlock_bh(&gr_conn_table_lock);
65324 + return;
65325 + }
65326 + spin_unlock_bh(&gr_conn_table_lock);
65327 +
65328 + set->curr_ip = inet->inet_daddr;
65329 + set->used_accept = 1;
65330 +#endif
65331 + return;
65332 +}
65333 +
65334 +int
65335 +gr_handle_sock_all(const int family, const int type, const int protocol)
65336 +{
65337 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65338 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
65339 + (family != AF_UNIX)) {
65340 + if (family == AF_INET)
65341 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
65342 + else
65343 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
65344 + return -EACCES;
65345 + }
65346 +#endif
65347 + return 0;
65348 +}
65349 +
65350 +int
65351 +gr_handle_sock_server(const struct sockaddr *sck)
65352 +{
65353 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65354 + if (grsec_enable_socket_server &&
65355 + in_group_p(grsec_socket_server_gid) &&
65356 + sck && (sck->sa_family != AF_UNIX) &&
65357 + (sck->sa_family != AF_LOCAL)) {
65358 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65359 + return -EACCES;
65360 + }
65361 +#endif
65362 + return 0;
65363 +}
65364 +
65365 +int
65366 +gr_handle_sock_server_other(const struct sock *sck)
65367 +{
65368 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65369 + if (grsec_enable_socket_server &&
65370 + in_group_p(grsec_socket_server_gid) &&
65371 + sck && (sck->sk_family != AF_UNIX) &&
65372 + (sck->sk_family != AF_LOCAL)) {
65373 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65374 + return -EACCES;
65375 + }
65376 +#endif
65377 + return 0;
65378 +}
65379 +
65380 +int
65381 +gr_handle_sock_client(const struct sockaddr *sck)
65382 +{
65383 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65384 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
65385 + sck && (sck->sa_family != AF_UNIX) &&
65386 + (sck->sa_family != AF_LOCAL)) {
65387 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
65388 + return -EACCES;
65389 + }
65390 +#endif
65391 + return 0;
65392 +}
65393 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
65394 new file mode 100644
65395 index 0000000..f55ef0f
65396 --- /dev/null
65397 +++ b/grsecurity/grsec_sysctl.c
65398 @@ -0,0 +1,469 @@
65399 +#include <linux/kernel.h>
65400 +#include <linux/sched.h>
65401 +#include <linux/sysctl.h>
65402 +#include <linux/grsecurity.h>
65403 +#include <linux/grinternal.h>
65404 +
65405 +int
65406 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
65407 +{
65408 +#ifdef CONFIG_GRKERNSEC_SYSCTL
65409 + if (dirname == NULL || name == NULL)
65410 + return 0;
65411 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
65412 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
65413 + return -EACCES;
65414 + }
65415 +#endif
65416 + return 0;
65417 +}
65418 +
65419 +#ifdef CONFIG_GRKERNSEC_ROFS
65420 +static int __maybe_unused one = 1;
65421 +#endif
65422 +
65423 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65424 +struct ctl_table grsecurity_table[] = {
65425 +#ifdef CONFIG_GRKERNSEC_SYSCTL
65426 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
65427 +#ifdef CONFIG_GRKERNSEC_IO
65428 + {
65429 + .procname = "disable_priv_io",
65430 + .data = &grsec_disable_privio,
65431 + .maxlen = sizeof(int),
65432 + .mode = 0600,
65433 + .proc_handler = &proc_dointvec,
65434 + },
65435 +#endif
65436 +#endif
65437 +#ifdef CONFIG_GRKERNSEC_LINK
65438 + {
65439 + .procname = "linking_restrictions",
65440 + .data = &grsec_enable_link,
65441 + .maxlen = sizeof(int),
65442 + .mode = 0600,
65443 + .proc_handler = &proc_dointvec,
65444 + },
65445 +#endif
65446 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65447 + {
65448 + .procname = "enforce_symlinksifowner",
65449 + .data = &grsec_enable_symlinkown,
65450 + .maxlen = sizeof(int),
65451 + .mode = 0600,
65452 + .proc_handler = &proc_dointvec,
65453 + },
65454 + {
65455 + .procname = "symlinkown_gid",
65456 + .data = &grsec_symlinkown_gid,
65457 + .maxlen = sizeof(int),
65458 + .mode = 0600,
65459 + .proc_handler = &proc_dointvec,
65460 + },
65461 +#endif
65462 +#ifdef CONFIG_GRKERNSEC_BRUTE
65463 + {
65464 + .procname = "deter_bruteforce",
65465 + .data = &grsec_enable_brute,
65466 + .maxlen = sizeof(int),
65467 + .mode = 0600,
65468 + .proc_handler = &proc_dointvec,
65469 + },
65470 +#endif
65471 +#ifdef CONFIG_GRKERNSEC_FIFO
65472 + {
65473 + .procname = "fifo_restrictions",
65474 + .data = &grsec_enable_fifo,
65475 + .maxlen = sizeof(int),
65476 + .mode = 0600,
65477 + .proc_handler = &proc_dointvec,
65478 + },
65479 +#endif
65480 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65481 + {
65482 + .procname = "ptrace_readexec",
65483 + .data = &grsec_enable_ptrace_readexec,
65484 + .maxlen = sizeof(int),
65485 + .mode = 0600,
65486 + .proc_handler = &proc_dointvec,
65487 + },
65488 +#endif
65489 +#ifdef CONFIG_GRKERNSEC_SETXID
65490 + {
65491 + .procname = "consistent_setxid",
65492 + .data = &grsec_enable_setxid,
65493 + .maxlen = sizeof(int),
65494 + .mode = 0600,
65495 + .proc_handler = &proc_dointvec,
65496 + },
65497 +#endif
65498 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65499 + {
65500 + .procname = "ip_blackhole",
65501 + .data = &grsec_enable_blackhole,
65502 + .maxlen = sizeof(int),
65503 + .mode = 0600,
65504 + .proc_handler = &proc_dointvec,
65505 + },
65506 + {
65507 + .procname = "lastack_retries",
65508 + .data = &grsec_lastack_retries,
65509 + .maxlen = sizeof(int),
65510 + .mode = 0600,
65511 + .proc_handler = &proc_dointvec,
65512 + },
65513 +#endif
65514 +#ifdef CONFIG_GRKERNSEC_EXECLOG
65515 + {
65516 + .procname = "exec_logging",
65517 + .data = &grsec_enable_execlog,
65518 + .maxlen = sizeof(int),
65519 + .mode = 0600,
65520 + .proc_handler = &proc_dointvec,
65521 + },
65522 +#endif
65523 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65524 + {
65525 + .procname = "rwxmap_logging",
65526 + .data = &grsec_enable_log_rwxmaps,
65527 + .maxlen = sizeof(int),
65528 + .mode = 0600,
65529 + .proc_handler = &proc_dointvec,
65530 + },
65531 +#endif
65532 +#ifdef CONFIG_GRKERNSEC_SIGNAL
65533 + {
65534 + .procname = "signal_logging",
65535 + .data = &grsec_enable_signal,
65536 + .maxlen = sizeof(int),
65537 + .mode = 0600,
65538 + .proc_handler = &proc_dointvec,
65539 + },
65540 +#endif
65541 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
65542 + {
65543 + .procname = "forkfail_logging",
65544 + .data = &grsec_enable_forkfail,
65545 + .maxlen = sizeof(int),
65546 + .mode = 0600,
65547 + .proc_handler = &proc_dointvec,
65548 + },
65549 +#endif
65550 +#ifdef CONFIG_GRKERNSEC_TIME
65551 + {
65552 + .procname = "timechange_logging",
65553 + .data = &grsec_enable_time,
65554 + .maxlen = sizeof(int),
65555 + .mode = 0600,
65556 + .proc_handler = &proc_dointvec,
65557 + },
65558 +#endif
65559 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65560 + {
65561 + .procname = "chroot_deny_shmat",
65562 + .data = &grsec_enable_chroot_shmat,
65563 + .maxlen = sizeof(int),
65564 + .mode = 0600,
65565 + .proc_handler = &proc_dointvec,
65566 + },
65567 +#endif
65568 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65569 + {
65570 + .procname = "chroot_deny_unix",
65571 + .data = &grsec_enable_chroot_unix,
65572 + .maxlen = sizeof(int),
65573 + .mode = 0600,
65574 + .proc_handler = &proc_dointvec,
65575 + },
65576 +#endif
65577 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65578 + {
65579 + .procname = "chroot_deny_mount",
65580 + .data = &grsec_enable_chroot_mount,
65581 + .maxlen = sizeof(int),
65582 + .mode = 0600,
65583 + .proc_handler = &proc_dointvec,
65584 + },
65585 +#endif
65586 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65587 + {
65588 + .procname = "chroot_deny_fchdir",
65589 + .data = &grsec_enable_chroot_fchdir,
65590 + .maxlen = sizeof(int),
65591 + .mode = 0600,
65592 + .proc_handler = &proc_dointvec,
65593 + },
65594 +#endif
65595 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65596 + {
65597 + .procname = "chroot_deny_chroot",
65598 + .data = &grsec_enable_chroot_double,
65599 + .maxlen = sizeof(int),
65600 + .mode = 0600,
65601 + .proc_handler = &proc_dointvec,
65602 + },
65603 +#endif
65604 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65605 + {
65606 + .procname = "chroot_deny_pivot",
65607 + .data = &grsec_enable_chroot_pivot,
65608 + .maxlen = sizeof(int),
65609 + .mode = 0600,
65610 + .proc_handler = &proc_dointvec,
65611 + },
65612 +#endif
65613 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65614 + {
65615 + .procname = "chroot_enforce_chdir",
65616 + .data = &grsec_enable_chroot_chdir,
65617 + .maxlen = sizeof(int),
65618 + .mode = 0600,
65619 + .proc_handler = &proc_dointvec,
65620 + },
65621 +#endif
65622 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65623 + {
65624 + .procname = "chroot_deny_chmod",
65625 + .data = &grsec_enable_chroot_chmod,
65626 + .maxlen = sizeof(int),
65627 + .mode = 0600,
65628 + .proc_handler = &proc_dointvec,
65629 + },
65630 +#endif
65631 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65632 + {
65633 + .procname = "chroot_deny_mknod",
65634 + .data = &grsec_enable_chroot_mknod,
65635 + .maxlen = sizeof(int),
65636 + .mode = 0600,
65637 + .proc_handler = &proc_dointvec,
65638 + },
65639 +#endif
65640 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65641 + {
65642 + .procname = "chroot_restrict_nice",
65643 + .data = &grsec_enable_chroot_nice,
65644 + .maxlen = sizeof(int),
65645 + .mode = 0600,
65646 + .proc_handler = &proc_dointvec,
65647 + },
65648 +#endif
65649 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65650 + {
65651 + .procname = "chroot_execlog",
65652 + .data = &grsec_enable_chroot_execlog,
65653 + .maxlen = sizeof(int),
65654 + .mode = 0600,
65655 + .proc_handler = &proc_dointvec,
65656 + },
65657 +#endif
65658 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65659 + {
65660 + .procname = "chroot_caps",
65661 + .data = &grsec_enable_chroot_caps,
65662 + .maxlen = sizeof(int),
65663 + .mode = 0600,
65664 + .proc_handler = &proc_dointvec,
65665 + },
65666 +#endif
65667 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65668 + {
65669 + .procname = "chroot_deny_sysctl",
65670 + .data = &grsec_enable_chroot_sysctl,
65671 + .maxlen = sizeof(int),
65672 + .mode = 0600,
65673 + .proc_handler = &proc_dointvec,
65674 + },
65675 +#endif
65676 +#ifdef CONFIG_GRKERNSEC_TPE
65677 + {
65678 + .procname = "tpe",
65679 + .data = &grsec_enable_tpe,
65680 + .maxlen = sizeof(int),
65681 + .mode = 0600,
65682 + .proc_handler = &proc_dointvec,
65683 + },
65684 + {
65685 + .procname = "tpe_gid",
65686 + .data = &grsec_tpe_gid,
65687 + .maxlen = sizeof(int),
65688 + .mode = 0600,
65689 + .proc_handler = &proc_dointvec,
65690 + },
65691 +#endif
65692 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65693 + {
65694 + .procname = "tpe_invert",
65695 + .data = &grsec_enable_tpe_invert,
65696 + .maxlen = sizeof(int),
65697 + .mode = 0600,
65698 + .proc_handler = &proc_dointvec,
65699 + },
65700 +#endif
65701 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65702 + {
65703 + .procname = "tpe_restrict_all",
65704 + .data = &grsec_enable_tpe_all,
65705 + .maxlen = sizeof(int),
65706 + .mode = 0600,
65707 + .proc_handler = &proc_dointvec,
65708 + },
65709 +#endif
65710 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65711 + {
65712 + .procname = "socket_all",
65713 + .data = &grsec_enable_socket_all,
65714 + .maxlen = sizeof(int),
65715 + .mode = 0600,
65716 + .proc_handler = &proc_dointvec,
65717 + },
65718 + {
65719 + .procname = "socket_all_gid",
65720 + .data = &grsec_socket_all_gid,
65721 + .maxlen = sizeof(int),
65722 + .mode = 0600,
65723 + .proc_handler = &proc_dointvec,
65724 + },
65725 +#endif
65726 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65727 + {
65728 + .procname = "socket_client",
65729 + .data = &grsec_enable_socket_client,
65730 + .maxlen = sizeof(int),
65731 + .mode = 0600,
65732 + .proc_handler = &proc_dointvec,
65733 + },
65734 + {
65735 + .procname = "socket_client_gid",
65736 + .data = &grsec_socket_client_gid,
65737 + .maxlen = sizeof(int),
65738 + .mode = 0600,
65739 + .proc_handler = &proc_dointvec,
65740 + },
65741 +#endif
65742 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65743 + {
65744 + .procname = "socket_server",
65745 + .data = &grsec_enable_socket_server,
65746 + .maxlen = sizeof(int),
65747 + .mode = 0600,
65748 + .proc_handler = &proc_dointvec,
65749 + },
65750 + {
65751 + .procname = "socket_server_gid",
65752 + .data = &grsec_socket_server_gid,
65753 + .maxlen = sizeof(int),
65754 + .mode = 0600,
65755 + .proc_handler = &proc_dointvec,
65756 + },
65757 +#endif
65758 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65759 + {
65760 + .procname = "audit_group",
65761 + .data = &grsec_enable_group,
65762 + .maxlen = sizeof(int),
65763 + .mode = 0600,
65764 + .proc_handler = &proc_dointvec,
65765 + },
65766 + {
65767 + .procname = "audit_gid",
65768 + .data = &grsec_audit_gid,
65769 + .maxlen = sizeof(int),
65770 + .mode = 0600,
65771 + .proc_handler = &proc_dointvec,
65772 + },
65773 +#endif
65774 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65775 + {
65776 + .procname = "audit_chdir",
65777 + .data = &grsec_enable_chdir,
65778 + .maxlen = sizeof(int),
65779 + .mode = 0600,
65780 + .proc_handler = &proc_dointvec,
65781 + },
65782 +#endif
65783 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65784 + {
65785 + .procname = "audit_mount",
65786 + .data = &grsec_enable_mount,
65787 + .maxlen = sizeof(int),
65788 + .mode = 0600,
65789 + .proc_handler = &proc_dointvec,
65790 + },
65791 +#endif
65792 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65793 + {
65794 + .procname = "audit_textrel",
65795 + .data = &grsec_enable_audit_textrel,
65796 + .maxlen = sizeof(int),
65797 + .mode = 0600,
65798 + .proc_handler = &proc_dointvec,
65799 + },
65800 +#endif
65801 +#ifdef CONFIG_GRKERNSEC_DMESG
65802 + {
65803 + .procname = "dmesg",
65804 + .data = &grsec_enable_dmesg,
65805 + .maxlen = sizeof(int),
65806 + .mode = 0600,
65807 + .proc_handler = &proc_dointvec,
65808 + },
65809 +#endif
65810 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65811 + {
65812 + .procname = "chroot_findtask",
65813 + .data = &grsec_enable_chroot_findtask,
65814 + .maxlen = sizeof(int),
65815 + .mode = 0600,
65816 + .proc_handler = &proc_dointvec,
65817 + },
65818 +#endif
65819 +#ifdef CONFIG_GRKERNSEC_RESLOG
65820 + {
65821 + .procname = "resource_logging",
65822 + .data = &grsec_resource_logging,
65823 + .maxlen = sizeof(int),
65824 + .mode = 0600,
65825 + .proc_handler = &proc_dointvec,
65826 + },
65827 +#endif
65828 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65829 + {
65830 + .procname = "audit_ptrace",
65831 + .data = &grsec_enable_audit_ptrace,
65832 + .maxlen = sizeof(int),
65833 + .mode = 0600,
65834 + .proc_handler = &proc_dointvec,
65835 + },
65836 +#endif
65837 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65838 + {
65839 + .procname = "harden_ptrace",
65840 + .data = &grsec_enable_harden_ptrace,
65841 + .maxlen = sizeof(int),
65842 + .mode = 0600,
65843 + .proc_handler = &proc_dointvec,
65844 + },
65845 +#endif
65846 + {
65847 + .procname = "grsec_lock",
65848 + .data = &grsec_lock,
65849 + .maxlen = sizeof(int),
65850 + .mode = 0600,
65851 + .proc_handler = &proc_dointvec,
65852 + },
65853 +#endif
65854 +#ifdef CONFIG_GRKERNSEC_ROFS
65855 + {
65856 + .procname = "romount_protect",
65857 + .data = &grsec_enable_rofs,
65858 + .maxlen = sizeof(int),
65859 + .mode = 0600,
65860 + .proc_handler = &proc_dointvec_minmax,
65861 + .extra1 = &one,
65862 + .extra2 = &one,
65863 + },
65864 +#endif
65865 + { }
65866 +};
65867 +#endif
65868 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65869 new file mode 100644
65870 index 0000000..0dc13c3
65871 --- /dev/null
65872 +++ b/grsecurity/grsec_time.c
65873 @@ -0,0 +1,16 @@
65874 +#include <linux/kernel.h>
65875 +#include <linux/sched.h>
65876 +#include <linux/grinternal.h>
65877 +#include <linux/module.h>
65878 +
65879 +void
65880 +gr_log_timechange(void)
65881 +{
65882 +#ifdef CONFIG_GRKERNSEC_TIME
65883 + if (grsec_enable_time)
65884 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65885 +#endif
65886 + return;
65887 +}
65888 +
65889 +EXPORT_SYMBOL(gr_log_timechange);
65890 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65891 new file mode 100644
65892 index 0000000..ee57dcf
65893 --- /dev/null
65894 +++ b/grsecurity/grsec_tpe.c
65895 @@ -0,0 +1,73 @@
65896 +#include <linux/kernel.h>
65897 +#include <linux/sched.h>
65898 +#include <linux/file.h>
65899 +#include <linux/fs.h>
65900 +#include <linux/grinternal.h>
65901 +
65902 +extern int gr_acl_tpe_check(void);
65903 +
65904 +int
65905 +gr_tpe_allow(const struct file *file)
65906 +{
65907 +#ifdef CONFIG_GRKERNSEC
65908 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65909 + const struct cred *cred = current_cred();
65910 + char *msg = NULL;
65911 + char *msg2 = NULL;
65912 +
65913 + // never restrict root
65914 + if (gr_is_global_root(cred->uid))
65915 + return 1;
65916 +
65917 + if (grsec_enable_tpe) {
65918 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65919 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65920 + msg = "not being in trusted group";
65921 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65922 + msg = "being in untrusted group";
65923 +#else
65924 + if (in_group_p(grsec_tpe_gid))
65925 + msg = "being in untrusted group";
65926 +#endif
65927 + }
65928 + if (!msg && gr_acl_tpe_check())
65929 + msg = "being in untrusted role";
65930 +
65931 + // not in any affected group/role
65932 + if (!msg)
65933 + goto next_check;
65934 +
65935 + if (gr_is_global_nonroot(inode->i_uid))
65936 + msg2 = "file in non-root-owned directory";
65937 + else if (inode->i_mode & S_IWOTH)
65938 + msg2 = "file in world-writable directory";
65939 + else if (inode->i_mode & S_IWGRP)
65940 + msg2 = "file in group-writable directory";
65941 +
65942 + if (msg && msg2) {
65943 + char fullmsg[70] = {0};
65944 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65945 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65946 + return 0;
65947 + }
65948 + msg = NULL;
65949 +next_check:
65950 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65951 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65952 + return 1;
65953 +
65954 + if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
65955 + msg = "directory not owned by user";
65956 + else if (inode->i_mode & S_IWOTH)
65957 + msg = "file in world-writable directory";
65958 + else if (inode->i_mode & S_IWGRP)
65959 + msg = "file in group-writable directory";
65960 +
65961 + if (msg) {
65962 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65963 + return 0;
65964 + }
65965 +#endif
65966 +#endif
65967 + return 1;
65968 +}
65969 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65970 new file mode 100644
65971 index 0000000..9f7b1ac
65972 --- /dev/null
65973 +++ b/grsecurity/grsum.c
65974 @@ -0,0 +1,61 @@
65975 +#include <linux/err.h>
65976 +#include <linux/kernel.h>
65977 +#include <linux/sched.h>
65978 +#include <linux/mm.h>
65979 +#include <linux/scatterlist.h>
65980 +#include <linux/crypto.h>
65981 +#include <linux/gracl.h>
65982 +
65983 +
65984 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65985 +#error "crypto and sha256 must be built into the kernel"
65986 +#endif
65987 +
65988 +int
65989 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65990 +{
65991 + char *p;
65992 + struct crypto_hash *tfm;
65993 + struct hash_desc desc;
65994 + struct scatterlist sg;
65995 + unsigned char temp_sum[GR_SHA_LEN];
65996 + volatile int retval = 0;
65997 + volatile int dummy = 0;
65998 + unsigned int i;
65999 +
66000 + sg_init_table(&sg, 1);
66001 +
66002 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66003 + if (IS_ERR(tfm)) {
66004 + /* should never happen, since sha256 should be built in */
66005 + return 1;
66006 + }
66007 +
66008 + desc.tfm = tfm;
66009 + desc.flags = 0;
66010 +
66011 + crypto_hash_init(&desc);
66012 +
66013 + p = salt;
66014 + sg_set_buf(&sg, p, GR_SALT_LEN);
66015 + crypto_hash_update(&desc, &sg, sg.length);
66016 +
66017 + p = entry->pw;
66018 + sg_set_buf(&sg, p, strlen(p));
66019 +
66020 + crypto_hash_update(&desc, &sg, sg.length);
66021 +
66022 + crypto_hash_final(&desc, temp_sum);
66023 +
66024 + memset(entry->pw, 0, GR_PW_LEN);
66025 +
66026 + for (i = 0; i < GR_SHA_LEN; i++)
66027 + if (sum[i] != temp_sum[i])
66028 + retval = 1;
66029 + else
66030 + dummy = 1; // waste a cycle
66031 +
66032 + crypto_free_hash(tfm);
66033 +
66034 + return retval;
66035 +}
66036 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
66037 index 77ff547..181834f 100644
66038 --- a/include/asm-generic/4level-fixup.h
66039 +++ b/include/asm-generic/4level-fixup.h
66040 @@ -13,8 +13,10 @@
66041 #define pmd_alloc(mm, pud, address) \
66042 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
66043 NULL: pmd_offset(pud, address))
66044 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
66045
66046 #define pud_alloc(mm, pgd, address) (pgd)
66047 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
66048 #define pud_offset(pgd, start) (pgd)
66049 #define pud_none(pud) 0
66050 #define pud_bad(pud) 0
66051 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
66052 index b7babf0..04ad282 100644
66053 --- a/include/asm-generic/atomic-long.h
66054 +++ b/include/asm-generic/atomic-long.h
66055 @@ -22,6 +22,12 @@
66056
66057 typedef atomic64_t atomic_long_t;
66058
66059 +#ifdef CONFIG_PAX_REFCOUNT
66060 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
66061 +#else
66062 +typedef atomic64_t atomic_long_unchecked_t;
66063 +#endif
66064 +
66065 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
66066
66067 static inline long atomic_long_read(atomic_long_t *l)
66068 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66069 return (long)atomic64_read(v);
66070 }
66071
66072 +#ifdef CONFIG_PAX_REFCOUNT
66073 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66074 +{
66075 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66076 +
66077 + return (long)atomic64_read_unchecked(v);
66078 +}
66079 +#endif
66080 +
66081 static inline void atomic_long_set(atomic_long_t *l, long i)
66082 {
66083 atomic64_t *v = (atomic64_t *)l;
66084 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66085 atomic64_set(v, i);
66086 }
66087
66088 +#ifdef CONFIG_PAX_REFCOUNT
66089 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66090 +{
66091 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66092 +
66093 + atomic64_set_unchecked(v, i);
66094 +}
66095 +#endif
66096 +
66097 static inline void atomic_long_inc(atomic_long_t *l)
66098 {
66099 atomic64_t *v = (atomic64_t *)l;
66100 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66101 atomic64_inc(v);
66102 }
66103
66104 +#ifdef CONFIG_PAX_REFCOUNT
66105 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66106 +{
66107 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66108 +
66109 + atomic64_inc_unchecked(v);
66110 +}
66111 +#endif
66112 +
66113 static inline void atomic_long_dec(atomic_long_t *l)
66114 {
66115 atomic64_t *v = (atomic64_t *)l;
66116 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66117 atomic64_dec(v);
66118 }
66119
66120 +#ifdef CONFIG_PAX_REFCOUNT
66121 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66122 +{
66123 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66124 +
66125 + atomic64_dec_unchecked(v);
66126 +}
66127 +#endif
66128 +
66129 static inline void atomic_long_add(long i, atomic_long_t *l)
66130 {
66131 atomic64_t *v = (atomic64_t *)l;
66132 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66133 atomic64_add(i, v);
66134 }
66135
66136 +#ifdef CONFIG_PAX_REFCOUNT
66137 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66138 +{
66139 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66140 +
66141 + atomic64_add_unchecked(i, v);
66142 +}
66143 +#endif
66144 +
66145 static inline void atomic_long_sub(long i, atomic_long_t *l)
66146 {
66147 atomic64_t *v = (atomic64_t *)l;
66148 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66149 atomic64_sub(i, v);
66150 }
66151
66152 +#ifdef CONFIG_PAX_REFCOUNT
66153 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66154 +{
66155 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66156 +
66157 + atomic64_sub_unchecked(i, v);
66158 +}
66159 +#endif
66160 +
66161 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66162 {
66163 atomic64_t *v = (atomic64_t *)l;
66164 @@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66165 return (long)atomic64_add_return(i, v);
66166 }
66167
66168 +#ifdef CONFIG_PAX_REFCOUNT
66169 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66170 +{
66171 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66172 +
66173 + return (long)atomic64_add_return_unchecked(i, v);
66174 +}
66175 +#endif
66176 +
66177 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66178 {
66179 atomic64_t *v = (atomic64_t *)l;
66180 @@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66181 return (long)atomic64_inc_return(v);
66182 }
66183
66184 +#ifdef CONFIG_PAX_REFCOUNT
66185 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66186 +{
66187 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66188 +
66189 + return (long)atomic64_inc_return_unchecked(v);
66190 +}
66191 +#endif
66192 +
66193 static inline long atomic_long_dec_return(atomic_long_t *l)
66194 {
66195 atomic64_t *v = (atomic64_t *)l;
66196 @@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66197
66198 typedef atomic_t atomic_long_t;
66199
66200 +#ifdef CONFIG_PAX_REFCOUNT
66201 +typedef atomic_unchecked_t atomic_long_unchecked_t;
66202 +#else
66203 +typedef atomic_t atomic_long_unchecked_t;
66204 +#endif
66205 +
66206 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
66207 static inline long atomic_long_read(atomic_long_t *l)
66208 {
66209 @@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66210 return (long)atomic_read(v);
66211 }
66212
66213 +#ifdef CONFIG_PAX_REFCOUNT
66214 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66215 +{
66216 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66217 +
66218 + return (long)atomic_read_unchecked(v);
66219 +}
66220 +#endif
66221 +
66222 static inline void atomic_long_set(atomic_long_t *l, long i)
66223 {
66224 atomic_t *v = (atomic_t *)l;
66225 @@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66226 atomic_set(v, i);
66227 }
66228
66229 +#ifdef CONFIG_PAX_REFCOUNT
66230 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66231 +{
66232 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66233 +
66234 + atomic_set_unchecked(v, i);
66235 +}
66236 +#endif
66237 +
66238 static inline void atomic_long_inc(atomic_long_t *l)
66239 {
66240 atomic_t *v = (atomic_t *)l;
66241 @@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66242 atomic_inc(v);
66243 }
66244
66245 +#ifdef CONFIG_PAX_REFCOUNT
66246 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66247 +{
66248 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66249 +
66250 + atomic_inc_unchecked(v);
66251 +}
66252 +#endif
66253 +
66254 static inline void atomic_long_dec(atomic_long_t *l)
66255 {
66256 atomic_t *v = (atomic_t *)l;
66257 @@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66258 atomic_dec(v);
66259 }
66260
66261 +#ifdef CONFIG_PAX_REFCOUNT
66262 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66263 +{
66264 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66265 +
66266 + atomic_dec_unchecked(v);
66267 +}
66268 +#endif
66269 +
66270 static inline void atomic_long_add(long i, atomic_long_t *l)
66271 {
66272 atomic_t *v = (atomic_t *)l;
66273 @@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66274 atomic_add(i, v);
66275 }
66276
66277 +#ifdef CONFIG_PAX_REFCOUNT
66278 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66279 +{
66280 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66281 +
66282 + atomic_add_unchecked(i, v);
66283 +}
66284 +#endif
66285 +
66286 static inline void atomic_long_sub(long i, atomic_long_t *l)
66287 {
66288 atomic_t *v = (atomic_t *)l;
66289 @@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66290 atomic_sub(i, v);
66291 }
66292
66293 +#ifdef CONFIG_PAX_REFCOUNT
66294 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66295 +{
66296 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66297 +
66298 + atomic_sub_unchecked(i, v);
66299 +}
66300 +#endif
66301 +
66302 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66303 {
66304 atomic_t *v = (atomic_t *)l;
66305 @@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66306 return (long)atomic_add_return(i, v);
66307 }
66308
66309 +#ifdef CONFIG_PAX_REFCOUNT
66310 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66311 +{
66312 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66313 +
66314 + return (long)atomic_add_return_unchecked(i, v);
66315 +}
66316 +
66317 +#endif
66318 +
66319 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66320 {
66321 atomic_t *v = (atomic_t *)l;
66322 @@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66323 return (long)atomic_inc_return(v);
66324 }
66325
66326 +#ifdef CONFIG_PAX_REFCOUNT
66327 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66328 +{
66329 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66330 +
66331 + return (long)atomic_inc_return_unchecked(v);
66332 +}
66333 +#endif
66334 +
66335 static inline long atomic_long_dec_return(atomic_long_t *l)
66336 {
66337 atomic_t *v = (atomic_t *)l;
66338 @@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66339
66340 #endif /* BITS_PER_LONG == 64 */
66341
66342 +#ifdef CONFIG_PAX_REFCOUNT
66343 +static inline void pax_refcount_needs_these_functions(void)
66344 +{
66345 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
66346 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
66347 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
66348 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
66349 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
66350 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
66351 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
66352 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
66353 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
66354 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
66355 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
66356 +#ifdef CONFIG_X86
66357 + atomic_clear_mask_unchecked(0, NULL);
66358 + atomic_set_mask_unchecked(0, NULL);
66359 +#endif
66360 +
66361 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
66362 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
66363 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
66364 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
66365 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
66366 + atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
66367 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
66368 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
66369 +}
66370 +#else
66371 +#define atomic_read_unchecked(v) atomic_read(v)
66372 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
66373 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
66374 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
66375 +#define atomic_inc_unchecked(v) atomic_inc(v)
66376 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
66377 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
66378 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
66379 +#define atomic_dec_unchecked(v) atomic_dec(v)
66380 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
66381 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
66382 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
66383 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
66384 +
66385 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
66386 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
66387 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
66388 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
66389 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
66390 +#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
66391 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
66392 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
66393 +#endif
66394 +
66395 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
66396 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
66397 index 33bd2de..f31bff97 100644
66398 --- a/include/asm-generic/atomic.h
66399 +++ b/include/asm-generic/atomic.h
66400 @@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
66401 * Atomically clears the bits set in @mask from @v
66402 */
66403 #ifndef atomic_clear_mask
66404 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
66405 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
66406 {
66407 unsigned long flags;
66408
66409 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
66410 index b18ce4f..2ee2843 100644
66411 --- a/include/asm-generic/atomic64.h
66412 +++ b/include/asm-generic/atomic64.h
66413 @@ -16,6 +16,8 @@ typedef struct {
66414 long long counter;
66415 } atomic64_t;
66416
66417 +typedef atomic64_t atomic64_unchecked_t;
66418 +
66419 #define ATOMIC64_INIT(i) { (i) }
66420
66421 extern long long atomic64_read(const atomic64_t *v);
66422 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
66423 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
66424 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
66425
66426 +#define atomic64_read_unchecked(v) atomic64_read(v)
66427 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
66428 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
66429 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
66430 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
66431 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
66432 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
66433 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
66434 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
66435 +
66436 #endif /* _ASM_GENERIC_ATOMIC64_H */
66437 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
66438 index 1bfcfe5..e04c5c9 100644
66439 --- a/include/asm-generic/cache.h
66440 +++ b/include/asm-generic/cache.h
66441 @@ -6,7 +6,7 @@
66442 * cache lines need to provide their own cache.h.
66443 */
66444
66445 -#define L1_CACHE_SHIFT 5
66446 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
66447 +#define L1_CACHE_SHIFT 5UL
66448 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
66449
66450 #endif /* __ASM_GENERIC_CACHE_H */
66451 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
66452 index 0d68a1e..b74a761 100644
66453 --- a/include/asm-generic/emergency-restart.h
66454 +++ b/include/asm-generic/emergency-restart.h
66455 @@ -1,7 +1,7 @@
66456 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
66457 #define _ASM_GENERIC_EMERGENCY_RESTART_H
66458
66459 -static inline void machine_emergency_restart(void)
66460 +static inline __noreturn void machine_emergency_restart(void)
66461 {
66462 machine_restart(NULL);
66463 }
66464 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
66465 index 90f99c7..00ce236 100644
66466 --- a/include/asm-generic/kmap_types.h
66467 +++ b/include/asm-generic/kmap_types.h
66468 @@ -2,9 +2,9 @@
66469 #define _ASM_GENERIC_KMAP_TYPES_H
66470
66471 #ifdef __WITH_KM_FENCE
66472 -# define KM_TYPE_NR 41
66473 +# define KM_TYPE_NR 42
66474 #else
66475 -# define KM_TYPE_NR 20
66476 +# define KM_TYPE_NR 21
66477 #endif
66478
66479 #endif
66480 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
66481 index 9ceb03b..62b0b8f 100644
66482 --- a/include/asm-generic/local.h
66483 +++ b/include/asm-generic/local.h
66484 @@ -23,24 +23,37 @@ typedef struct
66485 atomic_long_t a;
66486 } local_t;
66487
66488 +typedef struct {
66489 + atomic_long_unchecked_t a;
66490 +} local_unchecked_t;
66491 +
66492 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
66493
66494 #define local_read(l) atomic_long_read(&(l)->a)
66495 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
66496 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
66497 +#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
66498 #define local_inc(l) atomic_long_inc(&(l)->a)
66499 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
66500 #define local_dec(l) atomic_long_dec(&(l)->a)
66501 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
66502 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
66503 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
66504 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
66505 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
66506
66507 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
66508 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
66509 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
66510 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
66511 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
66512 +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
66513 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
66514 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
66515 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
66516
66517 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66518 +#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66519 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
66520 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
66521 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
66522 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66523 index 725612b..9cc513a 100644
66524 --- a/include/asm-generic/pgtable-nopmd.h
66525 +++ b/include/asm-generic/pgtable-nopmd.h
66526 @@ -1,14 +1,19 @@
66527 #ifndef _PGTABLE_NOPMD_H
66528 #define _PGTABLE_NOPMD_H
66529
66530 -#ifndef __ASSEMBLY__
66531 -
66532 #include <asm-generic/pgtable-nopud.h>
66533
66534 -struct mm_struct;
66535 -
66536 #define __PAGETABLE_PMD_FOLDED
66537
66538 +#define PMD_SHIFT PUD_SHIFT
66539 +#define PTRS_PER_PMD 1
66540 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66541 +#define PMD_MASK (~(PMD_SIZE-1))
66542 +
66543 +#ifndef __ASSEMBLY__
66544 +
66545 +struct mm_struct;
66546 +
66547 /*
66548 * Having the pmd type consist of a pud gets the size right, and allows
66549 * us to conceptually access the pud entry that this pmd is folded into
66550 @@ -16,11 +21,6 @@ struct mm_struct;
66551 */
66552 typedef struct { pud_t pud; } pmd_t;
66553
66554 -#define PMD_SHIFT PUD_SHIFT
66555 -#define PTRS_PER_PMD 1
66556 -#define PMD_SIZE (1UL << PMD_SHIFT)
66557 -#define PMD_MASK (~(PMD_SIZE-1))
66558 -
66559 /*
66560 * The "pud_xxx()" functions here are trivial for a folded two-level
66561 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66562 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66563 index 810431d..0ec4804f 100644
66564 --- a/include/asm-generic/pgtable-nopud.h
66565 +++ b/include/asm-generic/pgtable-nopud.h
66566 @@ -1,10 +1,15 @@
66567 #ifndef _PGTABLE_NOPUD_H
66568 #define _PGTABLE_NOPUD_H
66569
66570 -#ifndef __ASSEMBLY__
66571 -
66572 #define __PAGETABLE_PUD_FOLDED
66573
66574 +#define PUD_SHIFT PGDIR_SHIFT
66575 +#define PTRS_PER_PUD 1
66576 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66577 +#define PUD_MASK (~(PUD_SIZE-1))
66578 +
66579 +#ifndef __ASSEMBLY__
66580 +
66581 /*
66582 * Having the pud type consist of a pgd gets the size right, and allows
66583 * us to conceptually access the pgd entry that this pud is folded into
66584 @@ -12,11 +17,6 @@
66585 */
66586 typedef struct { pgd_t pgd; } pud_t;
66587
66588 -#define PUD_SHIFT PGDIR_SHIFT
66589 -#define PTRS_PER_PUD 1
66590 -#define PUD_SIZE (1UL << PUD_SHIFT)
66591 -#define PUD_MASK (~(PUD_SIZE-1))
66592 -
66593 /*
66594 * The "pgd_xxx()" functions here are trivial for a folded two-level
66595 * setup: the pud is never bad, and a pud always exists (as it's folded
66596 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
66597 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
66598
66599 #define pgd_populate(mm, pgd, pud) do { } while (0)
66600 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
66601 /*
66602 * (puds are folded into pgds so this doesn't get actually called,
66603 * but the define is needed for a generic inline function.)
66604 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66605 index a59ff51..2594a70 100644
66606 --- a/include/asm-generic/pgtable.h
66607 +++ b/include/asm-generic/pgtable.h
66608 @@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
66609 }
66610 #endif /* CONFIG_NUMA_BALANCING */
66611
66612 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66613 +static inline unsigned long pax_open_kernel(void) { return 0; }
66614 +#endif
66615 +
66616 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66617 +static inline unsigned long pax_close_kernel(void) { return 0; }
66618 +#endif
66619 +
66620 #endif /* CONFIG_MMU */
66621
66622 #endif /* !__ASSEMBLY__ */
66623 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66624 index afa12c7..99d4da0 100644
66625 --- a/include/asm-generic/vmlinux.lds.h
66626 +++ b/include/asm-generic/vmlinux.lds.h
66627 @@ -245,6 +245,7 @@
66628 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66629 VMLINUX_SYMBOL(__start_rodata) = .; \
66630 *(.rodata) *(.rodata.*) \
66631 + *(.data..read_only) \
66632 *(__vermagic) /* Kernel version magic */ \
66633 . = ALIGN(8); \
66634 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
66635 @@ -755,17 +756,18 @@
66636 * section in the linker script will go there too. @phdr should have
66637 * a leading colon.
66638 *
66639 - * Note that this macros defines __per_cpu_load as an absolute symbol.
66640 + * Note that this macros defines per_cpu_load as an absolute symbol.
66641 * If there is no need to put the percpu section at a predetermined
66642 * address, use PERCPU_SECTION.
66643 */
66644 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
66645 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
66646 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66647 + per_cpu_load = .; \
66648 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66649 - LOAD_OFFSET) { \
66650 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66651 PERCPU_INPUT(cacheline) \
66652 } phdr \
66653 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
66654 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
66655
66656 /**
66657 * PERCPU_SECTION - define output section for percpu area, simple version
66658 diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
66659 index 418d270..bfd2794 100644
66660 --- a/include/crypto/algapi.h
66661 +++ b/include/crypto/algapi.h
66662 @@ -34,7 +34,7 @@ struct crypto_type {
66663 unsigned int maskclear;
66664 unsigned int maskset;
66665 unsigned int tfmsize;
66666 -};
66667 +} __do_const;
66668
66669 struct crypto_instance {
66670 struct crypto_alg alg;
66671 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66672 index f1ce786..086a7a5 100644
66673 --- a/include/drm/drmP.h
66674 +++ b/include/drm/drmP.h
66675 @@ -72,6 +72,7 @@
66676 #include <linux/workqueue.h>
66677 #include <linux/poll.h>
66678 #include <asm/pgalloc.h>
66679 +#include <asm/local.h>
66680 #include <drm/drm.h>
66681 #include <drm/drm_sarea.h>
66682
66683 @@ -296,10 +297,12 @@ do { \
66684 * \param cmd command.
66685 * \param arg argument.
66686 */
66687 -typedef int drm_ioctl_t(struct drm_device *dev, void *data,
66688 +typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
66689 + struct drm_file *file_priv);
66690 +typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
66691 struct drm_file *file_priv);
66692
66693 -typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66694 +typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
66695 unsigned long arg);
66696
66697 #define DRM_IOCTL_NR(n) _IOC_NR(n)
66698 @@ -314,9 +317,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66699 struct drm_ioctl_desc {
66700 unsigned int cmd;
66701 int flags;
66702 - drm_ioctl_t *func;
66703 + drm_ioctl_t func;
66704 unsigned int cmd_drv;
66705 -};
66706 +} __do_const;
66707
66708 /**
66709 * Creates a driver or general drm_ioctl_desc array entry for the given
66710 @@ -1014,7 +1017,7 @@ struct drm_info_list {
66711 int (*show)(struct seq_file*, void*); /** show callback */
66712 u32 driver_features; /**< Required driver features for this entry */
66713 void *data;
66714 -};
66715 +} __do_const;
66716
66717 /**
66718 * debugfs node structure. This structure represents a debugfs file.
66719 @@ -1087,7 +1090,7 @@ struct drm_device {
66720
66721 /** \name Usage Counters */
66722 /*@{ */
66723 - int open_count; /**< Outstanding files open */
66724 + local_t open_count; /**< Outstanding files open */
66725 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66726 atomic_t vma_count; /**< Outstanding vma areas open */
66727 int buf_use; /**< Buffers in use -- cannot alloc */
66728 @@ -1098,7 +1101,7 @@ struct drm_device {
66729 /*@{ */
66730 unsigned long counters;
66731 enum drm_stat_type types[15];
66732 - atomic_t counts[15];
66733 + atomic_unchecked_t counts[15];
66734 /*@} */
66735
66736 struct list_head filelist;
66737 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66738 index f43d556..94d9343 100644
66739 --- a/include/drm/drm_crtc_helper.h
66740 +++ b/include/drm/drm_crtc_helper.h
66741 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
66742 struct drm_connector *connector);
66743 /* disable encoder when not in use - more explicit than dpms off */
66744 void (*disable)(struct drm_encoder *encoder);
66745 -};
66746 +} __no_const;
66747
66748 /**
66749 * drm_connector_helper_funcs - helper operations for connectors
66750 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66751 index 72dcbe8..8db58d7 100644
66752 --- a/include/drm/ttm/ttm_memory.h
66753 +++ b/include/drm/ttm/ttm_memory.h
66754 @@ -48,7 +48,7 @@
66755
66756 struct ttm_mem_shrink {
66757 int (*do_shrink) (struct ttm_mem_shrink *);
66758 -};
66759 +} __no_const;
66760
66761 /**
66762 * struct ttm_mem_global - Global memory accounting structure.
66763 diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
66764 index 4b840e8..155d235 100644
66765 --- a/include/keys/asymmetric-subtype.h
66766 +++ b/include/keys/asymmetric-subtype.h
66767 @@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
66768 /* Verify the signature on a key of this subtype (optional) */
66769 int (*verify_signature)(const struct key *key,
66770 const struct public_key_signature *sig);
66771 -};
66772 +} __do_const;
66773
66774 /**
66775 * asymmetric_key_subtype - Get the subtype from an asymmetric key
66776 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66777 index c1da539..1dcec55 100644
66778 --- a/include/linux/atmdev.h
66779 +++ b/include/linux/atmdev.h
66780 @@ -28,7 +28,7 @@ struct compat_atm_iobuf {
66781 #endif
66782
66783 struct k_atm_aal_stats {
66784 -#define __HANDLE_ITEM(i) atomic_t i
66785 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
66786 __AAL_STAT_ITEMS
66787 #undef __HANDLE_ITEM
66788 };
66789 @@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
66790 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
66791 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
66792 struct module *owner;
66793 -};
66794 +} __do_const ;
66795
66796 struct atmphy_ops {
66797 int (*start)(struct atm_dev *dev);
66798 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66799 index c3a0914..ec5d48a 100644
66800 --- a/include/linux/binfmts.h
66801 +++ b/include/linux/binfmts.h
66802 @@ -73,8 +73,9 @@ struct linux_binfmt {
66803 int (*load_binary)(struct linux_binprm *);
66804 int (*load_shlib)(struct file *);
66805 int (*core_dump)(struct coredump_params *cprm);
66806 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66807 unsigned long min_coredump; /* minimal dump size */
66808 -};
66809 +} __do_const;
66810
66811 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
66812
66813 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66814 index 33f358f..7f2c27f 100644
66815 --- a/include/linux/blkdev.h
66816 +++ b/include/linux/blkdev.h
66817 @@ -1499,7 +1499,7 @@ struct block_device_operations {
66818 /* this callback is with swap_lock and sometimes page table lock held */
66819 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
66820 struct module *owner;
66821 -};
66822 +} __do_const;
66823
66824 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66825 unsigned long);
66826 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66827 index 7c2e030..b72475d 100644
66828 --- a/include/linux/blktrace_api.h
66829 +++ b/include/linux/blktrace_api.h
66830 @@ -23,7 +23,7 @@ struct blk_trace {
66831 struct dentry *dir;
66832 struct dentry *dropped_file;
66833 struct dentry *msg_file;
66834 - atomic_t dropped;
66835 + atomic_unchecked_t dropped;
66836 };
66837
66838 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66839 diff --git a/include/linux/cache.h b/include/linux/cache.h
66840 index 4c57065..4307975 100644
66841 --- a/include/linux/cache.h
66842 +++ b/include/linux/cache.h
66843 @@ -16,6 +16,10 @@
66844 #define __read_mostly
66845 #endif
66846
66847 +#ifndef __read_only
66848 +#define __read_only __read_mostly
66849 +#endif
66850 +
66851 #ifndef ____cacheline_aligned
66852 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66853 #endif
66854 diff --git a/include/linux/capability.h b/include/linux/capability.h
66855 index d9a4f7f4..19f77d6 100644
66856 --- a/include/linux/capability.h
66857 +++ b/include/linux/capability.h
66858 @@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
66859 extern bool nsown_capable(int cap);
66860 extern bool inode_capable(const struct inode *inode, int cap);
66861 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
66862 +extern bool capable_nolog(int cap);
66863 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
66864 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
66865
66866 /* audit system wants to get cap info from files as well */
66867 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
66868
66869 +extern int is_privileged_binary(const struct dentry *dentry);
66870 +
66871 #endif /* !_LINUX_CAPABILITY_H */
66872 diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
66873 index 8609d57..86e4d79 100644
66874 --- a/include/linux/cdrom.h
66875 +++ b/include/linux/cdrom.h
66876 @@ -87,7 +87,6 @@ struct cdrom_device_ops {
66877
66878 /* driver specifications */
66879 const int capability; /* capability flags */
66880 - int n_minors; /* number of active minor devices */
66881 /* handle uniform packets for scsi type devices (scsi,atapi) */
66882 int (*generic_packet) (struct cdrom_device_info *,
66883 struct packet_command *);
66884 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
66885 index 42e55de..1cd0e66 100644
66886 --- a/include/linux/cleancache.h
66887 +++ b/include/linux/cleancache.h
66888 @@ -31,7 +31,7 @@ struct cleancache_ops {
66889 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
66890 void (*invalidate_inode)(int, struct cleancache_filekey);
66891 void (*invalidate_fs)(int);
66892 -};
66893 +} __no_const;
66894
66895 extern struct cleancache_ops
66896 cleancache_register_ops(struct cleancache_ops *ops);
66897 diff --git a/include/linux/compat.h b/include/linux/compat.h
66898 index 377cd8c..2479845 100644
66899 --- a/include/linux/compat.h
66900 +++ b/include/linux/compat.h
66901 @@ -332,14 +332,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
66902 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
66903 int version, void __user *uptr);
66904 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
66905 - void __user *uptr);
66906 + void __user *uptr) __intentional_overflow(0);
66907 #else
66908 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
66909 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
66910 compat_ssize_t msgsz, int msgflg);
66911 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
66912 compat_ssize_t msgsz, long msgtyp, int msgflg);
66913 -long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
66914 +long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
66915 #endif
66916 long compat_sys_msgctl(int first, int second, void __user *uptr);
66917 long compat_sys_shmctl(int first, int second, void __user *uptr);
66918 @@ -442,7 +442,7 @@ extern int compat_ptrace_request(struct task_struct *child,
66919 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
66920 compat_ulong_t addr, compat_ulong_t data);
66921 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
66922 - compat_long_t addr, compat_long_t data);
66923 + compat_ulong_t addr, compat_ulong_t data);
66924
66925 /*
66926 * epoll (fs/eventpoll.c) compat bits follow ...
66927 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66928 index 68b162d..660f5f0 100644
66929 --- a/include/linux/compiler-gcc4.h
66930 +++ b/include/linux/compiler-gcc4.h
66931 @@ -39,9 +39,29 @@
66932 # define __compiletime_warning(message) __attribute__((warning(message)))
66933 # define __compiletime_error(message) __attribute__((error(message)))
66934 #endif /* __CHECKER__ */
66935 +
66936 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66937 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66938 +#define __bos0(ptr) __bos((ptr), 0)
66939 +#define __bos1(ptr) __bos((ptr), 1)
66940 #endif /* GCC_VERSION >= 40300 */
66941
66942 #if GCC_VERSION >= 40500
66943 +
66944 +#ifdef CONSTIFY_PLUGIN
66945 +#define __no_const __attribute__((no_const))
66946 +#define __do_const __attribute__((do_const))
66947 +#endif
66948 +
66949 +#ifdef SIZE_OVERFLOW_PLUGIN
66950 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
66951 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
66952 +#endif
66953 +
66954 +#ifdef LATENT_ENTROPY_PLUGIN
66955 +#define __latent_entropy __attribute__((latent_entropy))
66956 +#endif
66957 +
66958 /*
66959 * Mark a position in code as unreachable. This can be used to
66960 * suppress control flow warnings after asm blocks that transfer
66961 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66962 index 10b8f23..5e0b083 100644
66963 --- a/include/linux/compiler.h
66964 +++ b/include/linux/compiler.h
66965 @@ -5,11 +5,14 @@
66966
66967 #ifdef __CHECKER__
66968 # define __user __attribute__((noderef, address_space(1)))
66969 +# define __force_user __force __user
66970 # define __kernel __attribute__((address_space(0)))
66971 +# define __force_kernel __force __kernel
66972 # define __safe __attribute__((safe))
66973 # define __force __attribute__((force))
66974 # define __nocast __attribute__((nocast))
66975 # define __iomem __attribute__((noderef, address_space(2)))
66976 +# define __force_iomem __force __iomem
66977 # define __must_hold(x) __attribute__((context(x,1,1)))
66978 # define __acquires(x) __attribute__((context(x,0,1)))
66979 # define __releases(x) __attribute__((context(x,1,0)))
66980 @@ -17,20 +20,37 @@
66981 # define __release(x) __context__(x,-1)
66982 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66983 # define __percpu __attribute__((noderef, address_space(3)))
66984 +# define __force_percpu __force __percpu
66985 #ifdef CONFIG_SPARSE_RCU_POINTER
66986 # define __rcu __attribute__((noderef, address_space(4)))
66987 +# define __force_rcu __force __rcu
66988 #else
66989 # define __rcu
66990 +# define __force_rcu
66991 #endif
66992 extern void __chk_user_ptr(const volatile void __user *);
66993 extern void __chk_io_ptr(const volatile void __iomem *);
66994 #else
66995 -# define __user
66996 -# define __kernel
66997 +# ifdef CHECKER_PLUGIN
66998 +//# define __user
66999 +//# define __force_user
67000 +//# define __kernel
67001 +//# define __force_kernel
67002 +# else
67003 +# ifdef STRUCTLEAK_PLUGIN
67004 +# define __user __attribute__((user))
67005 +# else
67006 +# define __user
67007 +# endif
67008 +# define __force_user
67009 +# define __kernel
67010 +# define __force_kernel
67011 +# endif
67012 # define __safe
67013 # define __force
67014 # define __nocast
67015 # define __iomem
67016 +# define __force_iomem
67017 # define __chk_user_ptr(x) (void)0
67018 # define __chk_io_ptr(x) (void)0
67019 # define __builtin_warning(x, y...) (1)
67020 @@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
67021 # define __release(x) (void)0
67022 # define __cond_lock(x,c) (c)
67023 # define __percpu
67024 +# define __force_percpu
67025 # define __rcu
67026 +# define __force_rcu
67027 #endif
67028
67029 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
67030 @@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67031 # define __attribute_const__ /* unimplemented */
67032 #endif
67033
67034 +#ifndef __no_const
67035 +# define __no_const
67036 +#endif
67037 +
67038 +#ifndef __do_const
67039 +# define __do_const
67040 +#endif
67041 +
67042 +#ifndef __size_overflow
67043 +# define __size_overflow(...)
67044 +#endif
67045 +
67046 +#ifndef __intentional_overflow
67047 +# define __intentional_overflow(...)
67048 +#endif
67049 +
67050 +#ifndef __latent_entropy
67051 +# define __latent_entropy
67052 +#endif
67053 +
67054 /*
67055 * Tell gcc if a function is cold. The compiler will assume any path
67056 * directly leading to the call is unlikely.
67057 @@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67058 #define __cold
67059 #endif
67060
67061 +#ifndef __alloc_size
67062 +#define __alloc_size(...)
67063 +#endif
67064 +
67065 +#ifndef __bos
67066 +#define __bos(ptr, arg)
67067 +#endif
67068 +
67069 +#ifndef __bos0
67070 +#define __bos0(ptr)
67071 +#endif
67072 +
67073 +#ifndef __bos1
67074 +#define __bos1(ptr)
67075 +#endif
67076 +
67077 /* Simple shorthand for a section definition */
67078 #ifndef __section
67079 # define __section(S) __attribute__ ((__section__(#S)))
67080 @@ -349,6 +407,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67081 * use is to mediate communication between process-level code and irq/NMI
67082 * handlers, all running on the same CPU.
67083 */
67084 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
67085 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
67086 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
67087
67088 #endif /* __LINUX_COMPILER_H */
67089 diff --git a/include/linux/completion.h b/include/linux/completion.h
67090 index 33f0280..35c6568 100644
67091 --- a/include/linux/completion.h
67092 +++ b/include/linux/completion.h
67093 @@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
67094 extern void wait_for_completion(struct completion *);
67095 extern void wait_for_completion_io(struct completion *);
67096 extern int wait_for_completion_interruptible(struct completion *x);
67097 -extern int wait_for_completion_killable(struct completion *x);
67098 +extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
67099 extern unsigned long wait_for_completion_timeout(struct completion *x,
67100 unsigned long timeout);
67101 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
67102 unsigned long timeout);
67103 extern long wait_for_completion_interruptible_timeout(
67104 - struct completion *x, unsigned long timeout);
67105 + struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67106 extern long wait_for_completion_killable_timeout(
67107 - struct completion *x, unsigned long timeout);
67108 + struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67109 extern bool try_wait_for_completion(struct completion *x);
67110 extern bool completion_done(struct completion *x);
67111
67112 diff --git a/include/linux/configfs.h b/include/linux/configfs.h
67113 index 34025df..d94bbbc 100644
67114 --- a/include/linux/configfs.h
67115 +++ b/include/linux/configfs.h
67116 @@ -125,7 +125,7 @@ struct configfs_attribute {
67117 const char *ca_name;
67118 struct module *ca_owner;
67119 umode_t ca_mode;
67120 -};
67121 +} __do_const;
67122
67123 /*
67124 * Users often need to create attribute structures for their configurable
67125 diff --git a/include/linux/cpu.h b/include/linux/cpu.h
67126 index ce7a074..01ab8ac 100644
67127 --- a/include/linux/cpu.h
67128 +++ b/include/linux/cpu.h
67129 @@ -115,7 +115,7 @@ enum {
67130 /* Need to know about CPUs going up/down? */
67131 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
67132 #define cpu_notifier(fn, pri) { \
67133 - static struct notifier_block fn##_nb __cpuinitdata = \
67134 + static struct notifier_block fn##_nb = \
67135 { .notifier_call = fn, .priority = pri }; \
67136 register_cpu_notifier(&fn##_nb); \
67137 }
67138 diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
67139 index a22944c..4e695fe 100644
67140 --- a/include/linux/cpufreq.h
67141 +++ b/include/linux/cpufreq.h
67142 @@ -252,7 +252,7 @@ struct cpufreq_driver {
67143 int (*suspend) (struct cpufreq_policy *policy);
67144 int (*resume) (struct cpufreq_policy *policy);
67145 struct freq_attr **attr;
67146 -};
67147 +} __do_const;
67148
67149 /* flags */
67150
67151 @@ -311,6 +311,7 @@ struct global_attr {
67152 ssize_t (*store)(struct kobject *a, struct attribute *b,
67153 const char *c, size_t count);
67154 };
67155 +typedef struct global_attr __no_const global_attr_no_const;
67156
67157 #define define_one_global_ro(_name) \
67158 static struct global_attr _name = \
67159 diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
67160 index 480c14d..552896f 100644
67161 --- a/include/linux/cpuidle.h
67162 +++ b/include/linux/cpuidle.h
67163 @@ -52,7 +52,8 @@ struct cpuidle_state {
67164 int index);
67165
67166 int (*enter_dead) (struct cpuidle_device *dev, int index);
67167 -};
67168 +} __do_const;
67169 +typedef struct cpuidle_state __no_const cpuidle_state_no_const;
67170
67171 /* Idle State Flags */
67172 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
67173 @@ -194,7 +195,7 @@ struct cpuidle_governor {
67174 void (*reflect) (struct cpuidle_device *dev, int index);
67175
67176 struct module *owner;
67177 -};
67178 +} __do_const;
67179
67180 #ifdef CONFIG_CPU_IDLE
67181
67182 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
67183 index 0325602..5e9feff 100644
67184 --- a/include/linux/cpumask.h
67185 +++ b/include/linux/cpumask.h
67186 @@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67187 }
67188
67189 /* Valid inputs for n are -1 and 0. */
67190 -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67191 +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67192 {
67193 return n+1;
67194 }
67195
67196 -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67197 +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67198 {
67199 return n+1;
67200 }
67201
67202 -static inline unsigned int cpumask_next_and(int n,
67203 +static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
67204 const struct cpumask *srcp,
67205 const struct cpumask *andp)
67206 {
67207 @@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67208 *
67209 * Returns >= nr_cpu_ids if no further cpus set.
67210 */
67211 -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67212 +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67213 {
67214 /* -1 is a legal arg here. */
67215 if (n != -1)
67216 @@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67217 *
67218 * Returns >= nr_cpu_ids if no further cpus unset.
67219 */
67220 -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67221 +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67222 {
67223 /* -1 is a legal arg here. */
67224 if (n != -1)
67225 @@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67226 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
67227 }
67228
67229 -int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
67230 +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
67231 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
67232
67233 /**
67234 diff --git a/include/linux/cred.h b/include/linux/cred.h
67235 index 04421e8..6bce4ef 100644
67236 --- a/include/linux/cred.h
67237 +++ b/include/linux/cred.h
67238 @@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
67239 static inline void validate_process_creds(void)
67240 {
67241 }
67242 +static inline void validate_task_creds(struct task_struct *task)
67243 +{
67244 +}
67245 #endif
67246
67247 /**
67248 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
67249 index b92eadf..b4ecdc1 100644
67250 --- a/include/linux/crypto.h
67251 +++ b/include/linux/crypto.h
67252 @@ -373,7 +373,7 @@ struct cipher_tfm {
67253 const u8 *key, unsigned int keylen);
67254 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67255 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67256 -};
67257 +} __no_const;
67258
67259 struct hash_tfm {
67260 int (*init)(struct hash_desc *desc);
67261 @@ -394,13 +394,13 @@ struct compress_tfm {
67262 int (*cot_decompress)(struct crypto_tfm *tfm,
67263 const u8 *src, unsigned int slen,
67264 u8 *dst, unsigned int *dlen);
67265 -};
67266 +} __no_const;
67267
67268 struct rng_tfm {
67269 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
67270 unsigned int dlen);
67271 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
67272 -};
67273 +} __no_const;
67274
67275 #define crt_ablkcipher crt_u.ablkcipher
67276 #define crt_aead crt_u.aead
67277 diff --git a/include/linux/ctype.h b/include/linux/ctype.h
67278 index 8acfe31..6ffccd63 100644
67279 --- a/include/linux/ctype.h
67280 +++ b/include/linux/ctype.h
67281 @@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
67282 * Fast implementation of tolower() for internal usage. Do not use in your
67283 * code.
67284 */
67285 -static inline char _tolower(const char c)
67286 +static inline unsigned char _tolower(const unsigned char c)
67287 {
67288 return c | 0x20;
67289 }
67290 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
67291 index 7925bf0..d5143d2 100644
67292 --- a/include/linux/decompress/mm.h
67293 +++ b/include/linux/decompress/mm.h
67294 @@ -77,7 +77,7 @@ static void free(void *where)
67295 * warnings when not needed (indeed large_malloc / large_free are not
67296 * needed by inflate */
67297
67298 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67299 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67300 #define free(a) kfree(a)
67301
67302 #define large_malloc(a) vmalloc(a)
67303 diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
67304 index fe8c447..bdc1f33 100644
67305 --- a/include/linux/devfreq.h
67306 +++ b/include/linux/devfreq.h
67307 @@ -114,7 +114,7 @@ struct devfreq_governor {
67308 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
67309 int (*event_handler)(struct devfreq *devfreq,
67310 unsigned int event, void *data);
67311 -};
67312 +} __do_const;
67313
67314 /**
67315 * struct devfreq - Device devfreq structure
67316 diff --git a/include/linux/device.h b/include/linux/device.h
67317 index 9d6464e..8a5cc92 100644
67318 --- a/include/linux/device.h
67319 +++ b/include/linux/device.h
67320 @@ -295,7 +295,7 @@ struct subsys_interface {
67321 struct list_head node;
67322 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
67323 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
67324 -};
67325 +} __do_const;
67326
67327 int subsys_interface_register(struct subsys_interface *sif);
67328 void subsys_interface_unregister(struct subsys_interface *sif);
67329 @@ -475,7 +475,7 @@ struct device_type {
67330 void (*release)(struct device *dev);
67331
67332 const struct dev_pm_ops *pm;
67333 -};
67334 +} __do_const;
67335
67336 /* interface for exporting device attributes */
67337 struct device_attribute {
67338 @@ -485,11 +485,12 @@ struct device_attribute {
67339 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
67340 const char *buf, size_t count);
67341 };
67342 +typedef struct device_attribute __no_const device_attribute_no_const;
67343
67344 struct dev_ext_attribute {
67345 struct device_attribute attr;
67346 void *var;
67347 -};
67348 +} __do_const;
67349
67350 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
67351 char *buf);
67352 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
67353 index 94af418..b1ca7a2 100644
67354 --- a/include/linux/dma-mapping.h
67355 +++ b/include/linux/dma-mapping.h
67356 @@ -54,7 +54,7 @@ struct dma_map_ops {
67357 u64 (*get_required_mask)(struct device *dev);
67358 #endif
67359 int is_phys;
67360 -};
67361 +} __do_const;
67362
67363 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
67364
67365 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
67366 index 91ac8da..a841318 100644
67367 --- a/include/linux/dmaengine.h
67368 +++ b/include/linux/dmaengine.h
67369 @@ -1034,9 +1034,9 @@ struct dma_pinned_list {
67370 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
67371 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
67372
67373 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67374 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67375 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
67376 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67377 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67378 struct dma_pinned_list *pinned_list, struct page *page,
67379 unsigned int offset, size_t len);
67380
67381 diff --git a/include/linux/efi.h b/include/linux/efi.h
67382 index 3d7df3d..301f024 100644
67383 --- a/include/linux/efi.h
67384 +++ b/include/linux/efi.h
67385 @@ -740,6 +740,7 @@ struct efivar_operations {
67386 efi_set_variable_t *set_variable;
67387 efi_query_variable_store_t *query_variable_store;
67388 };
67389 +typedef struct efivar_operations __no_const efivar_operations_no_const;
67390
67391 struct efivars {
67392 /*
67393 diff --git a/include/linux/elf.h b/include/linux/elf.h
67394 index 40a3c0e..4c45a38 100644
67395 --- a/include/linux/elf.h
67396 +++ b/include/linux/elf.h
67397 @@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
67398 #define elf_note elf32_note
67399 #define elf_addr_t Elf32_Off
67400 #define Elf_Half Elf32_Half
67401 +#define elf_dyn Elf32_Dyn
67402
67403 #else
67404
67405 @@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
67406 #define elf_note elf64_note
67407 #define elf_addr_t Elf64_Off
67408 #define Elf_Half Elf64_Half
67409 +#define elf_dyn Elf64_Dyn
67410
67411 #endif
67412
67413 diff --git a/include/linux/err.h b/include/linux/err.h
67414 index f2edce2..cc2082c 100644
67415 --- a/include/linux/err.h
67416 +++ b/include/linux/err.h
67417 @@ -19,12 +19,12 @@
67418
67419 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
67420
67421 -static inline void * __must_check ERR_PTR(long error)
67422 +static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
67423 {
67424 return (void *) error;
67425 }
67426
67427 -static inline long __must_check PTR_ERR(const void *ptr)
67428 +static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
67429 {
67430 return (long) ptr;
67431 }
67432 diff --git a/include/linux/extcon.h b/include/linux/extcon.h
67433 index fcb51c8..bdafcf6 100644
67434 --- a/include/linux/extcon.h
67435 +++ b/include/linux/extcon.h
67436 @@ -134,7 +134,7 @@ struct extcon_dev {
67437 /* /sys/class/extcon/.../mutually_exclusive/... */
67438 struct attribute_group attr_g_muex;
67439 struct attribute **attrs_muex;
67440 - struct device_attribute *d_attrs_muex;
67441 + device_attribute_no_const *d_attrs_muex;
67442 };
67443
67444 /**
67445 diff --git a/include/linux/fb.h b/include/linux/fb.h
67446 index 58b9860..58e5516 100644
67447 --- a/include/linux/fb.h
67448 +++ b/include/linux/fb.h
67449 @@ -304,7 +304,7 @@ struct fb_ops {
67450 /* called at KDB enter and leave time to prepare the console */
67451 int (*fb_debug_enter)(struct fb_info *info);
67452 int (*fb_debug_leave)(struct fb_info *info);
67453 -};
67454 +} __do_const;
67455
67456 #ifdef CONFIG_FB_TILEBLITTING
67457 #define FB_TILE_CURSOR_NONE 0
67458 diff --git a/include/linux/filter.h b/include/linux/filter.h
67459 index c45eabc..baa0be5 100644
67460 --- a/include/linux/filter.h
67461 +++ b/include/linux/filter.h
67462 @@ -20,6 +20,7 @@ struct compat_sock_fprog {
67463
67464 struct sk_buff;
67465 struct sock;
67466 +struct bpf_jit_work;
67467
67468 struct sk_filter
67469 {
67470 @@ -27,6 +28,9 @@ struct sk_filter
67471 unsigned int len; /* Number of filter blocks */
67472 unsigned int (*bpf_func)(const struct sk_buff *skb,
67473 const struct sock_filter *filter);
67474 +#ifdef CONFIG_BPF_JIT
67475 + struct bpf_jit_work *work;
67476 +#endif
67477 struct rcu_head rcu;
67478 struct sock_filter insns[0];
67479 };
67480 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
67481 index 3044254..9767f41 100644
67482 --- a/include/linux/frontswap.h
67483 +++ b/include/linux/frontswap.h
67484 @@ -11,7 +11,7 @@ struct frontswap_ops {
67485 int (*load)(unsigned, pgoff_t, struct page *);
67486 void (*invalidate_page)(unsigned, pgoff_t);
67487 void (*invalidate_area)(unsigned);
67488 -};
67489 +} __no_const;
67490
67491 extern bool frontswap_enabled;
67492 extern struct frontswap_ops
67493 diff --git a/include/linux/fs.h b/include/linux/fs.h
67494 index 2c28271..8d3d74c 100644
67495 --- a/include/linux/fs.h
67496 +++ b/include/linux/fs.h
67497 @@ -1541,7 +1541,8 @@ struct file_operations {
67498 long (*fallocate)(struct file *file, int mode, loff_t offset,
67499 loff_t len);
67500 int (*show_fdinfo)(struct seq_file *m, struct file *f);
67501 -};
67502 +} __do_const;
67503 +typedef struct file_operations __no_const file_operations_no_const;
67504
67505 struct inode_operations {
67506 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
67507 @@ -2672,4 +2673,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
67508 inode->i_flags |= S_NOSEC;
67509 }
67510
67511 +static inline bool is_sidechannel_device(const struct inode *inode)
67512 +{
67513 +#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
67514 + umode_t mode = inode->i_mode;
67515 + return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
67516 +#else
67517 + return false;
67518 +#endif
67519 +}
67520 +
67521 #endif /* _LINUX_FS_H */
67522 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
67523 index 2b93a9a..855d94a 100644
67524 --- a/include/linux/fs_struct.h
67525 +++ b/include/linux/fs_struct.h
67526 @@ -6,7 +6,7 @@
67527 #include <linux/seqlock.h>
67528
67529 struct fs_struct {
67530 - int users;
67531 + atomic_t users;
67532 spinlock_t lock;
67533 seqcount_t seq;
67534 int umask;
67535 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
67536 index 5dfa0aa..6acf322 100644
67537 --- a/include/linux/fscache-cache.h
67538 +++ b/include/linux/fscache-cache.h
67539 @@ -112,7 +112,7 @@ struct fscache_operation {
67540 fscache_operation_release_t release;
67541 };
67542
67543 -extern atomic_t fscache_op_debug_id;
67544 +extern atomic_unchecked_t fscache_op_debug_id;
67545 extern void fscache_op_work_func(struct work_struct *work);
67546
67547 extern void fscache_enqueue_operation(struct fscache_operation *);
67548 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
67549 INIT_WORK(&op->work, fscache_op_work_func);
67550 atomic_set(&op->usage, 1);
67551 op->state = FSCACHE_OP_ST_INITIALISED;
67552 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
67553 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
67554 op->processor = processor;
67555 op->release = release;
67556 INIT_LIST_HEAD(&op->pend_link);
67557 diff --git a/include/linux/fscache.h b/include/linux/fscache.h
67558 index 7a08623..4c07b0f 100644
67559 --- a/include/linux/fscache.h
67560 +++ b/include/linux/fscache.h
67561 @@ -152,7 +152,7 @@ struct fscache_cookie_def {
67562 * - this is mandatory for any object that may have data
67563 */
67564 void (*now_uncached)(void *cookie_netfs_data);
67565 -};
67566 +} __do_const;
67567
67568 /*
67569 * fscache cached network filesystem type
67570 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
67571 index a78680a..87bd73e 100644
67572 --- a/include/linux/fsnotify.h
67573 +++ b/include/linux/fsnotify.h
67574 @@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
67575 struct inode *inode = path->dentry->d_inode;
67576 __u32 mask = FS_ACCESS;
67577
67578 + if (is_sidechannel_device(inode))
67579 + return;
67580 +
67581 if (S_ISDIR(inode->i_mode))
67582 mask |= FS_ISDIR;
67583
67584 @@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
67585 struct inode *inode = path->dentry->d_inode;
67586 __u32 mask = FS_MODIFY;
67587
67588 + if (is_sidechannel_device(inode))
67589 + return;
67590 +
67591 if (S_ISDIR(inode->i_mode))
67592 mask |= FS_ISDIR;
67593
67594 @@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
67595 */
67596 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
67597 {
67598 - return kstrdup(name, GFP_KERNEL);
67599 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
67600 }
67601
67602 /*
67603 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
67604 index 13a54d0..c6ce2a7 100644
67605 --- a/include/linux/ftrace_event.h
67606 +++ b/include/linux/ftrace_event.h
67607 @@ -274,7 +274,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
67608 extern int trace_add_event_call(struct ftrace_event_call *call);
67609 extern void trace_remove_event_call(struct ftrace_event_call *call);
67610
67611 -#define is_signed_type(type) (((type)(-1)) < (type)0)
67612 +#define is_signed_type(type) (((type)(-1)) < (type)1)
67613
67614 int trace_set_clr_event(const char *system, const char *event, int set);
67615
67616 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67617 index 9f3c275..911b591 100644
67618 --- a/include/linux/genhd.h
67619 +++ b/include/linux/genhd.h
67620 @@ -194,7 +194,7 @@ struct gendisk {
67621 struct kobject *slave_dir;
67622
67623 struct timer_rand_state *random;
67624 - atomic_t sync_io; /* RAID */
67625 + atomic_unchecked_t sync_io; /* RAID */
67626 struct disk_events *ev;
67627 #ifdef CONFIG_BLK_DEV_INTEGRITY
67628 struct blk_integrity *integrity;
67629 diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
67630 index 023bc34..b02b46a 100644
67631 --- a/include/linux/genl_magic_func.h
67632 +++ b/include/linux/genl_magic_func.h
67633 @@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
67634 },
67635
67636 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
67637 -static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
67638 +static struct genl_ops ZZZ_genl_ops[] = {
67639 #include GENL_MAGIC_INCLUDE_FILE
67640 };
67641
67642 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
67643 index 0f615eb..5c3832f 100644
67644 --- a/include/linux/gfp.h
67645 +++ b/include/linux/gfp.h
67646 @@ -35,6 +35,13 @@ struct vm_area_struct;
67647 #define ___GFP_NO_KSWAPD 0x400000u
67648 #define ___GFP_OTHER_NODE 0x800000u
67649 #define ___GFP_WRITE 0x1000000u
67650 +
67651 +#ifdef CONFIG_PAX_USERCOPY_SLABS
67652 +#define ___GFP_USERCOPY 0x2000000u
67653 +#else
67654 +#define ___GFP_USERCOPY 0
67655 +#endif
67656 +
67657 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
67658
67659 /*
67660 @@ -92,6 +99,7 @@ struct vm_area_struct;
67661 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
67662 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
67663 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
67664 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
67665
67666 /*
67667 * This may seem redundant, but it's a way of annotating false positives vs.
67668 @@ -99,7 +107,7 @@ struct vm_area_struct;
67669 */
67670 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
67671
67672 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
67673 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
67674 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
67675
67676 /* This equals 0, but use constants in case they ever change */
67677 @@ -153,6 +161,8 @@ struct vm_area_struct;
67678 /* 4GB DMA on some platforms */
67679 #define GFP_DMA32 __GFP_DMA32
67680
67681 +#define GFP_USERCOPY __GFP_USERCOPY
67682 +
67683 /* Convert GFP flags to their corresponding migrate type */
67684 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
67685 {
67686 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67687 new file mode 100644
67688 index 0000000..ebe6d72
67689 --- /dev/null
67690 +++ b/include/linux/gracl.h
67691 @@ -0,0 +1,319 @@
67692 +#ifndef GR_ACL_H
67693 +#define GR_ACL_H
67694 +
67695 +#include <linux/grdefs.h>
67696 +#include <linux/resource.h>
67697 +#include <linux/capability.h>
67698 +#include <linux/dcache.h>
67699 +#include <asm/resource.h>
67700 +
67701 +/* Major status information */
67702 +
67703 +#define GR_VERSION "grsecurity 2.9.1"
67704 +#define GRSECURITY_VERSION 0x2901
67705 +
67706 +enum {
67707 + GR_SHUTDOWN = 0,
67708 + GR_ENABLE = 1,
67709 + GR_SPROLE = 2,
67710 + GR_RELOAD = 3,
67711 + GR_SEGVMOD = 4,
67712 + GR_STATUS = 5,
67713 + GR_UNSPROLE = 6,
67714 + GR_PASSSET = 7,
67715 + GR_SPROLEPAM = 8,
67716 +};
67717 +
67718 +/* Password setup definitions
67719 + * kernel/grhash.c */
67720 +enum {
67721 + GR_PW_LEN = 128,
67722 + GR_SALT_LEN = 16,
67723 + GR_SHA_LEN = 32,
67724 +};
67725 +
67726 +enum {
67727 + GR_SPROLE_LEN = 64,
67728 +};
67729 +
67730 +enum {
67731 + GR_NO_GLOB = 0,
67732 + GR_REG_GLOB,
67733 + GR_CREATE_GLOB
67734 +};
67735 +
67736 +#define GR_NLIMITS 32
67737 +
67738 +/* Begin Data Structures */
67739 +
67740 +struct sprole_pw {
67741 + unsigned char *rolename;
67742 + unsigned char salt[GR_SALT_LEN];
67743 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67744 +};
67745 +
67746 +struct name_entry {
67747 + __u32 key;
67748 + ino_t inode;
67749 + dev_t device;
67750 + char *name;
67751 + __u16 len;
67752 + __u8 deleted;
67753 + struct name_entry *prev;
67754 + struct name_entry *next;
67755 +};
67756 +
67757 +struct inodev_entry {
67758 + struct name_entry *nentry;
67759 + struct inodev_entry *prev;
67760 + struct inodev_entry *next;
67761 +};
67762 +
67763 +struct acl_role_db {
67764 + struct acl_role_label **r_hash;
67765 + __u32 r_size;
67766 +};
67767 +
67768 +struct inodev_db {
67769 + struct inodev_entry **i_hash;
67770 + __u32 i_size;
67771 +};
67772 +
67773 +struct name_db {
67774 + struct name_entry **n_hash;
67775 + __u32 n_size;
67776 +};
67777 +
67778 +struct crash_uid {
67779 + uid_t uid;
67780 + unsigned long expires;
67781 +};
67782 +
67783 +struct gr_hash_struct {
67784 + void **table;
67785 + void **nametable;
67786 + void *first;
67787 + __u32 table_size;
67788 + __u32 used_size;
67789 + int type;
67790 +};
67791 +
67792 +/* Userspace Grsecurity ACL data structures */
67793 +
67794 +struct acl_subject_label {
67795 + char *filename;
67796 + ino_t inode;
67797 + dev_t device;
67798 + __u32 mode;
67799 + kernel_cap_t cap_mask;
67800 + kernel_cap_t cap_lower;
67801 + kernel_cap_t cap_invert_audit;
67802 +
67803 + struct rlimit res[GR_NLIMITS];
67804 + __u32 resmask;
67805 +
67806 + __u8 user_trans_type;
67807 + __u8 group_trans_type;
67808 + uid_t *user_transitions;
67809 + gid_t *group_transitions;
67810 + __u16 user_trans_num;
67811 + __u16 group_trans_num;
67812 +
67813 + __u32 sock_families[2];
67814 + __u32 ip_proto[8];
67815 + __u32 ip_type;
67816 + struct acl_ip_label **ips;
67817 + __u32 ip_num;
67818 + __u32 inaddr_any_override;
67819 +
67820 + __u32 crashes;
67821 + unsigned long expires;
67822 +
67823 + struct acl_subject_label *parent_subject;
67824 + struct gr_hash_struct *hash;
67825 + struct acl_subject_label *prev;
67826 + struct acl_subject_label *next;
67827 +
67828 + struct acl_object_label **obj_hash;
67829 + __u32 obj_hash_size;
67830 + __u16 pax_flags;
67831 +};
67832 +
67833 +struct role_allowed_ip {
67834 + __u32 addr;
67835 + __u32 netmask;
67836 +
67837 + struct role_allowed_ip *prev;
67838 + struct role_allowed_ip *next;
67839 +};
67840 +
67841 +struct role_transition {
67842 + char *rolename;
67843 +
67844 + struct role_transition *prev;
67845 + struct role_transition *next;
67846 +};
67847 +
67848 +struct acl_role_label {
67849 + char *rolename;
67850 + uid_t uidgid;
67851 + __u16 roletype;
67852 +
67853 + __u16 auth_attempts;
67854 + unsigned long expires;
67855 +
67856 + struct acl_subject_label *root_label;
67857 + struct gr_hash_struct *hash;
67858 +
67859 + struct acl_role_label *prev;
67860 + struct acl_role_label *next;
67861 +
67862 + struct role_transition *transitions;
67863 + struct role_allowed_ip *allowed_ips;
67864 + uid_t *domain_children;
67865 + __u16 domain_child_num;
67866 +
67867 + umode_t umask;
67868 +
67869 + struct acl_subject_label **subj_hash;
67870 + __u32 subj_hash_size;
67871 +};
67872 +
67873 +struct user_acl_role_db {
67874 + struct acl_role_label **r_table;
67875 + __u32 num_pointers; /* Number of allocations to track */
67876 + __u32 num_roles; /* Number of roles */
67877 + __u32 num_domain_children; /* Number of domain children */
67878 + __u32 num_subjects; /* Number of subjects */
67879 + __u32 num_objects; /* Number of objects */
67880 +};
67881 +
67882 +struct acl_object_label {
67883 + char *filename;
67884 + ino_t inode;
67885 + dev_t device;
67886 + __u32 mode;
67887 +
67888 + struct acl_subject_label *nested;
67889 + struct acl_object_label *globbed;
67890 +
67891 + /* next two structures not used */
67892 +
67893 + struct acl_object_label *prev;
67894 + struct acl_object_label *next;
67895 +};
67896 +
67897 +struct acl_ip_label {
67898 + char *iface;
67899 + __u32 addr;
67900 + __u32 netmask;
67901 + __u16 low, high;
67902 + __u8 mode;
67903 + __u32 type;
67904 + __u32 proto[8];
67905 +
67906 + /* next two structures not used */
67907 +
67908 + struct acl_ip_label *prev;
67909 + struct acl_ip_label *next;
67910 +};
67911 +
67912 +struct gr_arg {
67913 + struct user_acl_role_db role_db;
67914 + unsigned char pw[GR_PW_LEN];
67915 + unsigned char salt[GR_SALT_LEN];
67916 + unsigned char sum[GR_SHA_LEN];
67917 + unsigned char sp_role[GR_SPROLE_LEN];
67918 + struct sprole_pw *sprole_pws;
67919 + dev_t segv_device;
67920 + ino_t segv_inode;
67921 + uid_t segv_uid;
67922 + __u16 num_sprole_pws;
67923 + __u16 mode;
67924 +};
67925 +
67926 +struct gr_arg_wrapper {
67927 + struct gr_arg *arg;
67928 + __u32 version;
67929 + __u32 size;
67930 +};
67931 +
67932 +struct subject_map {
67933 + struct acl_subject_label *user;
67934 + struct acl_subject_label *kernel;
67935 + struct subject_map *prev;
67936 + struct subject_map *next;
67937 +};
67938 +
67939 +struct acl_subj_map_db {
67940 + struct subject_map **s_hash;
67941 + __u32 s_size;
67942 +};
67943 +
67944 +/* End Data Structures Section */
67945 +
67946 +/* Hash functions generated by empirical testing by Brad Spengler
67947 + Makes good use of the low bits of the inode. Generally 0-1 times
67948 + in loop for successful match. 0-3 for unsuccessful match.
67949 + Shift/add algorithm with modulus of table size and an XOR*/
67950 +
67951 +static __inline__ unsigned int
67952 +gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67953 +{
67954 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
67955 +}
67956 +
67957 + static __inline__ unsigned int
67958 +gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
67959 +{
67960 + return ((const unsigned long)userp % sz);
67961 +}
67962 +
67963 +static __inline__ unsigned int
67964 +gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67965 +{
67966 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67967 +}
67968 +
67969 +static __inline__ unsigned int
67970 +gr_nhash(const char *name, const __u16 len, const unsigned int sz)
67971 +{
67972 + return full_name_hash((const unsigned char *)name, len) % sz;
67973 +}
67974 +
67975 +#define FOR_EACH_ROLE_START(role) \
67976 + role = role_list; \
67977 + while (role) {
67978 +
67979 +#define FOR_EACH_ROLE_END(role) \
67980 + role = role->prev; \
67981 + }
67982 +
67983 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67984 + subj = NULL; \
67985 + iter = 0; \
67986 + while (iter < role->subj_hash_size) { \
67987 + if (subj == NULL) \
67988 + subj = role->subj_hash[iter]; \
67989 + if (subj == NULL) { \
67990 + iter++; \
67991 + continue; \
67992 + }
67993 +
67994 +#define FOR_EACH_SUBJECT_END(subj,iter) \
67995 + subj = subj->next; \
67996 + if (subj == NULL) \
67997 + iter++; \
67998 + }
67999 +
68000 +
68001 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68002 + subj = role->hash->first; \
68003 + while (subj != NULL) {
68004 +
68005 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68006 + subj = subj->next; \
68007 + }
68008 +
68009 +#endif
68010 +
68011 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68012 new file mode 100644
68013 index 0000000..323ecf2
68014 --- /dev/null
68015 +++ b/include/linux/gralloc.h
68016 @@ -0,0 +1,9 @@
68017 +#ifndef __GRALLOC_H
68018 +#define __GRALLOC_H
68019 +
68020 +void acl_free_all(void);
68021 +int acl_alloc_stack_init(unsigned long size);
68022 +void *acl_alloc(unsigned long len);
68023 +void *acl_alloc_num(unsigned long num, unsigned long len);
68024 +
68025 +#endif
68026 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
68027 new file mode 100644
68028 index 0000000..be66033
68029 --- /dev/null
68030 +++ b/include/linux/grdefs.h
68031 @@ -0,0 +1,140 @@
68032 +#ifndef GRDEFS_H
68033 +#define GRDEFS_H
68034 +
68035 +/* Begin grsecurity status declarations */
68036 +
68037 +enum {
68038 + GR_READY = 0x01,
68039 + GR_STATUS_INIT = 0x00 // disabled state
68040 +};
68041 +
68042 +/* Begin ACL declarations */
68043 +
68044 +/* Role flags */
68045 +
68046 +enum {
68047 + GR_ROLE_USER = 0x0001,
68048 + GR_ROLE_GROUP = 0x0002,
68049 + GR_ROLE_DEFAULT = 0x0004,
68050 + GR_ROLE_SPECIAL = 0x0008,
68051 + GR_ROLE_AUTH = 0x0010,
68052 + GR_ROLE_NOPW = 0x0020,
68053 + GR_ROLE_GOD = 0x0040,
68054 + GR_ROLE_LEARN = 0x0080,
68055 + GR_ROLE_TPE = 0x0100,
68056 + GR_ROLE_DOMAIN = 0x0200,
68057 + GR_ROLE_PAM = 0x0400,
68058 + GR_ROLE_PERSIST = 0x0800
68059 +};
68060 +
68061 +/* ACL Subject and Object mode flags */
68062 +enum {
68063 + GR_DELETED = 0x80000000
68064 +};
68065 +
68066 +/* ACL Object-only mode flags */
68067 +enum {
68068 + GR_READ = 0x00000001,
68069 + GR_APPEND = 0x00000002,
68070 + GR_WRITE = 0x00000004,
68071 + GR_EXEC = 0x00000008,
68072 + GR_FIND = 0x00000010,
68073 + GR_INHERIT = 0x00000020,
68074 + GR_SETID = 0x00000040,
68075 + GR_CREATE = 0x00000080,
68076 + GR_DELETE = 0x00000100,
68077 + GR_LINK = 0x00000200,
68078 + GR_AUDIT_READ = 0x00000400,
68079 + GR_AUDIT_APPEND = 0x00000800,
68080 + GR_AUDIT_WRITE = 0x00001000,
68081 + GR_AUDIT_EXEC = 0x00002000,
68082 + GR_AUDIT_FIND = 0x00004000,
68083 + GR_AUDIT_INHERIT= 0x00008000,
68084 + GR_AUDIT_SETID = 0x00010000,
68085 + GR_AUDIT_CREATE = 0x00020000,
68086 + GR_AUDIT_DELETE = 0x00040000,
68087 + GR_AUDIT_LINK = 0x00080000,
68088 + GR_PTRACERD = 0x00100000,
68089 + GR_NOPTRACE = 0x00200000,
68090 + GR_SUPPRESS = 0x00400000,
68091 + GR_NOLEARN = 0x00800000,
68092 + GR_INIT_TRANSFER= 0x01000000
68093 +};
68094 +
68095 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
68096 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
68097 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
68098 +
68099 +/* ACL subject-only mode flags */
68100 +enum {
68101 + GR_KILL = 0x00000001,
68102 + GR_VIEW = 0x00000002,
68103 + GR_PROTECTED = 0x00000004,
68104 + GR_LEARN = 0x00000008,
68105 + GR_OVERRIDE = 0x00000010,
68106 + /* just a placeholder, this mode is only used in userspace */
68107 + GR_DUMMY = 0x00000020,
68108 + GR_PROTSHM = 0x00000040,
68109 + GR_KILLPROC = 0x00000080,
68110 + GR_KILLIPPROC = 0x00000100,
68111 + /* just a placeholder, this mode is only used in userspace */
68112 + GR_NOTROJAN = 0x00000200,
68113 + GR_PROTPROCFD = 0x00000400,
68114 + GR_PROCACCT = 0x00000800,
68115 + GR_RELAXPTRACE = 0x00001000,
68116 + //GR_NESTED = 0x00002000,
68117 + GR_INHERITLEARN = 0x00004000,
68118 + GR_PROCFIND = 0x00008000,
68119 + GR_POVERRIDE = 0x00010000,
68120 + GR_KERNELAUTH = 0x00020000,
68121 + GR_ATSECURE = 0x00040000,
68122 + GR_SHMEXEC = 0x00080000
68123 +};
68124 +
68125 +enum {
68126 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
68127 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
68128 + GR_PAX_ENABLE_MPROTECT = 0x0004,
68129 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
68130 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
68131 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
68132 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
68133 + GR_PAX_DISABLE_MPROTECT = 0x0400,
68134 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
68135 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
68136 +};
68137 +
68138 +enum {
68139 + GR_ID_USER = 0x01,
68140 + GR_ID_GROUP = 0x02,
68141 +};
68142 +
68143 +enum {
68144 + GR_ID_ALLOW = 0x01,
68145 + GR_ID_DENY = 0x02,
68146 +};
68147 +
68148 +#define GR_CRASH_RES 31
68149 +#define GR_UIDTABLE_MAX 500
68150 +
68151 +/* begin resource learning section */
68152 +enum {
68153 + GR_RLIM_CPU_BUMP = 60,
68154 + GR_RLIM_FSIZE_BUMP = 50000,
68155 + GR_RLIM_DATA_BUMP = 10000,
68156 + GR_RLIM_STACK_BUMP = 1000,
68157 + GR_RLIM_CORE_BUMP = 10000,
68158 + GR_RLIM_RSS_BUMP = 500000,
68159 + GR_RLIM_NPROC_BUMP = 1,
68160 + GR_RLIM_NOFILE_BUMP = 5,
68161 + GR_RLIM_MEMLOCK_BUMP = 50000,
68162 + GR_RLIM_AS_BUMP = 500000,
68163 + GR_RLIM_LOCKS_BUMP = 2,
68164 + GR_RLIM_SIGPENDING_BUMP = 5,
68165 + GR_RLIM_MSGQUEUE_BUMP = 10000,
68166 + GR_RLIM_NICE_BUMP = 1,
68167 + GR_RLIM_RTPRIO_BUMP = 1,
68168 + GR_RLIM_RTTIME_BUMP = 1000000
68169 +};
68170 +
68171 +#endif
68172 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
68173 new file mode 100644
68174 index 0000000..5402bce
68175 --- /dev/null
68176 +++ b/include/linux/grinternal.h
68177 @@ -0,0 +1,215 @@
68178 +#ifndef __GRINTERNAL_H
68179 +#define __GRINTERNAL_H
68180 +
68181 +#ifdef CONFIG_GRKERNSEC
68182 +
68183 +#include <linux/fs.h>
68184 +#include <linux/mnt_namespace.h>
68185 +#include <linux/nsproxy.h>
68186 +#include <linux/gracl.h>
68187 +#include <linux/grdefs.h>
68188 +#include <linux/grmsg.h>
68189 +
68190 +void gr_add_learn_entry(const char *fmt, ...)
68191 + __attribute__ ((format (printf, 1, 2)));
68192 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
68193 + const struct vfsmount *mnt);
68194 +__u32 gr_check_create(const struct dentry *new_dentry,
68195 + const struct dentry *parent,
68196 + const struct vfsmount *mnt, const __u32 mode);
68197 +int gr_check_protected_task(const struct task_struct *task);
68198 +__u32 to_gr_audit(const __u32 reqmode);
68199 +int gr_set_acls(const int type);
68200 +int gr_apply_subject_to_task(struct task_struct *task);
68201 +int gr_acl_is_enabled(void);
68202 +char gr_roletype_to_char(void);
68203 +
68204 +void gr_handle_alertkill(struct task_struct *task);
68205 +char *gr_to_filename(const struct dentry *dentry,
68206 + const struct vfsmount *mnt);
68207 +char *gr_to_filename1(const struct dentry *dentry,
68208 + const struct vfsmount *mnt);
68209 +char *gr_to_filename2(const struct dentry *dentry,
68210 + const struct vfsmount *mnt);
68211 +char *gr_to_filename3(const struct dentry *dentry,
68212 + const struct vfsmount *mnt);
68213 +
68214 +extern int grsec_enable_ptrace_readexec;
68215 +extern int grsec_enable_harden_ptrace;
68216 +extern int grsec_enable_link;
68217 +extern int grsec_enable_fifo;
68218 +extern int grsec_enable_execve;
68219 +extern int grsec_enable_shm;
68220 +extern int grsec_enable_execlog;
68221 +extern int grsec_enable_signal;
68222 +extern int grsec_enable_audit_ptrace;
68223 +extern int grsec_enable_forkfail;
68224 +extern int grsec_enable_time;
68225 +extern int grsec_enable_rofs;
68226 +extern int grsec_enable_chroot_shmat;
68227 +extern int grsec_enable_chroot_mount;
68228 +extern int grsec_enable_chroot_double;
68229 +extern int grsec_enable_chroot_pivot;
68230 +extern int grsec_enable_chroot_chdir;
68231 +extern int grsec_enable_chroot_chmod;
68232 +extern int grsec_enable_chroot_mknod;
68233 +extern int grsec_enable_chroot_fchdir;
68234 +extern int grsec_enable_chroot_nice;
68235 +extern int grsec_enable_chroot_execlog;
68236 +extern int grsec_enable_chroot_caps;
68237 +extern int grsec_enable_chroot_sysctl;
68238 +extern int grsec_enable_chroot_unix;
68239 +extern int grsec_enable_symlinkown;
68240 +extern kgid_t grsec_symlinkown_gid;
68241 +extern int grsec_enable_tpe;
68242 +extern kgid_t grsec_tpe_gid;
68243 +extern int grsec_enable_tpe_all;
68244 +extern int grsec_enable_tpe_invert;
68245 +extern int grsec_enable_socket_all;
68246 +extern kgid_t grsec_socket_all_gid;
68247 +extern int grsec_enable_socket_client;
68248 +extern kgid_t grsec_socket_client_gid;
68249 +extern int grsec_enable_socket_server;
68250 +extern kgid_t grsec_socket_server_gid;
68251 +extern kgid_t grsec_audit_gid;
68252 +extern int grsec_enable_group;
68253 +extern int grsec_enable_audit_textrel;
68254 +extern int grsec_enable_log_rwxmaps;
68255 +extern int grsec_enable_mount;
68256 +extern int grsec_enable_chdir;
68257 +extern int grsec_resource_logging;
68258 +extern int grsec_enable_blackhole;
68259 +extern int grsec_lastack_retries;
68260 +extern int grsec_enable_brute;
68261 +extern int grsec_lock;
68262 +
68263 +extern spinlock_t grsec_alert_lock;
68264 +extern unsigned long grsec_alert_wtime;
68265 +extern unsigned long grsec_alert_fyet;
68266 +
68267 +extern spinlock_t grsec_audit_lock;
68268 +
68269 +extern rwlock_t grsec_exec_file_lock;
68270 +
68271 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
68272 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
68273 + (tsk)->exec_file->f_path.mnt) : "/")
68274 +
68275 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
68276 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
68277 + (tsk)->real_parent->exec_file->f_path.mnt) : "/")
68278 +
68279 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
68280 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
68281 + (tsk)->exec_file->f_path.mnt) : "/")
68282 +
68283 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
68284 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
68285 + (tsk)->real_parent->exec_file->f_path.mnt) : "/")
68286 +
68287 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
68288 +
68289 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
68290 +
68291 +#define GR_CHROOT_CAPS {{ \
68292 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
68293 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
68294 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
68295 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
68296 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
68297 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
68298 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
68299 +
68300 +#define security_learn(normal_msg,args...) \
68301 +({ \
68302 + read_lock(&grsec_exec_file_lock); \
68303 + gr_add_learn_entry(normal_msg "\n", ## args); \
68304 + read_unlock(&grsec_exec_file_lock); \
68305 +})
68306 +
68307 +enum {
68308 + GR_DO_AUDIT,
68309 + GR_DONT_AUDIT,
68310 + /* used for non-audit messages that we shouldn't kill the task on */
68311 + GR_DONT_AUDIT_GOOD
68312 +};
68313 +
68314 +enum {
68315 + GR_TTYSNIFF,
68316 + GR_RBAC,
68317 + GR_RBAC_STR,
68318 + GR_STR_RBAC,
68319 + GR_RBAC_MODE2,
68320 + GR_RBAC_MODE3,
68321 + GR_FILENAME,
68322 + GR_SYSCTL_HIDDEN,
68323 + GR_NOARGS,
68324 + GR_ONE_INT,
68325 + GR_ONE_INT_TWO_STR,
68326 + GR_ONE_STR,
68327 + GR_STR_INT,
68328 + GR_TWO_STR_INT,
68329 + GR_TWO_INT,
68330 + GR_TWO_U64,
68331 + GR_THREE_INT,
68332 + GR_FIVE_INT_TWO_STR,
68333 + GR_TWO_STR,
68334 + GR_THREE_STR,
68335 + GR_FOUR_STR,
68336 + GR_STR_FILENAME,
68337 + GR_FILENAME_STR,
68338 + GR_FILENAME_TWO_INT,
68339 + GR_FILENAME_TWO_INT_STR,
68340 + GR_TEXTREL,
68341 + GR_PTRACE,
68342 + GR_RESOURCE,
68343 + GR_CAP,
68344 + GR_SIG,
68345 + GR_SIG2,
68346 + GR_CRASH1,
68347 + GR_CRASH2,
68348 + GR_PSACCT,
68349 + GR_RWXMAP
68350 +};
68351 +
68352 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
68353 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
68354 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
68355 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
68356 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
68357 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
68358 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
68359 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
68360 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
68361 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
68362 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
68363 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
68364 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
68365 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
68366 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
68367 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
68368 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
68369 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
68370 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
68371 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
68372 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
68373 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
68374 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
68375 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
68376 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
68377 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
68378 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
68379 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
68380 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
68381 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
68382 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
68383 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
68384 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
68385 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
68386 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
68387 +
68388 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
68389 +
68390 +#endif
68391 +
68392 +#endif
68393 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
68394 new file mode 100644
68395 index 0000000..2bd4c8d
68396 --- /dev/null
68397 +++ b/include/linux/grmsg.h
68398 @@ -0,0 +1,111 @@
68399 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
68400 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
68401 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
68402 +#define GR_STOPMOD_MSG "denied modification of module state by "
68403 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
68404 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
68405 +#define GR_IOPERM_MSG "denied use of ioperm() by "
68406 +#define GR_IOPL_MSG "denied use of iopl() by "
68407 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
68408 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
68409 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
68410 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
68411 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
68412 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
68413 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
68414 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
68415 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
68416 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
68417 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
68418 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
68419 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
68420 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
68421 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
68422 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
68423 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
68424 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
68425 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
68426 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
68427 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
68428 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
68429 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
68430 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
68431 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
68432 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
68433 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
68434 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
68435 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
68436 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
68437 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
68438 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
68439 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
68440 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
68441 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
68442 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
68443 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
68444 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
68445 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
68446 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
68447 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
68448 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
68449 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
68450 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
68451 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
68452 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
68453 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
68454 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
68455 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
68456 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
68457 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
68458 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
68459 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
68460 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
68461 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
68462 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
68463 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
68464 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
68465 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
68466 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
68467 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
68468 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
68469 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
68470 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
68471 +#define GR_NICE_CHROOT_MSG "denied priority change by "
68472 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
68473 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
68474 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
68475 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
68476 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
68477 +#define GR_TIME_MSG "time set by "
68478 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
68479 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
68480 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
68481 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
68482 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
68483 +#define GR_BIND_MSG "denied bind() by "
68484 +#define GR_CONNECT_MSG "denied connect() by "
68485 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
68486 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
68487 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
68488 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
68489 +#define GR_CAP_ACL_MSG "use of %s denied for "
68490 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
68491 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
68492 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
68493 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
68494 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
68495 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
68496 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
68497 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
68498 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
68499 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
68500 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
68501 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
68502 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
68503 +#define GR_VM86_MSG "denied use of vm86 by "
68504 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
68505 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
68506 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
68507 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
68508 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
68509 +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
68510 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
68511 new file mode 100644
68512 index 0000000..d7ef0ac
68513 --- /dev/null
68514 +++ b/include/linux/grsecurity.h
68515 @@ -0,0 +1,242 @@
68516 +#ifndef GR_SECURITY_H
68517 +#define GR_SECURITY_H
68518 +#include <linux/fs.h>
68519 +#include <linux/fs_struct.h>
68520 +#include <linux/binfmts.h>
68521 +#include <linux/gracl.h>
68522 +
68523 +/* notify of brain-dead configs */
68524 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68525 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
68526 +#endif
68527 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
68528 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
68529 +#endif
68530 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
68531 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
68532 +#endif
68533 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
68534 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
68535 +#endif
68536 +
68537 +void gr_handle_brute_attach(unsigned long mm_flags);
68538 +void gr_handle_brute_check(void);
68539 +void gr_handle_kernel_exploit(void);
68540 +int gr_process_user_ban(void);
68541 +
68542 +char gr_roletype_to_char(void);
68543 +
68544 +int gr_acl_enable_at_secure(void);
68545 +
68546 +int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
68547 +int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
68548 +
68549 +void gr_del_task_from_ip_table(struct task_struct *p);
68550 +
68551 +int gr_pid_is_chrooted(struct task_struct *p);
68552 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
68553 +int gr_handle_chroot_nice(void);
68554 +int gr_handle_chroot_sysctl(const int op);
68555 +int gr_handle_chroot_setpriority(struct task_struct *p,
68556 + const int niceval);
68557 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
68558 +int gr_handle_chroot_chroot(const struct dentry *dentry,
68559 + const struct vfsmount *mnt);
68560 +void gr_handle_chroot_chdir(const struct path *path);
68561 +int gr_handle_chroot_chmod(const struct dentry *dentry,
68562 + const struct vfsmount *mnt, const int mode);
68563 +int gr_handle_chroot_mknod(const struct dentry *dentry,
68564 + const struct vfsmount *mnt, const int mode);
68565 +int gr_handle_chroot_mount(const struct dentry *dentry,
68566 + const struct vfsmount *mnt,
68567 + const char *dev_name);
68568 +int gr_handle_chroot_pivot(void);
68569 +int gr_handle_chroot_unix(const pid_t pid);
68570 +
68571 +int gr_handle_rawio(const struct inode *inode);
68572 +
68573 +void gr_handle_ioperm(void);
68574 +void gr_handle_iopl(void);
68575 +
68576 +umode_t gr_acl_umask(void);
68577 +
68578 +int gr_tpe_allow(const struct file *file);
68579 +
68580 +void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
68581 +void gr_clear_chroot_entries(struct task_struct *task);
68582 +
68583 +void gr_log_forkfail(const int retval);
68584 +void gr_log_timechange(void);
68585 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
68586 +void gr_log_chdir(const struct dentry *dentry,
68587 + const struct vfsmount *mnt);
68588 +void gr_log_chroot_exec(const struct dentry *dentry,
68589 + const struct vfsmount *mnt);
68590 +void gr_log_remount(const char *devname, const int retval);
68591 +void gr_log_unmount(const char *devname, const int retval);
68592 +void gr_log_mount(const char *from, const char *to, const int retval);
68593 +void gr_log_textrel(struct vm_area_struct *vma);
68594 +void gr_log_rwxmmap(struct file *file);
68595 +void gr_log_rwxmprotect(struct file *file);
68596 +
68597 +int gr_handle_follow_link(const struct inode *parent,
68598 + const struct inode *inode,
68599 + const struct dentry *dentry,
68600 + const struct vfsmount *mnt);
68601 +int gr_handle_fifo(const struct dentry *dentry,
68602 + const struct vfsmount *mnt,
68603 + const struct dentry *dir, const int flag,
68604 + const int acc_mode);
68605 +int gr_handle_hardlink(const struct dentry *dentry,
68606 + const struct vfsmount *mnt,
68607 + struct inode *inode,
68608 + const int mode, const struct filename *to);
68609 +
68610 +int gr_is_capable(const int cap);
68611 +int gr_is_capable_nolog(const int cap);
68612 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
68613 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
68614 +
68615 +void gr_copy_label(struct task_struct *tsk);
68616 +void gr_handle_crash(struct task_struct *task, const int sig);
68617 +int gr_handle_signal(const struct task_struct *p, const int sig);
68618 +int gr_check_crash_uid(const kuid_t uid);
68619 +int gr_check_protected_task(const struct task_struct *task);
68620 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68621 +int gr_acl_handle_mmap(const struct file *file,
68622 + const unsigned long prot);
68623 +int gr_acl_handle_mprotect(const struct file *file,
68624 + const unsigned long prot);
68625 +int gr_check_hidden_task(const struct task_struct *tsk);
68626 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68627 + const struct vfsmount *mnt);
68628 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
68629 + const struct vfsmount *mnt);
68630 +__u32 gr_acl_handle_access(const struct dentry *dentry,
68631 + const struct vfsmount *mnt, const int fmode);
68632 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68633 + const struct vfsmount *mnt, umode_t *mode);
68634 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
68635 + const struct vfsmount *mnt);
68636 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68637 + const struct vfsmount *mnt);
68638 +int gr_handle_ptrace(struct task_struct *task, const long request);
68639 +int gr_handle_proc_ptrace(struct task_struct *task);
68640 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
68641 + const struct vfsmount *mnt);
68642 +int gr_check_crash_exec(const struct file *filp);
68643 +int gr_acl_is_enabled(void);
68644 +void gr_set_kernel_label(struct task_struct *task);
68645 +void gr_set_role_label(struct task_struct *task, const kuid_t uid,
68646 + const kgid_t gid);
68647 +int gr_set_proc_label(const struct dentry *dentry,
68648 + const struct vfsmount *mnt,
68649 + const int unsafe_flags);
68650 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68651 + const struct vfsmount *mnt);
68652 +__u32 gr_acl_handle_open(const struct dentry *dentry,
68653 + const struct vfsmount *mnt, int acc_mode);
68654 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
68655 + const struct dentry *p_dentry,
68656 + const struct vfsmount *p_mnt,
68657 + int open_flags, int acc_mode, const int imode);
68658 +void gr_handle_create(const struct dentry *dentry,
68659 + const struct vfsmount *mnt);
68660 +void gr_handle_proc_create(const struct dentry *dentry,
68661 + const struct inode *inode);
68662 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68663 + const struct dentry *parent_dentry,
68664 + const struct vfsmount *parent_mnt,
68665 + const int mode);
68666 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68667 + const struct dentry *parent_dentry,
68668 + const struct vfsmount *parent_mnt);
68669 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68670 + const struct vfsmount *mnt);
68671 +void gr_handle_delete(const ino_t ino, const dev_t dev);
68672 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68673 + const struct vfsmount *mnt);
68674 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68675 + const struct dentry *parent_dentry,
68676 + const struct vfsmount *parent_mnt,
68677 + const struct filename *from);
68678 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68679 + const struct dentry *parent_dentry,
68680 + const struct vfsmount *parent_mnt,
68681 + const struct dentry *old_dentry,
68682 + const struct vfsmount *old_mnt, const struct filename *to);
68683 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
68684 +int gr_acl_handle_rename(struct dentry *new_dentry,
68685 + struct dentry *parent_dentry,
68686 + const struct vfsmount *parent_mnt,
68687 + struct dentry *old_dentry,
68688 + struct inode *old_parent_inode,
68689 + struct vfsmount *old_mnt, const struct filename *newname);
68690 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68691 + struct dentry *old_dentry,
68692 + struct dentry *new_dentry,
68693 + struct vfsmount *mnt, const __u8 replace);
68694 +__u32 gr_check_link(const struct dentry *new_dentry,
68695 + const struct dentry *parent_dentry,
68696 + const struct vfsmount *parent_mnt,
68697 + const struct dentry *old_dentry,
68698 + const struct vfsmount *old_mnt);
68699 +int gr_acl_handle_filldir(const struct file *file, const char *name,
68700 + const unsigned int namelen, const ino_t ino);
68701 +
68702 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
68703 + const struct vfsmount *mnt);
68704 +void gr_acl_handle_exit(void);
68705 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
68706 +int gr_acl_handle_procpidmem(const struct task_struct *task);
68707 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68708 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68709 +void gr_audit_ptrace(struct task_struct *task);
68710 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68711 +void gr_put_exec_file(struct task_struct *task);
68712 +
68713 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68714 +
68715 +#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
68716 +extern void gr_learn_resource(const struct task_struct *task, const int res,
68717 + const unsigned long wanted, const int gt);
68718 +#else
68719 +static inline void gr_learn_resource(const struct task_struct *task, const int res,
68720 + const unsigned long wanted, const int gt)
68721 +{
68722 +}
68723 +#endif
68724 +
68725 +#ifdef CONFIG_GRKERNSEC_RESLOG
68726 +extern void gr_log_resource(const struct task_struct *task, const int res,
68727 + const unsigned long wanted, const int gt);
68728 +#else
68729 +static inline void gr_log_resource(const struct task_struct *task, const int res,
68730 + const unsigned long wanted, const int gt)
68731 +{
68732 +}
68733 +#endif
68734 +
68735 +#ifdef CONFIG_GRKERNSEC
68736 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68737 +void gr_handle_vm86(void);
68738 +void gr_handle_mem_readwrite(u64 from, u64 to);
68739 +
68740 +void gr_log_badprocpid(const char *entry);
68741 +
68742 +extern int grsec_enable_dmesg;
68743 +extern int grsec_disable_privio;
68744 +
68745 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68746 +extern kgid_t grsec_proc_gid;
68747 +#endif
68748 +
68749 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68750 +extern int grsec_enable_chroot_findtask;
68751 +#endif
68752 +#ifdef CONFIG_GRKERNSEC_SETXID
68753 +extern int grsec_enable_setxid;
68754 +#endif
68755 +#endif
68756 +
68757 +#endif
68758 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
68759 new file mode 100644
68760 index 0000000..e7ffaaf
68761 --- /dev/null
68762 +++ b/include/linux/grsock.h
68763 @@ -0,0 +1,19 @@
68764 +#ifndef __GRSOCK_H
68765 +#define __GRSOCK_H
68766 +
68767 +extern void gr_attach_curr_ip(const struct sock *sk);
68768 +extern int gr_handle_sock_all(const int family, const int type,
68769 + const int protocol);
68770 +extern int gr_handle_sock_server(const struct sockaddr *sck);
68771 +extern int gr_handle_sock_server_other(const struct sock *sck);
68772 +extern int gr_handle_sock_client(const struct sockaddr *sck);
68773 +extern int gr_search_connect(struct socket * sock,
68774 + struct sockaddr_in * addr);
68775 +extern int gr_search_bind(struct socket * sock,
68776 + struct sockaddr_in * addr);
68777 +extern int gr_search_listen(struct socket * sock);
68778 +extern int gr_search_accept(struct socket * sock);
68779 +extern int gr_search_socket(const int domain, const int type,
68780 + const int protocol);
68781 +
68782 +#endif
68783 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68784 index 7fb31da..08b5114 100644
68785 --- a/include/linux/highmem.h
68786 +++ b/include/linux/highmem.h
68787 @@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
68788 kunmap_atomic(kaddr);
68789 }
68790
68791 +static inline void sanitize_highpage(struct page *page)
68792 +{
68793 + void *kaddr;
68794 + unsigned long flags;
68795 +
68796 + local_irq_save(flags);
68797 + kaddr = kmap_atomic(page);
68798 + clear_page(kaddr);
68799 + kunmap_atomic(kaddr);
68800 + local_irq_restore(flags);
68801 +}
68802 +
68803 static inline void zero_user_segments(struct page *page,
68804 unsigned start1, unsigned end1,
68805 unsigned start2, unsigned end2)
68806 diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
68807 index 1c7b89a..7f52502 100644
68808 --- a/include/linux/hwmon-sysfs.h
68809 +++ b/include/linux/hwmon-sysfs.h
68810 @@ -25,7 +25,8 @@
68811 struct sensor_device_attribute{
68812 struct device_attribute dev_attr;
68813 int index;
68814 -};
68815 +} __do_const;
68816 +typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
68817 #define to_sensor_dev_attr(_dev_attr) \
68818 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
68819
68820 @@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
68821 struct device_attribute dev_attr;
68822 u8 index;
68823 u8 nr;
68824 -};
68825 +} __do_const;
68826 #define to_sensor_dev_attr_2(_dev_attr) \
68827 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
68828
68829 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68830 index d0c4db7..61b3577 100644
68831 --- a/include/linux/i2c.h
68832 +++ b/include/linux/i2c.h
68833 @@ -369,6 +369,7 @@ struct i2c_algorithm {
68834 /* To determine what the adapter supports */
68835 u32 (*functionality) (struct i2c_adapter *);
68836 };
68837 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68838
68839 /*
68840 * i2c_adapter is the structure used to identify a physical i2c bus along
68841 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68842 index d23c3c2..eb63c81 100644
68843 --- a/include/linux/i2o.h
68844 +++ b/include/linux/i2o.h
68845 @@ -565,7 +565,7 @@ struct i2o_controller {
68846 struct i2o_device *exec; /* Executive */
68847 #if BITS_PER_LONG == 64
68848 spinlock_t context_list_lock; /* lock for context_list */
68849 - atomic_t context_list_counter; /* needed for unique contexts */
68850 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68851 struct list_head context_list; /* list of context id's
68852 and pointers */
68853 #endif
68854 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
68855 index aff7ad8..3942bbd 100644
68856 --- a/include/linux/if_pppox.h
68857 +++ b/include/linux/if_pppox.h
68858 @@ -76,7 +76,7 @@ struct pppox_proto {
68859 int (*ioctl)(struct socket *sock, unsigned int cmd,
68860 unsigned long arg);
68861 struct module *owner;
68862 -};
68863 +} __do_const;
68864
68865 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
68866 extern void unregister_pppox_proto(int proto_num);
68867 diff --git a/include/linux/init.h b/include/linux/init.h
68868 index 8618147..0821126 100644
68869 --- a/include/linux/init.h
68870 +++ b/include/linux/init.h
68871 @@ -39,9 +39,36 @@
68872 * Also note, that this data cannot be "const".
68873 */
68874
68875 +#ifdef MODULE
68876 +#define add_init_latent_entropy
68877 +#define add_devinit_latent_entropy
68878 +#define add_cpuinit_latent_entropy
68879 +#define add_meminit_latent_entropy
68880 +#else
68881 +#define add_init_latent_entropy __latent_entropy
68882 +
68883 +#ifdef CONFIG_HOTPLUG
68884 +#define add_devinit_latent_entropy
68885 +#else
68886 +#define add_devinit_latent_entropy __latent_entropy
68887 +#endif
68888 +
68889 +#ifdef CONFIG_HOTPLUG_CPU
68890 +#define add_cpuinit_latent_entropy
68891 +#else
68892 +#define add_cpuinit_latent_entropy __latent_entropy
68893 +#endif
68894 +
68895 +#ifdef CONFIG_MEMORY_HOTPLUG
68896 +#define add_meminit_latent_entropy
68897 +#else
68898 +#define add_meminit_latent_entropy __latent_entropy
68899 +#endif
68900 +#endif
68901 +
68902 /* These are for everybody (although not all archs will actually
68903 discard it in modules) */
68904 -#define __init __section(.init.text) __cold notrace
68905 +#define __init __section(.init.text) __cold notrace add_init_latent_entropy
68906 #define __initdata __section(.init.data)
68907 #define __initconst __constsection(.init.rodata)
68908 #define __exitdata __section(.exit.data)
68909 @@ -94,7 +121,7 @@
68910 #define __exit __section(.exit.text) __exitused __cold notrace
68911
68912 /* Used for HOTPLUG_CPU */
68913 -#define __cpuinit __section(.cpuinit.text) __cold notrace
68914 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
68915 #define __cpuinitdata __section(.cpuinit.data)
68916 #define __cpuinitconst __constsection(.cpuinit.rodata)
68917 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
68918 @@ -102,7 +129,7 @@
68919 #define __cpuexitconst __constsection(.cpuexit.rodata)
68920
68921 /* Used for MEMORY_HOTPLUG */
68922 -#define __meminit __section(.meminit.text) __cold notrace
68923 +#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
68924 #define __meminitdata __section(.meminit.data)
68925 #define __meminitconst __constsection(.meminit.rodata)
68926 #define __memexit __section(.memexit.text) __exitused __cold notrace
68927 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68928 index 5cd0f09..c9f67cc 100644
68929 --- a/include/linux/init_task.h
68930 +++ b/include/linux/init_task.h
68931 @@ -154,6 +154,12 @@ extern struct task_group root_task_group;
68932
68933 #define INIT_TASK_COMM "swapper"
68934
68935 +#ifdef CONFIG_X86
68936 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68937 +#else
68938 +#define INIT_TASK_THREAD_INFO
68939 +#endif
68940 +
68941 /*
68942 * INIT_TASK is used to set up the first task table, touch at
68943 * your own risk!. Base=0, limit=0x1fffff (=2MB)
68944 @@ -193,6 +199,7 @@ extern struct task_group root_task_group;
68945 RCU_POINTER_INITIALIZER(cred, &init_cred), \
68946 .comm = INIT_TASK_COMM, \
68947 .thread = INIT_THREAD, \
68948 + INIT_TASK_THREAD_INFO \
68949 .fs = &init_fs, \
68950 .files = &init_files, \
68951 .signal = &init_signals, \
68952 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68953 index 5fa5afe..ac55b25 100644
68954 --- a/include/linux/interrupt.h
68955 +++ b/include/linux/interrupt.h
68956 @@ -430,7 +430,7 @@ enum
68957 /* map softirq index to softirq name. update 'softirq_to_name' in
68958 * kernel/softirq.c when adding a new softirq.
68959 */
68960 -extern char *softirq_to_name[NR_SOFTIRQS];
68961 +extern const char * const softirq_to_name[NR_SOFTIRQS];
68962
68963 /* softirq mask and active fields moved to irq_cpustat_t in
68964 * asm/hardirq.h to get better cache usage. KAO
68965 @@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68966
68967 struct softirq_action
68968 {
68969 - void (*action)(struct softirq_action *);
68970 -};
68971 + void (*action)(void);
68972 +} __no_const;
68973
68974 asmlinkage void do_softirq(void);
68975 asmlinkage void __do_softirq(void);
68976 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68977 +extern void open_softirq(int nr, void (*action)(void));
68978 extern void softirq_init(void);
68979 extern void __raise_softirq_irqoff(unsigned int nr);
68980
68981 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
68982 index ba3b8a9..7e14ed8 100644
68983 --- a/include/linux/iommu.h
68984 +++ b/include/linux/iommu.h
68985 @@ -113,7 +113,7 @@ struct iommu_ops {
68986 u32 (*domain_get_windows)(struct iommu_domain *domain);
68987
68988 unsigned long pgsize_bitmap;
68989 -};
68990 +} __do_const;
68991
68992 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
68993 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
68994 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
68995 index 85ac9b9b..e5759ab 100644
68996 --- a/include/linux/ioport.h
68997 +++ b/include/linux/ioport.h
68998 @@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
68999 int adjust_resource(struct resource *res, resource_size_t start,
69000 resource_size_t size);
69001 resource_size_t resource_alignment(struct resource *res);
69002 -static inline resource_size_t resource_size(const struct resource *res)
69003 +static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
69004 {
69005 return res->end - res->start + 1;
69006 }
69007 diff --git a/include/linux/irq.h b/include/linux/irq.h
69008 index bc4e066..50468a9 100644
69009 --- a/include/linux/irq.h
69010 +++ b/include/linux/irq.h
69011 @@ -328,7 +328,8 @@ struct irq_chip {
69012 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
69013
69014 unsigned long flags;
69015 -};
69016 +} __do_const;
69017 +typedef struct irq_chip __no_const irq_chip_no_const;
69018
69019 /*
69020 * irq_chip specific flags
69021 diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
69022 index 3fd8e42..a73e966 100644
69023 --- a/include/linux/irqchip/arm-gic.h
69024 +++ b/include/linux/irqchip/arm-gic.h
69025 @@ -59,9 +59,11 @@
69026
69027 #ifndef __ASSEMBLY__
69028
69029 +#include <linux/irq.h>
69030 +
69031 struct device_node;
69032
69033 -extern struct irq_chip gic_arch_extn;
69034 +extern irq_chip_no_const gic_arch_extn;
69035
69036 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
69037 u32 offset, struct device_node *);
69038 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
69039 index 6883e19..06992b1 100644
69040 --- a/include/linux/kallsyms.h
69041 +++ b/include/linux/kallsyms.h
69042 @@ -15,7 +15,8 @@
69043
69044 struct module;
69045
69046 -#ifdef CONFIG_KALLSYMS
69047 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
69048 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69049 /* Lookup the address for a symbol. Returns 0 if not found. */
69050 unsigned long kallsyms_lookup_name(const char *name);
69051
69052 @@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
69053 /* Stupid that this does nothing, but I didn't create this mess. */
69054 #define __print_symbol(fmt, addr)
69055 #endif /*CONFIG_KALLSYMS*/
69056 +#else /* when included by kallsyms.c, vsnprintf.c, or
69057 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
69058 +extern void __print_symbol(const char *fmt, unsigned long address);
69059 +extern int sprint_backtrace(char *buffer, unsigned long address);
69060 +extern int sprint_symbol(char *buffer, unsigned long address);
69061 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
69062 +const char *kallsyms_lookup(unsigned long addr,
69063 + unsigned long *symbolsize,
69064 + unsigned long *offset,
69065 + char **modname, char *namebuf);
69066 +#endif
69067
69068 /* This macro allows us to keep printk typechecking */
69069 static __printf(1, 2)
69070 diff --git a/include/linux/key-type.h b/include/linux/key-type.h
69071 index 518a53a..5e28358 100644
69072 --- a/include/linux/key-type.h
69073 +++ b/include/linux/key-type.h
69074 @@ -125,7 +125,7 @@ struct key_type {
69075 /* internal fields */
69076 struct list_head link; /* link in types list */
69077 struct lock_class_key lock_class; /* key->sem lock class */
69078 -};
69079 +} __do_const;
69080
69081 extern struct key_type key_type_keyring;
69082
69083 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
69084 index c6e091b..a940adf 100644
69085 --- a/include/linux/kgdb.h
69086 +++ b/include/linux/kgdb.h
69087 @@ -52,7 +52,7 @@ extern int kgdb_connected;
69088 extern int kgdb_io_module_registered;
69089
69090 extern atomic_t kgdb_setting_breakpoint;
69091 -extern atomic_t kgdb_cpu_doing_single_step;
69092 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
69093
69094 extern struct task_struct *kgdb_usethread;
69095 extern struct task_struct *kgdb_contthread;
69096 @@ -254,7 +254,7 @@ struct kgdb_arch {
69097 void (*correct_hw_break)(void);
69098
69099 void (*enable_nmi)(bool on);
69100 -};
69101 +} __do_const;
69102
69103 /**
69104 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
69105 @@ -279,7 +279,7 @@ struct kgdb_io {
69106 void (*pre_exception) (void);
69107 void (*post_exception) (void);
69108 int is_console;
69109 -};
69110 +} __do_const;
69111
69112 extern struct kgdb_arch arch_kgdb_ops;
69113
69114 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
69115 index 5398d58..5883a34 100644
69116 --- a/include/linux/kmod.h
69117 +++ b/include/linux/kmod.h
69118 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
69119 * usually useless though. */
69120 extern __printf(2, 3)
69121 int __request_module(bool wait, const char *name, ...);
69122 +extern __printf(3, 4)
69123 +int ___request_module(bool wait, char *param_name, const char *name, ...);
69124 #define request_module(mod...) __request_module(true, mod)
69125 #define request_module_nowait(mod...) __request_module(false, mod)
69126 #define try_then_request_module(x, mod...) \
69127 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
69128 index 939b112..ed6ed51 100644
69129 --- a/include/linux/kobject.h
69130 +++ b/include/linux/kobject.h
69131 @@ -111,7 +111,7 @@ struct kobj_type {
69132 struct attribute **default_attrs;
69133 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
69134 const void *(*namespace)(struct kobject *kobj);
69135 -};
69136 +} __do_const;
69137
69138 struct kobj_uevent_env {
69139 char *envp[UEVENT_NUM_ENVP];
69140 @@ -134,6 +134,7 @@ struct kobj_attribute {
69141 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
69142 const char *buf, size_t count);
69143 };
69144 +typedef struct kobj_attribute __no_const kobj_attribute_no_const;
69145
69146 extern const struct sysfs_ops kobj_sysfs_ops;
69147
69148 diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
69149 index f66b065..c2c29b4 100644
69150 --- a/include/linux/kobject_ns.h
69151 +++ b/include/linux/kobject_ns.h
69152 @@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
69153 const void *(*netlink_ns)(struct sock *sk);
69154 const void *(*initial_ns)(void);
69155 void (*drop_ns)(void *);
69156 -};
69157 +} __do_const;
69158
69159 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
69160 int kobj_ns_type_registered(enum kobj_ns_type type);
69161 diff --git a/include/linux/kref.h b/include/linux/kref.h
69162 index 4972e6e..de4d19b 100644
69163 --- a/include/linux/kref.h
69164 +++ b/include/linux/kref.h
69165 @@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
69166 static inline int kref_sub(struct kref *kref, unsigned int count,
69167 void (*release)(struct kref *kref))
69168 {
69169 - WARN_ON(release == NULL);
69170 + BUG_ON(release == NULL);
69171
69172 if (atomic_sub_and_test((int) count, &kref->refcount)) {
69173 release(kref);
69174 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
69175 index c139582..0b5b102 100644
69176 --- a/include/linux/kvm_host.h
69177 +++ b/include/linux/kvm_host.h
69178 @@ -424,7 +424,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
69179 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
69180 void vcpu_put(struct kvm_vcpu *vcpu);
69181
69182 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69183 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69184 struct module *module);
69185 void kvm_exit(void);
69186
69187 @@ -582,7 +582,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
69188 struct kvm_guest_debug *dbg);
69189 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
69190
69191 -int kvm_arch_init(void *opaque);
69192 +int kvm_arch_init(const void *opaque);
69193 void kvm_arch_exit(void);
69194
69195 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
69196 diff --git a/include/linux/libata.h b/include/linux/libata.h
69197 index eae7a05..2cdd875 100644
69198 --- a/include/linux/libata.h
69199 +++ b/include/linux/libata.h
69200 @@ -919,7 +919,7 @@ struct ata_port_operations {
69201 * fields must be pointers.
69202 */
69203 const struct ata_port_operations *inherits;
69204 -};
69205 +} __do_const;
69206
69207 struct ata_port_info {
69208 unsigned long flags;
69209 diff --git a/include/linux/list.h b/include/linux/list.h
69210 index 6a1f8df..eaec1ff 100644
69211 --- a/include/linux/list.h
69212 +++ b/include/linux/list.h
69213 @@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
69214 extern void list_del(struct list_head *entry);
69215 #endif
69216
69217 +extern void __pax_list_add(struct list_head *new,
69218 + struct list_head *prev,
69219 + struct list_head *next);
69220 +static inline void pax_list_add(struct list_head *new, struct list_head *head)
69221 +{
69222 + __pax_list_add(new, head, head->next);
69223 +}
69224 +static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
69225 +{
69226 + __pax_list_add(new, head->prev, head);
69227 +}
69228 +extern void pax_list_del(struct list_head *entry);
69229 +
69230 /**
69231 * list_replace - replace old entry by new one
69232 * @old : the element to be replaced
69233 @@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
69234 INIT_LIST_HEAD(entry);
69235 }
69236
69237 +extern void pax_list_del_init(struct list_head *entry);
69238 +
69239 /**
69240 * list_move - delete from one list and add as another's head
69241 * @list: the entry to move
69242 diff --git a/include/linux/math64.h b/include/linux/math64.h
69243 index b8ba855..0148090 100644
69244 --- a/include/linux/math64.h
69245 +++ b/include/linux/math64.h
69246 @@ -14,7 +14,7 @@
69247 * This is commonly provided by 32bit archs to provide an optimized 64bit
69248 * divide.
69249 */
69250 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69251 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69252 {
69253 *remainder = dividend % divisor;
69254 return dividend / divisor;
69255 @@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
69256 #define div64_long(x,y) div_s64((x),(y))
69257
69258 #ifndef div_u64_rem
69259 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69260 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69261 {
69262 *remainder = do_div(dividend, divisor);
69263 return dividend;
69264 @@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
69265 * divide.
69266 */
69267 #ifndef div_u64
69268 -static inline u64 div_u64(u64 dividend, u32 divisor)
69269 +static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
69270 {
69271 u32 remainder;
69272 return div_u64_rem(dividend, divisor, &remainder);
69273 diff --git a/include/linux/mm.h b/include/linux/mm.h
69274 index e2091b8..821db54 100644
69275 --- a/include/linux/mm.h
69276 +++ b/include/linux/mm.h
69277 @@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
69278 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
69279 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
69280 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
69281 +
69282 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69283 +#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
69284 +#endif
69285 +
69286 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
69287
69288 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
69289 @@ -202,8 +207,8 @@ struct vm_operations_struct {
69290 /* called by access_process_vm when get_user_pages() fails, typically
69291 * for use by special VMAs that can switch between memory and hardware
69292 */
69293 - int (*access)(struct vm_area_struct *vma, unsigned long addr,
69294 - void *buf, int len, int write);
69295 + ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
69296 + void *buf, size_t len, int write);
69297 #ifdef CONFIG_NUMA
69298 /*
69299 * set_policy() op must add a reference to any non-NULL @new mempolicy
69300 @@ -233,6 +238,7 @@ struct vm_operations_struct {
69301 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
69302 unsigned long size, pgoff_t pgoff);
69303 };
69304 +typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
69305
69306 struct mmu_gather;
69307 struct inode;
69308 @@ -970,8 +976,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
69309 unsigned long *pfn);
69310 int follow_phys(struct vm_area_struct *vma, unsigned long address,
69311 unsigned int flags, unsigned long *prot, resource_size_t *phys);
69312 -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
69313 - void *buf, int len, int write);
69314 +ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
69315 + void *buf, size_t len, int write);
69316
69317 static inline void unmap_shared_mapping_range(struct address_space *mapping,
69318 loff_t const holebegin, loff_t const holelen)
69319 @@ -1010,9 +1016,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
69320 }
69321 #endif
69322
69323 -extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
69324 -extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
69325 - void *buf, int len, int write);
69326 +extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
69327 +extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
69328 + void *buf, size_t len, int write);
69329
69330 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69331 unsigned long start, unsigned long nr_pages,
69332 @@ -1043,34 +1049,6 @@ int set_page_dirty(struct page *page);
69333 int set_page_dirty_lock(struct page *page);
69334 int clear_page_dirty_for_io(struct page *page);
69335
69336 -/* Is the vma a continuation of the stack vma above it? */
69337 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
69338 -{
69339 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
69340 -}
69341 -
69342 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
69343 - unsigned long addr)
69344 -{
69345 - return (vma->vm_flags & VM_GROWSDOWN) &&
69346 - (vma->vm_start == addr) &&
69347 - !vma_growsdown(vma->vm_prev, addr);
69348 -}
69349 -
69350 -/* Is the vma a continuation of the stack vma below it? */
69351 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
69352 -{
69353 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
69354 -}
69355 -
69356 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
69357 - unsigned long addr)
69358 -{
69359 - return (vma->vm_flags & VM_GROWSUP) &&
69360 - (vma->vm_end == addr) &&
69361 - !vma_growsup(vma->vm_next, addr);
69362 -}
69363 -
69364 extern pid_t
69365 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
69366
69367 @@ -1173,6 +1151,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
69368 }
69369 #endif
69370
69371 +#ifdef CONFIG_MMU
69372 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
69373 +#else
69374 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
69375 +{
69376 + return __pgprot(0);
69377 +}
69378 +#endif
69379 +
69380 int vma_wants_writenotify(struct vm_area_struct *vma);
69381
69382 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
69383 @@ -1191,8 +1178,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
69384 {
69385 return 0;
69386 }
69387 +
69388 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
69389 + unsigned long address)
69390 +{
69391 + return 0;
69392 +}
69393 #else
69394 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69395 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69396 #endif
69397
69398 #ifdef __PAGETABLE_PMD_FOLDED
69399 @@ -1201,8 +1195,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
69400 {
69401 return 0;
69402 }
69403 +
69404 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
69405 + unsigned long address)
69406 +{
69407 + return 0;
69408 +}
69409 #else
69410 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
69411 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
69412 #endif
69413
69414 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
69415 @@ -1220,11 +1221,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
69416 NULL: pud_offset(pgd, address);
69417 }
69418
69419 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
69420 +{
69421 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
69422 + NULL: pud_offset(pgd, address);
69423 +}
69424 +
69425 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
69426 {
69427 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
69428 NULL: pmd_offset(pud, address);
69429 }
69430 +
69431 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
69432 +{
69433 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
69434 + NULL: pmd_offset(pud, address);
69435 +}
69436 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
69437
69438 #if USE_SPLIT_PTLOCKS
69439 @@ -1455,6 +1468,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69440 unsigned long len, unsigned long prot, unsigned long flags,
69441 unsigned long pgoff, unsigned long *populate);
69442 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
69443 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
69444
69445 #ifdef CONFIG_MMU
69446 extern int __mm_populate(unsigned long addr, unsigned long len,
69447 @@ -1483,6 +1497,7 @@ struct vm_unmapped_area_info {
69448 unsigned long high_limit;
69449 unsigned long align_mask;
69450 unsigned long align_offset;
69451 + unsigned long threadstack_offset;
69452 };
69453
69454 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
69455 @@ -1561,6 +1576,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
69456 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
69457 struct vm_area_struct **pprev);
69458
69459 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
69460 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
69461 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
69462 +
69463 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
69464 NULL if none. Assume start_addr < end_addr. */
69465 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
69466 @@ -1589,15 +1608,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
69467 return vma;
69468 }
69469
69470 -#ifdef CONFIG_MMU
69471 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
69472 -#else
69473 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
69474 -{
69475 - return __pgprot(0);
69476 -}
69477 -#endif
69478 -
69479 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
69480 unsigned long change_prot_numa(struct vm_area_struct *vma,
69481 unsigned long start, unsigned long end);
69482 @@ -1649,6 +1659,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
69483 static inline void vm_stat_account(struct mm_struct *mm,
69484 unsigned long flags, struct file *file, long pages)
69485 {
69486 +
69487 +#ifdef CONFIG_PAX_RANDMMAP
69488 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
69489 +#endif
69490 +
69491 mm->total_vm += pages;
69492 }
69493 #endif /* CONFIG_PROC_FS */
69494 @@ -1725,7 +1740,7 @@ extern int unpoison_memory(unsigned long pfn);
69495 extern int sysctl_memory_failure_early_kill;
69496 extern int sysctl_memory_failure_recovery;
69497 extern void shake_page(struct page *p, int access);
69498 -extern atomic_long_t num_poisoned_pages;
69499 +extern atomic_long_unchecked_t num_poisoned_pages;
69500 extern int soft_offline_page(struct page *page, int flags);
69501
69502 extern void dump_page(struct page *page);
69503 @@ -1756,5 +1771,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
69504 static inline bool page_is_guard(struct page *page) { return false; }
69505 #endif /* CONFIG_DEBUG_PAGEALLOC */
69506
69507 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69508 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
69509 +#else
69510 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
69511 +#endif
69512 +
69513 #endif /* __KERNEL__ */
69514 #endif /* _LINUX_MM_H */
69515 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
69516 index ace9a5f..81bdb59 100644
69517 --- a/include/linux/mm_types.h
69518 +++ b/include/linux/mm_types.h
69519 @@ -289,6 +289,8 @@ struct vm_area_struct {
69520 #ifdef CONFIG_NUMA
69521 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
69522 #endif
69523 +
69524 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
69525 };
69526
69527 struct core_thread {
69528 @@ -437,6 +439,24 @@ struct mm_struct {
69529 int first_nid;
69530 #endif
69531 struct uprobes_state uprobes_state;
69532 +
69533 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69534 + unsigned long pax_flags;
69535 +#endif
69536 +
69537 +#ifdef CONFIG_PAX_DLRESOLVE
69538 + unsigned long call_dl_resolve;
69539 +#endif
69540 +
69541 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
69542 + unsigned long call_syscall;
69543 +#endif
69544 +
69545 +#ifdef CONFIG_PAX_ASLR
69546 + unsigned long delta_mmap; /* randomized offset */
69547 + unsigned long delta_stack; /* randomized offset */
69548 +#endif
69549 +
69550 };
69551
69552 /* first nid will either be a valid NID or one of these values */
69553 diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
69554 index c5d5278..f0b68c8 100644
69555 --- a/include/linux/mmiotrace.h
69556 +++ b/include/linux/mmiotrace.h
69557 @@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
69558 /* Called from ioremap.c */
69559 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
69560 void __iomem *addr);
69561 -extern void mmiotrace_iounmap(volatile void __iomem *addr);
69562 +extern void mmiotrace_iounmap(const volatile void __iomem *addr);
69563
69564 /* For anyone to insert markers. Remember trailing newline. */
69565 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
69566 @@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
69567 {
69568 }
69569
69570 -static inline void mmiotrace_iounmap(volatile void __iomem *addr)
69571 +static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
69572 {
69573 }
69574
69575 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
69576 index c74092e..b663967 100644
69577 --- a/include/linux/mmzone.h
69578 +++ b/include/linux/mmzone.h
69579 @@ -396,7 +396,7 @@ struct zone {
69580 unsigned long flags; /* zone flags, see below */
69581
69582 /* Zone statistics */
69583 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69584 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69585
69586 /*
69587 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
69588 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
69589 index 779cf7c..e6768240 100644
69590 --- a/include/linux/mod_devicetable.h
69591 +++ b/include/linux/mod_devicetable.h
69592 @@ -12,7 +12,7 @@
69593 typedef unsigned long kernel_ulong_t;
69594 #endif
69595
69596 -#define PCI_ANY_ID (~0)
69597 +#define PCI_ANY_ID ((__u16)~0)
69598
69599 struct pci_device_id {
69600 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
69601 @@ -138,7 +138,7 @@ struct usb_device_id {
69602 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
69603 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
69604
69605 -#define HID_ANY_ID (~0)
69606 +#define HID_ANY_ID (~0U)
69607 #define HID_BUS_ANY 0xffff
69608 #define HID_GROUP_ANY 0x0000
69609
69610 @@ -464,7 +464,7 @@ struct dmi_system_id {
69611 const char *ident;
69612 struct dmi_strmatch matches[4];
69613 void *driver_data;
69614 -};
69615 +} __do_const;
69616 /*
69617 * struct dmi_device_id appears during expansion of
69618 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
69619 diff --git a/include/linux/module.h b/include/linux/module.h
69620 index ead1b57..81a3b6c 100644
69621 --- a/include/linux/module.h
69622 +++ b/include/linux/module.h
69623 @@ -17,9 +17,11 @@
69624 #include <linux/moduleparam.h>
69625 #include <linux/tracepoint.h>
69626 #include <linux/export.h>
69627 +#include <linux/fs.h>
69628
69629 #include <linux/percpu.h>
69630 #include <asm/module.h>
69631 +#include <asm/pgtable.h>
69632
69633 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
69634 #define MODULE_SIG_STRING "~Module signature appended~\n"
69635 @@ -54,12 +56,13 @@ struct module_attribute {
69636 int (*test)(struct module *);
69637 void (*free)(struct module *);
69638 };
69639 +typedef struct module_attribute __no_const module_attribute_no_const;
69640
69641 struct module_version_attribute {
69642 struct module_attribute mattr;
69643 const char *module_name;
69644 const char *version;
69645 -} __attribute__ ((__aligned__(sizeof(void *))));
69646 +} __do_const __attribute__ ((__aligned__(sizeof(void *))));
69647
69648 extern ssize_t __modver_version_show(struct module_attribute *,
69649 struct module_kobject *, char *);
69650 @@ -232,7 +235,7 @@ struct module
69651
69652 /* Sysfs stuff. */
69653 struct module_kobject mkobj;
69654 - struct module_attribute *modinfo_attrs;
69655 + module_attribute_no_const *modinfo_attrs;
69656 const char *version;
69657 const char *srcversion;
69658 struct kobject *holders_dir;
69659 @@ -281,19 +284,16 @@ struct module
69660 int (*init)(void);
69661
69662 /* If this is non-NULL, vfree after init() returns */
69663 - void *module_init;
69664 + void *module_init_rx, *module_init_rw;
69665
69666 /* Here is the actual code + data, vfree'd on unload. */
69667 - void *module_core;
69668 + void *module_core_rx, *module_core_rw;
69669
69670 /* Here are the sizes of the init and core sections */
69671 - unsigned int init_size, core_size;
69672 + unsigned int init_size_rw, core_size_rw;
69673
69674 /* The size of the executable code in each section. */
69675 - unsigned int init_text_size, core_text_size;
69676 -
69677 - /* Size of RO sections of the module (text+rodata) */
69678 - unsigned int init_ro_size, core_ro_size;
69679 + unsigned int init_size_rx, core_size_rx;
69680
69681 /* Arch-specific module values */
69682 struct mod_arch_specific arch;
69683 @@ -349,6 +349,10 @@ struct module
69684 #ifdef CONFIG_EVENT_TRACING
69685 struct ftrace_event_call **trace_events;
69686 unsigned int num_trace_events;
69687 + struct file_operations trace_id;
69688 + struct file_operations trace_enable;
69689 + struct file_operations trace_format;
69690 + struct file_operations trace_filter;
69691 #endif
69692 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
69693 unsigned int num_ftrace_callsites;
69694 @@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
69695 bool is_module_percpu_address(unsigned long addr);
69696 bool is_module_text_address(unsigned long addr);
69697
69698 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
69699 +{
69700 +
69701 +#ifdef CONFIG_PAX_KERNEXEC
69702 + if (ktla_ktva(addr) >= (unsigned long)start &&
69703 + ktla_ktva(addr) < (unsigned long)start + size)
69704 + return 1;
69705 +#endif
69706 +
69707 + return ((void *)addr >= start && (void *)addr < start + size);
69708 +}
69709 +
69710 +static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
69711 +{
69712 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
69713 +}
69714 +
69715 +static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
69716 +{
69717 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
69718 +}
69719 +
69720 +static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
69721 +{
69722 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
69723 +}
69724 +
69725 +static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
69726 +{
69727 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
69728 +}
69729 +
69730 static inline int within_module_core(unsigned long addr, const struct module *mod)
69731 {
69732 - return (unsigned long)mod->module_core <= addr &&
69733 - addr < (unsigned long)mod->module_core + mod->core_size;
69734 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
69735 }
69736
69737 static inline int within_module_init(unsigned long addr, const struct module *mod)
69738 {
69739 - return (unsigned long)mod->module_init <= addr &&
69740 - addr < (unsigned long)mod->module_init + mod->init_size;
69741 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
69742 }
69743
69744 /* Search for module by name: must hold module_mutex. */
69745 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
69746 index 560ca53..ef621ef 100644
69747 --- a/include/linux/moduleloader.h
69748 +++ b/include/linux/moduleloader.h
69749 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
69750 sections. Returns NULL on failure. */
69751 void *module_alloc(unsigned long size);
69752
69753 +#ifdef CONFIG_PAX_KERNEXEC
69754 +void *module_alloc_exec(unsigned long size);
69755 +#else
69756 +#define module_alloc_exec(x) module_alloc(x)
69757 +#endif
69758 +
69759 /* Free memory returned from module_alloc. */
69760 void module_free(struct module *mod, void *module_region);
69761
69762 +#ifdef CONFIG_PAX_KERNEXEC
69763 +void module_free_exec(struct module *mod, void *module_region);
69764 +#else
69765 +#define module_free_exec(x, y) module_free((x), (y))
69766 +#endif
69767 +
69768 /*
69769 * Apply the given relocation to the (simplified) ELF. Return -error
69770 * or 0.
69771 @@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
69772 unsigned int relsec,
69773 struct module *me)
69774 {
69775 +#ifdef CONFIG_MODULES
69776 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
69777 +#endif
69778 return -ENOEXEC;
69779 }
69780 #endif
69781 @@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
69782 unsigned int relsec,
69783 struct module *me)
69784 {
69785 +#ifdef CONFIG_MODULES
69786 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
69787 +#endif
69788 return -ENOEXEC;
69789 }
69790 #endif
69791 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
69792 index 137b419..fe663ec 100644
69793 --- a/include/linux/moduleparam.h
69794 +++ b/include/linux/moduleparam.h
69795 @@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
69796 * @len is usually just sizeof(string).
69797 */
69798 #define module_param_string(name, string, len, perm) \
69799 - static const struct kparam_string __param_string_##name \
69800 + static const struct kparam_string __param_string_##name __used \
69801 = { len, string }; \
69802 __module_param_call(MODULE_PARAM_PREFIX, name, \
69803 &param_ops_string, \
69804 @@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
69805 */
69806 #define module_param_array_named(name, array, type, nump, perm) \
69807 param_check_##type(name, &(array)[0]); \
69808 - static const struct kparam_array __param_arr_##name \
69809 + static const struct kparam_array __param_arr_##name __used \
69810 = { .max = ARRAY_SIZE(array), .num = nump, \
69811 .ops = &param_ops_##type, \
69812 .elemsize = sizeof(array[0]), .elem = array }; \
69813 diff --git a/include/linux/namei.h b/include/linux/namei.h
69814 index 5a5ff57..5ae5070 100644
69815 --- a/include/linux/namei.h
69816 +++ b/include/linux/namei.h
69817 @@ -19,7 +19,7 @@ struct nameidata {
69818 unsigned seq;
69819 int last_type;
69820 unsigned depth;
69821 - char *saved_names[MAX_NESTED_LINKS + 1];
69822 + const char *saved_names[MAX_NESTED_LINKS + 1];
69823 };
69824
69825 /*
69826 @@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
69827
69828 extern void nd_jump_link(struct nameidata *nd, struct path *path);
69829
69830 -static inline void nd_set_link(struct nameidata *nd, char *path)
69831 +static inline void nd_set_link(struct nameidata *nd, const char *path)
69832 {
69833 nd->saved_names[nd->depth] = path;
69834 }
69835
69836 -static inline char *nd_get_link(struct nameidata *nd)
69837 +static inline const char *nd_get_link(const struct nameidata *nd)
69838 {
69839 return nd->saved_names[nd->depth];
69840 }
69841 diff --git a/include/linux/net.h b/include/linux/net.h
69842 index aa16731..514b875 100644
69843 --- a/include/linux/net.h
69844 +++ b/include/linux/net.h
69845 @@ -183,7 +183,7 @@ struct net_proto_family {
69846 int (*create)(struct net *net, struct socket *sock,
69847 int protocol, int kern);
69848 struct module *owner;
69849 -};
69850 +} __do_const;
69851
69852 struct iovec;
69853 struct kvec;
69854 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
69855 index 6151e90..2e0afb0 100644
69856 --- a/include/linux/netdevice.h
69857 +++ b/include/linux/netdevice.h
69858 @@ -1028,6 +1028,7 @@ struct net_device_ops {
69859 int (*ndo_change_carrier)(struct net_device *dev,
69860 bool new_carrier);
69861 };
69862 +typedef struct net_device_ops __no_const net_device_ops_no_const;
69863
69864 /*
69865 * The DEVICE structure.
69866 @@ -1094,7 +1095,7 @@ struct net_device {
69867 int iflink;
69868
69869 struct net_device_stats stats;
69870 - atomic_long_t rx_dropped; /* dropped packets by core network
69871 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
69872 * Do not use this in drivers.
69873 */
69874
69875 diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
69876 index ee14284..bc65d63 100644
69877 --- a/include/linux/netfilter.h
69878 +++ b/include/linux/netfilter.h
69879 @@ -82,7 +82,7 @@ struct nf_sockopt_ops {
69880 #endif
69881 /* Use the module struct to lock set/get code in place */
69882 struct module *owner;
69883 -};
69884 +} __do_const;
69885
69886 /* Function to register/unregister hook points. */
69887 int nf_register_hook(struct nf_hook_ops *reg);
69888 diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
69889 index 7958e84..ed74d7a 100644
69890 --- a/include/linux/netfilter/ipset/ip_set.h
69891 +++ b/include/linux/netfilter/ipset/ip_set.h
69892 @@ -98,7 +98,7 @@ struct ip_set_type_variant {
69893 /* Return true if "b" set is the same as "a"
69894 * according to the create set parameters */
69895 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
69896 -};
69897 +} __do_const;
69898
69899 /* The core set type structure */
69900 struct ip_set_type {
69901 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
69902 index ecbb8e4..8a1c4e1 100644
69903 --- a/include/linux/netfilter/nfnetlink.h
69904 +++ b/include/linux/netfilter/nfnetlink.h
69905 @@ -16,7 +16,7 @@ struct nfnl_callback {
69906 const struct nlattr * const cda[]);
69907 const struct nla_policy *policy; /* netlink attribute policy */
69908 const u_int16_t attr_count; /* number of nlattr's */
69909 -};
69910 +} __do_const;
69911
69912 struct nfnetlink_subsystem {
69913 const char *name;
69914 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
69915 new file mode 100644
69916 index 0000000..33f4af8
69917 --- /dev/null
69918 +++ b/include/linux/netfilter/xt_gradm.h
69919 @@ -0,0 +1,9 @@
69920 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
69921 +#define _LINUX_NETFILTER_XT_GRADM_H 1
69922 +
69923 +struct xt_gradm_mtinfo {
69924 + __u16 flags;
69925 + __u16 invflags;
69926 +};
69927 +
69928 +#endif
69929 diff --git a/include/linux/nls.h b/include/linux/nls.h
69930 index 5dc635f..35f5e11 100644
69931 --- a/include/linux/nls.h
69932 +++ b/include/linux/nls.h
69933 @@ -31,7 +31,7 @@ struct nls_table {
69934 const unsigned char *charset2upper;
69935 struct module *owner;
69936 struct nls_table *next;
69937 -};
69938 +} __do_const;
69939
69940 /* this value hold the maximum octet of charset */
69941 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
69942 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
69943 index d65746e..62e72c2 100644
69944 --- a/include/linux/notifier.h
69945 +++ b/include/linux/notifier.h
69946 @@ -51,7 +51,8 @@ struct notifier_block {
69947 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
69948 struct notifier_block __rcu *next;
69949 int priority;
69950 -};
69951 +} __do_const;
69952 +typedef struct notifier_block __no_const notifier_block_no_const;
69953
69954 struct atomic_notifier_head {
69955 spinlock_t lock;
69956 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
69957 index a4c5624..79d6d88 100644
69958 --- a/include/linux/oprofile.h
69959 +++ b/include/linux/oprofile.h
69960 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69961 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69962 char const * name, ulong * val);
69963
69964 -/** Create a file for read-only access to an atomic_t. */
69965 +/** Create a file for read-only access to an atomic_unchecked_t. */
69966 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69967 - char const * name, atomic_t * val);
69968 + char const * name, atomic_unchecked_t * val);
69969
69970 /** create a directory */
69971 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69972 diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
69973 index 45fc162..01a4068 100644
69974 --- a/include/linux/pci_hotplug.h
69975 +++ b/include/linux/pci_hotplug.h
69976 @@ -80,7 +80,8 @@ struct hotplug_slot_ops {
69977 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
69978 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
69979 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
69980 -};
69981 +} __do_const;
69982 +typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
69983
69984 /**
69985 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
69986 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69987 index 1d795df..727aa7b 100644
69988 --- a/include/linux/perf_event.h
69989 +++ b/include/linux/perf_event.h
69990 @@ -333,8 +333,8 @@ struct perf_event {
69991
69992 enum perf_event_active_state state;
69993 unsigned int attach_state;
69994 - local64_t count;
69995 - atomic64_t child_count;
69996 + local64_t count; /* PaX: fix it one day */
69997 + atomic64_unchecked_t child_count;
69998
69999 /*
70000 * These are the total time in nanoseconds that the event
70001 @@ -385,8 +385,8 @@ struct perf_event {
70002 * These accumulate total time (in nanoseconds) that children
70003 * events have been enabled and running, respectively.
70004 */
70005 - atomic64_t child_total_time_enabled;
70006 - atomic64_t child_total_time_running;
70007 + atomic64_unchecked_t child_total_time_enabled;
70008 + atomic64_unchecked_t child_total_time_running;
70009
70010 /*
70011 * Protect attach/detach and child_list:
70012 @@ -704,7 +704,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
70013 entry->ip[entry->nr++] = ip;
70014 }
70015
70016 -extern int sysctl_perf_event_paranoid;
70017 +extern int sysctl_perf_event_legitimately_concerned;
70018 extern int sysctl_perf_event_mlock;
70019 extern int sysctl_perf_event_sample_rate;
70020
70021 @@ -714,17 +714,17 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
70022
70023 static inline bool perf_paranoid_tracepoint_raw(void)
70024 {
70025 - return sysctl_perf_event_paranoid > -1;
70026 + return sysctl_perf_event_legitimately_concerned > -1;
70027 }
70028
70029 static inline bool perf_paranoid_cpu(void)
70030 {
70031 - return sysctl_perf_event_paranoid > 0;
70032 + return sysctl_perf_event_legitimately_concerned > 0;
70033 }
70034
70035 static inline bool perf_paranoid_kernel(void)
70036 {
70037 - return sysctl_perf_event_paranoid > 1;
70038 + return sysctl_perf_event_legitimately_concerned > 1;
70039 }
70040
70041 extern void perf_event_init(void);
70042 @@ -812,7 +812,7 @@ static inline void perf_restore_debug_store(void) { }
70043 */
70044 #define perf_cpu_notifier(fn) \
70045 do { \
70046 - static struct notifier_block fn##_nb __cpuinitdata = \
70047 + static struct notifier_block fn##_nb = \
70048 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
70049 unsigned long cpu = smp_processor_id(); \
70050 unsigned long flags; \
70051 @@ -831,7 +831,7 @@ do { \
70052 struct perf_pmu_events_attr {
70053 struct device_attribute attr;
70054 u64 id;
70055 -};
70056 +} __do_const;
70057
70058 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
70059 static struct perf_pmu_events_attr _var = { \
70060 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
70061 index ad1a427..6419649 100644
70062 --- a/include/linux/pipe_fs_i.h
70063 +++ b/include/linux/pipe_fs_i.h
70064 @@ -45,9 +45,9 @@ struct pipe_buffer {
70065 struct pipe_inode_info {
70066 wait_queue_head_t wait;
70067 unsigned int nrbufs, curbuf, buffers;
70068 - unsigned int readers;
70069 - unsigned int writers;
70070 - unsigned int waiting_writers;
70071 + atomic_t readers;
70072 + atomic_t writers;
70073 + atomic_t waiting_writers;
70074 unsigned int r_counter;
70075 unsigned int w_counter;
70076 struct page *tmp_page;
70077 diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
70078 index 5f28cae..3d23723 100644
70079 --- a/include/linux/platform_data/usb-ehci-s5p.h
70080 +++ b/include/linux/platform_data/usb-ehci-s5p.h
70081 @@ -14,7 +14,7 @@
70082 struct s5p_ehci_platdata {
70083 int (*phy_init)(struct platform_device *pdev, int type);
70084 int (*phy_exit)(struct platform_device *pdev, int type);
70085 -};
70086 +} __no_const;
70087
70088 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
70089
70090 diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
70091 index c256c59..8ea94c7 100644
70092 --- a/include/linux/platform_data/usb-exynos.h
70093 +++ b/include/linux/platform_data/usb-exynos.h
70094 @@ -14,7 +14,7 @@
70095 struct exynos4_ohci_platdata {
70096 int (*phy_init)(struct platform_device *pdev, int type);
70097 int (*phy_exit)(struct platform_device *pdev, int type);
70098 -};
70099 +} __no_const;
70100
70101 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
70102
70103 diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
70104 index 7c1d252..c5c773e 100644
70105 --- a/include/linux/pm_domain.h
70106 +++ b/include/linux/pm_domain.h
70107 @@ -48,7 +48,7 @@ struct gpd_dev_ops {
70108
70109 struct gpd_cpu_data {
70110 unsigned int saved_exit_latency;
70111 - struct cpuidle_state *idle_state;
70112 + cpuidle_state_no_const *idle_state;
70113 };
70114
70115 struct generic_pm_domain {
70116 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
70117 index 7d7e09e..8671ef8 100644
70118 --- a/include/linux/pm_runtime.h
70119 +++ b/include/linux/pm_runtime.h
70120 @@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
70121
70122 static inline void pm_runtime_mark_last_busy(struct device *dev)
70123 {
70124 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
70125 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
70126 }
70127
70128 #else /* !CONFIG_PM_RUNTIME */
70129 diff --git a/include/linux/pnp.h b/include/linux/pnp.h
70130 index 195aafc..49a7bc2 100644
70131 --- a/include/linux/pnp.h
70132 +++ b/include/linux/pnp.h
70133 @@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
70134 struct pnp_fixup {
70135 char id[7];
70136 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
70137 -};
70138 +} __do_const;
70139
70140 /* config parameters */
70141 #define PNP_CONFIG_NORMAL 0x0001
70142 diff --git a/include/linux/poison.h b/include/linux/poison.h
70143 index 2110a81..13a11bb 100644
70144 --- a/include/linux/poison.h
70145 +++ b/include/linux/poison.h
70146 @@ -19,8 +19,8 @@
70147 * under normal circumstances, used to verify that nobody uses
70148 * non-initialized list entries.
70149 */
70150 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
70151 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
70152 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
70153 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
70154
70155 /********** include/linux/timer.h **********/
70156 /*
70157 diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
70158 index c0f44c2..1572583 100644
70159 --- a/include/linux/power/smartreflex.h
70160 +++ b/include/linux/power/smartreflex.h
70161 @@ -238,7 +238,7 @@ struct omap_sr_class_data {
70162 int (*notify)(struct omap_sr *sr, u32 status);
70163 u8 notify_flags;
70164 u8 class_type;
70165 -};
70166 +} __do_const;
70167
70168 /**
70169 * struct omap_sr_nvalue_table - Smartreflex n-target value info
70170 diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
70171 index 4ea1d37..80f4b33 100644
70172 --- a/include/linux/ppp-comp.h
70173 +++ b/include/linux/ppp-comp.h
70174 @@ -84,7 +84,7 @@ struct compressor {
70175 struct module *owner;
70176 /* Extra skb space needed by the compressor algorithm */
70177 unsigned int comp_extra;
70178 -};
70179 +} __do_const;
70180
70181 /*
70182 * The return value from decompress routine is the length of the
70183 diff --git a/include/linux/printk.h b/include/linux/printk.h
70184 index 822171f..12b30e8 100644
70185 --- a/include/linux/printk.h
70186 +++ b/include/linux/printk.h
70187 @@ -98,6 +98,8 @@ int no_printk(const char *fmt, ...)
70188 extern asmlinkage __printf(1, 2)
70189 void early_printk(const char *fmt, ...);
70190
70191 +extern int kptr_restrict;
70192 +
70193 #ifdef CONFIG_PRINTK
70194 asmlinkage __printf(5, 0)
70195 int vprintk_emit(int facility, int level,
70196 @@ -132,7 +134,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
70197
70198 extern int printk_delay_msec;
70199 extern int dmesg_restrict;
70200 -extern int kptr_restrict;
70201
70202 extern void wake_up_klogd(void);
70203
70204 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
70205 index 94dfb2a..88b9d3b 100644
70206 --- a/include/linux/proc_fs.h
70207 +++ b/include/linux/proc_fs.h
70208 @@ -165,6 +165,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
70209 return proc_create_data(name, mode, parent, proc_fops, NULL);
70210 }
70211
70212 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
70213 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
70214 +{
70215 +#ifdef CONFIG_GRKERNSEC_PROC_USER
70216 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
70217 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70218 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
70219 +#else
70220 + return proc_create_data(name, mode, parent, proc_fops, NULL);
70221 +#endif
70222 +}
70223 +
70224 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
70225 umode_t mode, struct proc_dir_entry *base,
70226 read_proc_t *read_proc, void * data)
70227 @@ -266,7 +278,7 @@ struct proc_ns_operations {
70228 void (*put)(void *ns);
70229 int (*install)(struct nsproxy *nsproxy, void *ns);
70230 unsigned int (*inum)(void *ns);
70231 -};
70232 +} __do_const;
70233 extern const struct proc_ns_operations netns_operations;
70234 extern const struct proc_ns_operations utsns_operations;
70235 extern const struct proc_ns_operations ipcns_operations;
70236 diff --git a/include/linux/random.h b/include/linux/random.h
70237 index 347ce55..880f97c 100644
70238 --- a/include/linux/random.h
70239 +++ b/include/linux/random.h
70240 @@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
70241 u32 prandom_u32_state(struct rnd_state *);
70242 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
70243
70244 +static inline unsigned long pax_get_random_long(void)
70245 +{
70246 + return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
70247 +}
70248 +
70249 /*
70250 * Handle minimum values for seeds
70251 */
70252 diff --git a/include/linux/rculist.h b/include/linux/rculist.h
70253 index 8089e35..3a0d59a 100644
70254 --- a/include/linux/rculist.h
70255 +++ b/include/linux/rculist.h
70256 @@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
70257 struct list_head *prev, struct list_head *next);
70258 #endif
70259
70260 +extern void __pax_list_add_rcu(struct list_head *new,
70261 + struct list_head *prev, struct list_head *next);
70262 +
70263 /**
70264 * list_add_rcu - add a new entry to rcu-protected list
70265 * @new: new entry to be added
70266 @@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
70267 __list_add_rcu(new, head, head->next);
70268 }
70269
70270 +static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
70271 +{
70272 + __pax_list_add_rcu(new, head, head->next);
70273 +}
70274 +
70275 /**
70276 * list_add_tail_rcu - add a new entry to rcu-protected list
70277 * @new: new entry to be added
70278 @@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
70279 __list_add_rcu(new, head->prev, head);
70280 }
70281
70282 +static inline void pax_list_add_tail_rcu(struct list_head *new,
70283 + struct list_head *head)
70284 +{
70285 + __pax_list_add_rcu(new, head->prev, head);
70286 +}
70287 +
70288 /**
70289 * list_del_rcu - deletes entry from list without re-initialization
70290 * @entry: the element to delete from the list.
70291 @@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
70292 entry->prev = LIST_POISON2;
70293 }
70294
70295 +extern void pax_list_del_rcu(struct list_head *entry);
70296 +
70297 /**
70298 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
70299 * @n: the element to delete from the hash list.
70300 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
70301 index 23b3630..e1bc12b 100644
70302 --- a/include/linux/reboot.h
70303 +++ b/include/linux/reboot.h
70304 @@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
70305 * Architecture-specific implementations of sys_reboot commands.
70306 */
70307
70308 -extern void machine_restart(char *cmd);
70309 -extern void machine_halt(void);
70310 -extern void machine_power_off(void);
70311 +extern void machine_restart(char *cmd) __noreturn;
70312 +extern void machine_halt(void) __noreturn;
70313 +extern void machine_power_off(void) __noreturn;
70314
70315 extern void machine_shutdown(void);
70316 struct pt_regs;
70317 @@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
70318 */
70319
70320 extern void kernel_restart_prepare(char *cmd);
70321 -extern void kernel_restart(char *cmd);
70322 -extern void kernel_halt(void);
70323 -extern void kernel_power_off(void);
70324 +extern void kernel_restart(char *cmd) __noreturn;
70325 +extern void kernel_halt(void) __noreturn;
70326 +extern void kernel_power_off(void) __noreturn;
70327
70328 extern int C_A_D; /* for sysctl */
70329 void ctrl_alt_del(void);
70330 @@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
70331 * Emergency restart, callable from an interrupt handler.
70332 */
70333
70334 -extern void emergency_restart(void);
70335 +extern void emergency_restart(void) __noreturn;
70336 #include <asm/emergency-restart.h>
70337
70338 #endif /* _LINUX_REBOOT_H */
70339 diff --git a/include/linux/regset.h b/include/linux/regset.h
70340 index 8e0c9fe..ac4d221 100644
70341 --- a/include/linux/regset.h
70342 +++ b/include/linux/regset.h
70343 @@ -161,7 +161,8 @@ struct user_regset {
70344 unsigned int align;
70345 unsigned int bias;
70346 unsigned int core_note_type;
70347 -};
70348 +} __do_const;
70349 +typedef struct user_regset __no_const user_regset_no_const;
70350
70351 /**
70352 * struct user_regset_view - available regsets
70353 diff --git a/include/linux/relay.h b/include/linux/relay.h
70354 index 91cacc3..b55ff74 100644
70355 --- a/include/linux/relay.h
70356 +++ b/include/linux/relay.h
70357 @@ -160,7 +160,7 @@ struct rchan_callbacks
70358 * The callback should return 0 if successful, negative if not.
70359 */
70360 int (*remove_buf_file)(struct dentry *dentry);
70361 -};
70362 +} __no_const;
70363
70364 /*
70365 * CONFIG_RELAY kernel API, kernel/relay.c
70366 diff --git a/include/linux/rio.h b/include/linux/rio.h
70367 index a3e7842..d973ca6 100644
70368 --- a/include/linux/rio.h
70369 +++ b/include/linux/rio.h
70370 @@ -339,7 +339,7 @@ struct rio_ops {
70371 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
70372 u64 rstart, u32 size, u32 flags);
70373 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
70374 -};
70375 +} __no_const;
70376
70377 #define RIO_RESOURCE_MEM 0x00000100
70378 #define RIO_RESOURCE_DOORBELL 0x00000200
70379 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
70380 index 6dacb93..6174423 100644
70381 --- a/include/linux/rmap.h
70382 +++ b/include/linux/rmap.h
70383 @@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
70384 void anon_vma_init(void); /* create anon_vma_cachep */
70385 int anon_vma_prepare(struct vm_area_struct *);
70386 void unlink_anon_vmas(struct vm_area_struct *);
70387 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
70388 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
70389 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
70390 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
70391
70392 static inline void anon_vma_merge(struct vm_area_struct *vma,
70393 struct vm_area_struct *next)
70394 diff --git a/include/linux/sched.h b/include/linux/sched.h
70395 index be4e742..7f9d593 100644
70396 --- a/include/linux/sched.h
70397 +++ b/include/linux/sched.h
70398 @@ -62,6 +62,7 @@ struct bio_list;
70399 struct fs_struct;
70400 struct perf_event_context;
70401 struct blk_plug;
70402 +struct linux_binprm;
70403
70404 /*
70405 * List of flags we want to share for kernel threads,
70406 @@ -315,7 +316,7 @@ extern char __sched_text_start[], __sched_text_end[];
70407 extern int in_sched_functions(unsigned long addr);
70408
70409 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
70410 -extern signed long schedule_timeout(signed long timeout);
70411 +extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
70412 extern signed long schedule_timeout_interruptible(signed long timeout);
70413 extern signed long schedule_timeout_killable(signed long timeout);
70414 extern signed long schedule_timeout_uninterruptible(signed long timeout);
70415 @@ -329,6 +330,18 @@ struct user_namespace;
70416 #include <linux/aio.h>
70417
70418 #ifdef CONFIG_MMU
70419 +
70420 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
70421 +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
70422 +#else
70423 +static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
70424 +{
70425 + return 0;
70426 +}
70427 +#endif
70428 +
70429 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
70430 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
70431 extern void arch_pick_mmap_layout(struct mm_struct *mm);
70432 extern unsigned long
70433 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
70434 @@ -605,6 +618,17 @@ struct signal_struct {
70435 #ifdef CONFIG_TASKSTATS
70436 struct taskstats *stats;
70437 #endif
70438 +
70439 +#ifdef CONFIG_GRKERNSEC
70440 + u32 curr_ip;
70441 + u32 saved_ip;
70442 + u32 gr_saddr;
70443 + u32 gr_daddr;
70444 + u16 gr_sport;
70445 + u16 gr_dport;
70446 + u8 used_accept:1;
70447 +#endif
70448 +
70449 #ifdef CONFIG_AUDIT
70450 unsigned audit_tty;
70451 struct tty_audit_buf *tty_audit_buf;
70452 @@ -683,6 +707,11 @@ struct user_struct {
70453 struct key *session_keyring; /* UID's default session keyring */
70454 #endif
70455
70456 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
70457 + unsigned int banned;
70458 + unsigned long ban_expires;
70459 +#endif
70460 +
70461 /* Hash table maintenance information */
70462 struct hlist_node uidhash_node;
70463 kuid_t uid;
70464 @@ -1082,7 +1111,7 @@ struct sched_class {
70465 #ifdef CONFIG_FAIR_GROUP_SCHED
70466 void (*task_move_group) (struct task_struct *p, int on_rq);
70467 #endif
70468 -};
70469 +} __do_const;
70470
70471 struct load_weight {
70472 unsigned long weight, inv_weight;
70473 @@ -1323,8 +1352,8 @@ struct task_struct {
70474 struct list_head thread_group;
70475
70476 struct completion *vfork_done; /* for vfork() */
70477 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
70478 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70479 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
70480 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70481
70482 cputime_t utime, stime, utimescaled, stimescaled;
70483 cputime_t gtime;
70484 @@ -1349,11 +1378,6 @@ struct task_struct {
70485 struct task_cputime cputime_expires;
70486 struct list_head cpu_timers[3];
70487
70488 -/* process credentials */
70489 - const struct cred __rcu *real_cred; /* objective and real subjective task
70490 - * credentials (COW) */
70491 - const struct cred __rcu *cred; /* effective (overridable) subjective task
70492 - * credentials (COW) */
70493 char comm[TASK_COMM_LEN]; /* executable name excluding path
70494 - access with [gs]et_task_comm (which lock
70495 it with task_lock())
70496 @@ -1370,6 +1394,10 @@ struct task_struct {
70497 #endif
70498 /* CPU-specific state of this task */
70499 struct thread_struct thread;
70500 +/* thread_info moved to task_struct */
70501 +#ifdef CONFIG_X86
70502 + struct thread_info tinfo;
70503 +#endif
70504 /* filesystem information */
70505 struct fs_struct *fs;
70506 /* open file information */
70507 @@ -1443,6 +1471,10 @@ struct task_struct {
70508 gfp_t lockdep_reclaim_gfp;
70509 #endif
70510
70511 +/* process credentials */
70512 + const struct cred __rcu *real_cred; /* objective and real subjective task
70513 + * credentials (COW) */
70514 +
70515 /* journalling filesystem info */
70516 void *journal_info;
70517
70518 @@ -1481,6 +1513,10 @@ struct task_struct {
70519 /* cg_list protected by css_set_lock and tsk->alloc_lock */
70520 struct list_head cg_list;
70521 #endif
70522 +
70523 + const struct cred __rcu *cred; /* effective (overridable) subjective task
70524 + * credentials (COW) */
70525 +
70526 #ifdef CONFIG_FUTEX
70527 struct robust_list_head __user *robust_list;
70528 #ifdef CONFIG_COMPAT
70529 @@ -1577,8 +1613,74 @@ struct task_struct {
70530 #ifdef CONFIG_UPROBES
70531 struct uprobe_task *utask;
70532 #endif
70533 +
70534 +#ifdef CONFIG_GRKERNSEC
70535 + /* grsecurity */
70536 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70537 + u64 exec_id;
70538 +#endif
70539 +#ifdef CONFIG_GRKERNSEC_SETXID
70540 + const struct cred *delayed_cred;
70541 +#endif
70542 + struct dentry *gr_chroot_dentry;
70543 + struct acl_subject_label *acl;
70544 + struct acl_role_label *role;
70545 + struct file *exec_file;
70546 + unsigned long brute_expires;
70547 + u16 acl_role_id;
70548 + /* is this the task that authenticated to the special role */
70549 + u8 acl_sp_role;
70550 + u8 is_writable;
70551 + u8 brute;
70552 + u8 gr_is_chrooted;
70553 +#endif
70554 +
70555 };
70556
70557 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
70558 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
70559 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
70560 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
70561 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
70562 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
70563 +
70564 +#ifdef CONFIG_PAX_SOFTMODE
70565 +extern int pax_softmode;
70566 +#endif
70567 +
70568 +extern int pax_check_flags(unsigned long *);
70569 +
70570 +/* if tsk != current then task_lock must be held on it */
70571 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70572 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
70573 +{
70574 + if (likely(tsk->mm))
70575 + return tsk->mm->pax_flags;
70576 + else
70577 + return 0UL;
70578 +}
70579 +
70580 +/* if tsk != current then task_lock must be held on it */
70581 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
70582 +{
70583 + if (likely(tsk->mm)) {
70584 + tsk->mm->pax_flags = flags;
70585 + return 0;
70586 + }
70587 + return -EINVAL;
70588 +}
70589 +#endif
70590 +
70591 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
70592 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
70593 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
70594 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
70595 +#endif
70596 +
70597 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
70598 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
70599 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
70600 +
70601 /* Future-safe accessor for struct task_struct's cpus_allowed. */
70602 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
70603
70604 @@ -1637,7 +1739,7 @@ struct pid_namespace;
70605 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
70606 struct pid_namespace *ns);
70607
70608 -static inline pid_t task_pid_nr(struct task_struct *tsk)
70609 +static inline pid_t task_pid_nr(const struct task_struct *tsk)
70610 {
70611 return tsk->pid;
70612 }
70613 @@ -2073,7 +2175,9 @@ void yield(void);
70614 extern struct exec_domain default_exec_domain;
70615
70616 union thread_union {
70617 +#ifndef CONFIG_X86
70618 struct thread_info thread_info;
70619 +#endif
70620 unsigned long stack[THREAD_SIZE/sizeof(long)];
70621 };
70622
70623 @@ -2106,6 +2210,7 @@ extern struct pid_namespace init_pid_ns;
70624 */
70625
70626 extern struct task_struct *find_task_by_vpid(pid_t nr);
70627 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
70628 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
70629 struct pid_namespace *ns);
70630
70631 @@ -2272,7 +2377,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
70632 extern void exit_itimers(struct signal_struct *);
70633 extern void flush_itimer_signals(void);
70634
70635 -extern void do_group_exit(int);
70636 +extern __noreturn void do_group_exit(int);
70637
70638 extern int allow_signal(int);
70639 extern int disallow_signal(int);
70640 @@ -2463,9 +2568,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
70641
70642 #endif
70643
70644 -static inline int object_is_on_stack(void *obj)
70645 +static inline int object_starts_on_stack(void *obj)
70646 {
70647 - void *stack = task_stack_page(current);
70648 + const void *stack = task_stack_page(current);
70649
70650 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
70651 }
70652 diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
70653 index bf8086b..962b035 100644
70654 --- a/include/linux/sched/sysctl.h
70655 +++ b/include/linux/sched/sysctl.h
70656 @@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
70657 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
70658
70659 extern int sysctl_max_map_count;
70660 +extern unsigned long sysctl_heap_stack_gap;
70661
70662 extern unsigned int sysctl_sched_latency;
70663 extern unsigned int sysctl_sched_min_granularity;
70664 diff --git a/include/linux/security.h b/include/linux/security.h
70665 index 032c366..2c1c2dc2 100644
70666 --- a/include/linux/security.h
70667 +++ b/include/linux/security.h
70668 @@ -26,6 +26,7 @@
70669 #include <linux/capability.h>
70670 #include <linux/slab.h>
70671 #include <linux/err.h>
70672 +#include <linux/grsecurity.h>
70673
70674 struct linux_binprm;
70675 struct cred;
70676 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
70677 index 68a04a3..866e6a1 100644
70678 --- a/include/linux/seq_file.h
70679 +++ b/include/linux/seq_file.h
70680 @@ -26,6 +26,9 @@ struct seq_file {
70681 struct mutex lock;
70682 const struct seq_operations *op;
70683 int poll_event;
70684 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70685 + u64 exec_id;
70686 +#endif
70687 #ifdef CONFIG_USER_NS
70688 struct user_namespace *user_ns;
70689 #endif
70690 @@ -38,6 +41,7 @@ struct seq_operations {
70691 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
70692 int (*show) (struct seq_file *m, void *v);
70693 };
70694 +typedef struct seq_operations __no_const seq_operations_no_const;
70695
70696 #define SEQ_SKIP 1
70697
70698 diff --git a/include/linux/shm.h b/include/linux/shm.h
70699 index 429c199..4d42e38 100644
70700 --- a/include/linux/shm.h
70701 +++ b/include/linux/shm.h
70702 @@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
70703
70704 /* The task created the shm object. NULL if the task is dead. */
70705 struct task_struct *shm_creator;
70706 +#ifdef CONFIG_GRKERNSEC
70707 + time_t shm_createtime;
70708 + pid_t shm_lapid;
70709 +#endif
70710 };
70711
70712 /* shm_mode upper byte flags */
70713 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
70714 index b8292d8..96db310 100644
70715 --- a/include/linux/skbuff.h
70716 +++ b/include/linux/skbuff.h
70717 @@ -599,7 +599,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
70718 extern struct sk_buff *__alloc_skb(unsigned int size,
70719 gfp_t priority, int flags, int node);
70720 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
70721 -static inline struct sk_buff *alloc_skb(unsigned int size,
70722 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
70723 gfp_t priority)
70724 {
70725 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
70726 @@ -709,7 +709,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
70727 */
70728 static inline int skb_queue_empty(const struct sk_buff_head *list)
70729 {
70730 - return list->next == (struct sk_buff *)list;
70731 + return list->next == (const struct sk_buff *)list;
70732 }
70733
70734 /**
70735 @@ -722,7 +722,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
70736 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
70737 const struct sk_buff *skb)
70738 {
70739 - return skb->next == (struct sk_buff *)list;
70740 + return skb->next == (const struct sk_buff *)list;
70741 }
70742
70743 /**
70744 @@ -735,7 +735,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
70745 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
70746 const struct sk_buff *skb)
70747 {
70748 - return skb->prev == (struct sk_buff *)list;
70749 + return skb->prev == (const struct sk_buff *)list;
70750 }
70751
70752 /**
70753 @@ -1756,7 +1756,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
70754 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
70755 */
70756 #ifndef NET_SKB_PAD
70757 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
70758 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
70759 #endif
70760
70761 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
70762 @@ -2351,7 +2351,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
70763 int noblock, int *err);
70764 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
70765 struct poll_table_struct *wait);
70766 -extern int skb_copy_datagram_iovec(const struct sk_buff *from,
70767 +extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
70768 int offset, struct iovec *to,
70769 int size);
70770 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
70771 @@ -2641,6 +2641,9 @@ static inline void nf_reset(struct sk_buff *skb)
70772 nf_bridge_put(skb->nf_bridge);
70773 skb->nf_bridge = NULL;
70774 #endif
70775 +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
70776 + skb->nf_trace = 0;
70777 +#endif
70778 }
70779
70780 static inline void nf_reset_trace(struct sk_buff *skb)
70781 diff --git a/include/linux/slab.h b/include/linux/slab.h
70782 index 5d168d7..720bff3 100644
70783 --- a/include/linux/slab.h
70784 +++ b/include/linux/slab.h
70785 @@ -12,13 +12,20 @@
70786 #include <linux/gfp.h>
70787 #include <linux/types.h>
70788 #include <linux/workqueue.h>
70789 -
70790 +#include <linux/err.h>
70791
70792 /*
70793 * Flags to pass to kmem_cache_create().
70794 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
70795 */
70796 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
70797 +
70798 +#ifdef CONFIG_PAX_USERCOPY_SLABS
70799 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
70800 +#else
70801 +#define SLAB_USERCOPY 0x00000000UL
70802 +#endif
70803 +
70804 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
70805 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
70806 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
70807 @@ -89,10 +96,13 @@
70808 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
70809 * Both make kfree a no-op.
70810 */
70811 -#define ZERO_SIZE_PTR ((void *)16)
70812 +#define ZERO_SIZE_PTR \
70813 +({ \
70814 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
70815 + (void *)(-MAX_ERRNO-1L); \
70816 +})
70817
70818 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
70819 - (unsigned long)ZERO_SIZE_PTR)
70820 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
70821
70822 /*
70823 * Common fields provided in kmem_cache by all slab allocators
70824 @@ -112,7 +122,7 @@ struct kmem_cache {
70825 unsigned int align; /* Alignment as calculated */
70826 unsigned long flags; /* Active flags on the slab */
70827 const char *name; /* Slab name for sysfs */
70828 - int refcount; /* Use counter */
70829 + atomic_t refcount; /* Use counter */
70830 void (*ctor)(void *); /* Called on object slot creation */
70831 struct list_head list; /* List of all slab caches on the system */
70832 };
70833 @@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
70834 void kfree(const void *);
70835 void kzfree(const void *);
70836 size_t ksize(const void *);
70837 +const char *check_heap_object(const void *ptr, unsigned long n);
70838 +bool is_usercopy_object(const void *ptr);
70839
70840 /*
70841 * Allocator specific definitions. These are mainly used to establish optimized
70842 @@ -311,6 +323,7 @@ size_t ksize(const void *);
70843 * for general use, and so are not documented here. For a full list of
70844 * potential flags, always refer to linux/gfp.h.
70845 */
70846 +
70847 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
70848 {
70849 if (size != 0 && n > SIZE_MAX / size)
70850 @@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
70851 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70852 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70853 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70854 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70855 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
70856 #define kmalloc_track_caller(size, flags) \
70857 __kmalloc_track_caller(size, flags, _RET_IP_)
70858 #else
70859 @@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70860 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70861 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70862 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70863 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
70864 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
70865 #define kmalloc_node_track_caller(size, flags, node) \
70866 __kmalloc_node_track_caller(size, flags, node, \
70867 _RET_IP_)
70868 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
70869 index 8bb6e0e..8eb0dbe 100644
70870 --- a/include/linux/slab_def.h
70871 +++ b/include/linux/slab_def.h
70872 @@ -52,7 +52,7 @@ struct kmem_cache {
70873 /* 4) cache creation/removal */
70874 const char *name;
70875 struct list_head list;
70876 - int refcount;
70877 + atomic_t refcount;
70878 int object_size;
70879 int align;
70880
70881 @@ -68,10 +68,10 @@ struct kmem_cache {
70882 unsigned long node_allocs;
70883 unsigned long node_frees;
70884 unsigned long node_overflow;
70885 - atomic_t allochit;
70886 - atomic_t allocmiss;
70887 - atomic_t freehit;
70888 - atomic_t freemiss;
70889 + atomic_unchecked_t allochit;
70890 + atomic_unchecked_t allocmiss;
70891 + atomic_unchecked_t freehit;
70892 + atomic_unchecked_t freemiss;
70893
70894 /*
70895 * If debugging is enabled, then the allocator can add additional
70896 @@ -111,11 +111,16 @@ struct cache_sizes {
70897 #ifdef CONFIG_ZONE_DMA
70898 struct kmem_cache *cs_dmacachep;
70899 #endif
70900 +
70901 +#ifdef CONFIG_PAX_USERCOPY_SLABS
70902 + struct kmem_cache *cs_usercopycachep;
70903 +#endif
70904 +
70905 };
70906 extern struct cache_sizes malloc_sizes[];
70907
70908 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70909 -void *__kmalloc(size_t size, gfp_t flags);
70910 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
70911
70912 #ifdef CONFIG_TRACING
70913 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
70914 @@ -152,6 +157,13 @@ found:
70915 cachep = malloc_sizes[i].cs_dmacachep;
70916 else
70917 #endif
70918 +
70919 +#ifdef CONFIG_PAX_USERCOPY_SLABS
70920 + if (flags & GFP_USERCOPY)
70921 + cachep = malloc_sizes[i].cs_usercopycachep;
70922 + else
70923 +#endif
70924 +
70925 cachep = malloc_sizes[i].cs_cachep;
70926
70927 ret = kmem_cache_alloc_trace(cachep, flags, size);
70928 @@ -162,7 +174,7 @@ found:
70929 }
70930
70931 #ifdef CONFIG_NUMA
70932 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
70933 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70934 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
70935
70936 #ifdef CONFIG_TRACING
70937 @@ -205,6 +217,13 @@ found:
70938 cachep = malloc_sizes[i].cs_dmacachep;
70939 else
70940 #endif
70941 +
70942 +#ifdef CONFIG_PAX_USERCOPY_SLABS
70943 + if (flags & GFP_USERCOPY)
70944 + cachep = malloc_sizes[i].cs_usercopycachep;
70945 + else
70946 +#endif
70947 +
70948 cachep = malloc_sizes[i].cs_cachep;
70949
70950 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
70951 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
70952 index f28e14a..7831211 100644
70953 --- a/include/linux/slob_def.h
70954 +++ b/include/linux/slob_def.h
70955 @@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
70956 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
70957 }
70958
70959 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
70960 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70961
70962 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
70963 {
70964 @@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
70965 return __kmalloc_node(size, flags, NUMA_NO_NODE);
70966 }
70967
70968 -static __always_inline void *__kmalloc(size_t size, gfp_t flags)
70969 +static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
70970 {
70971 return kmalloc(size, flags);
70972 }
70973 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
70974 index 9db4825..ed42fb5 100644
70975 --- a/include/linux/slub_def.h
70976 +++ b/include/linux/slub_def.h
70977 @@ -91,7 +91,7 @@ struct kmem_cache {
70978 struct kmem_cache_order_objects max;
70979 struct kmem_cache_order_objects min;
70980 gfp_t allocflags; /* gfp flags to use on each alloc */
70981 - int refcount; /* Refcount for slab cache destroy */
70982 + atomic_t refcount; /* Refcount for slab cache destroy */
70983 void (*ctor)(void *);
70984 int inuse; /* Offset to metadata */
70985 int align; /* Alignment */
70986 @@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
70987 * Sorry that the following has to be that ugly but some versions of GCC
70988 * have trouble with constant propagation and loops.
70989 */
70990 -static __always_inline int kmalloc_index(size_t size)
70991 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
70992 {
70993 if (!size)
70994 return 0;
70995 @@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
70996 }
70997
70998 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70999 -void *__kmalloc(size_t size, gfp_t flags);
71000 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
71001
71002 static __always_inline void *
71003 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
71004 @@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
71005 }
71006 #endif
71007
71008 -static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
71009 +static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
71010 {
71011 unsigned int order = get_order(size);
71012 return kmalloc_order_trace(size, flags, order);
71013 @@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71014 }
71015
71016 #ifdef CONFIG_NUMA
71017 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
71018 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71019 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71020
71021 #ifdef CONFIG_TRACING
71022 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
71023 index e8d702e..0a56eb4 100644
71024 --- a/include/linux/sock_diag.h
71025 +++ b/include/linux/sock_diag.h
71026 @@ -10,7 +10,7 @@ struct sock;
71027 struct sock_diag_handler {
71028 __u8 family;
71029 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
71030 -};
71031 +} __do_const;
71032
71033 int sock_diag_register(const struct sock_diag_handler *h);
71034 void sock_diag_unregister(const struct sock_diag_handler *h);
71035 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
71036 index 680f9a3..f13aeb0 100644
71037 --- a/include/linux/sonet.h
71038 +++ b/include/linux/sonet.h
71039 @@ -7,7 +7,7 @@
71040 #include <uapi/linux/sonet.h>
71041
71042 struct k_sonet_stats {
71043 -#define __HANDLE_ITEM(i) atomic_t i
71044 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
71045 __SONET_ITEMS
71046 #undef __HANDLE_ITEM
71047 };
71048 diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
71049 index 07d8e53..dc934c9 100644
71050 --- a/include/linux/sunrpc/addr.h
71051 +++ b/include/linux/sunrpc/addr.h
71052 @@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
71053 {
71054 switch (sap->sa_family) {
71055 case AF_INET:
71056 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
71057 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
71058 case AF_INET6:
71059 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
71060 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
71061 }
71062 return 0;
71063 }
71064 @@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
71065 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
71066 const struct sockaddr *src)
71067 {
71068 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
71069 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
71070 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
71071
71072 dsin->sin_family = ssin->sin_family;
71073 @@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
71074 if (sa->sa_family != AF_INET6)
71075 return 0;
71076
71077 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
71078 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
71079 }
71080
71081 #endif /* _LINUX_SUNRPC_ADDR_H */
71082 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
71083 index 2cf4ffa..470d140 100644
71084 --- a/include/linux/sunrpc/clnt.h
71085 +++ b/include/linux/sunrpc/clnt.h
71086 @@ -96,7 +96,7 @@ struct rpc_procinfo {
71087 unsigned int p_timer; /* Which RTT timer to use */
71088 u32 p_statidx; /* Which procedure to account */
71089 const char * p_name; /* name of procedure */
71090 -};
71091 +} __do_const;
71092
71093 #ifdef __KERNEL__
71094
71095 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
71096 index 1f0216b..6a4fa50 100644
71097 --- a/include/linux/sunrpc/svc.h
71098 +++ b/include/linux/sunrpc/svc.h
71099 @@ -411,7 +411,7 @@ struct svc_procedure {
71100 unsigned int pc_count; /* call count */
71101 unsigned int pc_cachetype; /* cache info (NFS) */
71102 unsigned int pc_xdrressize; /* maximum size of XDR reply */
71103 -};
71104 +} __do_const;
71105
71106 /*
71107 * Function prototypes.
71108 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
71109 index 0b8e3e6..33e0a01 100644
71110 --- a/include/linux/sunrpc/svc_rdma.h
71111 +++ b/include/linux/sunrpc/svc_rdma.h
71112 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
71113 extern unsigned int svcrdma_max_requests;
71114 extern unsigned int svcrdma_max_req_size;
71115
71116 -extern atomic_t rdma_stat_recv;
71117 -extern atomic_t rdma_stat_read;
71118 -extern atomic_t rdma_stat_write;
71119 -extern atomic_t rdma_stat_sq_starve;
71120 -extern atomic_t rdma_stat_rq_starve;
71121 -extern atomic_t rdma_stat_rq_poll;
71122 -extern atomic_t rdma_stat_rq_prod;
71123 -extern atomic_t rdma_stat_sq_poll;
71124 -extern atomic_t rdma_stat_sq_prod;
71125 +extern atomic_unchecked_t rdma_stat_recv;
71126 +extern atomic_unchecked_t rdma_stat_read;
71127 +extern atomic_unchecked_t rdma_stat_write;
71128 +extern atomic_unchecked_t rdma_stat_sq_starve;
71129 +extern atomic_unchecked_t rdma_stat_rq_starve;
71130 +extern atomic_unchecked_t rdma_stat_rq_poll;
71131 +extern atomic_unchecked_t rdma_stat_rq_prod;
71132 +extern atomic_unchecked_t rdma_stat_sq_poll;
71133 +extern atomic_unchecked_t rdma_stat_sq_prod;
71134
71135 #define RPCRDMA_VERSION 1
71136
71137 diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
71138 index ff374ab..7fd2ecb 100644
71139 --- a/include/linux/sunrpc/svcauth.h
71140 +++ b/include/linux/sunrpc/svcauth.h
71141 @@ -109,7 +109,7 @@ struct auth_ops {
71142 int (*release)(struct svc_rqst *rq);
71143 void (*domain_release)(struct auth_domain *);
71144 int (*set_client)(struct svc_rqst *rq);
71145 -};
71146 +} __do_const;
71147
71148 #define SVC_GARBAGE 1
71149 #define SVC_SYSERR 2
71150 diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
71151 index a5ffd32..0935dea 100644
71152 --- a/include/linux/swiotlb.h
71153 +++ b/include/linux/swiotlb.h
71154 @@ -60,7 +60,8 @@ extern void
71155
71156 extern void
71157 swiotlb_free_coherent(struct device *hwdev, size_t size,
71158 - void *vaddr, dma_addr_t dma_handle);
71159 + void *vaddr, dma_addr_t dma_handle,
71160 + struct dma_attrs *attrs);
71161
71162 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
71163 unsigned long offset, size_t size,
71164 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
71165 index 313a8e0..1da8fc6 100644
71166 --- a/include/linux/syscalls.h
71167 +++ b/include/linux/syscalls.h
71168 @@ -634,7 +634,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
71169 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
71170 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
71171 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
71172 - struct sockaddr __user *, int);
71173 + struct sockaddr __user *, int) __intentional_overflow(0);
71174 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
71175 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
71176 unsigned int vlen, unsigned flags);
71177 diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
71178 index 27b3b0b..e093dd9 100644
71179 --- a/include/linux/syscore_ops.h
71180 +++ b/include/linux/syscore_ops.h
71181 @@ -16,7 +16,7 @@ struct syscore_ops {
71182 int (*suspend)(void);
71183 void (*resume)(void);
71184 void (*shutdown)(void);
71185 -};
71186 +} __do_const;
71187
71188 extern void register_syscore_ops(struct syscore_ops *ops);
71189 extern void unregister_syscore_ops(struct syscore_ops *ops);
71190 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
71191 index 14a8ff2..af52bad 100644
71192 --- a/include/linux/sysctl.h
71193 +++ b/include/linux/sysctl.h
71194 @@ -34,13 +34,13 @@ struct ctl_table_root;
71195 struct ctl_table_header;
71196 struct ctl_dir;
71197
71198 -typedef struct ctl_table ctl_table;
71199 -
71200 typedef int proc_handler (struct ctl_table *ctl, int write,
71201 void __user *buffer, size_t *lenp, loff_t *ppos);
71202
71203 extern int proc_dostring(struct ctl_table *, int,
71204 void __user *, size_t *, loff_t *);
71205 +extern int proc_dostring_modpriv(struct ctl_table *, int,
71206 + void __user *, size_t *, loff_t *);
71207 extern int proc_dointvec(struct ctl_table *, int,
71208 void __user *, size_t *, loff_t *);
71209 extern int proc_dointvec_minmax(struct ctl_table *, int,
71210 @@ -115,7 +115,9 @@ struct ctl_table
71211 struct ctl_table_poll *poll;
71212 void *extra1;
71213 void *extra2;
71214 -};
71215 +} __do_const;
71216 +typedef struct ctl_table __no_const ctl_table_no_const;
71217 +typedef struct ctl_table ctl_table;
71218
71219 struct ctl_node {
71220 struct rb_node node;
71221 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
71222 index e2cee22..3ddb921 100644
71223 --- a/include/linux/sysfs.h
71224 +++ b/include/linux/sysfs.h
71225 @@ -31,7 +31,8 @@ struct attribute {
71226 struct lock_class_key *key;
71227 struct lock_class_key skey;
71228 #endif
71229 -};
71230 +} __do_const;
71231 +typedef struct attribute __no_const attribute_no_const;
71232
71233 /**
71234 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
71235 @@ -59,8 +60,8 @@ struct attribute_group {
71236 umode_t (*is_visible)(struct kobject *,
71237 struct attribute *, int);
71238 struct attribute **attrs;
71239 -};
71240 -
71241 +} __do_const;
71242 +typedef struct attribute_group __no_const attribute_group_no_const;
71243
71244
71245 /**
71246 @@ -107,7 +108,8 @@ struct bin_attribute {
71247 char *, loff_t, size_t);
71248 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
71249 struct vm_area_struct *vma);
71250 -};
71251 +} __do_const;
71252 +typedef struct bin_attribute __no_const bin_attribute_no_const;
71253
71254 /**
71255 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
71256 diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
71257 index 7faf933..9b85a0c 100644
71258 --- a/include/linux/sysrq.h
71259 +++ b/include/linux/sysrq.h
71260 @@ -16,6 +16,7 @@
71261
71262 #include <linux/errno.h>
71263 #include <linux/types.h>
71264 +#include <linux/compiler.h>
71265
71266 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
71267 #define SYSRQ_DEFAULT_ENABLE 1
71268 @@ -36,7 +37,7 @@ struct sysrq_key_op {
71269 char *help_msg;
71270 char *action_msg;
71271 int enable_mask;
71272 -};
71273 +} __do_const;
71274
71275 #ifdef CONFIG_MAGIC_SYSRQ
71276
71277 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
71278 index e7e0473..7989295 100644
71279 --- a/include/linux/thread_info.h
71280 +++ b/include/linux/thread_info.h
71281 @@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
71282 #error "no set_restore_sigmask() provided and default one won't work"
71283 #endif
71284
71285 +extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
71286 +static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
71287 +{
71288 +#ifndef CONFIG_PAX_USERCOPY_DEBUG
71289 + if (!__builtin_constant_p(n))
71290 +#endif
71291 + __check_object_size(ptr, n, to_user);
71292 +}
71293 +
71294 #endif /* __KERNEL__ */
71295
71296 #endif /* _LINUX_THREAD_INFO_H */
71297 diff --git a/include/linux/tty.h b/include/linux/tty.h
71298 index c75d886..04cb148 100644
71299 --- a/include/linux/tty.h
71300 +++ b/include/linux/tty.h
71301 @@ -194,7 +194,7 @@ struct tty_port {
71302 const struct tty_port_operations *ops; /* Port operations */
71303 spinlock_t lock; /* Lock protecting tty field */
71304 int blocked_open; /* Waiting to open */
71305 - int count; /* Usage count */
71306 + atomic_t count; /* Usage count */
71307 wait_queue_head_t open_wait; /* Open waiters */
71308 wait_queue_head_t close_wait; /* Close waiters */
71309 wait_queue_head_t delta_msr_wait; /* Modem status change */
71310 @@ -515,7 +515,7 @@ extern int tty_port_open(struct tty_port *port,
71311 struct tty_struct *tty, struct file *filp);
71312 static inline int tty_port_users(struct tty_port *port)
71313 {
71314 - return port->count + port->blocked_open;
71315 + return atomic_read(&port->count) + port->blocked_open;
71316 }
71317
71318 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
71319 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
71320 index 756a609..b302dd6 100644
71321 --- a/include/linux/tty_driver.h
71322 +++ b/include/linux/tty_driver.h
71323 @@ -285,7 +285,7 @@ struct tty_operations {
71324 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
71325 #endif
71326 const struct file_operations *proc_fops;
71327 -};
71328 +} __do_const;
71329
71330 struct tty_driver {
71331 int magic; /* magic number for this structure */
71332 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
71333 index 455a0d7..bf97ff5 100644
71334 --- a/include/linux/tty_ldisc.h
71335 +++ b/include/linux/tty_ldisc.h
71336 @@ -146,7 +146,7 @@ struct tty_ldisc_ops {
71337
71338 struct module *owner;
71339
71340 - int refcount;
71341 + atomic_t refcount;
71342 };
71343
71344 struct tty_ldisc {
71345 diff --git a/include/linux/types.h b/include/linux/types.h
71346 index 4d118ba..c3ee9bf 100644
71347 --- a/include/linux/types.h
71348 +++ b/include/linux/types.h
71349 @@ -176,10 +176,26 @@ typedef struct {
71350 int counter;
71351 } atomic_t;
71352
71353 +#ifdef CONFIG_PAX_REFCOUNT
71354 +typedef struct {
71355 + int counter;
71356 +} atomic_unchecked_t;
71357 +#else
71358 +typedef atomic_t atomic_unchecked_t;
71359 +#endif
71360 +
71361 #ifdef CONFIG_64BIT
71362 typedef struct {
71363 long counter;
71364 } atomic64_t;
71365 +
71366 +#ifdef CONFIG_PAX_REFCOUNT
71367 +typedef struct {
71368 + long counter;
71369 +} atomic64_unchecked_t;
71370 +#else
71371 +typedef atomic64_t atomic64_unchecked_t;
71372 +#endif
71373 #endif
71374
71375 struct list_head {
71376 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
71377 index 5ca0951..ab496a5 100644
71378 --- a/include/linux/uaccess.h
71379 +++ b/include/linux/uaccess.h
71380 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
71381 long ret; \
71382 mm_segment_t old_fs = get_fs(); \
71383 \
71384 - set_fs(KERNEL_DS); \
71385 pagefault_disable(); \
71386 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
71387 - pagefault_enable(); \
71388 + set_fs(KERNEL_DS); \
71389 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
71390 set_fs(old_fs); \
71391 + pagefault_enable(); \
71392 ret; \
71393 })
71394
71395 diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
71396 index 8e522cbc..aa8572d 100644
71397 --- a/include/linux/uidgid.h
71398 +++ b/include/linux/uidgid.h
71399 @@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
71400
71401 #endif /* CONFIG_USER_NS */
71402
71403 +#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
71404 +#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
71405 +#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
71406 +#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
71407 +
71408 #endif /* _LINUX_UIDGID_H */
71409 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
71410 index 99c1b4d..562e6f3 100644
71411 --- a/include/linux/unaligned/access_ok.h
71412 +++ b/include/linux/unaligned/access_ok.h
71413 @@ -4,34 +4,34 @@
71414 #include <linux/kernel.h>
71415 #include <asm/byteorder.h>
71416
71417 -static inline u16 get_unaligned_le16(const void *p)
71418 +static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
71419 {
71420 - return le16_to_cpup((__le16 *)p);
71421 + return le16_to_cpup((const __le16 *)p);
71422 }
71423
71424 -static inline u32 get_unaligned_le32(const void *p)
71425 +static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
71426 {
71427 - return le32_to_cpup((__le32 *)p);
71428 + return le32_to_cpup((const __le32 *)p);
71429 }
71430
71431 -static inline u64 get_unaligned_le64(const void *p)
71432 +static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
71433 {
71434 - return le64_to_cpup((__le64 *)p);
71435 + return le64_to_cpup((const __le64 *)p);
71436 }
71437
71438 -static inline u16 get_unaligned_be16(const void *p)
71439 +static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
71440 {
71441 - return be16_to_cpup((__be16 *)p);
71442 + return be16_to_cpup((const __be16 *)p);
71443 }
71444
71445 -static inline u32 get_unaligned_be32(const void *p)
71446 +static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
71447 {
71448 - return be32_to_cpup((__be32 *)p);
71449 + return be32_to_cpup((const __be32 *)p);
71450 }
71451
71452 -static inline u64 get_unaligned_be64(const void *p)
71453 +static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
71454 {
71455 - return be64_to_cpup((__be64 *)p);
71456 + return be64_to_cpup((const __be64 *)p);
71457 }
71458
71459 static inline void put_unaligned_le16(u16 val, void *p)
71460 diff --git a/include/linux/usb.h b/include/linux/usb.h
71461 index 4d22d0f..8d0e8f8 100644
71462 --- a/include/linux/usb.h
71463 +++ b/include/linux/usb.h
71464 @@ -554,7 +554,7 @@ struct usb_device {
71465 int maxchild;
71466
71467 u32 quirks;
71468 - atomic_t urbnum;
71469 + atomic_unchecked_t urbnum;
71470
71471 unsigned long active_duration;
71472
71473 @@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
71474
71475 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
71476 __u8 request, __u8 requesttype, __u16 value, __u16 index,
71477 - void *data, __u16 size, int timeout);
71478 + void *data, __u16 size, int timeout) __intentional_overflow(-1);
71479 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
71480 void *data, int len, int *actual_length, int timeout);
71481 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
71482 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
71483 index c5d36c6..108f4f9 100644
71484 --- a/include/linux/usb/renesas_usbhs.h
71485 +++ b/include/linux/usb/renesas_usbhs.h
71486 @@ -39,7 +39,7 @@ enum {
71487 */
71488 struct renesas_usbhs_driver_callback {
71489 int (*notify_hotplug)(struct platform_device *pdev);
71490 -};
71491 +} __no_const;
71492
71493 /*
71494 * callback functions for platform
71495 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
71496 index 6f8fbcf..8259001 100644
71497 --- a/include/linux/vermagic.h
71498 +++ b/include/linux/vermagic.h
71499 @@ -25,9 +25,35 @@
71500 #define MODULE_ARCH_VERMAGIC ""
71501 #endif
71502
71503 +#ifdef CONFIG_PAX_REFCOUNT
71504 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
71505 +#else
71506 +#define MODULE_PAX_REFCOUNT ""
71507 +#endif
71508 +
71509 +#ifdef CONSTIFY_PLUGIN
71510 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
71511 +#else
71512 +#define MODULE_CONSTIFY_PLUGIN ""
71513 +#endif
71514 +
71515 +#ifdef STACKLEAK_PLUGIN
71516 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
71517 +#else
71518 +#define MODULE_STACKLEAK_PLUGIN ""
71519 +#endif
71520 +
71521 +#ifdef CONFIG_GRKERNSEC
71522 +#define MODULE_GRSEC "GRSEC "
71523 +#else
71524 +#define MODULE_GRSEC ""
71525 +#endif
71526 +
71527 #define VERMAGIC_STRING \
71528 UTS_RELEASE " " \
71529 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
71530 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
71531 - MODULE_ARCH_VERMAGIC
71532 + MODULE_ARCH_VERMAGIC \
71533 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
71534 + MODULE_GRSEC
71535
71536 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
71537 index 6071e91..ca6a489 100644
71538 --- a/include/linux/vmalloc.h
71539 +++ b/include/linux/vmalloc.h
71540 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
71541 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
71542 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
71543 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
71544 +
71545 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71546 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
71547 +#endif
71548 +
71549 /* bits [20..32] reserved for arch specific ioremap internals */
71550
71551 /*
71552 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
71553 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
71554 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
71555 unsigned long start, unsigned long end, gfp_t gfp_mask,
71556 - pgprot_t prot, int node, const void *caller);
71557 + pgprot_t prot, int node, const void *caller) __size_overflow(1);
71558 extern void vfree(const void *addr);
71559
71560 extern void *vmap(struct page **pages, unsigned int count,
71561 @@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
71562 extern void free_vm_area(struct vm_struct *area);
71563
71564 /* for /dev/kmem */
71565 -extern long vread(char *buf, char *addr, unsigned long count);
71566 -extern long vwrite(char *buf, char *addr, unsigned long count);
71567 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
71568 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
71569
71570 /*
71571 * Internals. Dont't use..
71572 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
71573 index 5fd71a7..e5ef9a9 100644
71574 --- a/include/linux/vmstat.h
71575 +++ b/include/linux/vmstat.h
71576 @@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
71577 /*
71578 * Zone based page accounting with per cpu differentials.
71579 */
71580 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71581 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71582
71583 static inline void zone_page_state_add(long x, struct zone *zone,
71584 enum zone_stat_item item)
71585 {
71586 - atomic_long_add(x, &zone->vm_stat[item]);
71587 - atomic_long_add(x, &vm_stat[item]);
71588 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
71589 + atomic_long_add_unchecked(x, &vm_stat[item]);
71590 }
71591
71592 static inline unsigned long global_page_state(enum zone_stat_item item)
71593 {
71594 - long x = atomic_long_read(&vm_stat[item]);
71595 + long x = atomic_long_read_unchecked(&vm_stat[item]);
71596 #ifdef CONFIG_SMP
71597 if (x < 0)
71598 x = 0;
71599 @@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
71600 static inline unsigned long zone_page_state(struct zone *zone,
71601 enum zone_stat_item item)
71602 {
71603 - long x = atomic_long_read(&zone->vm_stat[item]);
71604 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
71605 #ifdef CONFIG_SMP
71606 if (x < 0)
71607 x = 0;
71608 @@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
71609 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
71610 enum zone_stat_item item)
71611 {
71612 - long x = atomic_long_read(&zone->vm_stat[item]);
71613 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
71614
71615 #ifdef CONFIG_SMP
71616 int cpu;
71617 @@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
71618
71619 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
71620 {
71621 - atomic_long_inc(&zone->vm_stat[item]);
71622 - atomic_long_inc(&vm_stat[item]);
71623 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
71624 + atomic_long_inc_unchecked(&vm_stat[item]);
71625 }
71626
71627 static inline void __inc_zone_page_state(struct page *page,
71628 @@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
71629
71630 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
71631 {
71632 - atomic_long_dec(&zone->vm_stat[item]);
71633 - atomic_long_dec(&vm_stat[item]);
71634 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
71635 + atomic_long_dec_unchecked(&vm_stat[item]);
71636 }
71637
71638 static inline void __dec_zone_page_state(struct page *page,
71639 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
71640 index fdbafc6..49dfe4f 100644
71641 --- a/include/linux/xattr.h
71642 +++ b/include/linux/xattr.h
71643 @@ -28,7 +28,7 @@ struct xattr_handler {
71644 size_t size, int handler_flags);
71645 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
71646 size_t size, int flags, int handler_flags);
71647 -};
71648 +} __do_const;
71649
71650 struct xattr {
71651 char *name;
71652 @@ -37,6 +37,9 @@ struct xattr {
71653 };
71654
71655 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
71656 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
71657 +ssize_t pax_getxattr(struct dentry *, void *, size_t);
71658 +#endif
71659 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
71660 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
71661 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
71662 diff --git a/include/linux/zlib.h b/include/linux/zlib.h
71663 index 9c5a6b4..09c9438 100644
71664 --- a/include/linux/zlib.h
71665 +++ b/include/linux/zlib.h
71666 @@ -31,6 +31,7 @@
71667 #define _ZLIB_H
71668
71669 #include <linux/zconf.h>
71670 +#include <linux/compiler.h>
71671
71672 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
71673 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
71674 @@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
71675
71676 /* basic functions */
71677
71678 -extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
71679 +extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
71680 /*
71681 Returns the number of bytes that needs to be allocated for a per-
71682 stream workspace with the specified parameters. A pointer to this
71683 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
71684 index 95d1c91..6798cca 100644
71685 --- a/include/media/v4l2-dev.h
71686 +++ b/include/media/v4l2-dev.h
71687 @@ -76,7 +76,7 @@ struct v4l2_file_operations {
71688 int (*mmap) (struct file *, struct vm_area_struct *);
71689 int (*open) (struct file *);
71690 int (*release) (struct file *);
71691 -};
71692 +} __do_const;
71693
71694 /*
71695 * Newer version of video_device, handled by videodev2.c
71696 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
71697 index 4118ad1..cb7e25f 100644
71698 --- a/include/media/v4l2-ioctl.h
71699 +++ b/include/media/v4l2-ioctl.h
71700 @@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
71701 bool valid_prio, int cmd, void *arg);
71702 };
71703
71704 -
71705 /* v4l debugging and diagnostics */
71706
71707 /* Debug bitmask flags to be used on V4L2 */
71708 diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
71709 index adcbb20..62c2559 100644
71710 --- a/include/net/9p/transport.h
71711 +++ b/include/net/9p/transport.h
71712 @@ -57,7 +57,7 @@ struct p9_trans_module {
71713 int (*cancel) (struct p9_client *, struct p9_req_t *req);
71714 int (*zc_request)(struct p9_client *, struct p9_req_t *,
71715 char *, char *, int , int, int, int);
71716 -};
71717 +} __do_const;
71718
71719 void v9fs_register_trans(struct p9_trans_module *m);
71720 void v9fs_unregister_trans(struct p9_trans_module *m);
71721 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
71722 index cdd3302..76f8ede 100644
71723 --- a/include/net/bluetooth/l2cap.h
71724 +++ b/include/net/bluetooth/l2cap.h
71725 @@ -551,7 +551,7 @@ struct l2cap_ops {
71726 void (*defer) (struct l2cap_chan *chan);
71727 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
71728 unsigned long len, int nb);
71729 -};
71730 +} __do_const;
71731
71732 struct l2cap_conn {
71733 struct hci_conn *hcon;
71734 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
71735 index 9e5425b..8136ffc 100644
71736 --- a/include/net/caif/cfctrl.h
71737 +++ b/include/net/caif/cfctrl.h
71738 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
71739 void (*radioset_rsp)(void);
71740 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
71741 struct cflayer *client_layer);
71742 -};
71743 +} __no_const;
71744
71745 /* Link Setup Parameters for CAIF-Links. */
71746 struct cfctrl_link_param {
71747 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
71748 struct cfctrl {
71749 struct cfsrvl serv;
71750 struct cfctrl_rsp res;
71751 - atomic_t req_seq_no;
71752 - atomic_t rsp_seq_no;
71753 + atomic_unchecked_t req_seq_no;
71754 + atomic_unchecked_t rsp_seq_no;
71755 struct list_head list;
71756 /* Protects from simultaneous access to first_req list */
71757 spinlock_t info_list_lock;
71758 diff --git a/include/net/flow.h b/include/net/flow.h
71759 index 628e11b..4c475df 100644
71760 --- a/include/net/flow.h
71761 +++ b/include/net/flow.h
71762 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
71763
71764 extern void flow_cache_flush(void);
71765 extern void flow_cache_flush_deferred(void);
71766 -extern atomic_t flow_cache_genid;
71767 +extern atomic_unchecked_t flow_cache_genid;
71768
71769 #endif
71770 diff --git a/include/net/genetlink.h b/include/net/genetlink.h
71771 index bdfbe68..4402ebe 100644
71772 --- a/include/net/genetlink.h
71773 +++ b/include/net/genetlink.h
71774 @@ -118,7 +118,7 @@ struct genl_ops {
71775 struct netlink_callback *cb);
71776 int (*done)(struct netlink_callback *cb);
71777 struct list_head ops_list;
71778 -};
71779 +} __do_const;
71780
71781 extern int genl_register_family(struct genl_family *family);
71782 extern int genl_register_family_with_ops(struct genl_family *family,
71783 diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
71784 index 734d9b5..48a9a4b 100644
71785 --- a/include/net/gro_cells.h
71786 +++ b/include/net/gro_cells.h
71787 @@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
71788 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
71789
71790 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
71791 - atomic_long_inc(&dev->rx_dropped);
71792 + atomic_long_inc_unchecked(&dev->rx_dropped);
71793 kfree_skb(skb);
71794 return;
71795 }
71796 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
71797 index 1832927..ce39aea 100644
71798 --- a/include/net/inet_connection_sock.h
71799 +++ b/include/net/inet_connection_sock.h
71800 @@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
71801 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
71802 int (*bind_conflict)(const struct sock *sk,
71803 const struct inet_bind_bucket *tb, bool relax);
71804 -};
71805 +} __do_const;
71806
71807 /** inet_connection_sock - INET connection oriented sock
71808 *
71809 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
71810 index 53f464d..ba76aaa 100644
71811 --- a/include/net/inetpeer.h
71812 +++ b/include/net/inetpeer.h
71813 @@ -47,8 +47,8 @@ struct inet_peer {
71814 */
71815 union {
71816 struct {
71817 - atomic_t rid; /* Frag reception counter */
71818 - atomic_t ip_id_count; /* IP ID for the next packet */
71819 + atomic_unchecked_t rid; /* Frag reception counter */
71820 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
71821 };
71822 struct rcu_head rcu;
71823 struct inet_peer *gc_next;
71824 @@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
71825 more++;
71826 inet_peer_refcheck(p);
71827 do {
71828 - old = atomic_read(&p->ip_id_count);
71829 + old = atomic_read_unchecked(&p->ip_id_count);
71830 new = old + more;
71831 if (!new)
71832 new = 1;
71833 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
71834 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
71835 return new;
71836 }
71837
71838 diff --git a/include/net/ip.h b/include/net/ip.h
71839 index a68f838..74518ab 100644
71840 --- a/include/net/ip.h
71841 +++ b/include/net/ip.h
71842 @@ -202,7 +202,7 @@ extern struct local_ports {
71843 } sysctl_local_ports;
71844 extern void inet_get_local_port_range(int *low, int *high);
71845
71846 -extern unsigned long *sysctl_local_reserved_ports;
71847 +extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
71848 static inline int inet_is_reserved_local_port(int port)
71849 {
71850 return test_bit(port, sysctl_local_reserved_ports);
71851 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
71852 index e49db91..76a81de 100644
71853 --- a/include/net/ip_fib.h
71854 +++ b/include/net/ip_fib.h
71855 @@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
71856
71857 #define FIB_RES_SADDR(net, res) \
71858 ((FIB_RES_NH(res).nh_saddr_genid == \
71859 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
71860 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
71861 FIB_RES_NH(res).nh_saddr : \
71862 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
71863 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
71864 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
71865 index fce8e6b..3ca4916 100644
71866 --- a/include/net/ip_vs.h
71867 +++ b/include/net/ip_vs.h
71868 @@ -599,7 +599,7 @@ struct ip_vs_conn {
71869 struct ip_vs_conn *control; /* Master control connection */
71870 atomic_t n_control; /* Number of controlled ones */
71871 struct ip_vs_dest *dest; /* real server */
71872 - atomic_t in_pkts; /* incoming packet counter */
71873 + atomic_unchecked_t in_pkts; /* incoming packet counter */
71874
71875 /* packet transmitter for different forwarding methods. If it
71876 mangles the packet, it must return NF_DROP or better NF_STOLEN,
71877 @@ -737,7 +737,7 @@ struct ip_vs_dest {
71878 __be16 port; /* port number of the server */
71879 union nf_inet_addr addr; /* IP address of the server */
71880 volatile unsigned int flags; /* dest status flags */
71881 - atomic_t conn_flags; /* flags to copy to conn */
71882 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
71883 atomic_t weight; /* server weight */
71884
71885 atomic_t refcnt; /* reference counter */
71886 @@ -981,11 +981,11 @@ struct netns_ipvs {
71887 /* ip_vs_lblc */
71888 int sysctl_lblc_expiration;
71889 struct ctl_table_header *lblc_ctl_header;
71890 - struct ctl_table *lblc_ctl_table;
71891 + ctl_table_no_const *lblc_ctl_table;
71892 /* ip_vs_lblcr */
71893 int sysctl_lblcr_expiration;
71894 struct ctl_table_header *lblcr_ctl_header;
71895 - struct ctl_table *lblcr_ctl_table;
71896 + ctl_table_no_const *lblcr_ctl_table;
71897 /* ip_vs_est */
71898 struct list_head est_list; /* estimator list */
71899 spinlock_t est_lock;
71900 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
71901 index 80ffde3..968b0f4 100644
71902 --- a/include/net/irda/ircomm_tty.h
71903 +++ b/include/net/irda/ircomm_tty.h
71904 @@ -35,6 +35,7 @@
71905 #include <linux/termios.h>
71906 #include <linux/timer.h>
71907 #include <linux/tty.h> /* struct tty_struct */
71908 +#include <asm/local.h>
71909
71910 #include <net/irda/irias_object.h>
71911 #include <net/irda/ircomm_core.h>
71912 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
71913 index 714cc9a..ea05f3e 100644
71914 --- a/include/net/iucv/af_iucv.h
71915 +++ b/include/net/iucv/af_iucv.h
71916 @@ -149,7 +149,7 @@ struct iucv_skb_cb {
71917 struct iucv_sock_list {
71918 struct hlist_head head;
71919 rwlock_t lock;
71920 - atomic_t autobind_name;
71921 + atomic_unchecked_t autobind_name;
71922 };
71923
71924 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
71925 diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
71926 index df83f69..9b640b8 100644
71927 --- a/include/net/llc_c_ac.h
71928 +++ b/include/net/llc_c_ac.h
71929 @@ -87,7 +87,7 @@
71930 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
71931 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
71932
71933 -typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
71934 +typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
71935
71936 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
71937 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
71938 diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
71939 index 6ca3113..f8026dd 100644
71940 --- a/include/net/llc_c_ev.h
71941 +++ b/include/net/llc_c_ev.h
71942 @@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
71943 return (struct llc_conn_state_ev *)skb->cb;
71944 }
71945
71946 -typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
71947 -typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
71948 +typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
71949 +typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
71950
71951 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
71952 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
71953 diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
71954 index 0e79cfb..f46db31 100644
71955 --- a/include/net/llc_c_st.h
71956 +++ b/include/net/llc_c_st.h
71957 @@ -37,7 +37,7 @@ struct llc_conn_state_trans {
71958 u8 next_state;
71959 llc_conn_ev_qfyr_t *ev_qualifiers;
71960 llc_conn_action_t *ev_actions;
71961 -};
71962 +} __do_const;
71963
71964 struct llc_conn_state {
71965 u8 current_state;
71966 diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
71967 index 37a3bbd..55a4241 100644
71968 --- a/include/net/llc_s_ac.h
71969 +++ b/include/net/llc_s_ac.h
71970 @@ -23,7 +23,7 @@
71971 #define SAP_ACT_TEST_IND 9
71972
71973 /* All action functions must look like this */
71974 -typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
71975 +typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
71976
71977 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
71978 struct sk_buff *skb);
71979 diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
71980 index 567c681..cd73ac0 100644
71981 --- a/include/net/llc_s_st.h
71982 +++ b/include/net/llc_s_st.h
71983 @@ -20,7 +20,7 @@ struct llc_sap_state_trans {
71984 llc_sap_ev_t ev;
71985 u8 next_state;
71986 llc_sap_action_t *ev_actions;
71987 -};
71988 +} __do_const;
71989
71990 struct llc_sap_state {
71991 u8 curr_state;
71992 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
71993 index f7eba13..91ed983 100644
71994 --- a/include/net/mac80211.h
71995 +++ b/include/net/mac80211.h
71996 @@ -4119,7 +4119,7 @@ struct rate_control_ops {
71997 void (*add_sta_debugfs)(void *priv, void *priv_sta,
71998 struct dentry *dir);
71999 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
72000 -};
72001 +} __do_const;
72002
72003 static inline int rate_supported(struct ieee80211_sta *sta,
72004 enum ieee80211_band band,
72005 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
72006 index 7e748ad..5c6229b 100644
72007 --- a/include/net/neighbour.h
72008 +++ b/include/net/neighbour.h
72009 @@ -123,7 +123,7 @@ struct neigh_ops {
72010 void (*error_report)(struct neighbour *, struct sk_buff *);
72011 int (*output)(struct neighbour *, struct sk_buff *);
72012 int (*connected_output)(struct neighbour *, struct sk_buff *);
72013 -};
72014 +} __do_const;
72015
72016 struct pneigh_entry {
72017 struct pneigh_entry *next;
72018 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
72019 index de644bc..dfbcc4c 100644
72020 --- a/include/net/net_namespace.h
72021 +++ b/include/net/net_namespace.h
72022 @@ -115,7 +115,7 @@ struct net {
72023 #endif
72024 struct netns_ipvs *ipvs;
72025 struct sock *diag_nlsk;
72026 - atomic_t rt_genid;
72027 + atomic_unchecked_t rt_genid;
72028 };
72029
72030 /*
72031 @@ -272,7 +272,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
72032 #define __net_init __init
72033 #define __net_exit __exit_refok
72034 #define __net_initdata __initdata
72035 +#ifdef CONSTIFY_PLUGIN
72036 #define __net_initconst __initconst
72037 +#else
72038 +#define __net_initconst __initdata
72039 +#endif
72040 #endif
72041
72042 struct pernet_operations {
72043 @@ -282,7 +286,7 @@ struct pernet_operations {
72044 void (*exit_batch)(struct list_head *net_exit_list);
72045 int *id;
72046 size_t size;
72047 -};
72048 +} __do_const;
72049
72050 /*
72051 * Use these carefully. If you implement a network device and it
72052 @@ -330,12 +334,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
72053
72054 static inline int rt_genid(struct net *net)
72055 {
72056 - return atomic_read(&net->rt_genid);
72057 + return atomic_read_unchecked(&net->rt_genid);
72058 }
72059
72060 static inline void rt_genid_bump(struct net *net)
72061 {
72062 - atomic_inc(&net->rt_genid);
72063 + atomic_inc_unchecked(&net->rt_genid);
72064 }
72065
72066 #endif /* __NET_NET_NAMESPACE_H */
72067 diff --git a/include/net/netdma.h b/include/net/netdma.h
72068 index 8ba8ce2..99b7fff 100644
72069 --- a/include/net/netdma.h
72070 +++ b/include/net/netdma.h
72071 @@ -24,7 +24,7 @@
72072 #include <linux/dmaengine.h>
72073 #include <linux/skbuff.h>
72074
72075 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72076 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72077 struct sk_buff *skb, int offset, struct iovec *to,
72078 size_t len, struct dma_pinned_list *pinned_list);
72079
72080 diff --git a/include/net/netlink.h b/include/net/netlink.h
72081 index 9690b0f..87aded7 100644
72082 --- a/include/net/netlink.h
72083 +++ b/include/net/netlink.h
72084 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
72085 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
72086 {
72087 if (mark)
72088 - skb_trim(skb, (unsigned char *) mark - skb->data);
72089 + skb_trim(skb, (const unsigned char *) mark - skb->data);
72090 }
72091
72092 /**
72093 diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
72094 index c9c0c53..53f24c3 100644
72095 --- a/include/net/netns/conntrack.h
72096 +++ b/include/net/netns/conntrack.h
72097 @@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
72098 struct nf_proto_net {
72099 #ifdef CONFIG_SYSCTL
72100 struct ctl_table_header *ctl_table_header;
72101 - struct ctl_table *ctl_table;
72102 + ctl_table_no_const *ctl_table;
72103 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
72104 struct ctl_table_header *ctl_compat_header;
72105 - struct ctl_table *ctl_compat_table;
72106 + ctl_table_no_const *ctl_compat_table;
72107 #endif
72108 #endif
72109 unsigned int users;
72110 @@ -58,7 +58,7 @@ struct nf_ip_net {
72111 struct nf_icmp_net icmpv6;
72112 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
72113 struct ctl_table_header *ctl_table_header;
72114 - struct ctl_table *ctl_table;
72115 + ctl_table_no_const *ctl_table;
72116 #endif
72117 };
72118
72119 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
72120 index 2ba9de8..47bd6c7 100644
72121 --- a/include/net/netns/ipv4.h
72122 +++ b/include/net/netns/ipv4.h
72123 @@ -67,7 +67,7 @@ struct netns_ipv4 {
72124 kgid_t sysctl_ping_group_range[2];
72125 long sysctl_tcp_mem[3];
72126
72127 - atomic_t dev_addr_genid;
72128 + atomic_unchecked_t dev_addr_genid;
72129
72130 #ifdef CONFIG_IP_MROUTE
72131 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
72132 diff --git a/include/net/protocol.h b/include/net/protocol.h
72133 index 047c047..b9dad15 100644
72134 --- a/include/net/protocol.h
72135 +++ b/include/net/protocol.h
72136 @@ -44,7 +44,7 @@ struct net_protocol {
72137 void (*err_handler)(struct sk_buff *skb, u32 info);
72138 unsigned int no_policy:1,
72139 netns_ok:1;
72140 -};
72141 +} __do_const;
72142
72143 #if IS_ENABLED(CONFIG_IPV6)
72144 struct inet6_protocol {
72145 @@ -57,7 +57,7 @@ struct inet6_protocol {
72146 u8 type, u8 code, int offset,
72147 __be32 info);
72148 unsigned int flags; /* INET6_PROTO_xxx */
72149 -};
72150 +} __do_const;
72151
72152 #define INET6_PROTO_NOPOLICY 0x1
72153 #define INET6_PROTO_FINAL 0x2
72154 diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
72155 index 5a15fab..d799ea7 100644
72156 --- a/include/net/rtnetlink.h
72157 +++ b/include/net/rtnetlink.h
72158 @@ -81,7 +81,7 @@ struct rtnl_link_ops {
72159 const struct net_device *dev);
72160 unsigned int (*get_num_tx_queues)(void);
72161 unsigned int (*get_num_rx_queues)(void);
72162 -};
72163 +} __do_const;
72164
72165 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
72166 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
72167 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
72168 index df85a0c..19ac300 100644
72169 --- a/include/net/sctp/sctp.h
72170 +++ b/include/net/sctp/sctp.h
72171 @@ -330,9 +330,9 @@ do { \
72172
72173 #else /* SCTP_DEBUG */
72174
72175 -#define SCTP_DEBUG_PRINTK(whatever...)
72176 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
72177 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
72178 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
72179 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
72180 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
72181 #define SCTP_ENABLE_DEBUG
72182 #define SCTP_DISABLE_DEBUG
72183 #define SCTP_ASSERT(expr, str, func)
72184 diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
72185 index 2a82d13..62a31c2 100644
72186 --- a/include/net/sctp/sm.h
72187 +++ b/include/net/sctp/sm.h
72188 @@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
72189 typedef struct {
72190 sctp_state_fn_t *fn;
72191 const char *name;
72192 -} sctp_sm_table_entry_t;
72193 +} __do_const sctp_sm_table_entry_t;
72194
72195 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
72196 * currently in use.
72197 @@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
72198 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
72199
72200 /* Extern declarations for major data structures. */
72201 -extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72202 +extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72203
72204
72205 /* Get the size of a DATA chunk payload. */
72206 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
72207 index 0e0f9d2..cd05ebb 100644
72208 --- a/include/net/sctp/structs.h
72209 +++ b/include/net/sctp/structs.h
72210 @@ -517,7 +517,7 @@ struct sctp_pf {
72211 struct sctp_association *asoc);
72212 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
72213 struct sctp_af *af;
72214 -};
72215 +} __do_const;
72216
72217
72218 /* Structure to track chunk fragments that have been acked, but peer
72219 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
72220 index c2e542b..6ca975b 100644
72221 --- a/include/net/secure_seq.h
72222 +++ b/include/net/secure_seq.h
72223 @@ -3,6 +3,7 @@
72224
72225 #include <linux/types.h>
72226
72227 +extern void net_secret_init(void);
72228 extern __u32 secure_ip_id(__be32 daddr);
72229 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
72230 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
72231 diff --git a/include/net/sock.h b/include/net/sock.h
72232 index 14f6e9d..7cd56d0 100644
72233 --- a/include/net/sock.h
72234 +++ b/include/net/sock.h
72235 @@ -325,7 +325,7 @@ struct sock {
72236 #ifdef CONFIG_RPS
72237 __u32 sk_rxhash;
72238 #endif
72239 - atomic_t sk_drops;
72240 + atomic_unchecked_t sk_drops;
72241 int sk_rcvbuf;
72242
72243 struct sk_filter __rcu *sk_filter;
72244 @@ -1784,7 +1784,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
72245 }
72246
72247 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
72248 - char __user *from, char *to,
72249 + char __user *from, unsigned char *to,
72250 int copy, int offset)
72251 {
72252 if (skb->ip_summed == CHECKSUM_NONE) {
72253 @@ -2043,7 +2043,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
72254 }
72255 }
72256
72257 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72258 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72259
72260 /**
72261 * sk_page_frag - return an appropriate page_frag
72262 diff --git a/include/net/tcp.h b/include/net/tcp.h
72263 index cf0694d..52a6881 100644
72264 --- a/include/net/tcp.h
72265 +++ b/include/net/tcp.h
72266 @@ -529,7 +529,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
72267 extern void tcp_xmit_retransmit_queue(struct sock *);
72268 extern void tcp_simple_retransmit(struct sock *);
72269 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
72270 -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72271 +extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72272
72273 extern void tcp_send_probe0(struct sock *);
72274 extern void tcp_send_partial(struct sock *);
72275 @@ -700,8 +700,8 @@ struct tcp_skb_cb {
72276 struct inet6_skb_parm h6;
72277 #endif
72278 } header; /* For incoming frames */
72279 - __u32 seq; /* Starting sequence number */
72280 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
72281 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
72282 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
72283 __u32 when; /* used to compute rtt's */
72284 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
72285
72286 @@ -715,7 +715,7 @@ struct tcp_skb_cb {
72287
72288 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
72289 /* 1 byte hole */
72290 - __u32 ack_seq; /* Sequence number ACK'd */
72291 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
72292 };
72293
72294 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
72295 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
72296 index 24c8886..e6fb816 100644
72297 --- a/include/net/xfrm.h
72298 +++ b/include/net/xfrm.h
72299 @@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
72300 struct net_device *dev,
72301 const struct flowi *fl);
72302 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
72303 -};
72304 +} __do_const;
72305
72306 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
72307 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
72308 @@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
72309 struct sk_buff *skb);
72310 int (*transport_finish)(struct sk_buff *skb,
72311 int async);
72312 -};
72313 +} __do_const;
72314
72315 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
72316 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
72317 @@ -423,7 +423,7 @@ struct xfrm_mode {
72318 struct module *owner;
72319 unsigned int encap;
72320 int flags;
72321 -};
72322 +} __do_const;
72323
72324 /* Flags for xfrm_mode. */
72325 enum {
72326 @@ -520,7 +520,7 @@ struct xfrm_policy {
72327 struct timer_list timer;
72328
72329 struct flow_cache_object flo;
72330 - atomic_t genid;
72331 + atomic_unchecked_t genid;
72332 u32 priority;
72333 u32 index;
72334 struct xfrm_mark mark;
72335 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
72336 index 1a046b1..ee0bef0 100644
72337 --- a/include/rdma/iw_cm.h
72338 +++ b/include/rdma/iw_cm.h
72339 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
72340 int backlog);
72341
72342 int (*destroy_listen)(struct iw_cm_id *cm_id);
72343 -};
72344 +} __no_const;
72345
72346 /**
72347 * iw_create_cm_id - Create an IW CM identifier.
72348 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
72349 index e1379b4..67eafbe 100644
72350 --- a/include/scsi/libfc.h
72351 +++ b/include/scsi/libfc.h
72352 @@ -762,6 +762,7 @@ struct libfc_function_template {
72353 */
72354 void (*disc_stop_final) (struct fc_lport *);
72355 };
72356 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
72357
72358 /**
72359 * struct fc_disc - Discovery context
72360 @@ -866,7 +867,7 @@ struct fc_lport {
72361 struct fc_vport *vport;
72362
72363 /* Operational Information */
72364 - struct libfc_function_template tt;
72365 + libfc_function_template_no_const tt;
72366 u8 link_up;
72367 u8 qfull;
72368 enum fc_lport_state state;
72369 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
72370 index a7f9cba..b1f44d0 100644
72371 --- a/include/scsi/scsi_device.h
72372 +++ b/include/scsi/scsi_device.h
72373 @@ -171,9 +171,9 @@ struct scsi_device {
72374 unsigned int max_device_blocked; /* what device_blocked counts down from */
72375 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
72376
72377 - atomic_t iorequest_cnt;
72378 - atomic_t iodone_cnt;
72379 - atomic_t ioerr_cnt;
72380 + atomic_unchecked_t iorequest_cnt;
72381 + atomic_unchecked_t iodone_cnt;
72382 + atomic_unchecked_t ioerr_cnt;
72383
72384 struct device sdev_gendev,
72385 sdev_dev;
72386 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
72387 index b797e8f..8e2c3aa 100644
72388 --- a/include/scsi/scsi_transport_fc.h
72389 +++ b/include/scsi/scsi_transport_fc.h
72390 @@ -751,7 +751,8 @@ struct fc_function_template {
72391 unsigned long show_host_system_hostname:1;
72392
72393 unsigned long disable_target_scan:1;
72394 -};
72395 +} __do_const;
72396 +typedef struct fc_function_template __no_const fc_function_template_no_const;
72397
72398
72399 /**
72400 diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
72401 index ff6c741..393815f 100644
72402 --- a/include/sound/compress_driver.h
72403 +++ b/include/sound/compress_driver.h
72404 @@ -130,7 +130,7 @@ struct snd_compr_ops {
72405 struct snd_compr_caps *caps);
72406 int (*get_codec_caps) (struct snd_compr_stream *stream,
72407 struct snd_compr_codec_caps *codec);
72408 -};
72409 +} __no_const;
72410
72411 /**
72412 * struct snd_compr: Compressed device
72413 diff --git a/include/sound/soc.h b/include/sound/soc.h
72414 index a6a059c..2243336 100644
72415 --- a/include/sound/soc.h
72416 +++ b/include/sound/soc.h
72417 @@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
72418 /* probe ordering - for components with runtime dependencies */
72419 int probe_order;
72420 int remove_order;
72421 -};
72422 +} __do_const;
72423
72424 /* SoC platform interface */
72425 struct snd_soc_platform_driver {
72426 @@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
72427 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
72428 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
72429 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
72430 -};
72431 +} __do_const;
72432
72433 struct snd_soc_platform {
72434 const char *name;
72435 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
72436 index c4af592..20c52d2 100644
72437 --- a/include/target/target_core_base.h
72438 +++ b/include/target/target_core_base.h
72439 @@ -657,7 +657,7 @@ struct se_device {
72440 spinlock_t stats_lock;
72441 /* Active commands on this virtual SE device */
72442 atomic_t simple_cmds;
72443 - atomic_t dev_ordered_id;
72444 + atomic_unchecked_t dev_ordered_id;
72445 atomic_t dev_ordered_sync;
72446 atomic_t dev_qf_count;
72447 int export_count;
72448 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
72449 new file mode 100644
72450 index 0000000..fb634b7
72451 --- /dev/null
72452 +++ b/include/trace/events/fs.h
72453 @@ -0,0 +1,53 @@
72454 +#undef TRACE_SYSTEM
72455 +#define TRACE_SYSTEM fs
72456 +
72457 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
72458 +#define _TRACE_FS_H
72459 +
72460 +#include <linux/fs.h>
72461 +#include <linux/tracepoint.h>
72462 +
72463 +TRACE_EVENT(do_sys_open,
72464 +
72465 + TP_PROTO(const char *filename, int flags, int mode),
72466 +
72467 + TP_ARGS(filename, flags, mode),
72468 +
72469 + TP_STRUCT__entry(
72470 + __string( filename, filename )
72471 + __field( int, flags )
72472 + __field( int, mode )
72473 + ),
72474 +
72475 + TP_fast_assign(
72476 + __assign_str(filename, filename);
72477 + __entry->flags = flags;
72478 + __entry->mode = mode;
72479 + ),
72480 +
72481 + TP_printk("\"%s\" %x %o",
72482 + __get_str(filename), __entry->flags, __entry->mode)
72483 +);
72484 +
72485 +TRACE_EVENT(open_exec,
72486 +
72487 + TP_PROTO(const char *filename),
72488 +
72489 + TP_ARGS(filename),
72490 +
72491 + TP_STRUCT__entry(
72492 + __string( filename, filename )
72493 + ),
72494 +
72495 + TP_fast_assign(
72496 + __assign_str(filename, filename);
72497 + ),
72498 +
72499 + TP_printk("\"%s\"",
72500 + __get_str(filename))
72501 +);
72502 +
72503 +#endif /* _TRACE_FS_H */
72504 +
72505 +/* This part must be outside protection */
72506 +#include <trace/define_trace.h>
72507 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
72508 index 1c09820..7f5ec79 100644
72509 --- a/include/trace/events/irq.h
72510 +++ b/include/trace/events/irq.h
72511 @@ -36,7 +36,7 @@ struct softirq_action;
72512 */
72513 TRACE_EVENT(irq_handler_entry,
72514
72515 - TP_PROTO(int irq, struct irqaction *action),
72516 + TP_PROTO(int irq, const struct irqaction *action),
72517
72518 TP_ARGS(irq, action),
72519
72520 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
72521 */
72522 TRACE_EVENT(irq_handler_exit,
72523
72524 - TP_PROTO(int irq, struct irqaction *action, int ret),
72525 + TP_PROTO(int irq, const struct irqaction *action, int ret),
72526
72527 TP_ARGS(irq, action, ret),
72528
72529 diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
72530 index 7caf44c..23c6f27 100644
72531 --- a/include/uapi/linux/a.out.h
72532 +++ b/include/uapi/linux/a.out.h
72533 @@ -39,6 +39,14 @@ enum machine_type {
72534 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
72535 };
72536
72537 +/* Constants for the N_FLAGS field */
72538 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
72539 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
72540 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
72541 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
72542 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
72543 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
72544 +
72545 #if !defined (N_MAGIC)
72546 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
72547 #endif
72548 diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
72549 index d876736..ccce5c0 100644
72550 --- a/include/uapi/linux/byteorder/little_endian.h
72551 +++ b/include/uapi/linux/byteorder/little_endian.h
72552 @@ -42,51 +42,51 @@
72553
72554 static inline __le64 __cpu_to_le64p(const __u64 *p)
72555 {
72556 - return (__force __le64)*p;
72557 + return (__force const __le64)*p;
72558 }
72559 -static inline __u64 __le64_to_cpup(const __le64 *p)
72560 +static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
72561 {
72562 - return (__force __u64)*p;
72563 + return (__force const __u64)*p;
72564 }
72565 static inline __le32 __cpu_to_le32p(const __u32 *p)
72566 {
72567 - return (__force __le32)*p;
72568 + return (__force const __le32)*p;
72569 }
72570 static inline __u32 __le32_to_cpup(const __le32 *p)
72571 {
72572 - return (__force __u32)*p;
72573 + return (__force const __u32)*p;
72574 }
72575 static inline __le16 __cpu_to_le16p(const __u16 *p)
72576 {
72577 - return (__force __le16)*p;
72578 + return (__force const __le16)*p;
72579 }
72580 static inline __u16 __le16_to_cpup(const __le16 *p)
72581 {
72582 - return (__force __u16)*p;
72583 + return (__force const __u16)*p;
72584 }
72585 static inline __be64 __cpu_to_be64p(const __u64 *p)
72586 {
72587 - return (__force __be64)__swab64p(p);
72588 + return (__force const __be64)__swab64p(p);
72589 }
72590 static inline __u64 __be64_to_cpup(const __be64 *p)
72591 {
72592 - return __swab64p((__u64 *)p);
72593 + return __swab64p((const __u64 *)p);
72594 }
72595 static inline __be32 __cpu_to_be32p(const __u32 *p)
72596 {
72597 - return (__force __be32)__swab32p(p);
72598 + return (__force const __be32)__swab32p(p);
72599 }
72600 -static inline __u32 __be32_to_cpup(const __be32 *p)
72601 +static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
72602 {
72603 - return __swab32p((__u32 *)p);
72604 + return __swab32p((const __u32 *)p);
72605 }
72606 static inline __be16 __cpu_to_be16p(const __u16 *p)
72607 {
72608 - return (__force __be16)__swab16p(p);
72609 + return (__force const __be16)__swab16p(p);
72610 }
72611 static inline __u16 __be16_to_cpup(const __be16 *p)
72612 {
72613 - return __swab16p((__u16 *)p);
72614 + return __swab16p((const __u16 *)p);
72615 }
72616 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
72617 #define __le64_to_cpus(x) do { (void)(x); } while (0)
72618 diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
72619 index 8072d35..e77aeb8 100644
72620 --- a/include/uapi/linux/elf.h
72621 +++ b/include/uapi/linux/elf.h
72622 @@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
72623 #define PT_GNU_EH_FRAME 0x6474e550
72624
72625 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
72626 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
72627 +
72628 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
72629 +
72630 +/* Constants for the e_flags field */
72631 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
72632 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
72633 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
72634 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
72635 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
72636 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
72637
72638 /*
72639 * Extended Numbering
72640 @@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
72641 #define DT_DEBUG 21
72642 #define DT_TEXTREL 22
72643 #define DT_JMPREL 23
72644 +#define DT_FLAGS 30
72645 + #define DF_TEXTREL 0x00000004
72646 #define DT_ENCODING 32
72647 #define OLD_DT_LOOS 0x60000000
72648 #define DT_LOOS 0x6000000d
72649 @@ -240,6 +253,19 @@ typedef struct elf64_hdr {
72650 #define PF_W 0x2
72651 #define PF_X 0x1
72652
72653 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
72654 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
72655 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
72656 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
72657 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
72658 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
72659 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
72660 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
72661 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
72662 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
72663 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
72664 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
72665 +
72666 typedef struct elf32_phdr{
72667 Elf32_Word p_type;
72668 Elf32_Off p_offset;
72669 @@ -332,6 +358,8 @@ typedef struct elf64_shdr {
72670 #define EI_OSABI 7
72671 #define EI_PAD 8
72672
72673 +#define EI_PAX 14
72674 +
72675 #define ELFMAG0 0x7f /* EI_MAG */
72676 #define ELFMAG1 'E'
72677 #define ELFMAG2 'L'
72678 diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
72679 index aa169c4..6a2771d 100644
72680 --- a/include/uapi/linux/personality.h
72681 +++ b/include/uapi/linux/personality.h
72682 @@ -30,6 +30,7 @@ enum {
72683 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
72684 ADDR_NO_RANDOMIZE | \
72685 ADDR_COMPAT_LAYOUT | \
72686 + ADDR_LIMIT_3GB | \
72687 MMAP_PAGE_ZERO)
72688
72689 /*
72690 diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
72691 index 7530e74..e714828 100644
72692 --- a/include/uapi/linux/screen_info.h
72693 +++ b/include/uapi/linux/screen_info.h
72694 @@ -43,7 +43,8 @@ struct screen_info {
72695 __u16 pages; /* 0x32 */
72696 __u16 vesa_attributes; /* 0x34 */
72697 __u32 capabilities; /* 0x36 */
72698 - __u8 _reserved[6]; /* 0x3a */
72699 + __u16 vesapm_size; /* 0x3a */
72700 + __u8 _reserved[4]; /* 0x3c */
72701 } __attribute__((packed));
72702
72703 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
72704 diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
72705 index 0e011eb..82681b1 100644
72706 --- a/include/uapi/linux/swab.h
72707 +++ b/include/uapi/linux/swab.h
72708 @@ -43,7 +43,7 @@
72709 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
72710 */
72711
72712 -static inline __attribute_const__ __u16 __fswab16(__u16 val)
72713 +static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
72714 {
72715 #ifdef __HAVE_BUILTIN_BSWAP16__
72716 return __builtin_bswap16(val);
72717 @@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
72718 #endif
72719 }
72720
72721 -static inline __attribute_const__ __u32 __fswab32(__u32 val)
72722 +static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
72723 {
72724 #ifdef __HAVE_BUILTIN_BSWAP32__
72725 return __builtin_bswap32(val);
72726 @@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
72727 #endif
72728 }
72729
72730 -static inline __attribute_const__ __u64 __fswab64(__u64 val)
72731 +static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
72732 {
72733 #ifdef __HAVE_BUILTIN_BSWAP64__
72734 return __builtin_bswap64(val);
72735 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
72736 index 6d67213..8dab561 100644
72737 --- a/include/uapi/linux/sysctl.h
72738 +++ b/include/uapi/linux/sysctl.h
72739 @@ -155,7 +155,11 @@ enum
72740 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
72741 };
72742
72743 -
72744 +#ifdef CONFIG_PAX_SOFTMODE
72745 +enum {
72746 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
72747 +};
72748 +#endif
72749
72750 /* CTL_VM names: */
72751 enum
72752 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
72753 index e4629b9..6958086 100644
72754 --- a/include/uapi/linux/xattr.h
72755 +++ b/include/uapi/linux/xattr.h
72756 @@ -63,5 +63,9 @@
72757 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
72758 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
72759
72760 +/* User namespace */
72761 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
72762 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
72763 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
72764
72765 #endif /* _UAPI_LINUX_XATTR_H */
72766 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
72767 index f9466fa..f4e2b81 100644
72768 --- a/include/video/udlfb.h
72769 +++ b/include/video/udlfb.h
72770 @@ -53,10 +53,10 @@ struct dlfb_data {
72771 u32 pseudo_palette[256];
72772 int blank_mode; /*one of FB_BLANK_ */
72773 /* blit-only rendering path metrics, exposed through sysfs */
72774 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
72775 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
72776 - atomic_t bytes_sent; /* to usb, after compression including overhead */
72777 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
72778 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
72779 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
72780 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
72781 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
72782 };
72783
72784 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
72785 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
72786 index 1a91850..28573f8 100644
72787 --- a/include/video/uvesafb.h
72788 +++ b/include/video/uvesafb.h
72789 @@ -122,6 +122,7 @@ struct uvesafb_par {
72790 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
72791 u8 pmi_setpal; /* PMI for palette changes */
72792 u16 *pmi_base; /* protected mode interface location */
72793 + u8 *pmi_code; /* protected mode code location */
72794 void *pmi_start;
72795 void *pmi_pal;
72796 u8 *vbe_state_orig; /*
72797 diff --git a/init/Kconfig b/init/Kconfig
72798 index 5341d72..153f24f 100644
72799 --- a/init/Kconfig
72800 +++ b/init/Kconfig
72801 @@ -984,6 +984,7 @@ endif # CGROUPS
72802
72803 config CHECKPOINT_RESTORE
72804 bool "Checkpoint/restore support" if EXPERT
72805 + depends on !GRKERNSEC
72806 default n
72807 help
72808 Enables additional kernel features in a sake of checkpoint/restore.
72809 @@ -1471,7 +1472,7 @@ config SLUB_DEBUG
72810
72811 config COMPAT_BRK
72812 bool "Disable heap randomization"
72813 - default y
72814 + default n
72815 help
72816 Randomizing heap placement makes heap exploits harder, but it
72817 also breaks ancient binaries (including anything libc5 based).
72818 @@ -1734,7 +1735,7 @@ config INIT_ALL_POSSIBLE
72819 config STOP_MACHINE
72820 bool
72821 default y
72822 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
72823 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
72824 help
72825 Need stop_machine() primitive.
72826
72827 diff --git a/init/Makefile b/init/Makefile
72828 index 7bc47ee..6da2dc7 100644
72829 --- a/init/Makefile
72830 +++ b/init/Makefile
72831 @@ -2,6 +2,9 @@
72832 # Makefile for the linux kernel.
72833 #
72834
72835 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
72836 +asflags-y := $(GCC_PLUGINS_AFLAGS)
72837 +
72838 obj-y := main.o version.o mounts.o
72839 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
72840 obj-y += noinitramfs.o
72841 diff --git a/init/do_mounts.c b/init/do_mounts.c
72842 index a2b49f2..03a0e17c 100644
72843 --- a/init/do_mounts.c
72844 +++ b/init/do_mounts.c
72845 @@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
72846 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
72847 {
72848 struct super_block *s;
72849 - int err = sys_mount(name, "/root", fs, flags, data);
72850 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
72851 if (err)
72852 return err;
72853
72854 - sys_chdir("/root");
72855 + sys_chdir((const char __force_user *)"/root");
72856 s = current->fs->pwd.dentry->d_sb;
72857 ROOT_DEV = s->s_dev;
72858 printk(KERN_INFO
72859 @@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
72860 va_start(args, fmt);
72861 vsprintf(buf, fmt, args);
72862 va_end(args);
72863 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
72864 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
72865 if (fd >= 0) {
72866 sys_ioctl(fd, FDEJECT, 0);
72867 sys_close(fd);
72868 }
72869 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
72870 - fd = sys_open("/dev/console", O_RDWR, 0);
72871 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
72872 if (fd >= 0) {
72873 sys_ioctl(fd, TCGETS, (long)&termios);
72874 termios.c_lflag &= ~ICANON;
72875 sys_ioctl(fd, TCSETSF, (long)&termios);
72876 - sys_read(fd, &c, 1);
72877 + sys_read(fd, (char __user *)&c, 1);
72878 termios.c_lflag |= ICANON;
72879 sys_ioctl(fd, TCSETSF, (long)&termios);
72880 sys_close(fd);
72881 @@ -585,6 +585,6 @@ void __init prepare_namespace(void)
72882 mount_root();
72883 out:
72884 devtmpfs_mount("dev");
72885 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
72886 - sys_chroot(".");
72887 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
72888 + sys_chroot((const char __force_user *)".");
72889 }
72890 diff --git a/init/do_mounts.h b/init/do_mounts.h
72891 index f5b978a..69dbfe8 100644
72892 --- a/init/do_mounts.h
72893 +++ b/init/do_mounts.h
72894 @@ -15,15 +15,15 @@ extern int root_mountflags;
72895
72896 static inline int create_dev(char *name, dev_t dev)
72897 {
72898 - sys_unlink(name);
72899 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
72900 + sys_unlink((char __force_user *)name);
72901 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
72902 }
72903
72904 #if BITS_PER_LONG == 32
72905 static inline u32 bstat(char *name)
72906 {
72907 struct stat64 stat;
72908 - if (sys_stat64(name, &stat) != 0)
72909 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
72910 return 0;
72911 if (!S_ISBLK(stat.st_mode))
72912 return 0;
72913 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
72914 static inline u32 bstat(char *name)
72915 {
72916 struct stat stat;
72917 - if (sys_newstat(name, &stat) != 0)
72918 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
72919 return 0;
72920 if (!S_ISBLK(stat.st_mode))
72921 return 0;
72922 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
72923 index a32ec1c..ac08811 100644
72924 --- a/init/do_mounts_initrd.c
72925 +++ b/init/do_mounts_initrd.c
72926 @@ -58,8 +58,8 @@ static void __init handle_initrd(void)
72927 create_dev("/dev/root.old", Root_RAM0);
72928 /* mount initrd on rootfs' /root */
72929 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
72930 - sys_mkdir("/old", 0700);
72931 - sys_chdir("/old");
72932 + sys_mkdir((const char __force_user *)"/old", 0700);
72933 + sys_chdir((const char __force_user *)"/old");
72934
72935 /* try loading default modules from initrd */
72936 load_default_modules();
72937 @@ -76,31 +76,31 @@ static void __init handle_initrd(void)
72938 current->flags &= ~PF_FREEZER_SKIP;
72939
72940 /* move initrd to rootfs' /old */
72941 - sys_mount("..", ".", NULL, MS_MOVE, NULL);
72942 + sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
72943 /* switch root and cwd back to / of rootfs */
72944 - sys_chroot("..");
72945 + sys_chroot((const char __force_user *)"..");
72946
72947 if (new_decode_dev(real_root_dev) == Root_RAM0) {
72948 - sys_chdir("/old");
72949 + sys_chdir((const char __force_user *)"/old");
72950 return;
72951 }
72952
72953 - sys_chdir("/");
72954 + sys_chdir((const char __force_user *)"/");
72955 ROOT_DEV = new_decode_dev(real_root_dev);
72956 mount_root();
72957
72958 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
72959 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
72960 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
72961 if (!error)
72962 printk("okay\n");
72963 else {
72964 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
72965 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
72966 if (error == -ENOENT)
72967 printk("/initrd does not exist. Ignored.\n");
72968 else
72969 printk("failed\n");
72970 printk(KERN_NOTICE "Unmounting old root\n");
72971 - sys_umount("/old", MNT_DETACH);
72972 + sys_umount((char __force_user *)"/old", MNT_DETACH);
72973 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
72974 if (fd < 0) {
72975 error = fd;
72976 @@ -123,11 +123,11 @@ int __init initrd_load(void)
72977 * mounted in the normal path.
72978 */
72979 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
72980 - sys_unlink("/initrd.image");
72981 + sys_unlink((const char __force_user *)"/initrd.image");
72982 handle_initrd();
72983 return 1;
72984 }
72985 }
72986 - sys_unlink("/initrd.image");
72987 + sys_unlink((const char __force_user *)"/initrd.image");
72988 return 0;
72989 }
72990 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
72991 index 8cb6db5..d729f50 100644
72992 --- a/init/do_mounts_md.c
72993 +++ b/init/do_mounts_md.c
72994 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
72995 partitioned ? "_d" : "", minor,
72996 md_setup_args[ent].device_names);
72997
72998 - fd = sys_open(name, 0, 0);
72999 + fd = sys_open((char __force_user *)name, 0, 0);
73000 if (fd < 0) {
73001 printk(KERN_ERR "md: open failed - cannot start "
73002 "array %s\n", name);
73003 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
73004 * array without it
73005 */
73006 sys_close(fd);
73007 - fd = sys_open(name, 0, 0);
73008 + fd = sys_open((char __force_user *)name, 0, 0);
73009 sys_ioctl(fd, BLKRRPART, 0);
73010 }
73011 sys_close(fd);
73012 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
73013
73014 wait_for_device_probe();
73015
73016 - fd = sys_open("/dev/md0", 0, 0);
73017 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
73018 if (fd >= 0) {
73019 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
73020 sys_close(fd);
73021 diff --git a/init/init_task.c b/init/init_task.c
73022 index ba0a7f36..2bcf1d5 100644
73023 --- a/init/init_task.c
73024 +++ b/init/init_task.c
73025 @@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
73026 * Initial thread structure. Alignment of this is handled by a special
73027 * linker map entry.
73028 */
73029 +#ifdef CONFIG_X86
73030 +union thread_union init_thread_union __init_task_data;
73031 +#else
73032 union thread_union init_thread_union __init_task_data =
73033 { INIT_THREAD_INFO(init_task) };
73034 +#endif
73035 diff --git a/init/initramfs.c b/init/initramfs.c
73036 index a67ef9d..3d88592 100644
73037 --- a/init/initramfs.c
73038 +++ b/init/initramfs.c
73039 @@ -84,7 +84,7 @@ static void __init free_hash(void)
73040 }
73041 }
73042
73043 -static long __init do_utime(char *filename, time_t mtime)
73044 +static long __init do_utime(char __force_user *filename, time_t mtime)
73045 {
73046 struct timespec t[2];
73047
73048 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
73049 struct dir_entry *de, *tmp;
73050 list_for_each_entry_safe(de, tmp, &dir_list, list) {
73051 list_del(&de->list);
73052 - do_utime(de->name, de->mtime);
73053 + do_utime((char __force_user *)de->name, de->mtime);
73054 kfree(de->name);
73055 kfree(de);
73056 }
73057 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
73058 if (nlink >= 2) {
73059 char *old = find_link(major, minor, ino, mode, collected);
73060 if (old)
73061 - return (sys_link(old, collected) < 0) ? -1 : 1;
73062 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
73063 }
73064 return 0;
73065 }
73066 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
73067 {
73068 struct stat st;
73069
73070 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
73071 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
73072 if (S_ISDIR(st.st_mode))
73073 - sys_rmdir(path);
73074 + sys_rmdir((char __force_user *)path);
73075 else
73076 - sys_unlink(path);
73077 + sys_unlink((char __force_user *)path);
73078 }
73079 }
73080
73081 @@ -315,7 +315,7 @@ static int __init do_name(void)
73082 int openflags = O_WRONLY|O_CREAT;
73083 if (ml != 1)
73084 openflags |= O_TRUNC;
73085 - wfd = sys_open(collected, openflags, mode);
73086 + wfd = sys_open((char __force_user *)collected, openflags, mode);
73087
73088 if (wfd >= 0) {
73089 sys_fchown(wfd, uid, gid);
73090 @@ -327,17 +327,17 @@ static int __init do_name(void)
73091 }
73092 }
73093 } else if (S_ISDIR(mode)) {
73094 - sys_mkdir(collected, mode);
73095 - sys_chown(collected, uid, gid);
73096 - sys_chmod(collected, mode);
73097 + sys_mkdir((char __force_user *)collected, mode);
73098 + sys_chown((char __force_user *)collected, uid, gid);
73099 + sys_chmod((char __force_user *)collected, mode);
73100 dir_add(collected, mtime);
73101 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
73102 S_ISFIFO(mode) || S_ISSOCK(mode)) {
73103 if (maybe_link() == 0) {
73104 - sys_mknod(collected, mode, rdev);
73105 - sys_chown(collected, uid, gid);
73106 - sys_chmod(collected, mode);
73107 - do_utime(collected, mtime);
73108 + sys_mknod((char __force_user *)collected, mode, rdev);
73109 + sys_chown((char __force_user *)collected, uid, gid);
73110 + sys_chmod((char __force_user *)collected, mode);
73111 + do_utime((char __force_user *)collected, mtime);
73112 }
73113 }
73114 return 0;
73115 @@ -346,15 +346,15 @@ static int __init do_name(void)
73116 static int __init do_copy(void)
73117 {
73118 if (count >= body_len) {
73119 - sys_write(wfd, victim, body_len);
73120 + sys_write(wfd, (char __force_user *)victim, body_len);
73121 sys_close(wfd);
73122 - do_utime(vcollected, mtime);
73123 + do_utime((char __force_user *)vcollected, mtime);
73124 kfree(vcollected);
73125 eat(body_len);
73126 state = SkipIt;
73127 return 0;
73128 } else {
73129 - sys_write(wfd, victim, count);
73130 + sys_write(wfd, (char __force_user *)victim, count);
73131 body_len -= count;
73132 eat(count);
73133 return 1;
73134 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
73135 {
73136 collected[N_ALIGN(name_len) + body_len] = '\0';
73137 clean_path(collected, 0);
73138 - sys_symlink(collected + N_ALIGN(name_len), collected);
73139 - sys_lchown(collected, uid, gid);
73140 - do_utime(collected, mtime);
73141 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
73142 + sys_lchown((char __force_user *)collected, uid, gid);
73143 + do_utime((char __force_user *)collected, mtime);
73144 state = SkipIt;
73145 next_state = Reset;
73146 return 0;
73147 diff --git a/init/main.c b/init/main.c
73148 index 63534a1..8abcaf1 100644
73149 --- a/init/main.c
73150 +++ b/init/main.c
73151 @@ -98,6 +98,8 @@ static inline void mark_rodata_ro(void) { }
73152 extern void tc_init(void);
73153 #endif
73154
73155 +extern void grsecurity_init(void);
73156 +
73157 /*
73158 * Debug helper: via this flag we know that we are in 'early bootup code'
73159 * where only the boot processor is running with IRQ disabled. This means
73160 @@ -151,6 +153,64 @@ static int __init set_reset_devices(char *str)
73161
73162 __setup("reset_devices", set_reset_devices);
73163
73164 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73165 +kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
73166 +static int __init setup_grsec_proc_gid(char *str)
73167 +{
73168 + grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
73169 + return 1;
73170 +}
73171 +__setup("grsec_proc_gid=", setup_grsec_proc_gid);
73172 +#endif
73173 +
73174 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
73175 +unsigned long pax_user_shadow_base __read_only = 1UL << TASK_SIZE_MAX_SHIFT;
73176 +EXPORT_SYMBOL(pax_user_shadow_base);
73177 +extern char pax_enter_kernel_user[];
73178 +extern char pax_exit_kernel_user[];
73179 +extern pgdval_t clone_pgd_mask;
73180 +#endif
73181 +
73182 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
73183 +static int __init setup_pax_nouderef(char *str)
73184 +{
73185 +#ifdef CONFIG_X86_32
73186 + unsigned int cpu;
73187 + struct desc_struct *gdt;
73188 +
73189 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
73190 + gdt = get_cpu_gdt_table(cpu);
73191 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
73192 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
73193 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
73194 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
73195 + }
73196 + loadsegment(ds, __KERNEL_DS);
73197 + loadsegment(es, __KERNEL_DS);
73198 + loadsegment(ss, __KERNEL_DS);
73199 +#else
73200 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
73201 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
73202 + clone_pgd_mask = ~(pgdval_t)0UL;
73203 + pax_user_shadow_base = 0UL;
73204 +#endif
73205 +
73206 + return 0;
73207 +}
73208 +early_param("pax_nouderef", setup_pax_nouderef);
73209 +#endif
73210 +
73211 +#ifdef CONFIG_PAX_SOFTMODE
73212 +int pax_softmode;
73213 +
73214 +static int __init setup_pax_softmode(char *str)
73215 +{
73216 + get_option(&str, &pax_softmode);
73217 + return 1;
73218 +}
73219 +__setup("pax_softmode=", setup_pax_softmode);
73220 +#endif
73221 +
73222 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
73223 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
73224 static const char *panic_later, *panic_param;
73225 @@ -683,6 +743,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
73226 {
73227 int count = preempt_count();
73228 int ret;
73229 + const char *msg1 = "", *msg2 = "";
73230
73231 if (initcall_debug)
73232 ret = do_one_initcall_debug(fn);
73233 @@ -695,15 +756,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
73234 sprintf(msgbuf, "error code %d ", ret);
73235
73236 if (preempt_count() != count) {
73237 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
73238 + msg1 = " preemption imbalance";
73239 preempt_count() = count;
73240 }
73241 if (irqs_disabled()) {
73242 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
73243 + msg2 = " disabled interrupts";
73244 local_irq_enable();
73245 }
73246 - if (msgbuf[0]) {
73247 - printk("initcall %pF returned with %s\n", fn, msgbuf);
73248 + if (msgbuf[0] || *msg1 || *msg2) {
73249 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
73250 }
73251
73252 return ret;
73253 @@ -757,8 +818,14 @@ static void __init do_initcall_level(int level)
73254 level, level,
73255 &repair_env_string);
73256
73257 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
73258 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
73259 do_one_initcall(*fn);
73260 +
73261 +#ifdef LATENT_ENTROPY_PLUGIN
73262 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73263 +#endif
73264 +
73265 + }
73266 }
73267
73268 static void __init do_initcalls(void)
73269 @@ -792,8 +859,14 @@ static void __init do_pre_smp_initcalls(void)
73270 {
73271 initcall_t *fn;
73272
73273 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
73274 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
73275 do_one_initcall(*fn);
73276 +
73277 +#ifdef LATENT_ENTROPY_PLUGIN
73278 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73279 +#endif
73280 +
73281 + }
73282 }
73283
73284 /*
73285 @@ -890,7 +963,7 @@ static noinline void __init kernel_init_freeable(void)
73286 do_basic_setup();
73287
73288 /* Open the /dev/console on the rootfs, this should never fail */
73289 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
73290 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
73291 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
73292
73293 (void) sys_dup(0);
73294 @@ -903,11 +976,13 @@ static noinline void __init kernel_init_freeable(void)
73295 if (!ramdisk_execute_command)
73296 ramdisk_execute_command = "/init";
73297
73298 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
73299 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
73300 ramdisk_execute_command = NULL;
73301 prepare_namespace();
73302 }
73303
73304 + grsecurity_init();
73305 +
73306 /*
73307 * Ok, we have completed the initial bootup, and
73308 * we're essentially up and running. Get rid of the
73309 diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
73310 index 130dfec..cc88451 100644
73311 --- a/ipc/ipc_sysctl.c
73312 +++ b/ipc/ipc_sysctl.c
73313 @@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
73314 static int proc_ipc_dointvec(ctl_table *table, int write,
73315 void __user *buffer, size_t *lenp, loff_t *ppos)
73316 {
73317 - struct ctl_table ipc_table;
73318 + ctl_table_no_const ipc_table;
73319
73320 memcpy(&ipc_table, table, sizeof(ipc_table));
73321 ipc_table.data = get_ipc(table);
73322 @@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
73323 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
73324 void __user *buffer, size_t *lenp, loff_t *ppos)
73325 {
73326 - struct ctl_table ipc_table;
73327 + ctl_table_no_const ipc_table;
73328
73329 memcpy(&ipc_table, table, sizeof(ipc_table));
73330 ipc_table.data = get_ipc(table);
73331 @@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
73332 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73333 void __user *buffer, size_t *lenp, loff_t *ppos)
73334 {
73335 - struct ctl_table ipc_table;
73336 + ctl_table_no_const ipc_table;
73337 size_t lenp_bef = *lenp;
73338 int rc;
73339
73340 @@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73341 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
73342 void __user *buffer, size_t *lenp, loff_t *ppos)
73343 {
73344 - struct ctl_table ipc_table;
73345 + ctl_table_no_const ipc_table;
73346 memcpy(&ipc_table, table, sizeof(ipc_table));
73347 ipc_table.data = get_ipc(table);
73348
73349 @@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
73350 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
73351 void __user *buffer, size_t *lenp, loff_t *ppos)
73352 {
73353 - struct ctl_table ipc_table;
73354 + ctl_table_no_const ipc_table;
73355 size_t lenp_bef = *lenp;
73356 int oldval;
73357 int rc;
73358 diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
73359 index 383d638..943fdbb 100644
73360 --- a/ipc/mq_sysctl.c
73361 +++ b/ipc/mq_sysctl.c
73362 @@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
73363 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
73364 void __user *buffer, size_t *lenp, loff_t *ppos)
73365 {
73366 - struct ctl_table mq_table;
73367 + ctl_table_no_const mq_table;
73368 memcpy(&mq_table, table, sizeof(mq_table));
73369 mq_table.data = get_mq(table);
73370
73371 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
73372 index e4e47f6..a85e0ad 100644
73373 --- a/ipc/mqueue.c
73374 +++ b/ipc/mqueue.c
73375 @@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
73376 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
73377 info->attr.mq_msgsize);
73378
73379 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
73380 spin_lock(&mq_lock);
73381 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
73382 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
73383 diff --git a/ipc/msg.c b/ipc/msg.c
73384 index fede1d0..9778e0f8 100644
73385 --- a/ipc/msg.c
73386 +++ b/ipc/msg.c
73387 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
73388 return security_msg_queue_associate(msq, msgflg);
73389 }
73390
73391 +static struct ipc_ops msg_ops = {
73392 + .getnew = newque,
73393 + .associate = msg_security,
73394 + .more_checks = NULL
73395 +};
73396 +
73397 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
73398 {
73399 struct ipc_namespace *ns;
73400 - struct ipc_ops msg_ops;
73401 struct ipc_params msg_params;
73402
73403 ns = current->nsproxy->ipc_ns;
73404
73405 - msg_ops.getnew = newque;
73406 - msg_ops.associate = msg_security;
73407 - msg_ops.more_checks = NULL;
73408 -
73409 msg_params.key = key;
73410 msg_params.flg = msgflg;
73411
73412 diff --git a/ipc/sem.c b/ipc/sem.c
73413 index 58d31f1..cce7a55 100644
73414 --- a/ipc/sem.c
73415 +++ b/ipc/sem.c
73416 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
73417 return 0;
73418 }
73419
73420 +static struct ipc_ops sem_ops = {
73421 + .getnew = newary,
73422 + .associate = sem_security,
73423 + .more_checks = sem_more_checks
73424 +};
73425 +
73426 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73427 {
73428 struct ipc_namespace *ns;
73429 - struct ipc_ops sem_ops;
73430 struct ipc_params sem_params;
73431
73432 ns = current->nsproxy->ipc_ns;
73433 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73434 if (nsems < 0 || nsems > ns->sc_semmsl)
73435 return -EINVAL;
73436
73437 - sem_ops.getnew = newary;
73438 - sem_ops.associate = sem_security;
73439 - sem_ops.more_checks = sem_more_checks;
73440 -
73441 sem_params.key = key;
73442 sem_params.flg = semflg;
73443 sem_params.u.nsems = nsems;
73444 diff --git a/ipc/shm.c b/ipc/shm.c
73445 index 34af1fe..85fc1aa 100644
73446 --- a/ipc/shm.c
73447 +++ b/ipc/shm.c
73448 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
73449 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73450 #endif
73451
73452 +#ifdef CONFIG_GRKERNSEC
73453 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73454 + const time_t shm_createtime, const kuid_t cuid,
73455 + const int shmid);
73456 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73457 + const time_t shm_createtime);
73458 +#endif
73459 +
73460 void shm_init_ns(struct ipc_namespace *ns)
73461 {
73462 ns->shm_ctlmax = SHMMAX;
73463 @@ -525,6 +533,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
73464 shp->shm_lprid = 0;
73465 shp->shm_atim = shp->shm_dtim = 0;
73466 shp->shm_ctim = get_seconds();
73467 +#ifdef CONFIG_GRKERNSEC
73468 + {
73469 + struct timespec timeval;
73470 + do_posix_clock_monotonic_gettime(&timeval);
73471 +
73472 + shp->shm_createtime = timeval.tv_sec;
73473 + }
73474 +#endif
73475 shp->shm_segsz = size;
73476 shp->shm_nattch = 0;
73477 shp->shm_file = file;
73478 @@ -576,18 +592,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
73479 return 0;
73480 }
73481
73482 +static struct ipc_ops shm_ops = {
73483 + .getnew = newseg,
73484 + .associate = shm_security,
73485 + .more_checks = shm_more_checks
73486 +};
73487 +
73488 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
73489 {
73490 struct ipc_namespace *ns;
73491 - struct ipc_ops shm_ops;
73492 struct ipc_params shm_params;
73493
73494 ns = current->nsproxy->ipc_ns;
73495
73496 - shm_ops.getnew = newseg;
73497 - shm_ops.associate = shm_security;
73498 - shm_ops.more_checks = shm_more_checks;
73499 -
73500 shm_params.key = key;
73501 shm_params.flg = shmflg;
73502 shm_params.u.size = size;
73503 @@ -1008,6 +1025,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
73504 f_mode = FMODE_READ | FMODE_WRITE;
73505 }
73506 if (shmflg & SHM_EXEC) {
73507 +
73508 +#ifdef CONFIG_PAX_MPROTECT
73509 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
73510 + goto out;
73511 +#endif
73512 +
73513 prot |= PROT_EXEC;
73514 acc_mode |= S_IXUGO;
73515 }
73516 @@ -1031,9 +1054,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
73517 if (err)
73518 goto out_unlock;
73519
73520 +#ifdef CONFIG_GRKERNSEC
73521 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
73522 + shp->shm_perm.cuid, shmid) ||
73523 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
73524 + err = -EACCES;
73525 + goto out_unlock;
73526 + }
73527 +#endif
73528 +
73529 path = shp->shm_file->f_path;
73530 path_get(&path);
73531 shp->shm_nattch++;
73532 +#ifdef CONFIG_GRKERNSEC
73533 + shp->shm_lapid = current->pid;
73534 +#endif
73535 size = i_size_read(path.dentry->d_inode);
73536 shm_unlock(shp);
73537
73538 diff --git a/kernel/acct.c b/kernel/acct.c
73539 index b9bd7f0..1762b4a 100644
73540 --- a/kernel/acct.c
73541 +++ b/kernel/acct.c
73542 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
73543 */
73544 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
73545 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
73546 - file->f_op->write(file, (char *)&ac,
73547 + file->f_op->write(file, (char __force_user *)&ac,
73548 sizeof(acct_t), &file->f_pos);
73549 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
73550 set_fs(fs);
73551 diff --git a/kernel/audit.c b/kernel/audit.c
73552 index d596e53..dbef3c3 100644
73553 --- a/kernel/audit.c
73554 +++ b/kernel/audit.c
73555 @@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
73556 3) suppressed due to audit_rate_limit
73557 4) suppressed due to audit_backlog_limit
73558 */
73559 -static atomic_t audit_lost = ATOMIC_INIT(0);
73560 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
73561
73562 /* The netlink socket. */
73563 static struct sock *audit_sock;
73564 @@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
73565 unsigned long now;
73566 int print;
73567
73568 - atomic_inc(&audit_lost);
73569 + atomic_inc_unchecked(&audit_lost);
73570
73571 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
73572
73573 @@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
73574 printk(KERN_WARNING
73575 "audit: audit_lost=%d audit_rate_limit=%d "
73576 "audit_backlog_limit=%d\n",
73577 - atomic_read(&audit_lost),
73578 + atomic_read_unchecked(&audit_lost),
73579 audit_rate_limit,
73580 audit_backlog_limit);
73581 audit_panic(message);
73582 @@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
73583 status_set.pid = audit_pid;
73584 status_set.rate_limit = audit_rate_limit;
73585 status_set.backlog_limit = audit_backlog_limit;
73586 - status_set.lost = atomic_read(&audit_lost);
73587 + status_set.lost = atomic_read_unchecked(&audit_lost);
73588 status_set.backlog = skb_queue_len(&audit_skb_queue);
73589 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
73590 &status_set, sizeof(status_set));
73591 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
73592 index a371f85..da826c1 100644
73593 --- a/kernel/auditsc.c
73594 +++ b/kernel/auditsc.c
73595 @@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
73596 }
73597
73598 /* global counter which is incremented every time something logs in */
73599 -static atomic_t session_id = ATOMIC_INIT(0);
73600 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
73601
73602 /**
73603 * audit_set_loginuid - set current task's audit_context loginuid
73604 @@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
73605 return -EPERM;
73606 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
73607
73608 - sessionid = atomic_inc_return(&session_id);
73609 + sessionid = atomic_inc_return_unchecked(&session_id);
73610 if (context && context->in_syscall) {
73611 struct audit_buffer *ab;
73612
73613 diff --git a/kernel/capability.c b/kernel/capability.c
73614 index f6c2ce5..982c0f9 100644
73615 --- a/kernel/capability.c
73616 +++ b/kernel/capability.c
73617 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
73618 * before modification is attempted and the application
73619 * fails.
73620 */
73621 + if (tocopy > ARRAY_SIZE(kdata))
73622 + return -EFAULT;
73623 +
73624 if (copy_to_user(dataptr, kdata, tocopy
73625 * sizeof(struct __user_cap_data_struct))) {
73626 return -EFAULT;
73627 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
73628 int ret;
73629
73630 rcu_read_lock();
73631 - ret = security_capable(__task_cred(t), ns, cap);
73632 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
73633 + gr_task_is_capable(t, __task_cred(t), cap);
73634 rcu_read_unlock();
73635
73636 - return (ret == 0);
73637 + return ret;
73638 }
73639
73640 /**
73641 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
73642 int ret;
73643
73644 rcu_read_lock();
73645 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
73646 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
73647 rcu_read_unlock();
73648
73649 - return (ret == 0);
73650 + return ret;
73651 }
73652
73653 /**
73654 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
73655 BUG();
73656 }
73657
73658 - if (security_capable(current_cred(), ns, cap) == 0) {
73659 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
73660 current->flags |= PF_SUPERPRIV;
73661 return true;
73662 }
73663 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
73664 }
73665 EXPORT_SYMBOL(ns_capable);
73666
73667 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
73668 +{
73669 + if (unlikely(!cap_valid(cap))) {
73670 + printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
73671 + BUG();
73672 + }
73673 +
73674 + if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
73675 + current->flags |= PF_SUPERPRIV;
73676 + return true;
73677 + }
73678 + return false;
73679 +}
73680 +EXPORT_SYMBOL(ns_capable_nolog);
73681 +
73682 /**
73683 * file_ns_capable - Determine if the file's opener had a capability in effect
73684 * @file: The file we want to check
73685 @@ -432,6 +451,12 @@ bool capable(int cap)
73686 }
73687 EXPORT_SYMBOL(capable);
73688
73689 +bool capable_nolog(int cap)
73690 +{
73691 + return ns_capable_nolog(&init_user_ns, cap);
73692 +}
73693 +EXPORT_SYMBOL(capable_nolog);
73694 +
73695 /**
73696 * nsown_capable - Check superior capability to one's own user_ns
73697 * @cap: The capability in question
73698 @@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
73699
73700 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
73701 }
73702 +
73703 +bool inode_capable_nolog(const struct inode *inode, int cap)
73704 +{
73705 + struct user_namespace *ns = current_user_ns();
73706 +
73707 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
73708 +}
73709 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
73710 index ba1f977..f840d9c 100644
73711 --- a/kernel/cgroup.c
73712 +++ b/kernel/cgroup.c
73713 @@ -5569,7 +5569,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
73714 struct css_set *cg = link->cg;
73715 struct task_struct *task;
73716 int count = 0;
73717 - seq_printf(seq, "css_set %p\n", cg);
73718 + seq_printf(seq, "css_set %pK\n", cg);
73719 list_for_each_entry(task, &cg->tasks, cg_list) {
73720 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
73721 seq_puts(seq, " ...\n");
73722 diff --git a/kernel/compat.c b/kernel/compat.c
73723 index 19971d8..02fe2df 100644
73724 --- a/kernel/compat.c
73725 +++ b/kernel/compat.c
73726 @@ -13,6 +13,7 @@
73727
73728 #include <linux/linkage.h>
73729 #include <linux/compat.h>
73730 +#include <linux/module.h>
73731 #include <linux/errno.h>
73732 #include <linux/time.h>
73733 #include <linux/signal.h>
73734 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
73735 mm_segment_t oldfs;
73736 long ret;
73737
73738 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
73739 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
73740 oldfs = get_fs();
73741 set_fs(KERNEL_DS);
73742 ret = hrtimer_nanosleep_restart(restart);
73743 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
73744 oldfs = get_fs();
73745 set_fs(KERNEL_DS);
73746 ret = hrtimer_nanosleep(&tu,
73747 - rmtp ? (struct timespec __user *)&rmt : NULL,
73748 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
73749 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
73750 set_fs(oldfs);
73751
73752 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
73753 mm_segment_t old_fs = get_fs();
73754
73755 set_fs(KERNEL_DS);
73756 - ret = sys_sigpending((old_sigset_t __user *) &s);
73757 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
73758 set_fs(old_fs);
73759 if (ret == 0)
73760 ret = put_user(s, set);
73761 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
73762 mm_segment_t old_fs = get_fs();
73763
73764 set_fs(KERNEL_DS);
73765 - ret = sys_old_getrlimit(resource, &r);
73766 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
73767 set_fs(old_fs);
73768
73769 if (!ret) {
73770 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
73771 mm_segment_t old_fs = get_fs();
73772
73773 set_fs(KERNEL_DS);
73774 - ret = sys_getrusage(who, (struct rusage __user *) &r);
73775 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
73776 set_fs(old_fs);
73777
73778 if (ret)
73779 @@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
73780 set_fs (KERNEL_DS);
73781 ret = sys_wait4(pid,
73782 (stat_addr ?
73783 - (unsigned int __user *) &status : NULL),
73784 - options, (struct rusage __user *) &r);
73785 + (unsigned int __force_user *) &status : NULL),
73786 + options, (struct rusage __force_user *) &r);
73787 set_fs (old_fs);
73788
73789 if (ret > 0) {
73790 @@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
73791 memset(&info, 0, sizeof(info));
73792
73793 set_fs(KERNEL_DS);
73794 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
73795 - uru ? (struct rusage __user *)&ru : NULL);
73796 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
73797 + uru ? (struct rusage __force_user *)&ru : NULL);
73798 set_fs(old_fs);
73799
73800 if ((ret < 0) || (info.si_signo == 0))
73801 @@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
73802 oldfs = get_fs();
73803 set_fs(KERNEL_DS);
73804 err = sys_timer_settime(timer_id, flags,
73805 - (struct itimerspec __user *) &newts,
73806 - (struct itimerspec __user *) &oldts);
73807 + (struct itimerspec __force_user *) &newts,
73808 + (struct itimerspec __force_user *) &oldts);
73809 set_fs(oldfs);
73810 if (!err && old && put_compat_itimerspec(old, &oldts))
73811 return -EFAULT;
73812 @@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
73813 oldfs = get_fs();
73814 set_fs(KERNEL_DS);
73815 err = sys_timer_gettime(timer_id,
73816 - (struct itimerspec __user *) &ts);
73817 + (struct itimerspec __force_user *) &ts);
73818 set_fs(oldfs);
73819 if (!err && put_compat_itimerspec(setting, &ts))
73820 return -EFAULT;
73821 @@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
73822 oldfs = get_fs();
73823 set_fs(KERNEL_DS);
73824 err = sys_clock_settime(which_clock,
73825 - (struct timespec __user *) &ts);
73826 + (struct timespec __force_user *) &ts);
73827 set_fs(oldfs);
73828 return err;
73829 }
73830 @@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
73831 oldfs = get_fs();
73832 set_fs(KERNEL_DS);
73833 err = sys_clock_gettime(which_clock,
73834 - (struct timespec __user *) &ts);
73835 + (struct timespec __force_user *) &ts);
73836 set_fs(oldfs);
73837 if (!err && put_compat_timespec(&ts, tp))
73838 return -EFAULT;
73839 @@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
73840
73841 oldfs = get_fs();
73842 set_fs(KERNEL_DS);
73843 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
73844 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
73845 set_fs(oldfs);
73846
73847 err = compat_put_timex(utp, &txc);
73848 @@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
73849 oldfs = get_fs();
73850 set_fs(KERNEL_DS);
73851 err = sys_clock_getres(which_clock,
73852 - (struct timespec __user *) &ts);
73853 + (struct timespec __force_user *) &ts);
73854 set_fs(oldfs);
73855 if (!err && tp && put_compat_timespec(&ts, tp))
73856 return -EFAULT;
73857 @@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
73858 long err;
73859 mm_segment_t oldfs;
73860 struct timespec tu;
73861 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
73862 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
73863
73864 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
73865 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
73866 oldfs = get_fs();
73867 set_fs(KERNEL_DS);
73868 err = clock_nanosleep_restart(restart);
73869 @@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
73870 oldfs = get_fs();
73871 set_fs(KERNEL_DS);
73872 err = sys_clock_nanosleep(which_clock, flags,
73873 - (struct timespec __user *) &in,
73874 - (struct timespec __user *) &out);
73875 + (struct timespec __force_user *) &in,
73876 + (struct timespec __force_user *) &out);
73877 set_fs(oldfs);
73878
73879 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
73880 diff --git a/kernel/configs.c b/kernel/configs.c
73881 index 42e8fa0..9e7406b 100644
73882 --- a/kernel/configs.c
73883 +++ b/kernel/configs.c
73884 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
73885 struct proc_dir_entry *entry;
73886
73887 /* create the current config file */
73888 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
73889 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
73890 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
73891 + &ikconfig_file_ops);
73892 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73893 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
73894 + &ikconfig_file_ops);
73895 +#endif
73896 +#else
73897 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
73898 &ikconfig_file_ops);
73899 +#endif
73900 +
73901 if (!entry)
73902 return -ENOMEM;
73903
73904 diff --git a/kernel/cred.c b/kernel/cred.c
73905 index e0573a4..3874e41 100644
73906 --- a/kernel/cred.c
73907 +++ b/kernel/cred.c
73908 @@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
73909 validate_creds(cred);
73910 alter_cred_subscribers(cred, -1);
73911 put_cred(cred);
73912 +
73913 +#ifdef CONFIG_GRKERNSEC_SETXID
73914 + cred = (struct cred *) tsk->delayed_cred;
73915 + if (cred != NULL) {
73916 + tsk->delayed_cred = NULL;
73917 + validate_creds(cred);
73918 + alter_cred_subscribers(cred, -1);
73919 + put_cred(cred);
73920 + }
73921 +#endif
73922 }
73923
73924 /**
73925 @@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
73926 * Always returns 0 thus allowing this function to be tail-called at the end
73927 * of, say, sys_setgid().
73928 */
73929 -int commit_creds(struct cred *new)
73930 +static int __commit_creds(struct cred *new)
73931 {
73932 struct task_struct *task = current;
73933 const struct cred *old = task->real_cred;
73934 @@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
73935
73936 get_cred(new); /* we will require a ref for the subj creds too */
73937
73938 + gr_set_role_label(task, new->uid, new->gid);
73939 +
73940 /* dumpability changes */
73941 if (!uid_eq(old->euid, new->euid) ||
73942 !gid_eq(old->egid, new->egid) ||
73943 @@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
73944 put_cred(old);
73945 return 0;
73946 }
73947 +#ifdef CONFIG_GRKERNSEC_SETXID
73948 +extern int set_user(struct cred *new);
73949 +
73950 +void gr_delayed_cred_worker(void)
73951 +{
73952 + const struct cred *new = current->delayed_cred;
73953 + struct cred *ncred;
73954 +
73955 + current->delayed_cred = NULL;
73956 +
73957 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
73958 + // from doing get_cred on it when queueing this
73959 + put_cred(new);
73960 + return;
73961 + } else if (new == NULL)
73962 + return;
73963 +
73964 + ncred = prepare_creds();
73965 + if (!ncred)
73966 + goto die;
73967 + // uids
73968 + ncred->uid = new->uid;
73969 + ncred->euid = new->euid;
73970 + ncred->suid = new->suid;
73971 + ncred->fsuid = new->fsuid;
73972 + // gids
73973 + ncred->gid = new->gid;
73974 + ncred->egid = new->egid;
73975 + ncred->sgid = new->sgid;
73976 + ncred->fsgid = new->fsgid;
73977 + // groups
73978 + if (set_groups(ncred, new->group_info) < 0) {
73979 + abort_creds(ncred);
73980 + goto die;
73981 + }
73982 + // caps
73983 + ncred->securebits = new->securebits;
73984 + ncred->cap_inheritable = new->cap_inheritable;
73985 + ncred->cap_permitted = new->cap_permitted;
73986 + ncred->cap_effective = new->cap_effective;
73987 + ncred->cap_bset = new->cap_bset;
73988 +
73989 + if (set_user(ncred)) {
73990 + abort_creds(ncred);
73991 + goto die;
73992 + }
73993 +
73994 + // from doing get_cred on it when queueing this
73995 + put_cred(new);
73996 +
73997 + __commit_creds(ncred);
73998 + return;
73999 +die:
74000 + // from doing get_cred on it when queueing this
74001 + put_cred(new);
74002 + do_group_exit(SIGKILL);
74003 +}
74004 +#endif
74005 +
74006 +int commit_creds(struct cred *new)
74007 +{
74008 +#ifdef CONFIG_GRKERNSEC_SETXID
74009 + int ret;
74010 + int schedule_it = 0;
74011 + struct task_struct *t;
74012 +
74013 + /* we won't get called with tasklist_lock held for writing
74014 + and interrupts disabled as the cred struct in that case is
74015 + init_cred
74016 + */
74017 + if (grsec_enable_setxid && !current_is_single_threaded() &&
74018 + uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
74019 + !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
74020 + schedule_it = 1;
74021 + }
74022 + ret = __commit_creds(new);
74023 + if (schedule_it) {
74024 + rcu_read_lock();
74025 + read_lock(&tasklist_lock);
74026 + for (t = next_thread(current); t != current;
74027 + t = next_thread(t)) {
74028 + if (t->delayed_cred == NULL) {
74029 + t->delayed_cred = get_cred(new);
74030 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
74031 + set_tsk_need_resched(t);
74032 + }
74033 + }
74034 + read_unlock(&tasklist_lock);
74035 + rcu_read_unlock();
74036 + }
74037 + return ret;
74038 +#else
74039 + return __commit_creds(new);
74040 +#endif
74041 +}
74042 +
74043 EXPORT_SYMBOL(commit_creds);
74044
74045 /**
74046 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
74047 index c26278f..e323fb8 100644
74048 --- a/kernel/debug/debug_core.c
74049 +++ b/kernel/debug/debug_core.c
74050 @@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
74051 */
74052 static atomic_t masters_in_kgdb;
74053 static atomic_t slaves_in_kgdb;
74054 -static atomic_t kgdb_break_tasklet_var;
74055 +static atomic_unchecked_t kgdb_break_tasklet_var;
74056 atomic_t kgdb_setting_breakpoint;
74057
74058 struct task_struct *kgdb_usethread;
74059 @@ -133,7 +133,7 @@ int kgdb_single_step;
74060 static pid_t kgdb_sstep_pid;
74061
74062 /* to keep track of the CPU which is doing the single stepping*/
74063 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74064 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74065
74066 /*
74067 * If you are debugging a problem where roundup (the collection of
74068 @@ -541,7 +541,7 @@ return_normal:
74069 * kernel will only try for the value of sstep_tries before
74070 * giving up and continuing on.
74071 */
74072 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
74073 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
74074 (kgdb_info[cpu].task &&
74075 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
74076 atomic_set(&kgdb_active, -1);
74077 @@ -635,8 +635,8 @@ cpu_master_loop:
74078 }
74079
74080 kgdb_restore:
74081 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
74082 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
74083 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
74084 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
74085 if (kgdb_info[sstep_cpu].task)
74086 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
74087 else
74088 @@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
74089 static void kgdb_tasklet_bpt(unsigned long ing)
74090 {
74091 kgdb_breakpoint();
74092 - atomic_set(&kgdb_break_tasklet_var, 0);
74093 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
74094 }
74095
74096 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
74097
74098 void kgdb_schedule_breakpoint(void)
74099 {
74100 - if (atomic_read(&kgdb_break_tasklet_var) ||
74101 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
74102 atomic_read(&kgdb_active) != -1 ||
74103 atomic_read(&kgdb_setting_breakpoint))
74104 return;
74105 - atomic_inc(&kgdb_break_tasklet_var);
74106 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
74107 tasklet_schedule(&kgdb_tasklet_breakpoint);
74108 }
74109 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
74110 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
74111 index 00eb8f7..d7e3244 100644
74112 --- a/kernel/debug/kdb/kdb_main.c
74113 +++ b/kernel/debug/kdb/kdb_main.c
74114 @@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
74115 continue;
74116
74117 kdb_printf("%-20s%8u 0x%p ", mod->name,
74118 - mod->core_size, (void *)mod);
74119 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
74120 #ifdef CONFIG_MODULE_UNLOAD
74121 kdb_printf("%4ld ", module_refcount(mod));
74122 #endif
74123 @@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
74124 kdb_printf(" (Loading)");
74125 else
74126 kdb_printf(" (Live)");
74127 - kdb_printf(" 0x%p", mod->module_core);
74128 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74129
74130 #ifdef CONFIG_MODULE_UNLOAD
74131 {
74132 diff --git a/kernel/events/core.c b/kernel/events/core.c
74133 index 9fcb094..5c06aeb 100644
74134 --- a/kernel/events/core.c
74135 +++ b/kernel/events/core.c
74136 @@ -155,7 +155,11 @@ static struct srcu_struct pmus_srcu;
74137 * 1 - disallow cpu events for unpriv
74138 * 2 - disallow kernel profiling for unpriv
74139 */
74140 -int sysctl_perf_event_paranoid __read_mostly = 1;
74141 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74142 +int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
74143 +#else
74144 +int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
74145 +#endif
74146
74147 /* Minimum for 512 kiB + 1 user control page */
74148 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
74149 @@ -182,7 +186,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
74150 return 0;
74151 }
74152
74153 -static atomic64_t perf_event_id;
74154 +static atomic64_unchecked_t perf_event_id;
74155
74156 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
74157 enum event_type_t event_type);
74158 @@ -2677,7 +2681,7 @@ static void __perf_event_read(void *info)
74159
74160 static inline u64 perf_event_count(struct perf_event *event)
74161 {
74162 - return local64_read(&event->count) + atomic64_read(&event->child_count);
74163 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
74164 }
74165
74166 static u64 perf_event_read(struct perf_event *event)
74167 @@ -3007,9 +3011,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
74168 mutex_lock(&event->child_mutex);
74169 total += perf_event_read(event);
74170 *enabled += event->total_time_enabled +
74171 - atomic64_read(&event->child_total_time_enabled);
74172 + atomic64_read_unchecked(&event->child_total_time_enabled);
74173 *running += event->total_time_running +
74174 - atomic64_read(&event->child_total_time_running);
74175 + atomic64_read_unchecked(&event->child_total_time_running);
74176
74177 list_for_each_entry(child, &event->child_list, child_list) {
74178 total += perf_event_read(child);
74179 @@ -3412,10 +3416,10 @@ void perf_event_update_userpage(struct perf_event *event)
74180 userpg->offset -= local64_read(&event->hw.prev_count);
74181
74182 userpg->time_enabled = enabled +
74183 - atomic64_read(&event->child_total_time_enabled);
74184 + atomic64_read_unchecked(&event->child_total_time_enabled);
74185
74186 userpg->time_running = running +
74187 - atomic64_read(&event->child_total_time_running);
74188 + atomic64_read_unchecked(&event->child_total_time_running);
74189
74190 arch_perf_update_userpage(userpg, now);
74191
74192 @@ -3974,11 +3978,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74193 values[n++] = perf_event_count(event);
74194 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74195 values[n++] = enabled +
74196 - atomic64_read(&event->child_total_time_enabled);
74197 + atomic64_read_unchecked(&event->child_total_time_enabled);
74198 }
74199 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74200 values[n++] = running +
74201 - atomic64_read(&event->child_total_time_running);
74202 + atomic64_read_unchecked(&event->child_total_time_running);
74203 }
74204 if (read_format & PERF_FORMAT_ID)
74205 values[n++] = primary_event_id(event);
74206 @@ -4726,12 +4730,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74207 * need to add enough zero bytes after the string to handle
74208 * the 64bit alignment we do later.
74209 */
74210 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74211 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
74212 if (!buf) {
74213 name = strncpy(tmp, "//enomem", sizeof(tmp));
74214 goto got_name;
74215 }
74216 - name = d_path(&file->f_path, buf, PATH_MAX);
74217 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74218 if (IS_ERR(name)) {
74219 name = strncpy(tmp, "//toolong", sizeof(tmp));
74220 goto got_name;
74221 @@ -6167,7 +6171,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
74222 event->parent = parent_event;
74223
74224 event->ns = get_pid_ns(task_active_pid_ns(current));
74225 - event->id = atomic64_inc_return(&perf_event_id);
74226 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
74227
74228 event->state = PERF_EVENT_STATE_INACTIVE;
74229
74230 @@ -6795,10 +6799,10 @@ static void sync_child_event(struct perf_event *child_event,
74231 /*
74232 * Add back the child's count to the parent's count:
74233 */
74234 - atomic64_add(child_val, &parent_event->child_count);
74235 - atomic64_add(child_event->total_time_enabled,
74236 + atomic64_add_unchecked(child_val, &parent_event->child_count);
74237 + atomic64_add_unchecked(child_event->total_time_enabled,
74238 &parent_event->child_total_time_enabled);
74239 - atomic64_add(child_event->total_time_running,
74240 + atomic64_add_unchecked(child_event->total_time_running,
74241 &parent_event->child_total_time_running);
74242
74243 /*
74244 diff --git a/kernel/exit.c b/kernel/exit.c
74245 index 60bc027..ca6d727 100644
74246 --- a/kernel/exit.c
74247 +++ b/kernel/exit.c
74248 @@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
74249 struct task_struct *leader;
74250 int zap_leader;
74251 repeat:
74252 +#ifdef CONFIG_NET
74253 + gr_del_task_from_ip_table(p);
74254 +#endif
74255 +
74256 /* don't need to get the RCU readlock here - the process is dead and
74257 * can't be modifying its own credentials. But shut RCU-lockdep up */
74258 rcu_read_lock();
74259 @@ -340,7 +344,7 @@ int allow_signal(int sig)
74260 * know it'll be handled, so that they don't get converted to
74261 * SIGKILL or just silently dropped.
74262 */
74263 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
74264 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
74265 recalc_sigpending();
74266 spin_unlock_irq(&current->sighand->siglock);
74267 return 0;
74268 @@ -710,6 +714,8 @@ void do_exit(long code)
74269 struct task_struct *tsk = current;
74270 int group_dead;
74271
74272 + set_fs(USER_DS);
74273 +
74274 profile_task_exit(tsk);
74275
74276 WARN_ON(blk_needs_flush_plug(tsk));
74277 @@ -726,7 +732,6 @@ void do_exit(long code)
74278 * mm_release()->clear_child_tid() from writing to a user-controlled
74279 * kernel address.
74280 */
74281 - set_fs(USER_DS);
74282
74283 ptrace_event(PTRACE_EVENT_EXIT, code);
74284
74285 @@ -785,6 +790,9 @@ void do_exit(long code)
74286 tsk->exit_code = code;
74287 taskstats_exit(tsk, group_dead);
74288
74289 + gr_acl_handle_psacct(tsk, code);
74290 + gr_acl_handle_exit();
74291 +
74292 exit_mm(tsk);
74293
74294 if (group_dead)
74295 @@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
74296 * Take down every thread in the group. This is called by fatal signals
74297 * as well as by sys_exit_group (below).
74298 */
74299 -void
74300 +__noreturn void
74301 do_group_exit(int exit_code)
74302 {
74303 struct signal_struct *sig = current->signal;
74304 diff --git a/kernel/fork.c b/kernel/fork.c
74305 index 1766d32..c0e44e2 100644
74306 --- a/kernel/fork.c
74307 +++ b/kernel/fork.c
74308 @@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
74309 *stackend = STACK_END_MAGIC; /* for overflow detection */
74310
74311 #ifdef CONFIG_CC_STACKPROTECTOR
74312 - tsk->stack_canary = get_random_int();
74313 + tsk->stack_canary = pax_get_random_long();
74314 #endif
74315
74316 /*
74317 @@ -344,13 +344,81 @@ free_tsk:
74318 }
74319
74320 #ifdef CONFIG_MMU
74321 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
74322 +{
74323 + struct vm_area_struct *tmp;
74324 + unsigned long charge;
74325 + struct mempolicy *pol;
74326 + struct file *file;
74327 +
74328 + charge = 0;
74329 + if (mpnt->vm_flags & VM_ACCOUNT) {
74330 + unsigned long len = vma_pages(mpnt);
74331 +
74332 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74333 + goto fail_nomem;
74334 + charge = len;
74335 + }
74336 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74337 + if (!tmp)
74338 + goto fail_nomem;
74339 + *tmp = *mpnt;
74340 + tmp->vm_mm = mm;
74341 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
74342 + pol = mpol_dup(vma_policy(mpnt));
74343 + if (IS_ERR(pol))
74344 + goto fail_nomem_policy;
74345 + vma_set_policy(tmp, pol);
74346 + if (anon_vma_fork(tmp, mpnt))
74347 + goto fail_nomem_anon_vma_fork;
74348 + tmp->vm_flags &= ~VM_LOCKED;
74349 + tmp->vm_next = tmp->vm_prev = NULL;
74350 + tmp->vm_mirror = NULL;
74351 + file = tmp->vm_file;
74352 + if (file) {
74353 + struct inode *inode = file_inode(file);
74354 + struct address_space *mapping = file->f_mapping;
74355 +
74356 + get_file(file);
74357 + if (tmp->vm_flags & VM_DENYWRITE)
74358 + atomic_dec(&inode->i_writecount);
74359 + mutex_lock(&mapping->i_mmap_mutex);
74360 + if (tmp->vm_flags & VM_SHARED)
74361 + mapping->i_mmap_writable++;
74362 + flush_dcache_mmap_lock(mapping);
74363 + /* insert tmp into the share list, just after mpnt */
74364 + if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74365 + vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
74366 + else
74367 + vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
74368 + flush_dcache_mmap_unlock(mapping);
74369 + mutex_unlock(&mapping->i_mmap_mutex);
74370 + }
74371 +
74372 + /*
74373 + * Clear hugetlb-related page reserves for children. This only
74374 + * affects MAP_PRIVATE mappings. Faults generated by the child
74375 + * are not guaranteed to succeed, even if read-only
74376 + */
74377 + if (is_vm_hugetlb_page(tmp))
74378 + reset_vma_resv_huge_pages(tmp);
74379 +
74380 + return tmp;
74381 +
74382 +fail_nomem_anon_vma_fork:
74383 + mpol_put(pol);
74384 +fail_nomem_policy:
74385 + kmem_cache_free(vm_area_cachep, tmp);
74386 +fail_nomem:
74387 + vm_unacct_memory(charge);
74388 + return NULL;
74389 +}
74390 +
74391 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74392 {
74393 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
74394 struct rb_node **rb_link, *rb_parent;
74395 int retval;
74396 - unsigned long charge;
74397 - struct mempolicy *pol;
74398
74399 uprobe_start_dup_mmap();
74400 down_write(&oldmm->mmap_sem);
74401 @@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74402 mm->locked_vm = 0;
74403 mm->mmap = NULL;
74404 mm->mmap_cache = NULL;
74405 - mm->free_area_cache = oldmm->mmap_base;
74406 - mm->cached_hole_size = ~0UL;
74407 + mm->free_area_cache = oldmm->free_area_cache;
74408 + mm->cached_hole_size = oldmm->cached_hole_size;
74409 mm->map_count = 0;
74410 cpumask_clear(mm_cpumask(mm));
74411 mm->mm_rb = RB_ROOT;
74412 @@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74413
74414 prev = NULL;
74415 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
74416 - struct file *file;
74417 -
74418 if (mpnt->vm_flags & VM_DONTCOPY) {
74419 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
74420 -vma_pages(mpnt));
74421 continue;
74422 }
74423 - charge = 0;
74424 - if (mpnt->vm_flags & VM_ACCOUNT) {
74425 - unsigned long len = vma_pages(mpnt);
74426 -
74427 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74428 - goto fail_nomem;
74429 - charge = len;
74430 - }
74431 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74432 - if (!tmp)
74433 - goto fail_nomem;
74434 - *tmp = *mpnt;
74435 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
74436 - pol = mpol_dup(vma_policy(mpnt));
74437 - retval = PTR_ERR(pol);
74438 - if (IS_ERR(pol))
74439 - goto fail_nomem_policy;
74440 - vma_set_policy(tmp, pol);
74441 - tmp->vm_mm = mm;
74442 - if (anon_vma_fork(tmp, mpnt))
74443 - goto fail_nomem_anon_vma_fork;
74444 - tmp->vm_flags &= ~VM_LOCKED;
74445 - tmp->vm_next = tmp->vm_prev = NULL;
74446 - file = tmp->vm_file;
74447 - if (file) {
74448 - struct inode *inode = file_inode(file);
74449 - struct address_space *mapping = file->f_mapping;
74450 -
74451 - get_file(file);
74452 - if (tmp->vm_flags & VM_DENYWRITE)
74453 - atomic_dec(&inode->i_writecount);
74454 - mutex_lock(&mapping->i_mmap_mutex);
74455 - if (tmp->vm_flags & VM_SHARED)
74456 - mapping->i_mmap_writable++;
74457 - flush_dcache_mmap_lock(mapping);
74458 - /* insert tmp into the share list, just after mpnt */
74459 - if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74460 - vma_nonlinear_insert(tmp,
74461 - &mapping->i_mmap_nonlinear);
74462 - else
74463 - vma_interval_tree_insert_after(tmp, mpnt,
74464 - &mapping->i_mmap);
74465 - flush_dcache_mmap_unlock(mapping);
74466 - mutex_unlock(&mapping->i_mmap_mutex);
74467 + tmp = dup_vma(mm, oldmm, mpnt);
74468 + if (!tmp) {
74469 + retval = -ENOMEM;
74470 + goto out;
74471 }
74472
74473 /*
74474 @@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74475 if (retval)
74476 goto out;
74477 }
74478 +
74479 +#ifdef CONFIG_PAX_SEGMEXEC
74480 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
74481 + struct vm_area_struct *mpnt_m;
74482 +
74483 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
74484 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
74485 +
74486 + if (!mpnt->vm_mirror)
74487 + continue;
74488 +
74489 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
74490 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
74491 + mpnt->vm_mirror = mpnt_m;
74492 + } else {
74493 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
74494 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
74495 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
74496 + mpnt->vm_mirror->vm_mirror = mpnt;
74497 + }
74498 + }
74499 + BUG_ON(mpnt_m);
74500 + }
74501 +#endif
74502 +
74503 /* a new mm has just been created */
74504 arch_dup_mmap(oldmm, mm);
74505 retval = 0;
74506 @@ -472,14 +523,6 @@ out:
74507 up_write(&oldmm->mmap_sem);
74508 uprobe_end_dup_mmap();
74509 return retval;
74510 -fail_nomem_anon_vma_fork:
74511 - mpol_put(pol);
74512 -fail_nomem_policy:
74513 - kmem_cache_free(vm_area_cachep, tmp);
74514 -fail_nomem:
74515 - retval = -ENOMEM;
74516 - vm_unacct_memory(charge);
74517 - goto out;
74518 }
74519
74520 static inline int mm_alloc_pgd(struct mm_struct *mm)
74521 @@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
74522 return ERR_PTR(err);
74523
74524 mm = get_task_mm(task);
74525 - if (mm && mm != current->mm &&
74526 - !ptrace_may_access(task, mode)) {
74527 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
74528 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
74529 mmput(mm);
74530 mm = ERR_PTR(-EACCES);
74531 }
74532 @@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
74533 spin_unlock(&fs->lock);
74534 return -EAGAIN;
74535 }
74536 - fs->users++;
74537 + atomic_inc(&fs->users);
74538 spin_unlock(&fs->lock);
74539 return 0;
74540 }
74541 tsk->fs = copy_fs_struct(fs);
74542 if (!tsk->fs)
74543 return -ENOMEM;
74544 + /* Carry through gr_chroot_dentry and is_chrooted instead
74545 + of recomputing it here. Already copied when the task struct
74546 + is duplicated. This allows pivot_root to not be treated as
74547 + a chroot
74548 + */
74549 + //gr_set_chroot_entries(tsk, &tsk->fs->root);
74550 +
74551 return 0;
74552 }
74553
74554 @@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
74555 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
74556 #endif
74557 retval = -EAGAIN;
74558 +
74559 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
74560 +
74561 if (atomic_read(&p->real_cred->user->processes) >=
74562 task_rlimit(p, RLIMIT_NPROC)) {
74563 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
74564 @@ -1441,6 +1494,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
74565 goto bad_fork_free_pid;
74566 }
74567
74568 + /* synchronizes with gr_set_acls()
74569 + we need to call this past the point of no return for fork()
74570 + */
74571 + gr_copy_label(p);
74572 +
74573 if (clone_flags & CLONE_THREAD) {
74574 current->signal->nr_threads++;
74575 atomic_inc(&current->signal->live);
74576 @@ -1524,6 +1582,8 @@ bad_fork_cleanup_count:
74577 bad_fork_free:
74578 free_task(p);
74579 fork_out:
74580 + gr_log_forkfail(retval);
74581 +
74582 return ERR_PTR(retval);
74583 }
74584
74585 @@ -1574,6 +1634,23 @@ long do_fork(unsigned long clone_flags,
74586 return -EINVAL;
74587 }
74588
74589 +#ifdef CONFIG_GRKERNSEC
74590 + if (clone_flags & CLONE_NEWUSER) {
74591 + /*
74592 + * This doesn't really inspire confidence:
74593 + * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
74594 + * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
74595 + * Increases kernel attack surface in areas developers
74596 + * previously cared little about ("low importance due
74597 + * to requiring "root" capability")
74598 + * To be removed when this code receives *proper* review
74599 + */
74600 + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
74601 + !capable(CAP_SETGID))
74602 + return -EPERM;
74603 + }
74604 +#endif
74605 +
74606 /*
74607 * Determine whether and which event to report to ptracer. When
74608 * called from kernel_thread or CLONE_UNTRACED is explicitly
74609 @@ -1608,6 +1685,8 @@ long do_fork(unsigned long clone_flags,
74610 if (clone_flags & CLONE_PARENT_SETTID)
74611 put_user(nr, parent_tidptr);
74612
74613 + gr_handle_brute_check();
74614 +
74615 if (clone_flags & CLONE_VFORK) {
74616 p->vfork_done = &vfork;
74617 init_completion(&vfork);
74618 @@ -1761,7 +1840,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
74619 return 0;
74620
74621 /* don't need lock here; in the worst case we'll do useless copy */
74622 - if (fs->users == 1)
74623 + if (atomic_read(&fs->users) == 1)
74624 return 0;
74625
74626 *new_fsp = copy_fs_struct(fs);
74627 @@ -1873,7 +1952,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
74628 fs = current->fs;
74629 spin_lock(&fs->lock);
74630 current->fs = new_fs;
74631 - if (--fs->users)
74632 + gr_set_chroot_entries(current, &current->fs->root);
74633 + if (atomic_dec_return(&fs->users))
74634 new_fs = NULL;
74635 else
74636 new_fs = fs;
74637 diff --git a/kernel/futex.c b/kernel/futex.c
74638 index b26dcfc..39e266a 100644
74639 --- a/kernel/futex.c
74640 +++ b/kernel/futex.c
74641 @@ -54,6 +54,7 @@
74642 #include <linux/mount.h>
74643 #include <linux/pagemap.h>
74644 #include <linux/syscalls.h>
74645 +#include <linux/ptrace.h>
74646 #include <linux/signal.h>
74647 #include <linux/export.h>
74648 #include <linux/magic.h>
74649 @@ -241,6 +242,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
74650 struct page *page, *page_head;
74651 int err, ro = 0;
74652
74653 +#ifdef CONFIG_PAX_SEGMEXEC
74654 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
74655 + return -EFAULT;
74656 +#endif
74657 +
74658 /*
74659 * The futex address must be "naturally" aligned.
74660 */
74661 @@ -2732,6 +2738,7 @@ static int __init futex_init(void)
74662 {
74663 u32 curval;
74664 int i;
74665 + mm_segment_t oldfs;
74666
74667 /*
74668 * This will fail and we want it. Some arch implementations do
74669 @@ -2743,8 +2750,11 @@ static int __init futex_init(void)
74670 * implementation, the non-functional ones will return
74671 * -ENOSYS.
74672 */
74673 + oldfs = get_fs();
74674 + set_fs(USER_DS);
74675 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
74676 futex_cmpxchg_enabled = 1;
74677 + set_fs(oldfs);
74678
74679 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
74680 plist_head_init(&futex_queues[i].chain);
74681 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
74682 index f9f44fd..29885e4 100644
74683 --- a/kernel/futex_compat.c
74684 +++ b/kernel/futex_compat.c
74685 @@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
74686 return 0;
74687 }
74688
74689 -static void __user *futex_uaddr(struct robust_list __user *entry,
74690 +static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
74691 compat_long_t futex_offset)
74692 {
74693 compat_uptr_t base = ptr_to_compat(entry);
74694 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
74695 index 9b22d03..6295b62 100644
74696 --- a/kernel/gcov/base.c
74697 +++ b/kernel/gcov/base.c
74698 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
74699 }
74700
74701 #ifdef CONFIG_MODULES
74702 -static inline int within(void *addr, void *start, unsigned long size)
74703 -{
74704 - return ((addr >= start) && (addr < start + size));
74705 -}
74706 -
74707 /* Update list and generate events when modules are unloaded. */
74708 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
74709 void *data)
74710 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
74711 prev = NULL;
74712 /* Remove entries located in module from linked list. */
74713 for (info = gcov_info_head; info; info = info->next) {
74714 - if (within(info, mod->module_core, mod->core_size)) {
74715 + if (within_module_core_rw((unsigned long)info, mod)) {
74716 if (prev)
74717 prev->next = info->next;
74718 else
74719 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
74720 index 7ef5556..8247f11 100644
74721 --- a/kernel/hrtimer.c
74722 +++ b/kernel/hrtimer.c
74723 @@ -1416,7 +1416,7 @@ void hrtimer_peek_ahead_timers(void)
74724 local_irq_restore(flags);
74725 }
74726
74727 -static void run_hrtimer_softirq(struct softirq_action *h)
74728 +static void run_hrtimer_softirq(void)
74729 {
74730 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
74731
74732 @@ -1758,7 +1758,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
74733 return NOTIFY_OK;
74734 }
74735
74736 -static struct notifier_block __cpuinitdata hrtimers_nb = {
74737 +static struct notifier_block hrtimers_nb = {
74738 .notifier_call = hrtimer_cpu_notify,
74739 };
74740
74741 diff --git a/kernel/irq_work.c b/kernel/irq_work.c
74742 index 55fcce6..0e4cf34 100644
74743 --- a/kernel/irq_work.c
74744 +++ b/kernel/irq_work.c
74745 @@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
74746 return NOTIFY_OK;
74747 }
74748
74749 -static struct notifier_block cpu_notify;
74750 +static struct notifier_block cpu_notify = {
74751 + .notifier_call = irq_work_cpu_notify,
74752 + .priority = 0,
74753 +};
74754
74755 static __init int irq_work_init_cpu_notifier(void)
74756 {
74757 - cpu_notify.notifier_call = irq_work_cpu_notify;
74758 - cpu_notify.priority = 0;
74759 register_cpu_notifier(&cpu_notify);
74760 return 0;
74761 }
74762 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
74763 index 60f48fa..7f3a770 100644
74764 --- a/kernel/jump_label.c
74765 +++ b/kernel/jump_label.c
74766 @@ -13,6 +13,7 @@
74767 #include <linux/sort.h>
74768 #include <linux/err.h>
74769 #include <linux/static_key.h>
74770 +#include <linux/mm.h>
74771
74772 #ifdef HAVE_JUMP_LABEL
74773
74774 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
74775
74776 size = (((unsigned long)stop - (unsigned long)start)
74777 / sizeof(struct jump_entry));
74778 + pax_open_kernel();
74779 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
74780 + pax_close_kernel();
74781 }
74782
74783 static void jump_label_update(struct static_key *key, int enable);
74784 @@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
74785 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
74786 struct jump_entry *iter;
74787
74788 + pax_open_kernel();
74789 for (iter = iter_start; iter < iter_stop; iter++) {
74790 if (within_module_init(iter->code, mod))
74791 iter->code = 0;
74792 }
74793 + pax_close_kernel();
74794 }
74795
74796 static int
74797 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
74798 index 2169fee..706ccca 100644
74799 --- a/kernel/kallsyms.c
74800 +++ b/kernel/kallsyms.c
74801 @@ -11,6 +11,9 @@
74802 * Changed the compression method from stem compression to "table lookup"
74803 * compression (see scripts/kallsyms.c for a more complete description)
74804 */
74805 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74806 +#define __INCLUDED_BY_HIDESYM 1
74807 +#endif
74808 #include <linux/kallsyms.h>
74809 #include <linux/module.h>
74810 #include <linux/init.h>
74811 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
74812
74813 static inline int is_kernel_inittext(unsigned long addr)
74814 {
74815 + if (system_state != SYSTEM_BOOTING)
74816 + return 0;
74817 +
74818 if (addr >= (unsigned long)_sinittext
74819 && addr <= (unsigned long)_einittext)
74820 return 1;
74821 return 0;
74822 }
74823
74824 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74825 +#ifdef CONFIG_MODULES
74826 +static inline int is_module_text(unsigned long addr)
74827 +{
74828 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
74829 + return 1;
74830 +
74831 + addr = ktla_ktva(addr);
74832 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
74833 +}
74834 +#else
74835 +static inline int is_module_text(unsigned long addr)
74836 +{
74837 + return 0;
74838 +}
74839 +#endif
74840 +#endif
74841 +
74842 static inline int is_kernel_text(unsigned long addr)
74843 {
74844 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
74845 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
74846
74847 static inline int is_kernel(unsigned long addr)
74848 {
74849 +
74850 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74851 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
74852 + return 1;
74853 +
74854 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
74855 +#else
74856 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
74857 +#endif
74858 +
74859 return 1;
74860 return in_gate_area_no_mm(addr);
74861 }
74862
74863 static int is_ksym_addr(unsigned long addr)
74864 {
74865 +
74866 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74867 + if (is_module_text(addr))
74868 + return 0;
74869 +#endif
74870 +
74871 if (all_var)
74872 return is_kernel(addr);
74873
74874 @@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
74875
74876 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
74877 {
74878 - iter->name[0] = '\0';
74879 iter->nameoff = get_symbol_offset(new_pos);
74880 iter->pos = new_pos;
74881 }
74882 @@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
74883 {
74884 struct kallsym_iter *iter = m->private;
74885
74886 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74887 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
74888 + return 0;
74889 +#endif
74890 +
74891 /* Some debugging symbols have no name. Ignore them. */
74892 if (!iter->name[0])
74893 return 0;
74894 @@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
74895 */
74896 type = iter->exported ? toupper(iter->type) :
74897 tolower(iter->type);
74898 +
74899 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
74900 type, iter->name, iter->module_name);
74901 } else
74902 @@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
74903 struct kallsym_iter *iter;
74904 int ret;
74905
74906 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
74907 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
74908 if (!iter)
74909 return -ENOMEM;
74910 reset_iter(iter, 0);
74911 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
74912 index e30ac0f..3528cac 100644
74913 --- a/kernel/kcmp.c
74914 +++ b/kernel/kcmp.c
74915 @@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
74916 struct task_struct *task1, *task2;
74917 int ret;
74918
74919 +#ifdef CONFIG_GRKERNSEC
74920 + return -ENOSYS;
74921 +#endif
74922 +
74923 rcu_read_lock();
74924
74925 /*
74926 diff --git a/kernel/kexec.c b/kernel/kexec.c
74927 index ffd4e11..c3ff6bf 100644
74928 --- a/kernel/kexec.c
74929 +++ b/kernel/kexec.c
74930 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
74931 unsigned long flags)
74932 {
74933 struct compat_kexec_segment in;
74934 - struct kexec_segment out, __user *ksegments;
74935 + struct kexec_segment out;
74936 + struct kexec_segment __user *ksegments;
74937 unsigned long i, result;
74938
74939 /* Don't allow clients that don't understand the native
74940 diff --git a/kernel/kmod.c b/kernel/kmod.c
74941 index 56dd349..336e1dc 100644
74942 --- a/kernel/kmod.c
74943 +++ b/kernel/kmod.c
74944 @@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
74945 kfree(info->argv);
74946 }
74947
74948 -static int call_modprobe(char *module_name, int wait)
74949 +static int call_modprobe(char *module_name, char *module_param, int wait)
74950 {
74951 static char *envp[] = {
74952 "HOME=/",
74953 @@ -84,7 +84,7 @@ static int call_modprobe(char *module_name, int wait)
74954 NULL
74955 };
74956
74957 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
74958 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
74959 if (!argv)
74960 goto out;
74961
74962 @@ -96,7 +96,8 @@ static int call_modprobe(char *module_name, int wait)
74963 argv[1] = "-q";
74964 argv[2] = "--";
74965 argv[3] = module_name; /* check free_modprobe_argv() */
74966 - argv[4] = NULL;
74967 + argv[4] = module_param;
74968 + argv[5] = NULL;
74969
74970 return call_usermodehelper_fns(modprobe_path, argv, envp,
74971 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
74972 @@ -121,9 +122,8 @@ out:
74973 * If module auto-loading support is disabled then this function
74974 * becomes a no-operation.
74975 */
74976 -int __request_module(bool wait, const char *fmt, ...)
74977 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
74978 {
74979 - va_list args;
74980 char module_name[MODULE_NAME_LEN];
74981 unsigned int max_modprobes;
74982 int ret;
74983 @@ -139,9 +139,7 @@ int __request_module(bool wait, const char *fmt, ...)
74984 */
74985 WARN_ON_ONCE(wait && current_is_async());
74986
74987 - va_start(args, fmt);
74988 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
74989 - va_end(args);
74990 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
74991 if (ret >= MODULE_NAME_LEN)
74992 return -ENAMETOOLONG;
74993
74994 @@ -149,6 +147,20 @@ int __request_module(bool wait, const char *fmt, ...)
74995 if (ret)
74996 return ret;
74997
74998 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74999 + if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75000 + /* hack to workaround consolekit/udisks stupidity */
75001 + read_lock(&tasklist_lock);
75002 + if (!strcmp(current->comm, "mount") &&
75003 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
75004 + read_unlock(&tasklist_lock);
75005 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
75006 + return -EPERM;
75007 + }
75008 + read_unlock(&tasklist_lock);
75009 + }
75010 +#endif
75011 +
75012 /* If modprobe needs a service that is in a module, we get a recursive
75013 * loop. Limit the number of running kmod threads to max_threads/2 or
75014 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
75015 @@ -177,11 +189,52 @@ int __request_module(bool wait, const char *fmt, ...)
75016
75017 trace_module_request(module_name, wait, _RET_IP_);
75018
75019 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75020 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75021
75022 atomic_dec(&kmod_concurrent);
75023 return ret;
75024 }
75025 +
75026 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
75027 +{
75028 + va_list args;
75029 + int ret;
75030 +
75031 + va_start(args, fmt);
75032 + ret = ____request_module(wait, module_param, fmt, args);
75033 + va_end(args);
75034 +
75035 + return ret;
75036 +}
75037 +
75038 +int __request_module(bool wait, const char *fmt, ...)
75039 +{
75040 + va_list args;
75041 + int ret;
75042 +
75043 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75044 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75045 + char module_param[MODULE_NAME_LEN];
75046 +
75047 + memset(module_param, 0, sizeof(module_param));
75048 +
75049 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
75050 +
75051 + va_start(args, fmt);
75052 + ret = ____request_module(wait, module_param, fmt, args);
75053 + va_end(args);
75054 +
75055 + return ret;
75056 + }
75057 +#endif
75058 +
75059 + va_start(args, fmt);
75060 + ret = ____request_module(wait, NULL, fmt, args);
75061 + va_end(args);
75062 +
75063 + return ret;
75064 +}
75065 +
75066 EXPORT_SYMBOL(__request_module);
75067 #endif /* CONFIG_MODULES */
75068
75069 @@ -292,7 +345,7 @@ static int wait_for_helper(void *data)
75070 *
75071 * Thus the __user pointer cast is valid here.
75072 */
75073 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
75074 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
75075
75076 /*
75077 * If ret is 0, either ____call_usermodehelper failed and the
75078 @@ -644,7 +697,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
75079 static int proc_cap_handler(struct ctl_table *table, int write,
75080 void __user *buffer, size_t *lenp, loff_t *ppos)
75081 {
75082 - struct ctl_table t;
75083 + ctl_table_no_const t;
75084 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
75085 kernel_cap_t new_cap;
75086 int err, i;
75087 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
75088 index 3fed7f0..a3f95ed 100644
75089 --- a/kernel/kprobes.c
75090 +++ b/kernel/kprobes.c
75091 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
75092 * kernel image and loaded module images reside. This is required
75093 * so x86_64 can correctly handle the %rip-relative fixups.
75094 */
75095 - kip->insns = module_alloc(PAGE_SIZE);
75096 + kip->insns = module_alloc_exec(PAGE_SIZE);
75097 if (!kip->insns) {
75098 kfree(kip);
75099 return NULL;
75100 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
75101 */
75102 if (!list_is_singular(&kip->list)) {
75103 list_del(&kip->list);
75104 - module_free(NULL, kip->insns);
75105 + module_free_exec(NULL, kip->insns);
75106 kfree(kip);
75107 }
75108 return 1;
75109 @@ -2073,7 +2073,7 @@ static int __init init_kprobes(void)
75110 {
75111 int i, err = 0;
75112 unsigned long offset = 0, size = 0;
75113 - char *modname, namebuf[128];
75114 + char *modname, namebuf[KSYM_NAME_LEN];
75115 const char *symbol_name;
75116 void *addr;
75117 struct kprobe_blackpoint *kb;
75118 @@ -2158,11 +2158,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
75119 kprobe_type = "k";
75120
75121 if (sym)
75122 - seq_printf(pi, "%p %s %s+0x%x %s ",
75123 + seq_printf(pi, "%pK %s %s+0x%x %s ",
75124 p->addr, kprobe_type, sym, offset,
75125 (modname ? modname : " "));
75126 else
75127 - seq_printf(pi, "%p %s %p ",
75128 + seq_printf(pi, "%pK %s %pK ",
75129 p->addr, kprobe_type, p->addr);
75130
75131 if (!pp)
75132 @@ -2199,7 +2199,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
75133 const char *sym = NULL;
75134 unsigned int i = *(loff_t *) v;
75135 unsigned long offset = 0;
75136 - char *modname, namebuf[128];
75137 + char *modname, namebuf[KSYM_NAME_LEN];
75138
75139 head = &kprobe_table[i];
75140 preempt_disable();
75141 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
75142 index 6ada93c..dce7d5d 100644
75143 --- a/kernel/ksysfs.c
75144 +++ b/kernel/ksysfs.c
75145 @@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
75146 {
75147 if (count+1 > UEVENT_HELPER_PATH_LEN)
75148 return -ENOENT;
75149 + if (!capable(CAP_SYS_ADMIN))
75150 + return -EPERM;
75151 memcpy(uevent_helper, buf, count);
75152 uevent_helper[count] = '\0';
75153 if (count && uevent_helper[count-1] == '\n')
75154 @@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
75155 return count;
75156 }
75157
75158 -static struct bin_attribute notes_attr = {
75159 +static bin_attribute_no_const notes_attr __read_only = {
75160 .attr = {
75161 .name = "notes",
75162 .mode = S_IRUGO,
75163 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
75164 index 8a0efac..56f1e2d 100644
75165 --- a/kernel/lockdep.c
75166 +++ b/kernel/lockdep.c
75167 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
75168 end = (unsigned long) &_end,
75169 addr = (unsigned long) obj;
75170
75171 +#ifdef CONFIG_PAX_KERNEXEC
75172 + start = ktla_ktva(start);
75173 +#endif
75174 +
75175 /*
75176 * static variable?
75177 */
75178 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
75179 if (!static_obj(lock->key)) {
75180 debug_locks_off();
75181 printk("INFO: trying to register non-static key.\n");
75182 + printk("lock:%pS key:%pS.\n", lock, lock->key);
75183 printk("the code is fine but needs lockdep annotation.\n");
75184 printk("turning off the locking correctness validator.\n");
75185 dump_stack();
75186 @@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
75187 if (!class)
75188 return 0;
75189 }
75190 - atomic_inc((atomic_t *)&class->ops);
75191 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
75192 if (very_verbose(class)) {
75193 printk("\nacquire class [%p] %s", class->key, class->name);
75194 if (class->name_version > 1)
75195 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
75196 index b2c71c5..7b88d63 100644
75197 --- a/kernel/lockdep_proc.c
75198 +++ b/kernel/lockdep_proc.c
75199 @@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
75200 return 0;
75201 }
75202
75203 - seq_printf(m, "%p", class->key);
75204 + seq_printf(m, "%pK", class->key);
75205 #ifdef CONFIG_DEBUG_LOCKDEP
75206 seq_printf(m, " OPS:%8ld", class->ops);
75207 #endif
75208 @@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
75209
75210 list_for_each_entry(entry, &class->locks_after, entry) {
75211 if (entry->distance == 1) {
75212 - seq_printf(m, " -> [%p] ", entry->class->key);
75213 + seq_printf(m, " -> [%pK] ", entry->class->key);
75214 print_name(m, entry->class);
75215 seq_puts(m, "\n");
75216 }
75217 @@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
75218 if (!class->key)
75219 continue;
75220
75221 - seq_printf(m, "[%p] ", class->key);
75222 + seq_printf(m, "[%pK] ", class->key);
75223 print_name(m, class);
75224 seq_puts(m, "\n");
75225 }
75226 @@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75227 if (!i)
75228 seq_line(m, '-', 40-namelen, namelen);
75229
75230 - snprintf(ip, sizeof(ip), "[<%p>]",
75231 + snprintf(ip, sizeof(ip), "[<%pK>]",
75232 (void *)class->contention_point[i]);
75233 seq_printf(m, "%40s %14lu %29s %pS\n",
75234 name, stats->contention_point[i],
75235 @@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75236 if (!i)
75237 seq_line(m, '-', 40-namelen, namelen);
75238
75239 - snprintf(ip, sizeof(ip), "[<%p>]",
75240 + snprintf(ip, sizeof(ip), "[<%pK>]",
75241 (void *)class->contending_point[i]);
75242 seq_printf(m, "%40s %14lu %29s %pS\n",
75243 name, stats->contending_point[i],
75244 diff --git a/kernel/module.c b/kernel/module.c
75245 index 0925c9a..6b044ac 100644
75246 --- a/kernel/module.c
75247 +++ b/kernel/module.c
75248 @@ -61,6 +61,7 @@
75249 #include <linux/pfn.h>
75250 #include <linux/bsearch.h>
75251 #include <linux/fips.h>
75252 +#include <linux/grsecurity.h>
75253 #include <uapi/linux/module.h>
75254 #include "module-internal.h"
75255
75256 @@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
75257
75258 /* Bounds of module allocation, for speeding __module_address.
75259 * Protected by module_mutex. */
75260 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
75261 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
75262 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
75263
75264 int register_module_notifier(struct notifier_block * nb)
75265 {
75266 @@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75267 return true;
75268
75269 list_for_each_entry_rcu(mod, &modules, list) {
75270 - struct symsearch arr[] = {
75271 + struct symsearch modarr[] = {
75272 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
75273 NOT_GPL_ONLY, false },
75274 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
75275 @@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75276 if (mod->state == MODULE_STATE_UNFORMED)
75277 continue;
75278
75279 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
75280 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
75281 return true;
75282 }
75283 return false;
75284 @@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
75285 static int percpu_modalloc(struct module *mod,
75286 unsigned long size, unsigned long align)
75287 {
75288 - if (align > PAGE_SIZE) {
75289 + if (align-1 >= PAGE_SIZE) {
75290 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
75291 mod->name, align, PAGE_SIZE);
75292 align = PAGE_SIZE;
75293 @@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
75294 static ssize_t show_coresize(struct module_attribute *mattr,
75295 struct module_kobject *mk, char *buffer)
75296 {
75297 - return sprintf(buffer, "%u\n", mk->mod->core_size);
75298 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
75299 }
75300
75301 static struct module_attribute modinfo_coresize =
75302 @@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
75303 static ssize_t show_initsize(struct module_attribute *mattr,
75304 struct module_kobject *mk, char *buffer)
75305 {
75306 - return sprintf(buffer, "%u\n", mk->mod->init_size);
75307 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
75308 }
75309
75310 static struct module_attribute modinfo_initsize =
75311 @@ -1312,7 +1314,7 @@ resolve_symbol_wait(struct module *mod,
75312 */
75313 #ifdef CONFIG_SYSFS
75314
75315 -#ifdef CONFIG_KALLSYMS
75316 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
75317 static inline bool sect_empty(const Elf_Shdr *sect)
75318 {
75319 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
75320 @@ -1452,7 +1454,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
75321 {
75322 unsigned int notes, loaded, i;
75323 struct module_notes_attrs *notes_attrs;
75324 - struct bin_attribute *nattr;
75325 + bin_attribute_no_const *nattr;
75326
75327 /* failed to create section attributes, so can't create notes */
75328 if (!mod->sect_attrs)
75329 @@ -1564,7 +1566,7 @@ static void del_usage_links(struct module *mod)
75330 static int module_add_modinfo_attrs(struct module *mod)
75331 {
75332 struct module_attribute *attr;
75333 - struct module_attribute *temp_attr;
75334 + module_attribute_no_const *temp_attr;
75335 int error = 0;
75336 int i;
75337
75338 @@ -1778,21 +1780,21 @@ static void set_section_ro_nx(void *base,
75339
75340 static void unset_module_core_ro_nx(struct module *mod)
75341 {
75342 - set_page_attributes(mod->module_core + mod->core_text_size,
75343 - mod->module_core + mod->core_size,
75344 + set_page_attributes(mod->module_core_rw,
75345 + mod->module_core_rw + mod->core_size_rw,
75346 set_memory_x);
75347 - set_page_attributes(mod->module_core,
75348 - mod->module_core + mod->core_ro_size,
75349 + set_page_attributes(mod->module_core_rx,
75350 + mod->module_core_rx + mod->core_size_rx,
75351 set_memory_rw);
75352 }
75353
75354 static void unset_module_init_ro_nx(struct module *mod)
75355 {
75356 - set_page_attributes(mod->module_init + mod->init_text_size,
75357 - mod->module_init + mod->init_size,
75358 + set_page_attributes(mod->module_init_rw,
75359 + mod->module_init_rw + mod->init_size_rw,
75360 set_memory_x);
75361 - set_page_attributes(mod->module_init,
75362 - mod->module_init + mod->init_ro_size,
75363 + set_page_attributes(mod->module_init_rx,
75364 + mod->module_init_rx + mod->init_size_rx,
75365 set_memory_rw);
75366 }
75367
75368 @@ -1805,14 +1807,14 @@ void set_all_modules_text_rw(void)
75369 list_for_each_entry_rcu(mod, &modules, list) {
75370 if (mod->state == MODULE_STATE_UNFORMED)
75371 continue;
75372 - if ((mod->module_core) && (mod->core_text_size)) {
75373 - set_page_attributes(mod->module_core,
75374 - mod->module_core + mod->core_text_size,
75375 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
75376 + set_page_attributes(mod->module_core_rx,
75377 + mod->module_core_rx + mod->core_size_rx,
75378 set_memory_rw);
75379 }
75380 - if ((mod->module_init) && (mod->init_text_size)) {
75381 - set_page_attributes(mod->module_init,
75382 - mod->module_init + mod->init_text_size,
75383 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
75384 + set_page_attributes(mod->module_init_rx,
75385 + mod->module_init_rx + mod->init_size_rx,
75386 set_memory_rw);
75387 }
75388 }
75389 @@ -1828,14 +1830,14 @@ void set_all_modules_text_ro(void)
75390 list_for_each_entry_rcu(mod, &modules, list) {
75391 if (mod->state == MODULE_STATE_UNFORMED)
75392 continue;
75393 - if ((mod->module_core) && (mod->core_text_size)) {
75394 - set_page_attributes(mod->module_core,
75395 - mod->module_core + mod->core_text_size,
75396 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
75397 + set_page_attributes(mod->module_core_rx,
75398 + mod->module_core_rx + mod->core_size_rx,
75399 set_memory_ro);
75400 }
75401 - if ((mod->module_init) && (mod->init_text_size)) {
75402 - set_page_attributes(mod->module_init,
75403 - mod->module_init + mod->init_text_size,
75404 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
75405 + set_page_attributes(mod->module_init_rx,
75406 + mod->module_init_rx + mod->init_size_rx,
75407 set_memory_ro);
75408 }
75409 }
75410 @@ -1881,16 +1883,19 @@ static void free_module(struct module *mod)
75411
75412 /* This may be NULL, but that's OK */
75413 unset_module_init_ro_nx(mod);
75414 - module_free(mod, mod->module_init);
75415 + module_free(mod, mod->module_init_rw);
75416 + module_free_exec(mod, mod->module_init_rx);
75417 kfree(mod->args);
75418 percpu_modfree(mod);
75419
75420 /* Free lock-classes: */
75421 - lockdep_free_key_range(mod->module_core, mod->core_size);
75422 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
75423 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
75424
75425 /* Finally, free the core (containing the module structure) */
75426 unset_module_core_ro_nx(mod);
75427 - module_free(mod, mod->module_core);
75428 + module_free_exec(mod, mod->module_core_rx);
75429 + module_free(mod, mod->module_core_rw);
75430
75431 #ifdef CONFIG_MPU
75432 update_protections(current->mm);
75433 @@ -1960,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75434 int ret = 0;
75435 const struct kernel_symbol *ksym;
75436
75437 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75438 + int is_fs_load = 0;
75439 + int register_filesystem_found = 0;
75440 + char *p;
75441 +
75442 + p = strstr(mod->args, "grsec_modharden_fs");
75443 + if (p) {
75444 + char *endptr = p + sizeof("grsec_modharden_fs") - 1;
75445 + /* copy \0 as well */
75446 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
75447 + is_fs_load = 1;
75448 + }
75449 +#endif
75450 +
75451 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
75452 const char *name = info->strtab + sym[i].st_name;
75453
75454 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75455 + /* it's a real shame this will never get ripped and copied
75456 + upstream! ;(
75457 + */
75458 + if (is_fs_load && !strcmp(name, "register_filesystem"))
75459 + register_filesystem_found = 1;
75460 +#endif
75461 +
75462 switch (sym[i].st_shndx) {
75463 case SHN_COMMON:
75464 /* We compiled with -fno-common. These are not
75465 @@ -1983,7 +2010,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75466 ksym = resolve_symbol_wait(mod, info, name);
75467 /* Ok if resolved. */
75468 if (ksym && !IS_ERR(ksym)) {
75469 + pax_open_kernel();
75470 sym[i].st_value = ksym->value;
75471 + pax_close_kernel();
75472 break;
75473 }
75474
75475 @@ -2002,11 +2031,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75476 secbase = (unsigned long)mod_percpu(mod);
75477 else
75478 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
75479 + pax_open_kernel();
75480 sym[i].st_value += secbase;
75481 + pax_close_kernel();
75482 break;
75483 }
75484 }
75485
75486 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75487 + if (is_fs_load && !register_filesystem_found) {
75488 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
75489 + ret = -EPERM;
75490 + }
75491 +#endif
75492 +
75493 return ret;
75494 }
75495
75496 @@ -2090,22 +2128,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
75497 || s->sh_entsize != ~0UL
75498 || strstarts(sname, ".init"))
75499 continue;
75500 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
75501 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75502 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
75503 + else
75504 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
75505 pr_debug("\t%s\n", sname);
75506 }
75507 - switch (m) {
75508 - case 0: /* executable */
75509 - mod->core_size = debug_align(mod->core_size);
75510 - mod->core_text_size = mod->core_size;
75511 - break;
75512 - case 1: /* RO: text and ro-data */
75513 - mod->core_size = debug_align(mod->core_size);
75514 - mod->core_ro_size = mod->core_size;
75515 - break;
75516 - case 3: /* whole core */
75517 - mod->core_size = debug_align(mod->core_size);
75518 - break;
75519 - }
75520 }
75521
75522 pr_debug("Init section allocation order:\n");
75523 @@ -2119,23 +2147,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
75524 || s->sh_entsize != ~0UL
75525 || !strstarts(sname, ".init"))
75526 continue;
75527 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
75528 - | INIT_OFFSET_MASK);
75529 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75530 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
75531 + else
75532 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
75533 + s->sh_entsize |= INIT_OFFSET_MASK;
75534 pr_debug("\t%s\n", sname);
75535 }
75536 - switch (m) {
75537 - case 0: /* executable */
75538 - mod->init_size = debug_align(mod->init_size);
75539 - mod->init_text_size = mod->init_size;
75540 - break;
75541 - case 1: /* RO: text and ro-data */
75542 - mod->init_size = debug_align(mod->init_size);
75543 - mod->init_ro_size = mod->init_size;
75544 - break;
75545 - case 3: /* whole init */
75546 - mod->init_size = debug_align(mod->init_size);
75547 - break;
75548 - }
75549 }
75550 }
75551
75552 @@ -2308,7 +2326,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
75553
75554 /* Put symbol section at end of init part of module. */
75555 symsect->sh_flags |= SHF_ALLOC;
75556 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
75557 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
75558 info->index.sym) | INIT_OFFSET_MASK;
75559 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
75560
75561 @@ -2325,13 +2343,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
75562 }
75563
75564 /* Append room for core symbols at end of core part. */
75565 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
75566 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
75567 - mod->core_size += strtab_size;
75568 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
75569 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
75570 + mod->core_size_rx += strtab_size;
75571
75572 /* Put string table section at end of init part of module. */
75573 strsect->sh_flags |= SHF_ALLOC;
75574 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
75575 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
75576 info->index.str) | INIT_OFFSET_MASK;
75577 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
75578 }
75579 @@ -2349,12 +2367,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
75580 /* Make sure we get permanent strtab: don't use info->strtab. */
75581 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
75582
75583 + pax_open_kernel();
75584 +
75585 /* Set types up while we still have access to sections. */
75586 for (i = 0; i < mod->num_symtab; i++)
75587 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
75588
75589 - mod->core_symtab = dst = mod->module_core + info->symoffs;
75590 - mod->core_strtab = s = mod->module_core + info->stroffs;
75591 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
75592 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
75593 src = mod->symtab;
75594 for (ndst = i = 0; i < mod->num_symtab; i++) {
75595 if (i == 0 ||
75596 @@ -2366,6 +2386,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
75597 }
75598 }
75599 mod->core_num_syms = ndst;
75600 +
75601 + pax_close_kernel();
75602 }
75603 #else
75604 static inline void layout_symtab(struct module *mod, struct load_info *info)
75605 @@ -2399,17 +2421,33 @@ void * __weak module_alloc(unsigned long size)
75606 return vmalloc_exec(size);
75607 }
75608
75609 -static void *module_alloc_update_bounds(unsigned long size)
75610 +static void *module_alloc_update_bounds_rw(unsigned long size)
75611 {
75612 void *ret = module_alloc(size);
75613
75614 if (ret) {
75615 mutex_lock(&module_mutex);
75616 /* Update module bounds. */
75617 - if ((unsigned long)ret < module_addr_min)
75618 - module_addr_min = (unsigned long)ret;
75619 - if ((unsigned long)ret + size > module_addr_max)
75620 - module_addr_max = (unsigned long)ret + size;
75621 + if ((unsigned long)ret < module_addr_min_rw)
75622 + module_addr_min_rw = (unsigned long)ret;
75623 + if ((unsigned long)ret + size > module_addr_max_rw)
75624 + module_addr_max_rw = (unsigned long)ret + size;
75625 + mutex_unlock(&module_mutex);
75626 + }
75627 + return ret;
75628 +}
75629 +
75630 +static void *module_alloc_update_bounds_rx(unsigned long size)
75631 +{
75632 + void *ret = module_alloc_exec(size);
75633 +
75634 + if (ret) {
75635 + mutex_lock(&module_mutex);
75636 + /* Update module bounds. */
75637 + if ((unsigned long)ret < module_addr_min_rx)
75638 + module_addr_min_rx = (unsigned long)ret;
75639 + if ((unsigned long)ret + size > module_addr_max_rx)
75640 + module_addr_max_rx = (unsigned long)ret + size;
75641 mutex_unlock(&module_mutex);
75642 }
75643 return ret;
75644 @@ -2685,8 +2723,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
75645 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
75646 {
75647 const char *modmagic = get_modinfo(info, "vermagic");
75648 + const char *license = get_modinfo(info, "license");
75649 int err;
75650
75651 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
75652 + if (!license || !license_is_gpl_compatible(license))
75653 + return -ENOEXEC;
75654 +#endif
75655 +
75656 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
75657 modmagic = NULL;
75658
75659 @@ -2712,7 +2756,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
75660 }
75661
75662 /* Set up license info based on the info section */
75663 - set_license(mod, get_modinfo(info, "license"));
75664 + set_license(mod, license);
75665
75666 return 0;
75667 }
75668 @@ -2806,7 +2850,7 @@ static int move_module(struct module *mod, struct load_info *info)
75669 void *ptr;
75670
75671 /* Do the allocs. */
75672 - ptr = module_alloc_update_bounds(mod->core_size);
75673 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
75674 /*
75675 * The pointer to this block is stored in the module structure
75676 * which is inside the block. Just mark it as not being a
75677 @@ -2816,11 +2860,11 @@ static int move_module(struct module *mod, struct load_info *info)
75678 if (!ptr)
75679 return -ENOMEM;
75680
75681 - memset(ptr, 0, mod->core_size);
75682 - mod->module_core = ptr;
75683 + memset(ptr, 0, mod->core_size_rw);
75684 + mod->module_core_rw = ptr;
75685
75686 - if (mod->init_size) {
75687 - ptr = module_alloc_update_bounds(mod->init_size);
75688 + if (mod->init_size_rw) {
75689 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
75690 /*
75691 * The pointer to this block is stored in the module structure
75692 * which is inside the block. This block doesn't need to be
75693 @@ -2829,13 +2873,45 @@ static int move_module(struct module *mod, struct load_info *info)
75694 */
75695 kmemleak_ignore(ptr);
75696 if (!ptr) {
75697 - module_free(mod, mod->module_core);
75698 + module_free(mod, mod->module_core_rw);
75699 return -ENOMEM;
75700 }
75701 - memset(ptr, 0, mod->init_size);
75702 - mod->module_init = ptr;
75703 + memset(ptr, 0, mod->init_size_rw);
75704 + mod->module_init_rw = ptr;
75705 } else
75706 - mod->module_init = NULL;
75707 + mod->module_init_rw = NULL;
75708 +
75709 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
75710 + kmemleak_not_leak(ptr);
75711 + if (!ptr) {
75712 + if (mod->module_init_rw)
75713 + module_free(mod, mod->module_init_rw);
75714 + module_free(mod, mod->module_core_rw);
75715 + return -ENOMEM;
75716 + }
75717 +
75718 + pax_open_kernel();
75719 + memset(ptr, 0, mod->core_size_rx);
75720 + pax_close_kernel();
75721 + mod->module_core_rx = ptr;
75722 +
75723 + if (mod->init_size_rx) {
75724 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
75725 + kmemleak_ignore(ptr);
75726 + if (!ptr && mod->init_size_rx) {
75727 + module_free_exec(mod, mod->module_core_rx);
75728 + if (mod->module_init_rw)
75729 + module_free(mod, mod->module_init_rw);
75730 + module_free(mod, mod->module_core_rw);
75731 + return -ENOMEM;
75732 + }
75733 +
75734 + pax_open_kernel();
75735 + memset(ptr, 0, mod->init_size_rx);
75736 + pax_close_kernel();
75737 + mod->module_init_rx = ptr;
75738 + } else
75739 + mod->module_init_rx = NULL;
75740
75741 /* Transfer each section which specifies SHF_ALLOC */
75742 pr_debug("final section addresses:\n");
75743 @@ -2846,16 +2922,45 @@ static int move_module(struct module *mod, struct load_info *info)
75744 if (!(shdr->sh_flags & SHF_ALLOC))
75745 continue;
75746
75747 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
75748 - dest = mod->module_init
75749 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
75750 - else
75751 - dest = mod->module_core + shdr->sh_entsize;
75752 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
75753 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
75754 + dest = mod->module_init_rw
75755 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
75756 + else
75757 + dest = mod->module_init_rx
75758 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
75759 + } else {
75760 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
75761 + dest = mod->module_core_rw + shdr->sh_entsize;
75762 + else
75763 + dest = mod->module_core_rx + shdr->sh_entsize;
75764 + }
75765 +
75766 + if (shdr->sh_type != SHT_NOBITS) {
75767 +
75768 +#ifdef CONFIG_PAX_KERNEXEC
75769 +#ifdef CONFIG_X86_64
75770 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
75771 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
75772 +#endif
75773 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
75774 + pax_open_kernel();
75775 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
75776 + pax_close_kernel();
75777 + } else
75778 +#endif
75779
75780 - if (shdr->sh_type != SHT_NOBITS)
75781 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
75782 + }
75783 /* Update sh_addr to point to copy in image. */
75784 - shdr->sh_addr = (unsigned long)dest;
75785 +
75786 +#ifdef CONFIG_PAX_KERNEXEC
75787 + if (shdr->sh_flags & SHF_EXECINSTR)
75788 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
75789 + else
75790 +#endif
75791 +
75792 + shdr->sh_addr = (unsigned long)dest;
75793 pr_debug("\t0x%lx %s\n",
75794 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
75795 }
75796 @@ -2912,12 +3017,12 @@ static void flush_module_icache(const struct module *mod)
75797 * Do it before processing of module parameters, so the module
75798 * can provide parameter accessor functions of its own.
75799 */
75800 - if (mod->module_init)
75801 - flush_icache_range((unsigned long)mod->module_init,
75802 - (unsigned long)mod->module_init
75803 - + mod->init_size);
75804 - flush_icache_range((unsigned long)mod->module_core,
75805 - (unsigned long)mod->module_core + mod->core_size);
75806 + if (mod->module_init_rx)
75807 + flush_icache_range((unsigned long)mod->module_init_rx,
75808 + (unsigned long)mod->module_init_rx
75809 + + mod->init_size_rx);
75810 + flush_icache_range((unsigned long)mod->module_core_rx,
75811 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
75812
75813 set_fs(old_fs);
75814 }
75815 @@ -2987,8 +3092,10 @@ out:
75816 static void module_deallocate(struct module *mod, struct load_info *info)
75817 {
75818 percpu_modfree(mod);
75819 - module_free(mod, mod->module_init);
75820 - module_free(mod, mod->module_core);
75821 + module_free_exec(mod, mod->module_init_rx);
75822 + module_free_exec(mod, mod->module_core_rx);
75823 + module_free(mod, mod->module_init_rw);
75824 + module_free(mod, mod->module_core_rw);
75825 }
75826
75827 int __weak module_finalize(const Elf_Ehdr *hdr,
75828 @@ -3001,7 +3108,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
75829 static int post_relocation(struct module *mod, const struct load_info *info)
75830 {
75831 /* Sort exception table now relocations are done. */
75832 + pax_open_kernel();
75833 sort_extable(mod->extable, mod->extable + mod->num_exentries);
75834 + pax_close_kernel();
75835
75836 /* Copy relocated percpu area over. */
75837 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
75838 @@ -3055,16 +3164,16 @@ static int do_init_module(struct module *mod)
75839 MODULE_STATE_COMING, mod);
75840
75841 /* Set RO and NX regions for core */
75842 - set_section_ro_nx(mod->module_core,
75843 - mod->core_text_size,
75844 - mod->core_ro_size,
75845 - mod->core_size);
75846 + set_section_ro_nx(mod->module_core_rx,
75847 + mod->core_size_rx,
75848 + mod->core_size_rx,
75849 + mod->core_size_rx);
75850
75851 /* Set RO and NX regions for init */
75852 - set_section_ro_nx(mod->module_init,
75853 - mod->init_text_size,
75854 - mod->init_ro_size,
75855 - mod->init_size);
75856 + set_section_ro_nx(mod->module_init_rx,
75857 + mod->init_size_rx,
75858 + mod->init_size_rx,
75859 + mod->init_size_rx);
75860
75861 do_mod_ctors(mod);
75862 /* Start the module */
75863 @@ -3126,11 +3235,12 @@ static int do_init_module(struct module *mod)
75864 mod->strtab = mod->core_strtab;
75865 #endif
75866 unset_module_init_ro_nx(mod);
75867 - module_free(mod, mod->module_init);
75868 - mod->module_init = NULL;
75869 - mod->init_size = 0;
75870 - mod->init_ro_size = 0;
75871 - mod->init_text_size = 0;
75872 + module_free(mod, mod->module_init_rw);
75873 + module_free_exec(mod, mod->module_init_rx);
75874 + mod->module_init_rw = NULL;
75875 + mod->module_init_rx = NULL;
75876 + mod->init_size_rw = 0;
75877 + mod->init_size_rx = 0;
75878 mutex_unlock(&module_mutex);
75879 wake_up_all(&module_wq);
75880
75881 @@ -3257,9 +3367,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
75882 if (err)
75883 goto free_unload;
75884
75885 + /* Now copy in args */
75886 + mod->args = strndup_user(uargs, ~0UL >> 1);
75887 + if (IS_ERR(mod->args)) {
75888 + err = PTR_ERR(mod->args);
75889 + goto free_unload;
75890 + }
75891 +
75892 /* Set up MODINFO_ATTR fields */
75893 setup_modinfo(mod, info);
75894
75895 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75896 + {
75897 + char *p, *p2;
75898 +
75899 + if (strstr(mod->args, "grsec_modharden_netdev")) {
75900 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
75901 + err = -EPERM;
75902 + goto free_modinfo;
75903 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
75904 + p += sizeof("grsec_modharden_normal") - 1;
75905 + p2 = strstr(p, "_");
75906 + if (p2) {
75907 + *p2 = '\0';
75908 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
75909 + *p2 = '_';
75910 + }
75911 + err = -EPERM;
75912 + goto free_modinfo;
75913 + }
75914 + }
75915 +#endif
75916 +
75917 /* Fix up syms, so that st_value is a pointer to location. */
75918 err = simplify_symbols(mod, info);
75919 if (err < 0)
75920 @@ -3275,13 +3414,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
75921
75922 flush_module_icache(mod);
75923
75924 - /* Now copy in args */
75925 - mod->args = strndup_user(uargs, ~0UL >> 1);
75926 - if (IS_ERR(mod->args)) {
75927 - err = PTR_ERR(mod->args);
75928 - goto free_arch_cleanup;
75929 - }
75930 -
75931 dynamic_debug_setup(info->debug, info->num_debug);
75932
75933 /* Finally it's fully formed, ready to start executing. */
75934 @@ -3316,11 +3448,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
75935 ddebug_cleanup:
75936 dynamic_debug_remove(info->debug);
75937 synchronize_sched();
75938 - kfree(mod->args);
75939 - free_arch_cleanup:
75940 module_arch_cleanup(mod);
75941 free_modinfo:
75942 free_modinfo(mod);
75943 + kfree(mod->args);
75944 free_unload:
75945 module_unload_free(mod);
75946 unlink_mod:
75947 @@ -3403,10 +3534,16 @@ static const char *get_ksymbol(struct module *mod,
75948 unsigned long nextval;
75949
75950 /* At worse, next value is at end of module */
75951 - if (within_module_init(addr, mod))
75952 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
75953 + if (within_module_init_rx(addr, mod))
75954 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
75955 + else if (within_module_init_rw(addr, mod))
75956 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
75957 + else if (within_module_core_rx(addr, mod))
75958 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
75959 + else if (within_module_core_rw(addr, mod))
75960 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
75961 else
75962 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
75963 + return NULL;
75964
75965 /* Scan for closest preceding symbol, and next symbol. (ELF
75966 starts real symbols at 1). */
75967 @@ -3659,7 +3796,7 @@ static int m_show(struct seq_file *m, void *p)
75968 return 0;
75969
75970 seq_printf(m, "%s %u",
75971 - mod->name, mod->init_size + mod->core_size);
75972 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
75973 print_unload_info(m, mod);
75974
75975 /* Informative for users. */
75976 @@ -3668,7 +3805,7 @@ static int m_show(struct seq_file *m, void *p)
75977 mod->state == MODULE_STATE_COMING ? "Loading":
75978 "Live");
75979 /* Used by oprofile and other similar tools. */
75980 - seq_printf(m, " 0x%pK", mod->module_core);
75981 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
75982
75983 /* Taints info */
75984 if (mod->taints)
75985 @@ -3704,7 +3841,17 @@ static const struct file_operations proc_modules_operations = {
75986
75987 static int __init proc_modules_init(void)
75988 {
75989 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75990 +#ifdef CONFIG_GRKERNSEC_PROC_USER
75991 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
75992 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75993 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
75994 +#else
75995 proc_create("modules", 0, NULL, &proc_modules_operations);
75996 +#endif
75997 +#else
75998 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
75999 +#endif
76000 return 0;
76001 }
76002 module_init(proc_modules_init);
76003 @@ -3765,14 +3912,14 @@ struct module *__module_address(unsigned long addr)
76004 {
76005 struct module *mod;
76006
76007 - if (addr < module_addr_min || addr > module_addr_max)
76008 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
76009 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
76010 return NULL;
76011
76012 list_for_each_entry_rcu(mod, &modules, list) {
76013 if (mod->state == MODULE_STATE_UNFORMED)
76014 continue;
76015 - if (within_module_core(addr, mod)
76016 - || within_module_init(addr, mod))
76017 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
76018 return mod;
76019 }
76020 return NULL;
76021 @@ -3807,11 +3954,20 @@ bool is_module_text_address(unsigned long addr)
76022 */
76023 struct module *__module_text_address(unsigned long addr)
76024 {
76025 - struct module *mod = __module_address(addr);
76026 + struct module *mod;
76027 +
76028 +#ifdef CONFIG_X86_32
76029 + addr = ktla_ktva(addr);
76030 +#endif
76031 +
76032 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
76033 + return NULL;
76034 +
76035 + mod = __module_address(addr);
76036 +
76037 if (mod) {
76038 /* Make sure it's within the text section. */
76039 - if (!within(addr, mod->module_init, mod->init_text_size)
76040 - && !within(addr, mod->module_core, mod->core_text_size))
76041 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
76042 mod = NULL;
76043 }
76044 return mod;
76045 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
76046 index 7e3443f..b2a1e6b 100644
76047 --- a/kernel/mutex-debug.c
76048 +++ b/kernel/mutex-debug.c
76049 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
76050 }
76051
76052 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76053 - struct thread_info *ti)
76054 + struct task_struct *task)
76055 {
76056 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
76057
76058 /* Mark the current thread as blocked on the lock: */
76059 - ti->task->blocked_on = waiter;
76060 + task->blocked_on = waiter;
76061 }
76062
76063 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76064 - struct thread_info *ti)
76065 + struct task_struct *task)
76066 {
76067 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
76068 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
76069 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
76070 - ti->task->blocked_on = NULL;
76071 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
76072 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
76073 + task->blocked_on = NULL;
76074
76075 list_del_init(&waiter->list);
76076 waiter->task = NULL;
76077 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
76078 index 0799fd3..d06ae3b 100644
76079 --- a/kernel/mutex-debug.h
76080 +++ b/kernel/mutex-debug.h
76081 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
76082 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
76083 extern void debug_mutex_add_waiter(struct mutex *lock,
76084 struct mutex_waiter *waiter,
76085 - struct thread_info *ti);
76086 + struct task_struct *task);
76087 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76088 - struct thread_info *ti);
76089 + struct task_struct *task);
76090 extern void debug_mutex_unlock(struct mutex *lock);
76091 extern void debug_mutex_init(struct mutex *lock, const char *name,
76092 struct lock_class_key *key);
76093 diff --git a/kernel/mutex.c b/kernel/mutex.c
76094 index 52f2301..73f7528 100644
76095 --- a/kernel/mutex.c
76096 +++ b/kernel/mutex.c
76097 @@ -199,7 +199,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76098 spin_lock_mutex(&lock->wait_lock, flags);
76099
76100 debug_mutex_lock_common(lock, &waiter);
76101 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
76102 + debug_mutex_add_waiter(lock, &waiter, task);
76103
76104 /* add waiting tasks to the end of the waitqueue (FIFO): */
76105 list_add_tail(&waiter.list, &lock->wait_list);
76106 @@ -228,8 +228,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76107 * TASK_UNINTERRUPTIBLE case.)
76108 */
76109 if (unlikely(signal_pending_state(state, task))) {
76110 - mutex_remove_waiter(lock, &waiter,
76111 - task_thread_info(task));
76112 + mutex_remove_waiter(lock, &waiter, task);
76113 mutex_release(&lock->dep_map, 1, ip);
76114 spin_unlock_mutex(&lock->wait_lock, flags);
76115
76116 @@ -248,7 +247,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76117 done:
76118 lock_acquired(&lock->dep_map, ip);
76119 /* got the lock - rejoice! */
76120 - mutex_remove_waiter(lock, &waiter, current_thread_info());
76121 + mutex_remove_waiter(lock, &waiter, task);
76122 mutex_set_owner(lock);
76123
76124 /* set it to 0 if there are no waiters left: */
76125 diff --git a/kernel/notifier.c b/kernel/notifier.c
76126 index 2d5cc4c..d9ea600 100644
76127 --- a/kernel/notifier.c
76128 +++ b/kernel/notifier.c
76129 @@ -5,6 +5,7 @@
76130 #include <linux/rcupdate.h>
76131 #include <linux/vmalloc.h>
76132 #include <linux/reboot.h>
76133 +#include <linux/mm.h>
76134
76135 /*
76136 * Notifier list for kernel code which wants to be called
76137 @@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
76138 while ((*nl) != NULL) {
76139 if (n->priority > (*nl)->priority)
76140 break;
76141 - nl = &((*nl)->next);
76142 + nl = (struct notifier_block **)&((*nl)->next);
76143 }
76144 - n->next = *nl;
76145 + pax_open_kernel();
76146 + *(const void **)&n->next = *nl;
76147 rcu_assign_pointer(*nl, n);
76148 + pax_close_kernel();
76149 return 0;
76150 }
76151
76152 @@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
76153 return 0;
76154 if (n->priority > (*nl)->priority)
76155 break;
76156 - nl = &((*nl)->next);
76157 + nl = (struct notifier_block **)&((*nl)->next);
76158 }
76159 - n->next = *nl;
76160 + pax_open_kernel();
76161 + *(const void **)&n->next = *nl;
76162 rcu_assign_pointer(*nl, n);
76163 + pax_close_kernel();
76164 return 0;
76165 }
76166
76167 @@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
76168 {
76169 while ((*nl) != NULL) {
76170 if ((*nl) == n) {
76171 + pax_open_kernel();
76172 rcu_assign_pointer(*nl, n->next);
76173 + pax_close_kernel();
76174 return 0;
76175 }
76176 - nl = &((*nl)->next);
76177 + nl = (struct notifier_block **)&((*nl)->next);
76178 }
76179 return -ENOENT;
76180 }
76181 diff --git a/kernel/panic.c b/kernel/panic.c
76182 index 7c57cc9..28f1b3f 100644
76183 --- a/kernel/panic.c
76184 +++ b/kernel/panic.c
76185 @@ -403,7 +403,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
76186 const char *board;
76187
76188 printk(KERN_WARNING "------------[ cut here ]------------\n");
76189 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
76190 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
76191 board = dmi_get_system_info(DMI_PRODUCT_NAME);
76192 if (board)
76193 printk(KERN_WARNING "Hardware name: %s\n", board);
76194 @@ -459,7 +459,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
76195 */
76196 void __stack_chk_fail(void)
76197 {
76198 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
76199 + dump_stack();
76200 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
76201 __builtin_return_address(0));
76202 }
76203 EXPORT_SYMBOL(__stack_chk_fail);
76204 diff --git a/kernel/pid.c b/kernel/pid.c
76205 index 047dc62..418d74b 100644
76206 --- a/kernel/pid.c
76207 +++ b/kernel/pid.c
76208 @@ -33,6 +33,7 @@
76209 #include <linux/rculist.h>
76210 #include <linux/bootmem.h>
76211 #include <linux/hash.h>
76212 +#include <linux/security.h>
76213 #include <linux/pid_namespace.h>
76214 #include <linux/init_task.h>
76215 #include <linux/syscalls.h>
76216 @@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
76217
76218 int pid_max = PID_MAX_DEFAULT;
76219
76220 -#define RESERVED_PIDS 300
76221 +#define RESERVED_PIDS 500
76222
76223 int pid_max_min = RESERVED_PIDS + 1;
76224 int pid_max_max = PID_MAX_LIMIT;
76225 @@ -440,10 +441,18 @@ EXPORT_SYMBOL(pid_task);
76226 */
76227 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
76228 {
76229 + struct task_struct *task;
76230 +
76231 rcu_lockdep_assert(rcu_read_lock_held(),
76232 "find_task_by_pid_ns() needs rcu_read_lock()"
76233 " protection");
76234 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76235 +
76236 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76237 +
76238 + if (gr_pid_is_chrooted(task))
76239 + return NULL;
76240 +
76241 + return task;
76242 }
76243
76244 struct task_struct *find_task_by_vpid(pid_t vnr)
76245 @@ -451,6 +460,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
76246 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
76247 }
76248
76249 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
76250 +{
76251 + rcu_lockdep_assert(rcu_read_lock_held(),
76252 + "find_task_by_pid_ns() needs rcu_read_lock()"
76253 + " protection");
76254 + return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
76255 +}
76256 +
76257 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
76258 {
76259 struct pid *pid;
76260 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
76261 index bea15bd..789f3d0 100644
76262 --- a/kernel/pid_namespace.c
76263 +++ b/kernel/pid_namespace.c
76264 @@ -249,7 +249,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
76265 void __user *buffer, size_t *lenp, loff_t *ppos)
76266 {
76267 struct pid_namespace *pid_ns = task_active_pid_ns(current);
76268 - struct ctl_table tmp = *table;
76269 + ctl_table_no_const tmp = *table;
76270
76271 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
76272 return -EPERM;
76273 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
76274 index 8fd709c..542bf4b 100644
76275 --- a/kernel/posix-cpu-timers.c
76276 +++ b/kernel/posix-cpu-timers.c
76277 @@ -1592,14 +1592,14 @@ struct k_clock clock_posix_cpu = {
76278
76279 static __init int init_posix_cpu_timers(void)
76280 {
76281 - struct k_clock process = {
76282 + static struct k_clock process = {
76283 .clock_getres = process_cpu_clock_getres,
76284 .clock_get = process_cpu_clock_get,
76285 .timer_create = process_cpu_timer_create,
76286 .nsleep = process_cpu_nsleep,
76287 .nsleep_restart = process_cpu_nsleep_restart,
76288 };
76289 - struct k_clock thread = {
76290 + static struct k_clock thread = {
76291 .clock_getres = thread_cpu_clock_getres,
76292 .clock_get = thread_cpu_clock_get,
76293 .timer_create = thread_cpu_timer_create,
76294 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
76295 index 6edbb2c..334f085 100644
76296 --- a/kernel/posix-timers.c
76297 +++ b/kernel/posix-timers.c
76298 @@ -43,6 +43,7 @@
76299 #include <linux/idr.h>
76300 #include <linux/posix-clock.h>
76301 #include <linux/posix-timers.h>
76302 +#include <linux/grsecurity.h>
76303 #include <linux/syscalls.h>
76304 #include <linux/wait.h>
76305 #include <linux/workqueue.h>
76306 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
76307 * which we beg off on and pass to do_sys_settimeofday().
76308 */
76309
76310 -static struct k_clock posix_clocks[MAX_CLOCKS];
76311 +static struct k_clock *posix_clocks[MAX_CLOCKS];
76312
76313 /*
76314 * These ones are defined below.
76315 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
76316 */
76317 static __init int init_posix_timers(void)
76318 {
76319 - struct k_clock clock_realtime = {
76320 + static struct k_clock clock_realtime = {
76321 .clock_getres = hrtimer_get_res,
76322 .clock_get = posix_clock_realtime_get,
76323 .clock_set = posix_clock_realtime_set,
76324 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
76325 .timer_get = common_timer_get,
76326 .timer_del = common_timer_del,
76327 };
76328 - struct k_clock clock_monotonic = {
76329 + static struct k_clock clock_monotonic = {
76330 .clock_getres = hrtimer_get_res,
76331 .clock_get = posix_ktime_get_ts,
76332 .nsleep = common_nsleep,
76333 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
76334 .timer_get = common_timer_get,
76335 .timer_del = common_timer_del,
76336 };
76337 - struct k_clock clock_monotonic_raw = {
76338 + static struct k_clock clock_monotonic_raw = {
76339 .clock_getres = hrtimer_get_res,
76340 .clock_get = posix_get_monotonic_raw,
76341 };
76342 - struct k_clock clock_realtime_coarse = {
76343 + static struct k_clock clock_realtime_coarse = {
76344 .clock_getres = posix_get_coarse_res,
76345 .clock_get = posix_get_realtime_coarse,
76346 };
76347 - struct k_clock clock_monotonic_coarse = {
76348 + static struct k_clock clock_monotonic_coarse = {
76349 .clock_getres = posix_get_coarse_res,
76350 .clock_get = posix_get_monotonic_coarse,
76351 };
76352 - struct k_clock clock_boottime = {
76353 + static struct k_clock clock_boottime = {
76354 .clock_getres = hrtimer_get_res,
76355 .clock_get = posix_get_boottime,
76356 .nsleep = common_nsleep,
76357 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
76358 return;
76359 }
76360
76361 - posix_clocks[clock_id] = *new_clock;
76362 + posix_clocks[clock_id] = new_clock;
76363 }
76364 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
76365
76366 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
76367 return (id & CLOCKFD_MASK) == CLOCKFD ?
76368 &clock_posix_dynamic : &clock_posix_cpu;
76369
76370 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
76371 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
76372 return NULL;
76373 - return &posix_clocks[id];
76374 + return posix_clocks[id];
76375 }
76376
76377 static int common_timer_create(struct k_itimer *new_timer)
76378 @@ -964,6 +965,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
76379 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
76380 return -EFAULT;
76381
76382 + /* only the CLOCK_REALTIME clock can be set, all other clocks
76383 + have their clock_set fptr set to a nosettime dummy function
76384 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
76385 + call common_clock_set, which calls do_sys_settimeofday, which
76386 + we hook
76387 + */
76388 +
76389 return kc->clock_set(which_clock, &new_tp);
76390 }
76391
76392 diff --git a/kernel/power/process.c b/kernel/power/process.c
76393 index 98088e0..aaf95c0 100644
76394 --- a/kernel/power/process.c
76395 +++ b/kernel/power/process.c
76396 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
76397 u64 elapsed_csecs64;
76398 unsigned int elapsed_csecs;
76399 bool wakeup = false;
76400 + bool timedout = false;
76401
76402 do_gettimeofday(&start);
76403
76404 @@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
76405
76406 while (true) {
76407 todo = 0;
76408 + if (time_after(jiffies, end_time))
76409 + timedout = true;
76410 read_lock(&tasklist_lock);
76411 do_each_thread(g, p) {
76412 if (p == current || !freeze_task(p))
76413 continue;
76414
76415 - if (!freezer_should_skip(p))
76416 + if (!freezer_should_skip(p)) {
76417 todo++;
76418 + if (timedout) {
76419 + printk(KERN_ERR "Task refusing to freeze:\n");
76420 + sched_show_task(p);
76421 + }
76422 + }
76423 } while_each_thread(g, p);
76424 read_unlock(&tasklist_lock);
76425
76426 @@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
76427 todo += wq_busy;
76428 }
76429
76430 - if (!todo || time_after(jiffies, end_time))
76431 + if (!todo || timedout)
76432 break;
76433
76434 if (pm_wakeup_pending()) {
76435 diff --git a/kernel/printk.c b/kernel/printk.c
76436 index abbdd9e..f294251 100644
76437 --- a/kernel/printk.c
76438 +++ b/kernel/printk.c
76439 @@ -615,11 +615,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
76440 return ret;
76441 }
76442
76443 +static int check_syslog_permissions(int type, bool from_file);
76444 +
76445 static int devkmsg_open(struct inode *inode, struct file *file)
76446 {
76447 struct devkmsg_user *user;
76448 int err;
76449
76450 + err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
76451 + if (err)
76452 + return err;
76453 +
76454 /* write-only does not need any file context */
76455 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
76456 return 0;
76457 @@ -828,7 +834,7 @@ static int syslog_action_restricted(int type)
76458 if (dmesg_restrict)
76459 return 1;
76460 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
76461 - return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76462 + return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76463 }
76464
76465 static int check_syslog_permissions(int type, bool from_file)
76466 @@ -840,6 +846,11 @@ static int check_syslog_permissions(int type, bool from_file)
76467 if (from_file && type != SYSLOG_ACTION_OPEN)
76468 return 0;
76469
76470 +#ifdef CONFIG_GRKERNSEC_DMESG
76471 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
76472 + return -EPERM;
76473 +#endif
76474 +
76475 if (syslog_action_restricted(type)) {
76476 if (capable(CAP_SYSLOG))
76477 return 0;
76478 diff --git a/kernel/profile.c b/kernel/profile.c
76479 index dc3384e..0de5b49 100644
76480 --- a/kernel/profile.c
76481 +++ b/kernel/profile.c
76482 @@ -37,7 +37,7 @@ struct profile_hit {
76483 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
76484 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
76485
76486 -static atomic_t *prof_buffer;
76487 +static atomic_unchecked_t *prof_buffer;
76488 static unsigned long prof_len, prof_shift;
76489
76490 int prof_on __read_mostly;
76491 @@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
76492 hits[i].pc = 0;
76493 continue;
76494 }
76495 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76496 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76497 hits[i].hits = hits[i].pc = 0;
76498 }
76499 }
76500 @@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76501 * Add the current hit(s) and flush the write-queue out
76502 * to the global buffer:
76503 */
76504 - atomic_add(nr_hits, &prof_buffer[pc]);
76505 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
76506 for (i = 0; i < NR_PROFILE_HIT; ++i) {
76507 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76508 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76509 hits[i].pc = hits[i].hits = 0;
76510 }
76511 out:
76512 @@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76513 {
76514 unsigned long pc;
76515 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
76516 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
76517 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
76518 }
76519 #endif /* !CONFIG_SMP */
76520
76521 @@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
76522 return -EFAULT;
76523 buf++; p++; count--; read++;
76524 }
76525 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
76526 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
76527 if (copy_to_user(buf, (void *)pnt, count))
76528 return -EFAULT;
76529 read += count;
76530 @@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
76531 }
76532 #endif
76533 profile_discard_flip_buffers();
76534 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
76535 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
76536 return count;
76537 }
76538
76539 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
76540 index acbd284..00bb0c9 100644
76541 --- a/kernel/ptrace.c
76542 +++ b/kernel/ptrace.c
76543 @@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
76544 if (seize)
76545 flags |= PT_SEIZED;
76546 rcu_read_lock();
76547 - if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
76548 + if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
76549 flags |= PT_PTRACE_CAP;
76550 rcu_read_unlock();
76551 task->ptrace = flags;
76552 @@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
76553 break;
76554 return -EIO;
76555 }
76556 - if (copy_to_user(dst, buf, retval))
76557 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
76558 return -EFAULT;
76559 copied += retval;
76560 src += retval;
76561 @@ -726,7 +726,7 @@ int ptrace_request(struct task_struct *child, long request,
76562 bool seized = child->ptrace & PT_SEIZED;
76563 int ret = -EIO;
76564 siginfo_t siginfo, *si;
76565 - void __user *datavp = (void __user *) data;
76566 + void __user *datavp = (__force void __user *) data;
76567 unsigned long __user *datalp = datavp;
76568 unsigned long flags;
76569
76570 @@ -928,14 +928,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
76571 goto out;
76572 }
76573
76574 + if (gr_handle_ptrace(child, request)) {
76575 + ret = -EPERM;
76576 + goto out_put_task_struct;
76577 + }
76578 +
76579 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
76580 ret = ptrace_attach(child, request, addr, data);
76581 /*
76582 * Some architectures need to do book-keeping after
76583 * a ptrace attach.
76584 */
76585 - if (!ret)
76586 + if (!ret) {
76587 arch_ptrace_attach(child);
76588 + gr_audit_ptrace(child);
76589 + }
76590 goto out_put_task_struct;
76591 }
76592
76593 @@ -963,7 +970,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
76594 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
76595 if (copied != sizeof(tmp))
76596 return -EIO;
76597 - return put_user(tmp, (unsigned long __user *)data);
76598 + return put_user(tmp, (__force unsigned long __user *)data);
76599 }
76600
76601 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
76602 @@ -1057,7 +1064,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
76603 }
76604
76605 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76606 - compat_long_t addr, compat_long_t data)
76607 + compat_ulong_t addr, compat_ulong_t data)
76608 {
76609 struct task_struct *child;
76610 long ret;
76611 @@ -1073,14 +1080,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76612 goto out;
76613 }
76614
76615 + if (gr_handle_ptrace(child, request)) {
76616 + ret = -EPERM;
76617 + goto out_put_task_struct;
76618 + }
76619 +
76620 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
76621 ret = ptrace_attach(child, request, addr, data);
76622 /*
76623 * Some architectures need to do book-keeping after
76624 * a ptrace attach.
76625 */
76626 - if (!ret)
76627 + if (!ret) {
76628 arch_ptrace_attach(child);
76629 + gr_audit_ptrace(child);
76630 + }
76631 goto out_put_task_struct;
76632 }
76633
76634 diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
76635 index 48ab703..07561d4 100644
76636 --- a/kernel/rcupdate.c
76637 +++ b/kernel/rcupdate.c
76638 @@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
76639 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
76640 */
76641 if (till_stall_check < 3) {
76642 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
76643 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
76644 till_stall_check = 3;
76645 } else if (till_stall_check > 300) {
76646 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
76647 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
76648 till_stall_check = 300;
76649 }
76650 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
76651 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
76652 index a0714a5..2ab5e34 100644
76653 --- a/kernel/rcutiny.c
76654 +++ b/kernel/rcutiny.c
76655 @@ -46,7 +46,7 @@
76656 struct rcu_ctrlblk;
76657 static void invoke_rcu_callbacks(void);
76658 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
76659 -static void rcu_process_callbacks(struct softirq_action *unused);
76660 +static void rcu_process_callbacks(void);
76661 static void __call_rcu(struct rcu_head *head,
76662 void (*func)(struct rcu_head *rcu),
76663 struct rcu_ctrlblk *rcp);
76664 @@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
76665 rcu_is_callbacks_kthread()));
76666 }
76667
76668 -static void rcu_process_callbacks(struct softirq_action *unused)
76669 +static void rcu_process_callbacks(void)
76670 {
76671 __rcu_process_callbacks(&rcu_sched_ctrlblk);
76672 __rcu_process_callbacks(&rcu_bh_ctrlblk);
76673 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
76674 index 8a23300..4255818 100644
76675 --- a/kernel/rcutiny_plugin.h
76676 +++ b/kernel/rcutiny_plugin.h
76677 @@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
76678 have_rcu_kthread_work = morework;
76679 local_irq_restore(flags);
76680 if (work)
76681 - rcu_process_callbacks(NULL);
76682 + rcu_process_callbacks();
76683 schedule_timeout_interruptible(1); /* Leave CPU for others. */
76684 }
76685
76686 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
76687 index e1f3a8c..42c94a2 100644
76688 --- a/kernel/rcutorture.c
76689 +++ b/kernel/rcutorture.c
76690 @@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
76691 { 0 };
76692 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
76693 { 0 };
76694 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
76695 -static atomic_t n_rcu_torture_alloc;
76696 -static atomic_t n_rcu_torture_alloc_fail;
76697 -static atomic_t n_rcu_torture_free;
76698 -static atomic_t n_rcu_torture_mberror;
76699 -static atomic_t n_rcu_torture_error;
76700 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
76701 +static atomic_unchecked_t n_rcu_torture_alloc;
76702 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
76703 +static atomic_unchecked_t n_rcu_torture_free;
76704 +static atomic_unchecked_t n_rcu_torture_mberror;
76705 +static atomic_unchecked_t n_rcu_torture_error;
76706 static long n_rcu_torture_barrier_error;
76707 static long n_rcu_torture_boost_ktrerror;
76708 static long n_rcu_torture_boost_rterror;
76709 @@ -287,11 +287,11 @@ rcu_torture_alloc(void)
76710
76711 spin_lock_bh(&rcu_torture_lock);
76712 if (list_empty(&rcu_torture_freelist)) {
76713 - atomic_inc(&n_rcu_torture_alloc_fail);
76714 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
76715 spin_unlock_bh(&rcu_torture_lock);
76716 return NULL;
76717 }
76718 - atomic_inc(&n_rcu_torture_alloc);
76719 + atomic_inc_unchecked(&n_rcu_torture_alloc);
76720 p = rcu_torture_freelist.next;
76721 list_del_init(p);
76722 spin_unlock_bh(&rcu_torture_lock);
76723 @@ -304,7 +304,7 @@ rcu_torture_alloc(void)
76724 static void
76725 rcu_torture_free(struct rcu_torture *p)
76726 {
76727 - atomic_inc(&n_rcu_torture_free);
76728 + atomic_inc_unchecked(&n_rcu_torture_free);
76729 spin_lock_bh(&rcu_torture_lock);
76730 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
76731 spin_unlock_bh(&rcu_torture_lock);
76732 @@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
76733 i = rp->rtort_pipe_count;
76734 if (i > RCU_TORTURE_PIPE_LEN)
76735 i = RCU_TORTURE_PIPE_LEN;
76736 - atomic_inc(&rcu_torture_wcount[i]);
76737 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
76738 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
76739 rp->rtort_mbtest = 0;
76740 rcu_torture_free(rp);
76741 @@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
76742 i = rp->rtort_pipe_count;
76743 if (i > RCU_TORTURE_PIPE_LEN)
76744 i = RCU_TORTURE_PIPE_LEN;
76745 - atomic_inc(&rcu_torture_wcount[i]);
76746 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
76747 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
76748 rp->rtort_mbtest = 0;
76749 list_del(&rp->rtort_free);
76750 @@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
76751 i = old_rp->rtort_pipe_count;
76752 if (i > RCU_TORTURE_PIPE_LEN)
76753 i = RCU_TORTURE_PIPE_LEN;
76754 - atomic_inc(&rcu_torture_wcount[i]);
76755 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
76756 old_rp->rtort_pipe_count++;
76757 cur_ops->deferred_free(old_rp);
76758 }
76759 @@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
76760 return;
76761 }
76762 if (p->rtort_mbtest == 0)
76763 - atomic_inc(&n_rcu_torture_mberror);
76764 + atomic_inc_unchecked(&n_rcu_torture_mberror);
76765 spin_lock(&rand_lock);
76766 cur_ops->read_delay(&rand);
76767 n_rcu_torture_timers++;
76768 @@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
76769 continue;
76770 }
76771 if (p->rtort_mbtest == 0)
76772 - atomic_inc(&n_rcu_torture_mberror);
76773 + atomic_inc_unchecked(&n_rcu_torture_mberror);
76774 cur_ops->read_delay(&rand);
76775 preempt_disable();
76776 pipe_count = p->rtort_pipe_count;
76777 @@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
76778 rcu_torture_current,
76779 rcu_torture_current_version,
76780 list_empty(&rcu_torture_freelist),
76781 - atomic_read(&n_rcu_torture_alloc),
76782 - atomic_read(&n_rcu_torture_alloc_fail),
76783 - atomic_read(&n_rcu_torture_free));
76784 + atomic_read_unchecked(&n_rcu_torture_alloc),
76785 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
76786 + atomic_read_unchecked(&n_rcu_torture_free));
76787 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
76788 - atomic_read(&n_rcu_torture_mberror),
76789 + atomic_read_unchecked(&n_rcu_torture_mberror),
76790 n_rcu_torture_boost_ktrerror,
76791 n_rcu_torture_boost_rterror);
76792 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
76793 @@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
76794 n_barrier_attempts,
76795 n_rcu_torture_barrier_error);
76796 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
76797 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
76798 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
76799 n_rcu_torture_barrier_error != 0 ||
76800 n_rcu_torture_boost_ktrerror != 0 ||
76801 n_rcu_torture_boost_rterror != 0 ||
76802 n_rcu_torture_boost_failure != 0 ||
76803 i > 1) {
76804 cnt += sprintf(&page[cnt], "!!! ");
76805 - atomic_inc(&n_rcu_torture_error);
76806 + atomic_inc_unchecked(&n_rcu_torture_error);
76807 WARN_ON_ONCE(1);
76808 }
76809 cnt += sprintf(&page[cnt], "Reader Pipe: ");
76810 @@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
76811 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
76812 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
76813 cnt += sprintf(&page[cnt], " %d",
76814 - atomic_read(&rcu_torture_wcount[i]));
76815 + atomic_read_unchecked(&rcu_torture_wcount[i]));
76816 }
76817 cnt += sprintf(&page[cnt], "\n");
76818 if (cur_ops->stats)
76819 @@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
76820
76821 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
76822
76823 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
76824 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
76825 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
76826 else if (n_online_successes != n_online_attempts ||
76827 n_offline_successes != n_offline_attempts)
76828 @@ -2031,18 +2031,18 @@ rcu_torture_init(void)
76829
76830 rcu_torture_current = NULL;
76831 rcu_torture_current_version = 0;
76832 - atomic_set(&n_rcu_torture_alloc, 0);
76833 - atomic_set(&n_rcu_torture_alloc_fail, 0);
76834 - atomic_set(&n_rcu_torture_free, 0);
76835 - atomic_set(&n_rcu_torture_mberror, 0);
76836 - atomic_set(&n_rcu_torture_error, 0);
76837 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
76838 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
76839 + atomic_set_unchecked(&n_rcu_torture_free, 0);
76840 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
76841 + atomic_set_unchecked(&n_rcu_torture_error, 0);
76842 n_rcu_torture_barrier_error = 0;
76843 n_rcu_torture_boost_ktrerror = 0;
76844 n_rcu_torture_boost_rterror = 0;
76845 n_rcu_torture_boost_failure = 0;
76846 n_rcu_torture_boosts = 0;
76847 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
76848 - atomic_set(&rcu_torture_wcount[i], 0);
76849 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
76850 for_each_possible_cpu(cpu) {
76851 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
76852 per_cpu(rcu_torture_count, cpu)[i] = 0;
76853 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
76854 index 5b8ad82..17274d1 100644
76855 --- a/kernel/rcutree.c
76856 +++ b/kernel/rcutree.c
76857 @@ -353,9 +353,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
76858 rcu_prepare_for_idle(smp_processor_id());
76859 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
76860 smp_mb__before_atomic_inc(); /* See above. */
76861 - atomic_inc(&rdtp->dynticks);
76862 + atomic_inc_unchecked(&rdtp->dynticks);
76863 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
76864 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
76865 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
76866
76867 /*
76868 * It is illegal to enter an extended quiescent state while
76869 @@ -491,10 +491,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
76870 int user)
76871 {
76872 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
76873 - atomic_inc(&rdtp->dynticks);
76874 + atomic_inc_unchecked(&rdtp->dynticks);
76875 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
76876 smp_mb__after_atomic_inc(); /* See above. */
76877 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
76878 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
76879 rcu_cleanup_after_idle(smp_processor_id());
76880 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
76881 if (!user && !is_idle_task(current)) {
76882 @@ -633,14 +633,14 @@ void rcu_nmi_enter(void)
76883 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
76884
76885 if (rdtp->dynticks_nmi_nesting == 0 &&
76886 - (atomic_read(&rdtp->dynticks) & 0x1))
76887 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
76888 return;
76889 rdtp->dynticks_nmi_nesting++;
76890 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
76891 - atomic_inc(&rdtp->dynticks);
76892 + atomic_inc_unchecked(&rdtp->dynticks);
76893 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
76894 smp_mb__after_atomic_inc(); /* See above. */
76895 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
76896 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
76897 }
76898
76899 /**
76900 @@ -659,9 +659,9 @@ void rcu_nmi_exit(void)
76901 return;
76902 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
76903 smp_mb__before_atomic_inc(); /* See above. */
76904 - atomic_inc(&rdtp->dynticks);
76905 + atomic_inc_unchecked(&rdtp->dynticks);
76906 smp_mb__after_atomic_inc(); /* Force delay to next write. */
76907 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
76908 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
76909 }
76910
76911 /**
76912 @@ -675,7 +675,7 @@ int rcu_is_cpu_idle(void)
76913 int ret;
76914
76915 preempt_disable();
76916 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
76917 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
76918 preempt_enable();
76919 return ret;
76920 }
76921 @@ -743,7 +743,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
76922 */
76923 static int dyntick_save_progress_counter(struct rcu_data *rdp)
76924 {
76925 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
76926 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
76927 return (rdp->dynticks_snap & 0x1) == 0;
76928 }
76929
76930 @@ -758,7 +758,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
76931 unsigned int curr;
76932 unsigned int snap;
76933
76934 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
76935 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
76936 snap = (unsigned int)rdp->dynticks_snap;
76937
76938 /*
76939 @@ -1698,7 +1698,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
76940 rsp->qlen += rdp->qlen;
76941 rdp->n_cbs_orphaned += rdp->qlen;
76942 rdp->qlen_lazy = 0;
76943 - ACCESS_ONCE(rdp->qlen) = 0;
76944 + ACCESS_ONCE_RW(rdp->qlen) = 0;
76945 }
76946
76947 /*
76948 @@ -1944,7 +1944,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
76949 }
76950 smp_mb(); /* List handling before counting for rcu_barrier(). */
76951 rdp->qlen_lazy -= count_lazy;
76952 - ACCESS_ONCE(rdp->qlen) -= count;
76953 + ACCESS_ONCE_RW(rdp->qlen) -= count;
76954 rdp->n_cbs_invoked += count;
76955
76956 /* Reinstate batch limit if we have worked down the excess. */
76957 @@ -2137,7 +2137,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
76958 /*
76959 * Do RCU core processing for the current CPU.
76960 */
76961 -static void rcu_process_callbacks(struct softirq_action *unused)
76962 +static void rcu_process_callbacks(void)
76963 {
76964 struct rcu_state *rsp;
76965
76966 @@ -2260,7 +2260,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
76967 local_irq_restore(flags);
76968 return;
76969 }
76970 - ACCESS_ONCE(rdp->qlen)++;
76971 + ACCESS_ONCE_RW(rdp->qlen)++;
76972 if (lazy)
76973 rdp->qlen_lazy++;
76974 else
76975 @@ -2469,11 +2469,11 @@ void synchronize_sched_expedited(void)
76976 * counter wrap on a 32-bit system. Quite a few more CPUs would of
76977 * course be required on a 64-bit system.
76978 */
76979 - if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
76980 + if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
76981 (ulong)atomic_long_read(&rsp->expedited_done) +
76982 ULONG_MAX / 8)) {
76983 synchronize_sched();
76984 - atomic_long_inc(&rsp->expedited_wrap);
76985 + atomic_long_inc_unchecked(&rsp->expedited_wrap);
76986 return;
76987 }
76988
76989 @@ -2481,7 +2481,7 @@ void synchronize_sched_expedited(void)
76990 * Take a ticket. Note that atomic_inc_return() implies a
76991 * full memory barrier.
76992 */
76993 - snap = atomic_long_inc_return(&rsp->expedited_start);
76994 + snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
76995 firstsnap = snap;
76996 get_online_cpus();
76997 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
76998 @@ -2494,14 +2494,14 @@ void synchronize_sched_expedited(void)
76999 synchronize_sched_expedited_cpu_stop,
77000 NULL) == -EAGAIN) {
77001 put_online_cpus();
77002 - atomic_long_inc(&rsp->expedited_tryfail);
77003 + atomic_long_inc_unchecked(&rsp->expedited_tryfail);
77004
77005 /* Check to see if someone else did our work for us. */
77006 s = atomic_long_read(&rsp->expedited_done);
77007 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77008 /* ensure test happens before caller kfree */
77009 smp_mb__before_atomic_inc(); /* ^^^ */
77010 - atomic_long_inc(&rsp->expedited_workdone1);
77011 + atomic_long_inc_unchecked(&rsp->expedited_workdone1);
77012 return;
77013 }
77014
77015 @@ -2510,7 +2510,7 @@ void synchronize_sched_expedited(void)
77016 udelay(trycount * num_online_cpus());
77017 } else {
77018 wait_rcu_gp(call_rcu_sched);
77019 - atomic_long_inc(&rsp->expedited_normal);
77020 + atomic_long_inc_unchecked(&rsp->expedited_normal);
77021 return;
77022 }
77023
77024 @@ -2519,7 +2519,7 @@ void synchronize_sched_expedited(void)
77025 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77026 /* ensure test happens before caller kfree */
77027 smp_mb__before_atomic_inc(); /* ^^^ */
77028 - atomic_long_inc(&rsp->expedited_workdone2);
77029 + atomic_long_inc_unchecked(&rsp->expedited_workdone2);
77030 return;
77031 }
77032
77033 @@ -2531,10 +2531,10 @@ void synchronize_sched_expedited(void)
77034 * period works for us.
77035 */
77036 get_online_cpus();
77037 - snap = atomic_long_read(&rsp->expedited_start);
77038 + snap = atomic_long_read_unchecked(&rsp->expedited_start);
77039 smp_mb(); /* ensure read is before try_stop_cpus(). */
77040 }
77041 - atomic_long_inc(&rsp->expedited_stoppedcpus);
77042 + atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
77043
77044 /*
77045 * Everyone up to our most recent fetch is covered by our grace
77046 @@ -2543,16 +2543,16 @@ void synchronize_sched_expedited(void)
77047 * than we did already did their update.
77048 */
77049 do {
77050 - atomic_long_inc(&rsp->expedited_done_tries);
77051 + atomic_long_inc_unchecked(&rsp->expedited_done_tries);
77052 s = atomic_long_read(&rsp->expedited_done);
77053 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
77054 /* ensure test happens before caller kfree */
77055 smp_mb__before_atomic_inc(); /* ^^^ */
77056 - atomic_long_inc(&rsp->expedited_done_lost);
77057 + atomic_long_inc_unchecked(&rsp->expedited_done_lost);
77058 break;
77059 }
77060 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
77061 - atomic_long_inc(&rsp->expedited_done_exit);
77062 + atomic_long_inc_unchecked(&rsp->expedited_done_exit);
77063
77064 put_online_cpus();
77065 }
77066 @@ -2726,7 +2726,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77067 * ACCESS_ONCE() to prevent the compiler from speculating
77068 * the increment to precede the early-exit check.
77069 */
77070 - ACCESS_ONCE(rsp->n_barrier_done)++;
77071 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77072 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
77073 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
77074 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
77075 @@ -2776,7 +2776,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77076
77077 /* Increment ->n_barrier_done to prevent duplicate work. */
77078 smp_mb(); /* Keep increment after above mechanism. */
77079 - ACCESS_ONCE(rsp->n_barrier_done)++;
77080 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77081 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
77082 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
77083 smp_mb(); /* Keep increment before caller's subsequent code. */
77084 @@ -2821,10 +2821,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
77085 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
77086 init_callback_list(rdp);
77087 rdp->qlen_lazy = 0;
77088 - ACCESS_ONCE(rdp->qlen) = 0;
77089 + ACCESS_ONCE_RW(rdp->qlen) = 0;
77090 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
77091 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
77092 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
77093 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
77094 rdp->cpu = cpu;
77095 rdp->rsp = rsp;
77096 rcu_boot_init_nocb_percpu_data(rdp);
77097 @@ -2857,8 +2857,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
77098 rdp->blimit = blimit;
77099 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
77100 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
77101 - atomic_set(&rdp->dynticks->dynticks,
77102 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
77103 + atomic_set_unchecked(&rdp->dynticks->dynticks,
77104 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
77105 rcu_prepare_for_idle_init(cpu);
77106 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77107
77108 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
77109 index c896b50..c357252 100644
77110 --- a/kernel/rcutree.h
77111 +++ b/kernel/rcutree.h
77112 @@ -86,7 +86,7 @@ struct rcu_dynticks {
77113 long long dynticks_nesting; /* Track irq/process nesting level. */
77114 /* Process level is worth LLONG_MAX/2. */
77115 int dynticks_nmi_nesting; /* Track NMI nesting level. */
77116 - atomic_t dynticks; /* Even value for idle, else odd. */
77117 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
77118 #ifdef CONFIG_RCU_FAST_NO_HZ
77119 int dyntick_drain; /* Prepare-for-idle state variable. */
77120 unsigned long dyntick_holdoff;
77121 @@ -416,17 +416,17 @@ struct rcu_state {
77122 /* _rcu_barrier(). */
77123 /* End of fields guarded by barrier_mutex. */
77124
77125 - atomic_long_t expedited_start; /* Starting ticket. */
77126 - atomic_long_t expedited_done; /* Done ticket. */
77127 - atomic_long_t expedited_wrap; /* # near-wrap incidents. */
77128 - atomic_long_t expedited_tryfail; /* # acquisition failures. */
77129 - atomic_long_t expedited_workdone1; /* # done by others #1. */
77130 - atomic_long_t expedited_workdone2; /* # done by others #2. */
77131 - atomic_long_t expedited_normal; /* # fallbacks to normal. */
77132 - atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
77133 - atomic_long_t expedited_done_tries; /* # tries to update _done. */
77134 - atomic_long_t expedited_done_lost; /* # times beaten to _done. */
77135 - atomic_long_t expedited_done_exit; /* # times exited _done loop. */
77136 + atomic_long_unchecked_t expedited_start; /* Starting ticket. */
77137 + atomic_long_t expedited_done; /* Done ticket. */
77138 + atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
77139 + atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
77140 + atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
77141 + atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
77142 + atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
77143 + atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
77144 + atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
77145 + atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
77146 + atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
77147
77148 unsigned long jiffies_force_qs; /* Time at which to invoke */
77149 /* force_quiescent_state(). */
77150 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
77151 index c1cc7e1..f62e436 100644
77152 --- a/kernel/rcutree_plugin.h
77153 +++ b/kernel/rcutree_plugin.h
77154 @@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
77155
77156 /* Clean up and exit. */
77157 smp_mb(); /* ensure expedited GP seen before counter increment. */
77158 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
77159 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
77160 unlock_mb_ret:
77161 mutex_unlock(&sync_rcu_preempt_exp_mutex);
77162 mb_ret:
77163 @@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
77164 free_cpumask_var(cm);
77165 }
77166
77167 -static struct smp_hotplug_thread rcu_cpu_thread_spec = {
77168 +static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
77169 .store = &rcu_cpu_kthread_task,
77170 .thread_should_run = rcu_cpu_kthread_should_run,
77171 .thread_fn = rcu_cpu_kthread,
77172 @@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
77173 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
77174 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
77175 cpu, ticks_value, ticks_title,
77176 - atomic_read(&rdtp->dynticks) & 0xfff,
77177 + atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
77178 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
77179 fast_no_hz);
77180 }
77181 @@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
77182
77183 /* Enqueue the callback on the nocb list and update counts. */
77184 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
77185 - ACCESS_ONCE(*old_rhpp) = rhp;
77186 + ACCESS_ONCE_RW(*old_rhpp) = rhp;
77187 atomic_long_add(rhcount, &rdp->nocb_q_count);
77188 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
77189
77190 @@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
77191 * Extract queued callbacks, update counts, and wait
77192 * for a grace period to elapse.
77193 */
77194 - ACCESS_ONCE(rdp->nocb_head) = NULL;
77195 + ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
77196 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
77197 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
77198 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
77199 - ACCESS_ONCE(rdp->nocb_p_count) += c;
77200 - ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
77201 + ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
77202 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
77203 wait_rcu_gp(rdp->rsp->call_remote);
77204
77205 /* Each pass through the following loop invokes a callback. */
77206 @@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
77207 list = next;
77208 }
77209 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
77210 - ACCESS_ONCE(rdp->nocb_p_count) -= c;
77211 - ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
77212 + ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
77213 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
77214 rdp->n_nocbs_invoked += c;
77215 }
77216 return 0;
77217 @@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
77218 rdp = per_cpu_ptr(rsp->rda, cpu);
77219 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
77220 BUG_ON(IS_ERR(t));
77221 - ACCESS_ONCE(rdp->nocb_kthread) = t;
77222 + ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
77223 }
77224 }
77225
77226 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
77227 index 93f8e8f..cf812ae 100644
77228 --- a/kernel/rcutree_trace.c
77229 +++ b/kernel/rcutree_trace.c
77230 @@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
77231 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
77232 rdp->passed_quiesce, rdp->qs_pending);
77233 seq_printf(m, " dt=%d/%llx/%d df=%lu",
77234 - atomic_read(&rdp->dynticks->dynticks),
77235 + atomic_read_unchecked(&rdp->dynticks->dynticks),
77236 rdp->dynticks->dynticks_nesting,
77237 rdp->dynticks->dynticks_nmi_nesting,
77238 rdp->dynticks_fqs);
77239 @@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
77240 struct rcu_state *rsp = (struct rcu_state *)m->private;
77241
77242 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
77243 - atomic_long_read(&rsp->expedited_start),
77244 + atomic_long_read_unchecked(&rsp->expedited_start),
77245 atomic_long_read(&rsp->expedited_done),
77246 - atomic_long_read(&rsp->expedited_wrap),
77247 - atomic_long_read(&rsp->expedited_tryfail),
77248 - atomic_long_read(&rsp->expedited_workdone1),
77249 - atomic_long_read(&rsp->expedited_workdone2),
77250 - atomic_long_read(&rsp->expedited_normal),
77251 - atomic_long_read(&rsp->expedited_stoppedcpus),
77252 - atomic_long_read(&rsp->expedited_done_tries),
77253 - atomic_long_read(&rsp->expedited_done_lost),
77254 - atomic_long_read(&rsp->expedited_done_exit));
77255 + atomic_long_read_unchecked(&rsp->expedited_wrap),
77256 + atomic_long_read_unchecked(&rsp->expedited_tryfail),
77257 + atomic_long_read_unchecked(&rsp->expedited_workdone1),
77258 + atomic_long_read_unchecked(&rsp->expedited_workdone2),
77259 + atomic_long_read_unchecked(&rsp->expedited_normal),
77260 + atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
77261 + atomic_long_read_unchecked(&rsp->expedited_done_tries),
77262 + atomic_long_read_unchecked(&rsp->expedited_done_lost),
77263 + atomic_long_read_unchecked(&rsp->expedited_done_exit));
77264 return 0;
77265 }
77266
77267 diff --git a/kernel/resource.c b/kernel/resource.c
77268 index 73f35d4..4684fc4 100644
77269 --- a/kernel/resource.c
77270 +++ b/kernel/resource.c
77271 @@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
77272
77273 static int __init ioresources_init(void)
77274 {
77275 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
77276 +#ifdef CONFIG_GRKERNSEC_PROC_USER
77277 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
77278 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
77279 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77280 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
77281 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
77282 +#endif
77283 +#else
77284 proc_create("ioports", 0, NULL, &proc_ioports_operations);
77285 proc_create("iomem", 0, NULL, &proc_iomem_operations);
77286 +#endif
77287 return 0;
77288 }
77289 __initcall(ioresources_init);
77290 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
77291 index 7890b10..8b68605f 100644
77292 --- a/kernel/rtmutex-tester.c
77293 +++ b/kernel/rtmutex-tester.c
77294 @@ -21,7 +21,7 @@
77295 #define MAX_RT_TEST_MUTEXES 8
77296
77297 static spinlock_t rttest_lock;
77298 -static atomic_t rttest_event;
77299 +static atomic_unchecked_t rttest_event;
77300
77301 struct test_thread_data {
77302 int opcode;
77303 @@ -62,7 +62,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77304
77305 case RTTEST_LOCKCONT:
77306 td->mutexes[td->opdata] = 1;
77307 - td->event = atomic_add_return(1, &rttest_event);
77308 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77309 return 0;
77310
77311 case RTTEST_RESET:
77312 @@ -75,7 +75,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77313 return 0;
77314
77315 case RTTEST_RESETEVENT:
77316 - atomic_set(&rttest_event, 0);
77317 + atomic_set_unchecked(&rttest_event, 0);
77318 return 0;
77319
77320 default:
77321 @@ -92,9 +92,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77322 return ret;
77323
77324 td->mutexes[id] = 1;
77325 - td->event = atomic_add_return(1, &rttest_event);
77326 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77327 rt_mutex_lock(&mutexes[id]);
77328 - td->event = atomic_add_return(1, &rttest_event);
77329 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77330 td->mutexes[id] = 4;
77331 return 0;
77332
77333 @@ -105,9 +105,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77334 return ret;
77335
77336 td->mutexes[id] = 1;
77337 - td->event = atomic_add_return(1, &rttest_event);
77338 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77339 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
77340 - td->event = atomic_add_return(1, &rttest_event);
77341 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77342 td->mutexes[id] = ret ? 0 : 4;
77343 return ret ? -EINTR : 0;
77344
77345 @@ -116,9 +116,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77346 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
77347 return ret;
77348
77349 - td->event = atomic_add_return(1, &rttest_event);
77350 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77351 rt_mutex_unlock(&mutexes[id]);
77352 - td->event = atomic_add_return(1, &rttest_event);
77353 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77354 td->mutexes[id] = 0;
77355 return 0;
77356
77357 @@ -165,7 +165,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77358 break;
77359
77360 td->mutexes[dat] = 2;
77361 - td->event = atomic_add_return(1, &rttest_event);
77362 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77363 break;
77364
77365 default:
77366 @@ -185,7 +185,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77367 return;
77368
77369 td->mutexes[dat] = 3;
77370 - td->event = atomic_add_return(1, &rttest_event);
77371 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77372 break;
77373
77374 case RTTEST_LOCKNOWAIT:
77375 @@ -197,7 +197,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77376 return;
77377
77378 td->mutexes[dat] = 1;
77379 - td->event = atomic_add_return(1, &rttest_event);
77380 + td->event = atomic_add_return_unchecked(1, &rttest_event);
77381 return;
77382
77383 default:
77384 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
77385 index 64de5f8..7735e12 100644
77386 --- a/kernel/sched/auto_group.c
77387 +++ b/kernel/sched/auto_group.c
77388 @@ -11,7 +11,7 @@
77389
77390 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
77391 static struct autogroup autogroup_default;
77392 -static atomic_t autogroup_seq_nr;
77393 +static atomic_unchecked_t autogroup_seq_nr;
77394
77395 void __init autogroup_init(struct task_struct *init_task)
77396 {
77397 @@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
77398
77399 kref_init(&ag->kref);
77400 init_rwsem(&ag->lock);
77401 - ag->id = atomic_inc_return(&autogroup_seq_nr);
77402 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
77403 ag->tg = tg;
77404 #ifdef CONFIG_RT_GROUP_SCHED
77405 /*
77406 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
77407 index 67d0465..4cf9361 100644
77408 --- a/kernel/sched/core.c
77409 +++ b/kernel/sched/core.c
77410 @@ -3406,7 +3406,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
77411 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77412 * positive (at least 1, or number of jiffies left till timeout) if completed.
77413 */
77414 -long __sched
77415 +long __sched __intentional_overflow(-1)
77416 wait_for_completion_interruptible_timeout(struct completion *x,
77417 unsigned long timeout)
77418 {
77419 @@ -3423,7 +3423,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
77420 *
77421 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
77422 */
77423 -int __sched wait_for_completion_killable(struct completion *x)
77424 +int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
77425 {
77426 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
77427 if (t == -ERESTARTSYS)
77428 @@ -3444,7 +3444,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
77429 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77430 * positive (at least 1, or number of jiffies left till timeout) if completed.
77431 */
77432 -long __sched
77433 +long __sched __intentional_overflow(-1)
77434 wait_for_completion_killable_timeout(struct completion *x,
77435 unsigned long timeout)
77436 {
77437 @@ -3670,6 +3670,8 @@ int can_nice(const struct task_struct *p, const int nice)
77438 /* convert nice value [19,-20] to rlimit style value [1,40] */
77439 int nice_rlim = 20 - nice;
77440
77441 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
77442 +
77443 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
77444 capable(CAP_SYS_NICE));
77445 }
77446 @@ -3703,7 +3705,8 @@ SYSCALL_DEFINE1(nice, int, increment)
77447 if (nice > 19)
77448 nice = 19;
77449
77450 - if (increment < 0 && !can_nice(current, nice))
77451 + if (increment < 0 && (!can_nice(current, nice) ||
77452 + gr_handle_chroot_nice()))
77453 return -EPERM;
77454
77455 retval = security_task_setnice(current, nice);
77456 @@ -3857,6 +3860,7 @@ recheck:
77457 unsigned long rlim_rtprio =
77458 task_rlimit(p, RLIMIT_RTPRIO);
77459
77460 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
77461 /* can't set/change the rt policy */
77462 if (policy != p->policy && !rlim_rtprio)
77463 return -EPERM;
77464 @@ -4954,7 +4958,7 @@ static void migrate_tasks(unsigned int dead_cpu)
77465
77466 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
77467
77468 -static struct ctl_table sd_ctl_dir[] = {
77469 +static ctl_table_no_const sd_ctl_dir[] __read_only = {
77470 {
77471 .procname = "sched_domain",
77472 .mode = 0555,
77473 @@ -4971,17 +4975,17 @@ static struct ctl_table sd_ctl_root[] = {
77474 {}
77475 };
77476
77477 -static struct ctl_table *sd_alloc_ctl_entry(int n)
77478 +static ctl_table_no_const *sd_alloc_ctl_entry(int n)
77479 {
77480 - struct ctl_table *entry =
77481 + ctl_table_no_const *entry =
77482 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
77483
77484 return entry;
77485 }
77486
77487 -static void sd_free_ctl_entry(struct ctl_table **tablep)
77488 +static void sd_free_ctl_entry(ctl_table_no_const *tablep)
77489 {
77490 - struct ctl_table *entry;
77491 + ctl_table_no_const *entry;
77492
77493 /*
77494 * In the intermediate directories, both the child directory and
77495 @@ -4989,22 +4993,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
77496 * will always be set. In the lowest directory the names are
77497 * static strings and all have proc handlers.
77498 */
77499 - for (entry = *tablep; entry->mode; entry++) {
77500 - if (entry->child)
77501 - sd_free_ctl_entry(&entry->child);
77502 + for (entry = tablep; entry->mode; entry++) {
77503 + if (entry->child) {
77504 + sd_free_ctl_entry(entry->child);
77505 + pax_open_kernel();
77506 + entry->child = NULL;
77507 + pax_close_kernel();
77508 + }
77509 if (entry->proc_handler == NULL)
77510 kfree(entry->procname);
77511 }
77512
77513 - kfree(*tablep);
77514 - *tablep = NULL;
77515 + kfree(tablep);
77516 }
77517
77518 static int min_load_idx = 0;
77519 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
77520
77521 static void
77522 -set_table_entry(struct ctl_table *entry,
77523 +set_table_entry(ctl_table_no_const *entry,
77524 const char *procname, void *data, int maxlen,
77525 umode_t mode, proc_handler *proc_handler,
77526 bool load_idx)
77527 @@ -5024,7 +5031,7 @@ set_table_entry(struct ctl_table *entry,
77528 static struct ctl_table *
77529 sd_alloc_ctl_domain_table(struct sched_domain *sd)
77530 {
77531 - struct ctl_table *table = sd_alloc_ctl_entry(13);
77532 + ctl_table_no_const *table = sd_alloc_ctl_entry(13);
77533
77534 if (table == NULL)
77535 return NULL;
77536 @@ -5059,9 +5066,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
77537 return table;
77538 }
77539
77540 -static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
77541 +static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
77542 {
77543 - struct ctl_table *entry, *table;
77544 + ctl_table_no_const *entry, *table;
77545 struct sched_domain *sd;
77546 int domain_num = 0, i;
77547 char buf[32];
77548 @@ -5088,11 +5095,13 @@ static struct ctl_table_header *sd_sysctl_header;
77549 static void register_sched_domain_sysctl(void)
77550 {
77551 int i, cpu_num = num_possible_cpus();
77552 - struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
77553 + ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
77554 char buf[32];
77555
77556 WARN_ON(sd_ctl_dir[0].child);
77557 + pax_open_kernel();
77558 sd_ctl_dir[0].child = entry;
77559 + pax_close_kernel();
77560
77561 if (entry == NULL)
77562 return;
77563 @@ -5115,8 +5124,12 @@ static void unregister_sched_domain_sysctl(void)
77564 if (sd_sysctl_header)
77565 unregister_sysctl_table(sd_sysctl_header);
77566 sd_sysctl_header = NULL;
77567 - if (sd_ctl_dir[0].child)
77568 - sd_free_ctl_entry(&sd_ctl_dir[0].child);
77569 + if (sd_ctl_dir[0].child) {
77570 + sd_free_ctl_entry(sd_ctl_dir[0].child);
77571 + pax_open_kernel();
77572 + sd_ctl_dir[0].child = NULL;
77573 + pax_close_kernel();
77574 + }
77575 }
77576 #else
77577 static void register_sched_domain_sysctl(void)
77578 @@ -5215,7 +5228,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
77579 * happens before everything else. This has to be lower priority than
77580 * the notifier in the perf_event subsystem, though.
77581 */
77582 -static struct notifier_block __cpuinitdata migration_notifier = {
77583 +static struct notifier_block migration_notifier = {
77584 .notifier_call = migration_call,
77585 .priority = CPU_PRI_MIGRATION,
77586 };
77587 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
77588 index 7a33e59..2f7730c 100644
77589 --- a/kernel/sched/fair.c
77590 +++ b/kernel/sched/fair.c
77591 @@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
77592
77593 static void reset_ptenuma_scan(struct task_struct *p)
77594 {
77595 - ACCESS_ONCE(p->mm->numa_scan_seq)++;
77596 + ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
77597 p->mm->numa_scan_offset = 0;
77598 }
77599
77600 @@ -5654,7 +5654,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
77601 * run_rebalance_domains is triggered when needed from the scheduler tick.
77602 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
77603 */
77604 -static void run_rebalance_domains(struct softirq_action *h)
77605 +static void run_rebalance_domains(void)
77606 {
77607 int this_cpu = smp_processor_id();
77608 struct rq *this_rq = cpu_rq(this_cpu);
77609 diff --git a/kernel/signal.c b/kernel/signal.c
77610 index 598dc06..471310a 100644
77611 --- a/kernel/signal.c
77612 +++ b/kernel/signal.c
77613 @@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
77614
77615 int print_fatal_signals __read_mostly;
77616
77617 -static void __user *sig_handler(struct task_struct *t, int sig)
77618 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
77619 {
77620 return t->sighand->action[sig - 1].sa.sa_handler;
77621 }
77622
77623 -static int sig_handler_ignored(void __user *handler, int sig)
77624 +static int sig_handler_ignored(__sighandler_t handler, int sig)
77625 {
77626 /* Is it explicitly or implicitly ignored? */
77627 return handler == SIG_IGN ||
77628 @@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
77629
77630 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
77631 {
77632 - void __user *handler;
77633 + __sighandler_t handler;
77634
77635 handler = sig_handler(t, sig);
77636
77637 @@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
77638 atomic_inc(&user->sigpending);
77639 rcu_read_unlock();
77640
77641 + if (!override_rlimit)
77642 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
77643 +
77644 if (override_rlimit ||
77645 atomic_read(&user->sigpending) <=
77646 task_rlimit(t, RLIMIT_SIGPENDING)) {
77647 @@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
77648
77649 int unhandled_signal(struct task_struct *tsk, int sig)
77650 {
77651 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
77652 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
77653 if (is_global_init(tsk))
77654 return 1;
77655 if (handler != SIG_IGN && handler != SIG_DFL)
77656 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
77657 }
77658 }
77659
77660 + /* allow glibc communication via tgkill to other threads in our
77661 + thread group */
77662 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
77663 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
77664 + && gr_handle_signal(t, sig))
77665 + return -EPERM;
77666 +
77667 return security_task_kill(t, info, sig, 0);
77668 }
77669
77670 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
77671 return send_signal(sig, info, p, 1);
77672 }
77673
77674 -static int
77675 +int
77676 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
77677 {
77678 return send_signal(sig, info, t, 0);
77679 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
77680 unsigned long int flags;
77681 int ret, blocked, ignored;
77682 struct k_sigaction *action;
77683 + int is_unhandled = 0;
77684
77685 spin_lock_irqsave(&t->sighand->siglock, flags);
77686 action = &t->sighand->action[sig-1];
77687 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
77688 }
77689 if (action->sa.sa_handler == SIG_DFL)
77690 t->signal->flags &= ~SIGNAL_UNKILLABLE;
77691 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
77692 + is_unhandled = 1;
77693 ret = specific_send_sig_info(sig, info, t);
77694 spin_unlock_irqrestore(&t->sighand->siglock, flags);
77695
77696 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
77697 + normal operation */
77698 + if (is_unhandled) {
77699 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
77700 + gr_handle_crash(t, sig);
77701 + }
77702 +
77703 return ret;
77704 }
77705
77706 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
77707 ret = check_kill_permission(sig, info, p);
77708 rcu_read_unlock();
77709
77710 - if (!ret && sig)
77711 + if (!ret && sig) {
77712 ret = do_send_sig_info(sig, info, p, true);
77713 + if (!ret)
77714 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
77715 + }
77716
77717 return ret;
77718 }
77719 @@ -2923,7 +2946,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
77720 int error = -ESRCH;
77721
77722 rcu_read_lock();
77723 - p = find_task_by_vpid(pid);
77724 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77725 + /* allow glibc communication via tgkill to other threads in our
77726 + thread group */
77727 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
77728 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
77729 + p = find_task_by_vpid_unrestricted(pid);
77730 + else
77731 +#endif
77732 + p = find_task_by_vpid(pid);
77733 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
77734 error = check_kill_permission(sig, info, p);
77735 /*
77736 @@ -3237,8 +3268,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
77737 }
77738 seg = get_fs();
77739 set_fs(KERNEL_DS);
77740 - ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
77741 - (stack_t __force __user *) &uoss,
77742 + ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
77743 + (stack_t __force_user *) &uoss,
77744 compat_user_stack_pointer());
77745 set_fs(seg);
77746 if (ret >= 0 && uoss_ptr) {
77747 diff --git a/kernel/smp.c b/kernel/smp.c
77748 index 8e451f3..8322029 100644
77749 --- a/kernel/smp.c
77750 +++ b/kernel/smp.c
77751 @@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
77752 return NOTIFY_OK;
77753 }
77754
77755 -static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
77756 +static struct notifier_block hotplug_cfd_notifier = {
77757 .notifier_call = hotplug_cfd,
77758 };
77759
77760 diff --git a/kernel/smpboot.c b/kernel/smpboot.c
77761 index 02fc5c9..e54c335 100644
77762 --- a/kernel/smpboot.c
77763 +++ b/kernel/smpboot.c
77764 @@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
77765 }
77766 smpboot_unpark_thread(plug_thread, cpu);
77767 }
77768 - list_add(&plug_thread->list, &hotplug_threads);
77769 + pax_list_add(&plug_thread->list, &hotplug_threads);
77770 out:
77771 mutex_unlock(&smpboot_threads_lock);
77772 return ret;
77773 @@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
77774 {
77775 get_online_cpus();
77776 mutex_lock(&smpboot_threads_lock);
77777 - list_del(&plug_thread->list);
77778 + pax_list_del(&plug_thread->list);
77779 smpboot_destroy_threads(plug_thread);
77780 mutex_unlock(&smpboot_threads_lock);
77781 put_online_cpus();
77782 diff --git a/kernel/softirq.c b/kernel/softirq.c
77783 index 14d7758..012121f 100644
77784 --- a/kernel/softirq.c
77785 +++ b/kernel/softirq.c
77786 @@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
77787 EXPORT_SYMBOL(irq_stat);
77788 #endif
77789
77790 -static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
77791 +static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
77792
77793 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
77794
77795 -char *softirq_to_name[NR_SOFTIRQS] = {
77796 +const char * const softirq_to_name[NR_SOFTIRQS] = {
77797 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
77798 "TASKLET", "SCHED", "HRTIMER", "RCU"
77799 };
77800 @@ -244,7 +244,7 @@ restart:
77801 kstat_incr_softirqs_this_cpu(vec_nr);
77802
77803 trace_softirq_entry(vec_nr);
77804 - h->action(h);
77805 + h->action();
77806 trace_softirq_exit(vec_nr);
77807 if (unlikely(prev_count != preempt_count())) {
77808 printk(KERN_ERR "huh, entered softirq %u %s %p"
77809 @@ -389,7 +389,7 @@ void __raise_softirq_irqoff(unsigned int nr)
77810 or_softirq_pending(1UL << nr);
77811 }
77812
77813 -void open_softirq(int nr, void (*action)(struct softirq_action *))
77814 +void __init open_softirq(int nr, void (*action)(void))
77815 {
77816 softirq_vec[nr].action = action;
77817 }
77818 @@ -445,7 +445,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
77819
77820 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
77821
77822 -static void tasklet_action(struct softirq_action *a)
77823 +static void tasklet_action(void)
77824 {
77825 struct tasklet_struct *list;
77826
77827 @@ -480,7 +480,7 @@ static void tasklet_action(struct softirq_action *a)
77828 }
77829 }
77830
77831 -static void tasklet_hi_action(struct softirq_action *a)
77832 +static void tasklet_hi_action(void)
77833 {
77834 struct tasklet_struct *list;
77835
77836 @@ -716,7 +716,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
77837 return NOTIFY_OK;
77838 }
77839
77840 -static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
77841 +static struct notifier_block remote_softirq_cpu_notifier = {
77842 .notifier_call = remote_softirq_cpu_notify,
77843 };
77844
77845 @@ -833,11 +833,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
77846 return NOTIFY_OK;
77847 }
77848
77849 -static struct notifier_block __cpuinitdata cpu_nfb = {
77850 +static struct notifier_block cpu_nfb = {
77851 .notifier_call = cpu_callback
77852 };
77853
77854 -static struct smp_hotplug_thread softirq_threads = {
77855 +static struct smp_hotplug_thread softirq_threads __read_only = {
77856 .store = &ksoftirqd,
77857 .thread_should_run = ksoftirqd_should_run,
77858 .thread_fn = run_ksoftirqd,
77859 diff --git a/kernel/srcu.c b/kernel/srcu.c
77860 index 01d5ccb..cdcbee6 100644
77861 --- a/kernel/srcu.c
77862 +++ b/kernel/srcu.c
77863 @@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
77864
77865 idx = ACCESS_ONCE(sp->completed) & 0x1;
77866 preempt_disable();
77867 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
77868 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
77869 smp_mb(); /* B */ /* Avoid leaking the critical section. */
77870 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
77871 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
77872 preempt_enable();
77873 return idx;
77874 }
77875 diff --git a/kernel/sys.c b/kernel/sys.c
77876 index 0da73cf..a22106a 100644
77877 --- a/kernel/sys.c
77878 +++ b/kernel/sys.c
77879 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
77880 error = -EACCES;
77881 goto out;
77882 }
77883 +
77884 + if (gr_handle_chroot_setpriority(p, niceval)) {
77885 + error = -EACCES;
77886 + goto out;
77887 + }
77888 +
77889 no_nice = security_task_setnice(p, niceval);
77890 if (no_nice) {
77891 error = no_nice;
77892 @@ -598,6 +604,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
77893 goto error;
77894 }
77895
77896 + if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
77897 + goto error;
77898 +
77899 if (rgid != (gid_t) -1 ||
77900 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
77901 new->sgid = new->egid;
77902 @@ -633,6 +642,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
77903 old = current_cred();
77904
77905 retval = -EPERM;
77906 +
77907 + if (gr_check_group_change(kgid, kgid, kgid))
77908 + goto error;
77909 +
77910 if (nsown_capable(CAP_SETGID))
77911 new->gid = new->egid = new->sgid = new->fsgid = kgid;
77912 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
77913 @@ -650,7 +663,7 @@ error:
77914 /*
77915 * change the user struct in a credentials set to match the new UID
77916 */
77917 -static int set_user(struct cred *new)
77918 +int set_user(struct cred *new)
77919 {
77920 struct user_struct *new_user;
77921
77922 @@ -730,6 +743,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
77923 goto error;
77924 }
77925
77926 + if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
77927 + goto error;
77928 +
77929 if (!uid_eq(new->uid, old->uid)) {
77930 retval = set_user(new);
77931 if (retval < 0)
77932 @@ -780,6 +796,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
77933 old = current_cred();
77934
77935 retval = -EPERM;
77936 +
77937 + if (gr_check_crash_uid(kuid))
77938 + goto error;
77939 + if (gr_check_user_change(kuid, kuid, kuid))
77940 + goto error;
77941 +
77942 if (nsown_capable(CAP_SETUID)) {
77943 new->suid = new->uid = kuid;
77944 if (!uid_eq(kuid, old->uid)) {
77945 @@ -849,6 +871,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
77946 goto error;
77947 }
77948
77949 + if (gr_check_user_change(kruid, keuid, INVALID_UID))
77950 + goto error;
77951 +
77952 if (ruid != (uid_t) -1) {
77953 new->uid = kruid;
77954 if (!uid_eq(kruid, old->uid)) {
77955 @@ -931,6 +956,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
77956 goto error;
77957 }
77958
77959 + if (gr_check_group_change(krgid, kegid, INVALID_GID))
77960 + goto error;
77961 +
77962 if (rgid != (gid_t) -1)
77963 new->gid = krgid;
77964 if (egid != (gid_t) -1)
77965 @@ -992,12 +1020,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
77966 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
77967 nsown_capable(CAP_SETUID)) {
77968 if (!uid_eq(kuid, old->fsuid)) {
77969 + if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
77970 + goto error;
77971 +
77972 new->fsuid = kuid;
77973 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
77974 goto change_okay;
77975 }
77976 }
77977
77978 +error:
77979 abort_creds(new);
77980 return old_fsuid;
77981
77982 @@ -1030,12 +1062,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
77983 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
77984 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
77985 nsown_capable(CAP_SETGID)) {
77986 + if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
77987 + goto error;
77988 +
77989 if (!gid_eq(kgid, old->fsgid)) {
77990 new->fsgid = kgid;
77991 goto change_okay;
77992 }
77993 }
77994
77995 +error:
77996 abort_creds(new);
77997 return old_fsgid;
77998
77999 @@ -1343,19 +1379,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
78000 return -EFAULT;
78001
78002 down_read(&uts_sem);
78003 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
78004 + error = __copy_to_user(name->sysname, &utsname()->sysname,
78005 __OLD_UTS_LEN);
78006 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
78007 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
78008 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
78009 __OLD_UTS_LEN);
78010 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
78011 - error |= __copy_to_user(&name->release, &utsname()->release,
78012 + error |= __copy_to_user(name->release, &utsname()->release,
78013 __OLD_UTS_LEN);
78014 error |= __put_user(0, name->release + __OLD_UTS_LEN);
78015 - error |= __copy_to_user(&name->version, &utsname()->version,
78016 + error |= __copy_to_user(name->version, &utsname()->version,
78017 __OLD_UTS_LEN);
78018 error |= __put_user(0, name->version + __OLD_UTS_LEN);
78019 - error |= __copy_to_user(&name->machine, &utsname()->machine,
78020 + error |= __copy_to_user(name->machine, &utsname()->machine,
78021 __OLD_UTS_LEN);
78022 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
78023 up_read(&uts_sem);
78024 @@ -1557,6 +1593,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
78025 */
78026 new_rlim->rlim_cur = 1;
78027 }
78028 + /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
78029 + is changed to a lower value. Since tasks can be created by the same
78030 + user in between this limit change and an execve by this task, force
78031 + a recheck only for this task by setting PF_NPROC_EXCEEDED
78032 + */
78033 + if (resource == RLIMIT_NPROC)
78034 + tsk->flags |= PF_NPROC_EXCEEDED;
78035 }
78036 if (!retval) {
78037 if (old_rlim)
78038 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
78039 index afc1dc6..5e28bbf 100644
78040 --- a/kernel/sysctl.c
78041 +++ b/kernel/sysctl.c
78042 @@ -93,7 +93,6 @@
78043
78044
78045 #if defined(CONFIG_SYSCTL)
78046 -
78047 /* External variables not in a header file. */
78048 extern int sysctl_overcommit_memory;
78049 extern int sysctl_overcommit_ratio;
78050 @@ -178,10 +177,8 @@ static int proc_taint(struct ctl_table *table, int write,
78051 void __user *buffer, size_t *lenp, loff_t *ppos);
78052 #endif
78053
78054 -#ifdef CONFIG_PRINTK
78055 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78056 void __user *buffer, size_t *lenp, loff_t *ppos);
78057 -#endif
78058
78059 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
78060 void __user *buffer, size_t *lenp, loff_t *ppos);
78061 @@ -212,6 +209,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
78062
78063 #endif
78064
78065 +extern struct ctl_table grsecurity_table[];
78066 +
78067 static struct ctl_table kern_table[];
78068 static struct ctl_table vm_table[];
78069 static struct ctl_table fs_table[];
78070 @@ -226,6 +225,20 @@ extern struct ctl_table epoll_table[];
78071 int sysctl_legacy_va_layout;
78072 #endif
78073
78074 +#ifdef CONFIG_PAX_SOFTMODE
78075 +static ctl_table pax_table[] = {
78076 + {
78077 + .procname = "softmode",
78078 + .data = &pax_softmode,
78079 + .maxlen = sizeof(unsigned int),
78080 + .mode = 0600,
78081 + .proc_handler = &proc_dointvec,
78082 + },
78083 +
78084 + { }
78085 +};
78086 +#endif
78087 +
78088 /* The default sysctl tables: */
78089
78090 static struct ctl_table sysctl_base_table[] = {
78091 @@ -274,6 +287,22 @@ static int max_extfrag_threshold = 1000;
78092 #endif
78093
78094 static struct ctl_table kern_table[] = {
78095 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
78096 + {
78097 + .procname = "grsecurity",
78098 + .mode = 0500,
78099 + .child = grsecurity_table,
78100 + },
78101 +#endif
78102 +
78103 +#ifdef CONFIG_PAX_SOFTMODE
78104 + {
78105 + .procname = "pax",
78106 + .mode = 0500,
78107 + .child = pax_table,
78108 + },
78109 +#endif
78110 +
78111 {
78112 .procname = "sched_child_runs_first",
78113 .data = &sysctl_sched_child_runs_first,
78114 @@ -608,7 +637,7 @@ static struct ctl_table kern_table[] = {
78115 .data = &modprobe_path,
78116 .maxlen = KMOD_PATH_LEN,
78117 .mode = 0644,
78118 - .proc_handler = proc_dostring,
78119 + .proc_handler = proc_dostring_modpriv,
78120 },
78121 {
78122 .procname = "modules_disabled",
78123 @@ -775,16 +804,20 @@ static struct ctl_table kern_table[] = {
78124 .extra1 = &zero,
78125 .extra2 = &one,
78126 },
78127 +#endif
78128 {
78129 .procname = "kptr_restrict",
78130 .data = &kptr_restrict,
78131 .maxlen = sizeof(int),
78132 .mode = 0644,
78133 .proc_handler = proc_dointvec_minmax_sysadmin,
78134 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78135 + .extra1 = &two,
78136 +#else
78137 .extra1 = &zero,
78138 +#endif
78139 .extra2 = &two,
78140 },
78141 -#endif
78142 {
78143 .procname = "ngroups_max",
78144 .data = &ngroups_max,
78145 @@ -1026,8 +1059,8 @@ static struct ctl_table kern_table[] = {
78146 */
78147 {
78148 .procname = "perf_event_paranoid",
78149 - .data = &sysctl_perf_event_paranoid,
78150 - .maxlen = sizeof(sysctl_perf_event_paranoid),
78151 + .data = &sysctl_perf_event_legitimately_concerned,
78152 + .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
78153 .mode = 0644,
78154 .proc_handler = proc_dointvec,
78155 },
78156 @@ -1283,6 +1316,13 @@ static struct ctl_table vm_table[] = {
78157 .proc_handler = proc_dointvec_minmax,
78158 .extra1 = &zero,
78159 },
78160 + {
78161 + .procname = "heap_stack_gap",
78162 + .data = &sysctl_heap_stack_gap,
78163 + .maxlen = sizeof(sysctl_heap_stack_gap),
78164 + .mode = 0644,
78165 + .proc_handler = proc_doulongvec_minmax,
78166 + },
78167 #else
78168 {
78169 .procname = "nr_trim_pages",
78170 @@ -1733,6 +1773,16 @@ int proc_dostring(struct ctl_table *table, int write,
78171 buffer, lenp, ppos);
78172 }
78173
78174 +int proc_dostring_modpriv(struct ctl_table *table, int write,
78175 + void __user *buffer, size_t *lenp, loff_t *ppos)
78176 +{
78177 + if (write && !capable(CAP_SYS_MODULE))
78178 + return -EPERM;
78179 +
78180 + return _proc_do_string(table->data, table->maxlen, write,
78181 + buffer, lenp, ppos);
78182 +}
78183 +
78184 static size_t proc_skip_spaces(char **buf)
78185 {
78186 size_t ret;
78187 @@ -1838,6 +1888,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
78188 len = strlen(tmp);
78189 if (len > *size)
78190 len = *size;
78191 + if (len > sizeof(tmp))
78192 + len = sizeof(tmp);
78193 if (copy_to_user(*buf, tmp, len))
78194 return -EFAULT;
78195 *size -= len;
78196 @@ -2002,7 +2054,7 @@ int proc_dointvec(struct ctl_table *table, int write,
78197 static int proc_taint(struct ctl_table *table, int write,
78198 void __user *buffer, size_t *lenp, loff_t *ppos)
78199 {
78200 - struct ctl_table t;
78201 + ctl_table_no_const t;
78202 unsigned long tmptaint = get_taint();
78203 int err;
78204
78205 @@ -2030,7 +2082,6 @@ static int proc_taint(struct ctl_table *table, int write,
78206 return err;
78207 }
78208
78209 -#ifdef CONFIG_PRINTK
78210 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78211 void __user *buffer, size_t *lenp, loff_t *ppos)
78212 {
78213 @@ -2039,7 +2090,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78214
78215 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
78216 }
78217 -#endif
78218
78219 struct do_proc_dointvec_minmax_conv_param {
78220 int *min;
78221 @@ -2186,8 +2236,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
78222 *i = val;
78223 } else {
78224 val = convdiv * (*i) / convmul;
78225 - if (!first)
78226 + if (!first) {
78227 err = proc_put_char(&buffer, &left, '\t');
78228 + if (err)
78229 + break;
78230 + }
78231 err = proc_put_long(&buffer, &left, val, false);
78232 if (err)
78233 break;
78234 @@ -2579,6 +2632,12 @@ int proc_dostring(struct ctl_table *table, int write,
78235 return -ENOSYS;
78236 }
78237
78238 +int proc_dostring_modpriv(struct ctl_table *table, int write,
78239 + void __user *buffer, size_t *lenp, loff_t *ppos)
78240 +{
78241 + return -ENOSYS;
78242 +}
78243 +
78244 int proc_dointvec(struct ctl_table *table, int write,
78245 void __user *buffer, size_t *lenp, loff_t *ppos)
78246 {
78247 @@ -2635,5 +2694,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
78248 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
78249 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
78250 EXPORT_SYMBOL(proc_dostring);
78251 +EXPORT_SYMBOL(proc_dostring_modpriv);
78252 EXPORT_SYMBOL(proc_doulongvec_minmax);
78253 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
78254 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
78255 index 145bb4d..b2aa969 100644
78256 --- a/kernel/taskstats.c
78257 +++ b/kernel/taskstats.c
78258 @@ -28,9 +28,12 @@
78259 #include <linux/fs.h>
78260 #include <linux/file.h>
78261 #include <linux/pid_namespace.h>
78262 +#include <linux/grsecurity.h>
78263 #include <net/genetlink.h>
78264 #include <linux/atomic.h>
78265
78266 +extern int gr_is_taskstats_denied(int pid);
78267 +
78268 /*
78269 * Maximum length of a cpumask that can be specified in
78270 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
78271 @@ -570,6 +573,9 @@ err:
78272
78273 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
78274 {
78275 + if (gr_is_taskstats_denied(current->pid))
78276 + return -EACCES;
78277 +
78278 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
78279 return cmd_attr_register_cpumask(info);
78280 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
78281 diff --git a/kernel/time.c b/kernel/time.c
78282 index f8342a4..288f13b 100644
78283 --- a/kernel/time.c
78284 +++ b/kernel/time.c
78285 @@ -171,6 +171,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
78286 return error;
78287
78288 if (tz) {
78289 + /* we log in do_settimeofday called below, so don't log twice
78290 + */
78291 + if (!tv)
78292 + gr_log_timechange();
78293 +
78294 sys_tz = *tz;
78295 update_vsyscall_tz();
78296 if (firsttime) {
78297 @@ -501,7 +506,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
78298 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
78299 * value to a scaled second value.
78300 */
78301 -unsigned long
78302 +unsigned long __intentional_overflow(-1)
78303 timespec_to_jiffies(const struct timespec *value)
78304 {
78305 unsigned long sec = value->tv_sec;
78306 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
78307 index f11d83b..d016d91 100644
78308 --- a/kernel/time/alarmtimer.c
78309 +++ b/kernel/time/alarmtimer.c
78310 @@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
78311 struct platform_device *pdev;
78312 int error = 0;
78313 int i;
78314 - struct k_clock alarm_clock = {
78315 + static struct k_clock alarm_clock = {
78316 .clock_getres = alarm_clock_getres,
78317 .clock_get = alarm_clock_get,
78318 .timer_create = alarm_timer_create,
78319 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
78320 index 90ad470..1814e9a 100644
78321 --- a/kernel/time/tick-broadcast.c
78322 +++ b/kernel/time/tick-broadcast.c
78323 @@ -138,7 +138,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
78324 * then clear the broadcast bit.
78325 */
78326 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
78327 - int cpu = smp_processor_id();
78328 + cpu = smp_processor_id();
78329 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
78330 tick_broadcast_clear_oneshot(cpu);
78331 } else {
78332 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
78333 index 9a0bc98..fceb7d0 100644
78334 --- a/kernel/time/timekeeping.c
78335 +++ b/kernel/time/timekeeping.c
78336 @@ -15,6 +15,7 @@
78337 #include <linux/init.h>
78338 #include <linux/mm.h>
78339 #include <linux/sched.h>
78340 +#include <linux/grsecurity.h>
78341 #include <linux/syscore_ops.h>
78342 #include <linux/clocksource.h>
78343 #include <linux/jiffies.h>
78344 @@ -448,6 +449,8 @@ int do_settimeofday(const struct timespec *tv)
78345 if (!timespec_valid_strict(tv))
78346 return -EINVAL;
78347
78348 + gr_log_timechange();
78349 +
78350 write_seqlock_irqsave(&tk->lock, flags);
78351
78352 timekeeping_forward_now(tk);
78353 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
78354 index af5a7e9..715611a 100644
78355 --- a/kernel/time/timer_list.c
78356 +++ b/kernel/time/timer_list.c
78357 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
78358
78359 static void print_name_offset(struct seq_file *m, void *sym)
78360 {
78361 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78362 + SEQ_printf(m, "<%p>", NULL);
78363 +#else
78364 char symname[KSYM_NAME_LEN];
78365
78366 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
78367 SEQ_printf(m, "<%pK>", sym);
78368 else
78369 SEQ_printf(m, "%s", symname);
78370 +#endif
78371 }
78372
78373 static void
78374 @@ -112,7 +116,11 @@ next_one:
78375 static void
78376 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
78377 {
78378 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78379 + SEQ_printf(m, " .base: %p\n", NULL);
78380 +#else
78381 SEQ_printf(m, " .base: %pK\n", base);
78382 +#endif
78383 SEQ_printf(m, " .index: %d\n",
78384 base->index);
78385 SEQ_printf(m, " .resolution: %Lu nsecs\n",
78386 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
78387 {
78388 struct proc_dir_entry *pe;
78389
78390 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
78391 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
78392 +#else
78393 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
78394 +#endif
78395 if (!pe)
78396 return -ENOMEM;
78397 return 0;
78398 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
78399 index 0b537f2..40d6c20 100644
78400 --- a/kernel/time/timer_stats.c
78401 +++ b/kernel/time/timer_stats.c
78402 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
78403 static unsigned long nr_entries;
78404 static struct entry entries[MAX_ENTRIES];
78405
78406 -static atomic_t overflow_count;
78407 +static atomic_unchecked_t overflow_count;
78408
78409 /*
78410 * The entries are in a hash-table, for fast lookup:
78411 @@ -140,7 +140,7 @@ static void reset_entries(void)
78412 nr_entries = 0;
78413 memset(entries, 0, sizeof(entries));
78414 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
78415 - atomic_set(&overflow_count, 0);
78416 + atomic_set_unchecked(&overflow_count, 0);
78417 }
78418
78419 static struct entry *alloc_entry(void)
78420 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
78421 if (likely(entry))
78422 entry->count++;
78423 else
78424 - atomic_inc(&overflow_count);
78425 + atomic_inc_unchecked(&overflow_count);
78426
78427 out_unlock:
78428 raw_spin_unlock_irqrestore(lock, flags);
78429 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
78430
78431 static void print_name_offset(struct seq_file *m, unsigned long addr)
78432 {
78433 +#ifdef CONFIG_GRKERNSEC_HIDESYM
78434 + seq_printf(m, "<%p>", NULL);
78435 +#else
78436 char symname[KSYM_NAME_LEN];
78437
78438 if (lookup_symbol_name(addr, symname) < 0)
78439 - seq_printf(m, "<%p>", (void *)addr);
78440 + seq_printf(m, "<%pK>", (void *)addr);
78441 else
78442 seq_printf(m, "%s", symname);
78443 +#endif
78444 }
78445
78446 static int tstats_show(struct seq_file *m, void *v)
78447 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
78448
78449 seq_puts(m, "Timer Stats Version: v0.2\n");
78450 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
78451 - if (atomic_read(&overflow_count))
78452 + if (atomic_read_unchecked(&overflow_count))
78453 seq_printf(m, "Overflow: %d entries\n",
78454 - atomic_read(&overflow_count));
78455 + atomic_read_unchecked(&overflow_count));
78456
78457 for (i = 0; i < nr_entries; i++) {
78458 entry = entries + i;
78459 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
78460 {
78461 struct proc_dir_entry *pe;
78462
78463 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
78464 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
78465 +#else
78466 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
78467 +#endif
78468 if (!pe)
78469 return -ENOMEM;
78470 return 0;
78471 diff --git a/kernel/timer.c b/kernel/timer.c
78472 index dbf7a78..e2148f0 100644
78473 --- a/kernel/timer.c
78474 +++ b/kernel/timer.c
78475 @@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
78476 /*
78477 * This function runs timers and the timer-tq in bottom half context.
78478 */
78479 -static void run_timer_softirq(struct softirq_action *h)
78480 +static void run_timer_softirq(void)
78481 {
78482 struct tvec_base *base = __this_cpu_read(tvec_bases);
78483
78484 @@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
78485 *
78486 * In all cases the return value is guaranteed to be non-negative.
78487 */
78488 -signed long __sched schedule_timeout(signed long timeout)
78489 +signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
78490 {
78491 struct timer_list timer;
78492 unsigned long expire;
78493 @@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
78494 return NOTIFY_OK;
78495 }
78496
78497 -static struct notifier_block __cpuinitdata timers_nb = {
78498 +static struct notifier_block timers_nb = {
78499 .notifier_call = timer_cpu_notify,
78500 };
78501
78502 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
78503 index 5a0f781..1497f95 100644
78504 --- a/kernel/trace/blktrace.c
78505 +++ b/kernel/trace/blktrace.c
78506 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
78507 struct blk_trace *bt = filp->private_data;
78508 char buf[16];
78509
78510 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
78511 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
78512
78513 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
78514 }
78515 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
78516 return 1;
78517
78518 bt = buf->chan->private_data;
78519 - atomic_inc(&bt->dropped);
78520 + atomic_inc_unchecked(&bt->dropped);
78521 return 0;
78522 }
78523
78524 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
78525
78526 bt->dir = dir;
78527 bt->dev = dev;
78528 - atomic_set(&bt->dropped, 0);
78529 + atomic_set_unchecked(&bt->dropped, 0);
78530
78531 ret = -EIO;
78532 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
78533 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
78534 index 0a0e2a6..943495e 100644
78535 --- a/kernel/trace/ftrace.c
78536 +++ b/kernel/trace/ftrace.c
78537 @@ -1909,12 +1909,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
78538 if (unlikely(ftrace_disabled))
78539 return 0;
78540
78541 + ret = ftrace_arch_code_modify_prepare();
78542 + FTRACE_WARN_ON(ret);
78543 + if (ret)
78544 + return 0;
78545 +
78546 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
78547 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
78548 if (ret) {
78549 ftrace_bug(ret, ip);
78550 - return 0;
78551 }
78552 - return 1;
78553 + return ret ? 0 : 1;
78554 }
78555
78556 /*
78557 @@ -2986,7 +2991,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
78558
78559 int
78560 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
78561 - void *data)
78562 + void *data)
78563 {
78564 struct ftrace_func_probe *entry;
78565 struct ftrace_page *pg;
78566 @@ -3854,8 +3859,10 @@ static int ftrace_process_locs(struct module *mod,
78567 if (!count)
78568 return 0;
78569
78570 + pax_open_kernel();
78571 sort(start, count, sizeof(*start),
78572 ftrace_cmp_ips, ftrace_swap_ips);
78573 + pax_close_kernel();
78574
78575 start_pg = ftrace_allocate_pages(count);
78576 if (!start_pg)
78577 @@ -4574,8 +4581,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
78578 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
78579
78580 static int ftrace_graph_active;
78581 -static struct notifier_block ftrace_suspend_notifier;
78582 -
78583 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
78584 {
78585 return 0;
78586 @@ -4719,6 +4724,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
78587 return NOTIFY_DONE;
78588 }
78589
78590 +static struct notifier_block ftrace_suspend_notifier = {
78591 + .notifier_call = ftrace_suspend_notifier_call
78592 +};
78593 +
78594 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
78595 trace_func_graph_ent_t entryfunc)
78596 {
78597 @@ -4732,7 +4741,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
78598 goto out;
78599 }
78600
78601 - ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
78602 register_pm_notifier(&ftrace_suspend_notifier);
78603
78604 ftrace_graph_active++;
78605 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
78606 index 6989df2..c2265cf 100644
78607 --- a/kernel/trace/ring_buffer.c
78608 +++ b/kernel/trace/ring_buffer.c
78609 @@ -349,9 +349,9 @@ struct buffer_data_page {
78610 */
78611 struct buffer_page {
78612 struct list_head list; /* list of buffer pages */
78613 - local_t write; /* index for next write */
78614 + local_unchecked_t write; /* index for next write */
78615 unsigned read; /* index for next read */
78616 - local_t entries; /* entries on this page */
78617 + local_unchecked_t entries; /* entries on this page */
78618 unsigned long real_end; /* real end of data */
78619 struct buffer_data_page *page; /* Actual data page */
78620 };
78621 @@ -464,8 +464,8 @@ struct ring_buffer_per_cpu {
78622 unsigned long last_overrun;
78623 local_t entries_bytes;
78624 local_t entries;
78625 - local_t overrun;
78626 - local_t commit_overrun;
78627 + local_unchecked_t overrun;
78628 + local_unchecked_t commit_overrun;
78629 local_t dropped_events;
78630 local_t committing;
78631 local_t commits;
78632 @@ -864,8 +864,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
78633 *
78634 * We add a counter to the write field to denote this.
78635 */
78636 - old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
78637 - old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
78638 + old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
78639 + old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
78640
78641 /*
78642 * Just make sure we have seen our old_write and synchronize
78643 @@ -893,8 +893,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
78644 * cmpxchg to only update if an interrupt did not already
78645 * do it for us. If the cmpxchg fails, we don't care.
78646 */
78647 - (void)local_cmpxchg(&next_page->write, old_write, val);
78648 - (void)local_cmpxchg(&next_page->entries, old_entries, eval);
78649 + (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
78650 + (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
78651
78652 /*
78653 * No need to worry about races with clearing out the commit.
78654 @@ -1253,12 +1253,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
78655
78656 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
78657 {
78658 - return local_read(&bpage->entries) & RB_WRITE_MASK;
78659 + return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
78660 }
78661
78662 static inline unsigned long rb_page_write(struct buffer_page *bpage)
78663 {
78664 - return local_read(&bpage->write) & RB_WRITE_MASK;
78665 + return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
78666 }
78667
78668 static int
78669 @@ -1353,7 +1353,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
78670 * bytes consumed in ring buffer from here.
78671 * Increment overrun to account for the lost events.
78672 */
78673 - local_add(page_entries, &cpu_buffer->overrun);
78674 + local_add_unchecked(page_entries, &cpu_buffer->overrun);
78675 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
78676 }
78677
78678 @@ -1909,7 +1909,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
78679 * it is our responsibility to update
78680 * the counters.
78681 */
78682 - local_add(entries, &cpu_buffer->overrun);
78683 + local_add_unchecked(entries, &cpu_buffer->overrun);
78684 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
78685
78686 /*
78687 @@ -2059,7 +2059,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
78688 if (tail == BUF_PAGE_SIZE)
78689 tail_page->real_end = 0;
78690
78691 - local_sub(length, &tail_page->write);
78692 + local_sub_unchecked(length, &tail_page->write);
78693 return;
78694 }
78695
78696 @@ -2094,7 +2094,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
78697 rb_event_set_padding(event);
78698
78699 /* Set the write back to the previous setting */
78700 - local_sub(length, &tail_page->write);
78701 + local_sub_unchecked(length, &tail_page->write);
78702 return;
78703 }
78704
78705 @@ -2106,7 +2106,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
78706
78707 /* Set write to end of buffer */
78708 length = (tail + length) - BUF_PAGE_SIZE;
78709 - local_sub(length, &tail_page->write);
78710 + local_sub_unchecked(length, &tail_page->write);
78711 }
78712
78713 /*
78714 @@ -2132,7 +2132,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
78715 * about it.
78716 */
78717 if (unlikely(next_page == commit_page)) {
78718 - local_inc(&cpu_buffer->commit_overrun);
78719 + local_inc_unchecked(&cpu_buffer->commit_overrun);
78720 goto out_reset;
78721 }
78722
78723 @@ -2188,7 +2188,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
78724 cpu_buffer->tail_page) &&
78725 (cpu_buffer->commit_page ==
78726 cpu_buffer->reader_page))) {
78727 - local_inc(&cpu_buffer->commit_overrun);
78728 + local_inc_unchecked(&cpu_buffer->commit_overrun);
78729 goto out_reset;
78730 }
78731 }
78732 @@ -2236,7 +2236,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
78733 length += RB_LEN_TIME_EXTEND;
78734
78735 tail_page = cpu_buffer->tail_page;
78736 - write = local_add_return(length, &tail_page->write);
78737 + write = local_add_return_unchecked(length, &tail_page->write);
78738
78739 /* set write to only the index of the write */
78740 write &= RB_WRITE_MASK;
78741 @@ -2253,7 +2253,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
78742 kmemcheck_annotate_bitfield(event, bitfield);
78743 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
78744
78745 - local_inc(&tail_page->entries);
78746 + local_inc_unchecked(&tail_page->entries);
78747
78748 /*
78749 * If this is the first commit on the page, then update
78750 @@ -2286,7 +2286,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
78751
78752 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
78753 unsigned long write_mask =
78754 - local_read(&bpage->write) & ~RB_WRITE_MASK;
78755 + local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
78756 unsigned long event_length = rb_event_length(event);
78757 /*
78758 * This is on the tail page. It is possible that
78759 @@ -2296,7 +2296,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
78760 */
78761 old_index += write_mask;
78762 new_index += write_mask;
78763 - index = local_cmpxchg(&bpage->write, old_index, new_index);
78764 + index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
78765 if (index == old_index) {
78766 /* update counters */
78767 local_sub(event_length, &cpu_buffer->entries_bytes);
78768 @@ -2670,7 +2670,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
78769
78770 /* Do the likely case first */
78771 if (likely(bpage->page == (void *)addr)) {
78772 - local_dec(&bpage->entries);
78773 + local_dec_unchecked(&bpage->entries);
78774 return;
78775 }
78776
78777 @@ -2682,7 +2682,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
78778 start = bpage;
78779 do {
78780 if (bpage->page == (void *)addr) {
78781 - local_dec(&bpage->entries);
78782 + local_dec_unchecked(&bpage->entries);
78783 return;
78784 }
78785 rb_inc_page(cpu_buffer, &bpage);
78786 @@ -2964,7 +2964,7 @@ static inline unsigned long
78787 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
78788 {
78789 return local_read(&cpu_buffer->entries) -
78790 - (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
78791 + (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
78792 }
78793
78794 /**
78795 @@ -3053,7 +3053,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
78796 return 0;
78797
78798 cpu_buffer = buffer->buffers[cpu];
78799 - ret = local_read(&cpu_buffer->overrun);
78800 + ret = local_read_unchecked(&cpu_buffer->overrun);
78801
78802 return ret;
78803 }
78804 @@ -3076,7 +3076,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
78805 return 0;
78806
78807 cpu_buffer = buffer->buffers[cpu];
78808 - ret = local_read(&cpu_buffer->commit_overrun);
78809 + ret = local_read_unchecked(&cpu_buffer->commit_overrun);
78810
78811 return ret;
78812 }
78813 @@ -3161,7 +3161,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
78814 /* if you care about this being correct, lock the buffer */
78815 for_each_buffer_cpu(buffer, cpu) {
78816 cpu_buffer = buffer->buffers[cpu];
78817 - overruns += local_read(&cpu_buffer->overrun);
78818 + overruns += local_read_unchecked(&cpu_buffer->overrun);
78819 }
78820
78821 return overruns;
78822 @@ -3337,8 +3337,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
78823 /*
78824 * Reset the reader page to size zero.
78825 */
78826 - local_set(&cpu_buffer->reader_page->write, 0);
78827 - local_set(&cpu_buffer->reader_page->entries, 0);
78828 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
78829 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
78830 local_set(&cpu_buffer->reader_page->page->commit, 0);
78831 cpu_buffer->reader_page->real_end = 0;
78832
78833 @@ -3372,7 +3372,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
78834 * want to compare with the last_overrun.
78835 */
78836 smp_mb();
78837 - overwrite = local_read(&(cpu_buffer->overrun));
78838 + overwrite = local_read_unchecked(&(cpu_buffer->overrun));
78839
78840 /*
78841 * Here's the tricky part.
78842 @@ -3942,8 +3942,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
78843
78844 cpu_buffer->head_page
78845 = list_entry(cpu_buffer->pages, struct buffer_page, list);
78846 - local_set(&cpu_buffer->head_page->write, 0);
78847 - local_set(&cpu_buffer->head_page->entries, 0);
78848 + local_set_unchecked(&cpu_buffer->head_page->write, 0);
78849 + local_set_unchecked(&cpu_buffer->head_page->entries, 0);
78850 local_set(&cpu_buffer->head_page->page->commit, 0);
78851
78852 cpu_buffer->head_page->read = 0;
78853 @@ -3953,14 +3953,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
78854
78855 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
78856 INIT_LIST_HEAD(&cpu_buffer->new_pages);
78857 - local_set(&cpu_buffer->reader_page->write, 0);
78858 - local_set(&cpu_buffer->reader_page->entries, 0);
78859 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
78860 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
78861 local_set(&cpu_buffer->reader_page->page->commit, 0);
78862 cpu_buffer->reader_page->read = 0;
78863
78864 local_set(&cpu_buffer->entries_bytes, 0);
78865 - local_set(&cpu_buffer->overrun, 0);
78866 - local_set(&cpu_buffer->commit_overrun, 0);
78867 + local_set_unchecked(&cpu_buffer->overrun, 0);
78868 + local_set_unchecked(&cpu_buffer->commit_overrun, 0);
78869 local_set(&cpu_buffer->dropped_events, 0);
78870 local_set(&cpu_buffer->entries, 0);
78871 local_set(&cpu_buffer->committing, 0);
78872 @@ -4364,8 +4364,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
78873 rb_init_page(bpage);
78874 bpage = reader->page;
78875 reader->page = *data_page;
78876 - local_set(&reader->write, 0);
78877 - local_set(&reader->entries, 0);
78878 + local_set_unchecked(&reader->write, 0);
78879 + local_set_unchecked(&reader->entries, 0);
78880 reader->read = 0;
78881 *data_page = bpage;
78882
78883 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
78884 index 3f28192..a29e8b0 100644
78885 --- a/kernel/trace/trace.c
78886 +++ b/kernel/trace/trace.c
78887 @@ -2893,7 +2893,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
78888 return 0;
78889 }
78890
78891 -int set_tracer_flag(unsigned int mask, int enabled)
78892 +int set_tracer_flag(unsigned long mask, int enabled)
78893 {
78894 /* do nothing if flag is already set */
78895 if (!!(trace_flags & mask) == !!enabled)
78896 @@ -4637,10 +4637,9 @@ static const struct file_operations tracing_dyn_info_fops = {
78897 };
78898 #endif
78899
78900 -static struct dentry *d_tracer;
78901 -
78902 struct dentry *tracing_init_dentry(void)
78903 {
78904 + static struct dentry *d_tracer;
78905 static int once;
78906
78907 if (d_tracer)
78908 @@ -4660,10 +4659,9 @@ struct dentry *tracing_init_dentry(void)
78909 return d_tracer;
78910 }
78911
78912 -static struct dentry *d_percpu;
78913 -
78914 static struct dentry *tracing_dentry_percpu(void)
78915 {
78916 + static struct dentry *d_percpu;
78917 static int once;
78918 struct dentry *d_tracer;
78919
78920 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
78921 index 2081971..09f861e 100644
78922 --- a/kernel/trace/trace.h
78923 +++ b/kernel/trace/trace.h
78924 @@ -948,7 +948,7 @@ extern const char *__stop___trace_bprintk_fmt[];
78925 void trace_printk_init_buffers(void);
78926 void trace_printk_start_comm(void);
78927 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
78928 -int set_tracer_flag(unsigned int mask, int enabled);
78929 +int set_tracer_flag(unsigned long mask, int enabled);
78930
78931 #undef FTRACE_ENTRY
78932 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
78933 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
78934 index 57e9b28..eebe41c 100644
78935 --- a/kernel/trace/trace_events.c
78936 +++ b/kernel/trace/trace_events.c
78937 @@ -1329,10 +1329,6 @@ static LIST_HEAD(ftrace_module_file_list);
78938 struct ftrace_module_file_ops {
78939 struct list_head list;
78940 struct module *mod;
78941 - struct file_operations id;
78942 - struct file_operations enable;
78943 - struct file_operations format;
78944 - struct file_operations filter;
78945 };
78946
78947 static struct ftrace_module_file_ops *
78948 @@ -1353,17 +1349,12 @@ trace_create_file_ops(struct module *mod)
78949
78950 file_ops->mod = mod;
78951
78952 - file_ops->id = ftrace_event_id_fops;
78953 - file_ops->id.owner = mod;
78954 -
78955 - file_ops->enable = ftrace_enable_fops;
78956 - file_ops->enable.owner = mod;
78957 -
78958 - file_ops->filter = ftrace_event_filter_fops;
78959 - file_ops->filter.owner = mod;
78960 -
78961 - file_ops->format = ftrace_event_format_fops;
78962 - file_ops->format.owner = mod;
78963 + pax_open_kernel();
78964 + mod->trace_id.owner = mod;
78965 + mod->trace_enable.owner = mod;
78966 + mod->trace_filter.owner = mod;
78967 + mod->trace_format.owner = mod;
78968 + pax_close_kernel();
78969
78970 list_add(&file_ops->list, &ftrace_module_file_list);
78971
78972 @@ -1387,8 +1378,8 @@ static void trace_module_add_events(struct module *mod)
78973
78974 for_each_event(call, start, end) {
78975 __trace_add_event_call(*call, mod,
78976 - &file_ops->id, &file_ops->enable,
78977 - &file_ops->filter, &file_ops->format);
78978 + &mod->trace_id, &mod->trace_enable,
78979 + &mod->trace_filter, &mod->trace_format);
78980 }
78981 }
78982
78983 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
78984 index fd3c8aa..5f324a6 100644
78985 --- a/kernel/trace/trace_mmiotrace.c
78986 +++ b/kernel/trace/trace_mmiotrace.c
78987 @@ -24,7 +24,7 @@ struct header_iter {
78988 static struct trace_array *mmio_trace_array;
78989 static bool overrun_detected;
78990 static unsigned long prev_overruns;
78991 -static atomic_t dropped_count;
78992 +static atomic_unchecked_t dropped_count;
78993
78994 static void mmio_reset_data(struct trace_array *tr)
78995 {
78996 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
78997
78998 static unsigned long count_overruns(struct trace_iterator *iter)
78999 {
79000 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
79001 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
79002 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
79003
79004 if (over > prev_overruns)
79005 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
79006 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
79007 sizeof(*entry), 0, pc);
79008 if (!event) {
79009 - atomic_inc(&dropped_count);
79010 + atomic_inc_unchecked(&dropped_count);
79011 return;
79012 }
79013 entry = ring_buffer_event_data(event);
79014 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
79015 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
79016 sizeof(*entry), 0, pc);
79017 if (!event) {
79018 - atomic_inc(&dropped_count);
79019 + atomic_inc_unchecked(&dropped_count);
79020 return;
79021 }
79022 entry = ring_buffer_event_data(event);
79023 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
79024 index 697e88d..1a79993 100644
79025 --- a/kernel/trace/trace_output.c
79026 +++ b/kernel/trace/trace_output.c
79027 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
79028
79029 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
79030 if (!IS_ERR(p)) {
79031 - p = mangle_path(s->buffer + s->len, p, "\n");
79032 + p = mangle_path(s->buffer + s->len, p, "\n\\");
79033 if (p) {
79034 s->len = p - s->buffer;
79035 return 1;
79036 @@ -851,14 +851,16 @@ int register_ftrace_event(struct trace_event *event)
79037 goto out;
79038 }
79039
79040 + pax_open_kernel();
79041 if (event->funcs->trace == NULL)
79042 - event->funcs->trace = trace_nop_print;
79043 + *(void **)&event->funcs->trace = trace_nop_print;
79044 if (event->funcs->raw == NULL)
79045 - event->funcs->raw = trace_nop_print;
79046 + *(void **)&event->funcs->raw = trace_nop_print;
79047 if (event->funcs->hex == NULL)
79048 - event->funcs->hex = trace_nop_print;
79049 + *(void **)&event->funcs->hex = trace_nop_print;
79050 if (event->funcs->binary == NULL)
79051 - event->funcs->binary = trace_nop_print;
79052 + *(void **)&event->funcs->binary = trace_nop_print;
79053 + pax_close_kernel();
79054
79055 key = event->type & (EVENT_HASHSIZE - 1);
79056
79057 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
79058 index b20428c..4845a10 100644
79059 --- a/kernel/trace/trace_stack.c
79060 +++ b/kernel/trace/trace_stack.c
79061 @@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
79062 return;
79063
79064 /* we do not handle interrupt stacks yet */
79065 - if (!object_is_on_stack(stack))
79066 + if (!object_starts_on_stack(stack))
79067 return;
79068
79069 local_irq_save(flags);
79070 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
79071 index e134d8f..a018cdd 100644
79072 --- a/kernel/user_namespace.c
79073 +++ b/kernel/user_namespace.c
79074 @@ -853,7 +853,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
79075 if (atomic_read(&current->mm->mm_users) > 1)
79076 return -EINVAL;
79077
79078 - if (current->fs->users != 1)
79079 + if (atomic_read(&current->fs->users) != 1)
79080 return -EINVAL;
79081
79082 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
79083 diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
79084 index 4f69f9a..7c6f8f8 100644
79085 --- a/kernel/utsname_sysctl.c
79086 +++ b/kernel/utsname_sysctl.c
79087 @@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
79088 static int proc_do_uts_string(ctl_table *table, int write,
79089 void __user *buffer, size_t *lenp, loff_t *ppos)
79090 {
79091 - struct ctl_table uts_table;
79092 + ctl_table_no_const uts_table;
79093 int r;
79094 memcpy(&uts_table, table, sizeof(uts_table));
79095 uts_table.data = get_uts(table, write);
79096 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
79097 index 4a94467..80a6f9c 100644
79098 --- a/kernel/watchdog.c
79099 +++ b/kernel/watchdog.c
79100 @@ -526,7 +526,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
79101 }
79102 #endif /* CONFIG_SYSCTL */
79103
79104 -static struct smp_hotplug_thread watchdog_threads = {
79105 +static struct smp_hotplug_thread watchdog_threads __read_only = {
79106 .store = &softlockup_watchdog,
79107 .thread_should_run = watchdog_should_run,
79108 .thread_fn = watchdog,
79109 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
79110 index 28be08c..47bab92 100644
79111 --- a/lib/Kconfig.debug
79112 +++ b/lib/Kconfig.debug
79113 @@ -549,7 +549,7 @@ config DEBUG_MUTEXES
79114
79115 config DEBUG_LOCK_ALLOC
79116 bool "Lock debugging: detect incorrect freeing of live locks"
79117 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79118 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79119 select DEBUG_SPINLOCK
79120 select DEBUG_MUTEXES
79121 select LOCKDEP
79122 @@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
79123
79124 config PROVE_LOCKING
79125 bool "Lock debugging: prove locking correctness"
79126 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79127 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79128 select LOCKDEP
79129 select DEBUG_SPINLOCK
79130 select DEBUG_MUTEXES
79131 @@ -614,7 +614,7 @@ config LOCKDEP
79132
79133 config LOCK_STAT
79134 bool "Lock usage statistics"
79135 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79136 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79137 select LOCKDEP
79138 select DEBUG_SPINLOCK
79139 select DEBUG_MUTEXES
79140 @@ -1282,6 +1282,7 @@ config LATENCYTOP
79141 depends on DEBUG_KERNEL
79142 depends on STACKTRACE_SUPPORT
79143 depends on PROC_FS
79144 + depends on !GRKERNSEC_HIDESYM
79145 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
79146 select KALLSYMS
79147 select KALLSYMS_ALL
79148 @@ -1310,7 +1311,7 @@ config INTERVAL_TREE_TEST
79149
79150 config PROVIDE_OHCI1394_DMA_INIT
79151 bool "Remote debugging over FireWire early on boot"
79152 - depends on PCI && X86
79153 + depends on PCI && X86 && !GRKERNSEC
79154 help
79155 If you want to debug problems which hang or crash the kernel early
79156 on boot and the crashing machine has a FireWire port, you can use
79157 @@ -1339,7 +1340,7 @@ config PROVIDE_OHCI1394_DMA_INIT
79158
79159 config FIREWIRE_OHCI_REMOTE_DMA
79160 bool "Remote debugging over FireWire with firewire-ohci"
79161 - depends on FIREWIRE_OHCI
79162 + depends on FIREWIRE_OHCI && !GRKERNSEC
79163 help
79164 This option lets you use the FireWire bus for remote debugging
79165 with help of the firewire-ohci driver. It enables unfiltered
79166 diff --git a/lib/Makefile b/lib/Makefile
79167 index 6e2cc56..9b13738 100644
79168 --- a/lib/Makefile
79169 +++ b/lib/Makefile
79170 @@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
79171
79172 obj-$(CONFIG_BTREE) += btree.o
79173 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
79174 -obj-$(CONFIG_DEBUG_LIST) += list_debug.o
79175 +obj-y += list_debug.o
79176 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
79177
79178 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
79179 diff --git a/lib/bitmap.c b/lib/bitmap.c
79180 index 06f7e4f..f3cf2b0 100644
79181 --- a/lib/bitmap.c
79182 +++ b/lib/bitmap.c
79183 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
79184 {
79185 int c, old_c, totaldigits, ndigits, nchunks, nbits;
79186 u32 chunk;
79187 - const char __user __force *ubuf = (const char __user __force *)buf;
79188 + const char __user *ubuf = (const char __force_user *)buf;
79189
79190 bitmap_zero(maskp, nmaskbits);
79191
79192 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
79193 {
79194 if (!access_ok(VERIFY_READ, ubuf, ulen))
79195 return -EFAULT;
79196 - return __bitmap_parse((const char __force *)ubuf,
79197 + return __bitmap_parse((const char __force_kernel *)ubuf,
79198 ulen, 1, maskp, nmaskbits);
79199
79200 }
79201 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
79202 {
79203 unsigned a, b;
79204 int c, old_c, totaldigits;
79205 - const char __user __force *ubuf = (const char __user __force *)buf;
79206 + const char __user *ubuf = (const char __force_user *)buf;
79207 int exp_digit, in_range;
79208
79209 totaldigits = c = 0;
79210 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
79211 {
79212 if (!access_ok(VERIFY_READ, ubuf, ulen))
79213 return -EFAULT;
79214 - return __bitmap_parselist((const char __force *)ubuf,
79215 + return __bitmap_parselist((const char __force_kernel *)ubuf,
79216 ulen, 1, maskp, nmaskbits);
79217 }
79218 EXPORT_SYMBOL(bitmap_parselist_user);
79219 diff --git a/lib/bug.c b/lib/bug.c
79220 index 1686034..a9c00c8 100644
79221 --- a/lib/bug.c
79222 +++ b/lib/bug.c
79223 @@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
79224 return BUG_TRAP_TYPE_NONE;
79225
79226 bug = find_bug(bugaddr);
79227 + if (!bug)
79228 + return BUG_TRAP_TYPE_NONE;
79229
79230 file = NULL;
79231 line = 0;
79232 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
79233 index 37061ed..da83f48 100644
79234 --- a/lib/debugobjects.c
79235 +++ b/lib/debugobjects.c
79236 @@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
79237 if (limit > 4)
79238 return;
79239
79240 - is_on_stack = object_is_on_stack(addr);
79241 + is_on_stack = object_starts_on_stack(addr);
79242 if (is_on_stack == onstack)
79243 return;
79244
79245 diff --git a/lib/devres.c b/lib/devres.c
79246 index 8235331..5881053 100644
79247 --- a/lib/devres.c
79248 +++ b/lib/devres.c
79249 @@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
79250 void devm_iounmap(struct device *dev, void __iomem *addr)
79251 {
79252 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
79253 - (void *)addr));
79254 + (void __force *)addr));
79255 iounmap(addr);
79256 }
79257 EXPORT_SYMBOL(devm_iounmap);
79258 @@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
79259 {
79260 ioport_unmap(addr);
79261 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
79262 - devm_ioport_map_match, (void *)addr));
79263 + devm_ioport_map_match, (void __force *)addr));
79264 }
79265 EXPORT_SYMBOL(devm_ioport_unmap);
79266 #endif /* CONFIG_HAS_IOPORT */
79267 diff --git a/lib/div64.c b/lib/div64.c
79268 index a163b6c..9618fa5 100644
79269 --- a/lib/div64.c
79270 +++ b/lib/div64.c
79271 @@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
79272 EXPORT_SYMBOL(__div64_32);
79273
79274 #ifndef div_s64_rem
79275 -s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79276 +s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79277 {
79278 u64 quotient;
79279
79280 @@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
79281 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
79282 */
79283 #ifndef div64_u64
79284 -u64 div64_u64(u64 dividend, u64 divisor)
79285 +u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
79286 {
79287 u32 high = divisor >> 32;
79288 u64 quot;
79289 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
79290 index d87a17a..ac0d79a 100644
79291 --- a/lib/dma-debug.c
79292 +++ b/lib/dma-debug.c
79293 @@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
79294
79295 void dma_debug_add_bus(struct bus_type *bus)
79296 {
79297 - struct notifier_block *nb;
79298 + notifier_block_no_const *nb;
79299
79300 if (global_disable)
79301 return;
79302 @@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
79303
79304 static void check_for_stack(struct device *dev, void *addr)
79305 {
79306 - if (object_is_on_stack(addr))
79307 + if (object_starts_on_stack(addr))
79308 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
79309 "stack [addr=%p]\n", addr);
79310 }
79311 diff --git a/lib/inflate.c b/lib/inflate.c
79312 index 013a761..c28f3fc 100644
79313 --- a/lib/inflate.c
79314 +++ b/lib/inflate.c
79315 @@ -269,7 +269,7 @@ static void free(void *where)
79316 malloc_ptr = free_mem_ptr;
79317 }
79318 #else
79319 -#define malloc(a) kmalloc(a, GFP_KERNEL)
79320 +#define malloc(a) kmalloc((a), GFP_KERNEL)
79321 #define free(a) kfree(a)
79322 #endif
79323
79324 diff --git a/lib/ioremap.c b/lib/ioremap.c
79325 index 0c9216c..863bd89 100644
79326 --- a/lib/ioremap.c
79327 +++ b/lib/ioremap.c
79328 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
79329 unsigned long next;
79330
79331 phys_addr -= addr;
79332 - pmd = pmd_alloc(&init_mm, pud, addr);
79333 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
79334 if (!pmd)
79335 return -ENOMEM;
79336 do {
79337 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
79338 unsigned long next;
79339
79340 phys_addr -= addr;
79341 - pud = pud_alloc(&init_mm, pgd, addr);
79342 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
79343 if (!pud)
79344 return -ENOMEM;
79345 do {
79346 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
79347 index bd2bea9..6b3c95e 100644
79348 --- a/lib/is_single_threaded.c
79349 +++ b/lib/is_single_threaded.c
79350 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
79351 struct task_struct *p, *t;
79352 bool ret;
79353
79354 + if (!mm)
79355 + return true;
79356 +
79357 if (atomic_read(&task->signal->live) != 1)
79358 return false;
79359
79360 diff --git a/lib/kobject.c b/lib/kobject.c
79361 index a654866..a4fd13d 100644
79362 --- a/lib/kobject.c
79363 +++ b/lib/kobject.c
79364 @@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
79365
79366
79367 static DEFINE_SPINLOCK(kobj_ns_type_lock);
79368 -static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
79369 +static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
79370
79371 -int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
79372 +int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
79373 {
79374 enum kobj_ns_type type = ops->type;
79375 int error;
79376 diff --git a/lib/list_debug.c b/lib/list_debug.c
79377 index c24c2f7..06e070b 100644
79378 --- a/lib/list_debug.c
79379 +++ b/lib/list_debug.c
79380 @@ -11,7 +11,9 @@
79381 #include <linux/bug.h>
79382 #include <linux/kernel.h>
79383 #include <linux/rculist.h>
79384 +#include <linux/mm.h>
79385
79386 +#ifdef CONFIG_DEBUG_LIST
79387 /*
79388 * Insert a new entry between two known consecutive entries.
79389 *
79390 @@ -19,21 +21,32 @@
79391 * the prev/next entries already!
79392 */
79393
79394 -void __list_add(struct list_head *new,
79395 - struct list_head *prev,
79396 - struct list_head *next)
79397 +static bool __list_add_debug(struct list_head *new,
79398 + struct list_head *prev,
79399 + struct list_head *next)
79400 {
79401 - WARN(next->prev != prev,
79402 + if (WARN(next->prev != prev,
79403 "list_add corruption. next->prev should be "
79404 "prev (%p), but was %p. (next=%p).\n",
79405 - prev, next->prev, next);
79406 - WARN(prev->next != next,
79407 + prev, next->prev, next) ||
79408 + WARN(prev->next != next,
79409 "list_add corruption. prev->next should be "
79410 "next (%p), but was %p. (prev=%p).\n",
79411 - next, prev->next, prev);
79412 - WARN(new == prev || new == next,
79413 - "list_add double add: new=%p, prev=%p, next=%p.\n",
79414 - new, prev, next);
79415 + next, prev->next, prev) ||
79416 + WARN(new == prev || new == next,
79417 + "list_add double add: new=%p, prev=%p, next=%p.\n",
79418 + new, prev, next))
79419 + return false;
79420 + return true;
79421 +}
79422 +
79423 +void __list_add(struct list_head *new,
79424 + struct list_head *prev,
79425 + struct list_head *next)
79426 +{
79427 + if (!__list_add_debug(new, prev, next))
79428 + return;
79429 +
79430 next->prev = new;
79431 new->next = next;
79432 new->prev = prev;
79433 @@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
79434 }
79435 EXPORT_SYMBOL(__list_add);
79436
79437 -void __list_del_entry(struct list_head *entry)
79438 +static bool __list_del_entry_debug(struct list_head *entry)
79439 {
79440 struct list_head *prev, *next;
79441
79442 @@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
79443 WARN(next->prev != entry,
79444 "list_del corruption. next->prev should be %p, "
79445 "but was %p\n", entry, next->prev))
79446 + return false;
79447 + return true;
79448 +}
79449 +
79450 +void __list_del_entry(struct list_head *entry)
79451 +{
79452 + if (!__list_del_entry_debug(entry))
79453 return;
79454
79455 - __list_del(prev, next);
79456 + __list_del(entry->prev, entry->next);
79457 }
79458 EXPORT_SYMBOL(__list_del_entry);
79459
79460 @@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
79461 void __list_add_rcu(struct list_head *new,
79462 struct list_head *prev, struct list_head *next)
79463 {
79464 - WARN(next->prev != prev,
79465 - "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
79466 - prev, next->prev, next);
79467 - WARN(prev->next != next,
79468 - "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
79469 - next, prev->next, prev);
79470 + if (!__list_add_debug(new, prev, next))
79471 + return;
79472 +
79473 new->next = next;
79474 new->prev = prev;
79475 rcu_assign_pointer(list_next_rcu(prev), new);
79476 next->prev = new;
79477 }
79478 EXPORT_SYMBOL(__list_add_rcu);
79479 +#endif
79480 +
79481 +void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
79482 +{
79483 +#ifdef CONFIG_DEBUG_LIST
79484 + if (!__list_add_debug(new, prev, next))
79485 + return;
79486 +#endif
79487 +
79488 + pax_open_kernel();
79489 + next->prev = new;
79490 + new->next = next;
79491 + new->prev = prev;
79492 + prev->next = new;
79493 + pax_close_kernel();
79494 +}
79495 +EXPORT_SYMBOL(__pax_list_add);
79496 +
79497 +void pax_list_del(struct list_head *entry)
79498 +{
79499 +#ifdef CONFIG_DEBUG_LIST
79500 + if (!__list_del_entry_debug(entry))
79501 + return;
79502 +#endif
79503 +
79504 + pax_open_kernel();
79505 + __list_del(entry->prev, entry->next);
79506 + entry->next = LIST_POISON1;
79507 + entry->prev = LIST_POISON2;
79508 + pax_close_kernel();
79509 +}
79510 +EXPORT_SYMBOL(pax_list_del);
79511 +
79512 +void pax_list_del_init(struct list_head *entry)
79513 +{
79514 + pax_open_kernel();
79515 + __list_del(entry->prev, entry->next);
79516 + INIT_LIST_HEAD(entry);
79517 + pax_close_kernel();
79518 +}
79519 +EXPORT_SYMBOL(pax_list_del_init);
79520 +
79521 +void __pax_list_add_rcu(struct list_head *new,
79522 + struct list_head *prev, struct list_head *next)
79523 +{
79524 +#ifdef CONFIG_DEBUG_LIST
79525 + if (!__list_add_debug(new, prev, next))
79526 + return;
79527 +#endif
79528 +
79529 + pax_open_kernel();
79530 + new->next = next;
79531 + new->prev = prev;
79532 + rcu_assign_pointer(list_next_rcu(prev), new);
79533 + next->prev = new;
79534 + pax_close_kernel();
79535 +}
79536 +EXPORT_SYMBOL(__pax_list_add_rcu);
79537 +
79538 +void pax_list_del_rcu(struct list_head *entry)
79539 +{
79540 +#ifdef CONFIG_DEBUG_LIST
79541 + if (!__list_del_entry_debug(entry))
79542 + return;
79543 +#endif
79544 +
79545 + pax_open_kernel();
79546 + __list_del(entry->prev, entry->next);
79547 + entry->next = LIST_POISON1;
79548 + entry->prev = LIST_POISON2;
79549 + pax_close_kernel();
79550 +}
79551 +EXPORT_SYMBOL(pax_list_del_rcu);
79552 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
79553 index e796429..6e38f9f 100644
79554 --- a/lib/radix-tree.c
79555 +++ b/lib/radix-tree.c
79556 @@ -92,7 +92,7 @@ struct radix_tree_preload {
79557 int nr;
79558 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
79559 };
79560 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
79561 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
79562
79563 static inline void *ptr_to_indirect(void *ptr)
79564 {
79565 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
79566 index bb2b201..46abaf9 100644
79567 --- a/lib/strncpy_from_user.c
79568 +++ b/lib/strncpy_from_user.c
79569 @@ -21,7 +21,7 @@
79570 */
79571 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
79572 {
79573 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79574 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79575 long res = 0;
79576
79577 /*
79578 diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
79579 index a28df52..3d55877 100644
79580 --- a/lib/strnlen_user.c
79581 +++ b/lib/strnlen_user.c
79582 @@ -26,7 +26,7 @@
79583 */
79584 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
79585 {
79586 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79587 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
79588 long align, res = 0;
79589 unsigned long c;
79590
79591 diff --git a/lib/swiotlb.c b/lib/swiotlb.c
79592 index d23762e..e21eab2 100644
79593 --- a/lib/swiotlb.c
79594 +++ b/lib/swiotlb.c
79595 @@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
79596
79597 void
79598 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
79599 - dma_addr_t dev_addr)
79600 + dma_addr_t dev_addr, struct dma_attrs *attrs)
79601 {
79602 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
79603
79604 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
79605 index 0d62fd7..b7bc911 100644
79606 --- a/lib/vsprintf.c
79607 +++ b/lib/vsprintf.c
79608 @@ -16,6 +16,9 @@
79609 * - scnprintf and vscnprintf
79610 */
79611
79612 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79613 +#define __INCLUDED_BY_HIDESYM 1
79614 +#endif
79615 #include <stdarg.h>
79616 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
79617 #include <linux/types.h>
79618 @@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
79619 return number(buf, end, *(const netdev_features_t *)addr, spec);
79620 }
79621
79622 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79623 +int kptr_restrict __read_mostly = 2;
79624 +#else
79625 int kptr_restrict __read_mostly;
79626 +#endif
79627
79628 /*
79629 * Show a '%p' thing. A kernel extension is that the '%p' is followed
79630 @@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
79631 * - 'S' For symbolic direct pointers with offset
79632 * - 's' For symbolic direct pointers without offset
79633 * - 'B' For backtraced symbolic direct pointers with offset
79634 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
79635 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
79636 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
79637 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
79638 * - 'M' For a 6-byte MAC address, it prints the address in the
79639 @@ -1044,12 +1053,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79640
79641 if (!ptr && *fmt != 'K') {
79642 /*
79643 - * Print (null) with the same width as a pointer so it makes
79644 + * Print (nil) with the same width as a pointer so it makes
79645 * tabular output look nice.
79646 */
79647 if (spec.field_width == -1)
79648 spec.field_width = default_width;
79649 - return string(buf, end, "(null)", spec);
79650 + return string(buf, end, "(nil)", spec);
79651 }
79652
79653 switch (*fmt) {
79654 @@ -1059,6 +1068,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79655 /* Fallthrough */
79656 case 'S':
79657 case 's':
79658 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79659 + break;
79660 +#else
79661 + return symbol_string(buf, end, ptr, spec, *fmt);
79662 +#endif
79663 + case 'A':
79664 case 'B':
79665 return symbol_string(buf, end, ptr, spec, *fmt);
79666 case 'R':
79667 @@ -1099,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79668 va_end(va);
79669 return buf;
79670 }
79671 + case 'P':
79672 + break;
79673 case 'K':
79674 /*
79675 * %pK cannot be used in IRQ context because its test
79676 @@ -1128,6 +1145,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
79677 return number(buf, end,
79678 (unsigned long long) *((phys_addr_t *)ptr), spec);
79679 }
79680 +
79681 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79682 + /* 'P' = approved pointers to copy to userland,
79683 + as in the /proc/kallsyms case, as we make it display nothing
79684 + for non-root users, and the real contents for root users
79685 + Also ignore 'K' pointers, since we force their NULLing for non-root users
79686 + above
79687 + */
79688 + if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
79689 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
79690 + dump_stack();
79691 + ptr = NULL;
79692 + }
79693 +#endif
79694 +
79695 spec.flags |= SMALL;
79696 if (spec.field_width == -1) {
79697 spec.field_width = default_width;
79698 @@ -1849,11 +1881,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
79699 typeof(type) value; \
79700 if (sizeof(type) == 8) { \
79701 args = PTR_ALIGN(args, sizeof(u32)); \
79702 - *(u32 *)&value = *(u32 *)args; \
79703 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
79704 + *(u32 *)&value = *(const u32 *)args; \
79705 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
79706 } else { \
79707 args = PTR_ALIGN(args, sizeof(type)); \
79708 - value = *(typeof(type) *)args; \
79709 + value = *(const typeof(type) *)args; \
79710 } \
79711 args += sizeof(type); \
79712 value; \
79713 @@ -1916,7 +1948,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
79714 case FORMAT_TYPE_STR: {
79715 const char *str_arg = args;
79716 args += strlen(str_arg) + 1;
79717 - str = string(str, end, (char *)str_arg, spec);
79718 + str = string(str, end, str_arg, spec);
79719 break;
79720 }
79721
79722 diff --git a/localversion-grsec b/localversion-grsec
79723 new file mode 100644
79724 index 0000000..7cd6065
79725 --- /dev/null
79726 +++ b/localversion-grsec
79727 @@ -0,0 +1 @@
79728 +-grsec
79729 diff --git a/mm/Kconfig b/mm/Kconfig
79730 index 3bea74f..e821c99 100644
79731 --- a/mm/Kconfig
79732 +++ b/mm/Kconfig
79733 @@ -311,10 +311,10 @@ config KSM
79734 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
79735
79736 config DEFAULT_MMAP_MIN_ADDR
79737 - int "Low address space to protect from user allocation"
79738 + int "Low address space to protect from user allocation"
79739 depends on MMU
79740 - default 4096
79741 - help
79742 + default 65536
79743 + help
79744 This is the portion of low virtual memory which should be protected
79745 from userspace allocation. Keeping a user from writing to low pages
79746 can help reduce the impact of kernel NULL pointer bugs.
79747 @@ -345,7 +345,7 @@ config MEMORY_FAILURE
79748
79749 config HWPOISON_INJECT
79750 tristate "HWPoison pages injector"
79751 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
79752 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
79753 select PROC_PAGE_MONITOR
79754
79755 config NOMMU_INITIAL_TRIM_EXCESS
79756 diff --git a/mm/filemap.c b/mm/filemap.c
79757 index e1979fd..dda5120 100644
79758 --- a/mm/filemap.c
79759 +++ b/mm/filemap.c
79760 @@ -1748,7 +1748,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
79761 struct address_space *mapping = file->f_mapping;
79762
79763 if (!mapping->a_ops->readpage)
79764 - return -ENOEXEC;
79765 + return -ENODEV;
79766 file_accessed(file);
79767 vma->vm_ops = &generic_file_vm_ops;
79768 return 0;
79769 @@ -2088,6 +2088,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
79770 *pos = i_size_read(inode);
79771
79772 if (limit != RLIM_INFINITY) {
79773 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
79774 if (*pos >= limit) {
79775 send_sig(SIGXFSZ, current, 0);
79776 return -EFBIG;
79777 diff --git a/mm/fremap.c b/mm/fremap.c
79778 index 87da359..3f41cb1 100644
79779 --- a/mm/fremap.c
79780 +++ b/mm/fremap.c
79781 @@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
79782 retry:
79783 vma = find_vma(mm, start);
79784
79785 +#ifdef CONFIG_PAX_SEGMEXEC
79786 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
79787 + goto out;
79788 +#endif
79789 +
79790 /*
79791 * Make sure the vma is shared, that it supports prefaulting,
79792 * and that the remapped range is valid and fully within
79793 diff --git a/mm/highmem.c b/mm/highmem.c
79794 index b32b70c..e512eb0 100644
79795 --- a/mm/highmem.c
79796 +++ b/mm/highmem.c
79797 @@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
79798 * So no dangers, even with speculative execution.
79799 */
79800 page = pte_page(pkmap_page_table[i]);
79801 + pax_open_kernel();
79802 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
79803 -
79804 + pax_close_kernel();
79805 set_page_address(page, NULL);
79806 need_flush = 1;
79807 }
79808 @@ -198,9 +199,11 @@ start:
79809 }
79810 }
79811 vaddr = PKMAP_ADDR(last_pkmap_nr);
79812 +
79813 + pax_open_kernel();
79814 set_pte_at(&init_mm, vaddr,
79815 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
79816 -
79817 + pax_close_kernel();
79818 pkmap_count[last_pkmap_nr] = 1;
79819 set_page_address(page, (void *)vaddr);
79820
79821 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
79822 index 1a12f5b..a85b8fc 100644
79823 --- a/mm/hugetlb.c
79824 +++ b/mm/hugetlb.c
79825 @@ -2005,15 +2005,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
79826 struct hstate *h = &default_hstate;
79827 unsigned long tmp;
79828 int ret;
79829 + ctl_table_no_const hugetlb_table;
79830
79831 tmp = h->max_huge_pages;
79832
79833 if (write && h->order >= MAX_ORDER)
79834 return -EINVAL;
79835
79836 - table->data = &tmp;
79837 - table->maxlen = sizeof(unsigned long);
79838 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
79839 + hugetlb_table = *table;
79840 + hugetlb_table.data = &tmp;
79841 + hugetlb_table.maxlen = sizeof(unsigned long);
79842 + ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
79843 if (ret)
79844 goto out;
79845
79846 @@ -2070,15 +2072,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
79847 struct hstate *h = &default_hstate;
79848 unsigned long tmp;
79849 int ret;
79850 + ctl_table_no_const hugetlb_table;
79851
79852 tmp = h->nr_overcommit_huge_pages;
79853
79854 if (write && h->order >= MAX_ORDER)
79855 return -EINVAL;
79856
79857 - table->data = &tmp;
79858 - table->maxlen = sizeof(unsigned long);
79859 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
79860 + hugetlb_table = *table;
79861 + hugetlb_table.data = &tmp;
79862 + hugetlb_table.maxlen = sizeof(unsigned long);
79863 + ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
79864 if (ret)
79865 goto out;
79866
79867 @@ -2512,6 +2516,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
79868 return 1;
79869 }
79870
79871 +#ifdef CONFIG_PAX_SEGMEXEC
79872 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
79873 +{
79874 + struct mm_struct *mm = vma->vm_mm;
79875 + struct vm_area_struct *vma_m;
79876 + unsigned long address_m;
79877 + pte_t *ptep_m;
79878 +
79879 + vma_m = pax_find_mirror_vma(vma);
79880 + if (!vma_m)
79881 + return;
79882 +
79883 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79884 + address_m = address + SEGMEXEC_TASK_SIZE;
79885 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
79886 + get_page(page_m);
79887 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
79888 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
79889 +}
79890 +#endif
79891 +
79892 /*
79893 * Hugetlb_cow() should be called with page lock of the original hugepage held.
79894 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
79895 @@ -2630,6 +2655,11 @@ retry_avoidcopy:
79896 make_huge_pte(vma, new_page, 1));
79897 page_remove_rmap(old_page);
79898 hugepage_add_new_anon_rmap(new_page, vma, address);
79899 +
79900 +#ifdef CONFIG_PAX_SEGMEXEC
79901 + pax_mirror_huge_pte(vma, address, new_page);
79902 +#endif
79903 +
79904 /* Make the old page be freed below */
79905 new_page = old_page;
79906 }
79907 @@ -2788,6 +2818,10 @@ retry:
79908 && (vma->vm_flags & VM_SHARED)));
79909 set_huge_pte_at(mm, address, ptep, new_pte);
79910
79911 +#ifdef CONFIG_PAX_SEGMEXEC
79912 + pax_mirror_huge_pte(vma, address, page);
79913 +#endif
79914 +
79915 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
79916 /* Optimization, do the COW without a second fault */
79917 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
79918 @@ -2817,6 +2851,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79919 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
79920 struct hstate *h = hstate_vma(vma);
79921
79922 +#ifdef CONFIG_PAX_SEGMEXEC
79923 + struct vm_area_struct *vma_m;
79924 +#endif
79925 +
79926 address &= huge_page_mask(h);
79927
79928 ptep = huge_pte_offset(mm, address);
79929 @@ -2830,6 +2868,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79930 VM_FAULT_SET_HINDEX(hstate_index(h));
79931 }
79932
79933 +#ifdef CONFIG_PAX_SEGMEXEC
79934 + vma_m = pax_find_mirror_vma(vma);
79935 + if (vma_m) {
79936 + unsigned long address_m;
79937 +
79938 + if (vma->vm_start > vma_m->vm_start) {
79939 + address_m = address;
79940 + address -= SEGMEXEC_TASK_SIZE;
79941 + vma = vma_m;
79942 + h = hstate_vma(vma);
79943 + } else
79944 + address_m = address + SEGMEXEC_TASK_SIZE;
79945 +
79946 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
79947 + return VM_FAULT_OOM;
79948 + address_m &= HPAGE_MASK;
79949 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
79950 + }
79951 +#endif
79952 +
79953 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
79954 if (!ptep)
79955 return VM_FAULT_OOM;
79956 diff --git a/mm/internal.h b/mm/internal.h
79957 index 8562de0..7fdfe92 100644
79958 --- a/mm/internal.h
79959 +++ b/mm/internal.h
79960 @@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
79961 * in mm/page_alloc.c
79962 */
79963 extern void __free_pages_bootmem(struct page *page, unsigned int order);
79964 +extern void free_compound_page(struct page *page);
79965 extern void prep_compound_page(struct page *page, unsigned long order);
79966 #ifdef CONFIG_MEMORY_FAILURE
79967 extern bool is_free_buddy_page(struct page *page);
79968 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
79969 index c8d7f31..2dbeffd 100644
79970 --- a/mm/kmemleak.c
79971 +++ b/mm/kmemleak.c
79972 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
79973
79974 for (i = 0; i < object->trace_len; i++) {
79975 void *ptr = (void *)object->trace[i];
79976 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
79977 + seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
79978 }
79979 }
79980
79981 @@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
79982 return -ENOMEM;
79983 }
79984
79985 - dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
79986 + dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
79987 &kmemleak_fops);
79988 if (!dentry)
79989 pr_warning("Failed to create the debugfs kmemleak file\n");
79990 diff --git a/mm/maccess.c b/mm/maccess.c
79991 index d53adf9..03a24bf 100644
79992 --- a/mm/maccess.c
79993 +++ b/mm/maccess.c
79994 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
79995 set_fs(KERNEL_DS);
79996 pagefault_disable();
79997 ret = __copy_from_user_inatomic(dst,
79998 - (__force const void __user *)src, size);
79999 + (const void __force_user *)src, size);
80000 pagefault_enable();
80001 set_fs(old_fs);
80002
80003 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
80004
80005 set_fs(KERNEL_DS);
80006 pagefault_disable();
80007 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
80008 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
80009 pagefault_enable();
80010 set_fs(old_fs);
80011
80012 diff --git a/mm/madvise.c b/mm/madvise.c
80013 index c58c94b..86ec14e 100644
80014 --- a/mm/madvise.c
80015 +++ b/mm/madvise.c
80016 @@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
80017 pgoff_t pgoff;
80018 unsigned long new_flags = vma->vm_flags;
80019
80020 +#ifdef CONFIG_PAX_SEGMEXEC
80021 + struct vm_area_struct *vma_m;
80022 +#endif
80023 +
80024 switch (behavior) {
80025 case MADV_NORMAL:
80026 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
80027 @@ -126,6 +130,13 @@ success:
80028 /*
80029 * vm_flags is protected by the mmap_sem held in write mode.
80030 */
80031 +
80032 +#ifdef CONFIG_PAX_SEGMEXEC
80033 + vma_m = pax_find_mirror_vma(vma);
80034 + if (vma_m)
80035 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
80036 +#endif
80037 +
80038 vma->vm_flags = new_flags;
80039
80040 out:
80041 @@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80042 struct vm_area_struct ** prev,
80043 unsigned long start, unsigned long end)
80044 {
80045 +
80046 +#ifdef CONFIG_PAX_SEGMEXEC
80047 + struct vm_area_struct *vma_m;
80048 +#endif
80049 +
80050 *prev = vma;
80051 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
80052 return -EINVAL;
80053 @@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80054 zap_page_range(vma, start, end - start, &details);
80055 } else
80056 zap_page_range(vma, start, end - start, NULL);
80057 +
80058 +#ifdef CONFIG_PAX_SEGMEXEC
80059 + vma_m = pax_find_mirror_vma(vma);
80060 + if (vma_m) {
80061 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
80062 + struct zap_details details = {
80063 + .nonlinear_vma = vma_m,
80064 + .last_index = ULONG_MAX,
80065 + };
80066 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
80067 + } else
80068 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
80069 + }
80070 +#endif
80071 +
80072 return 0;
80073 }
80074
80075 @@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
80076 if (end < start)
80077 goto out;
80078
80079 +#ifdef CONFIG_PAX_SEGMEXEC
80080 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
80081 + if (end > SEGMEXEC_TASK_SIZE)
80082 + goto out;
80083 + } else
80084 +#endif
80085 +
80086 + if (end > TASK_SIZE)
80087 + goto out;
80088 +
80089 error = 0;
80090 if (end == start)
80091 goto out;
80092 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
80093 index df0694c..bc95539 100644
80094 --- a/mm/memory-failure.c
80095 +++ b/mm/memory-failure.c
80096 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
80097
80098 int sysctl_memory_failure_recovery __read_mostly = 1;
80099
80100 -atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
80101 +atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
80102
80103 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
80104
80105 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
80106 pfn, t->comm, t->pid);
80107 si.si_signo = SIGBUS;
80108 si.si_errno = 0;
80109 - si.si_addr = (void *)addr;
80110 + si.si_addr = (void __user *)addr;
80111 #ifdef __ARCH_SI_TRAPNO
80112 si.si_trapno = trapno;
80113 #endif
80114 @@ -760,7 +760,7 @@ static struct page_state {
80115 unsigned long res;
80116 char *msg;
80117 int (*action)(struct page *p, unsigned long pfn);
80118 -} error_states[] = {
80119 +} __do_const error_states[] = {
80120 { reserved, reserved, "reserved kernel", me_kernel },
80121 /*
80122 * free pages are specially detected outside this table:
80123 @@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80124 nr_pages = 1 << compound_order(hpage);
80125 else /* normal page or thp */
80126 nr_pages = 1;
80127 - atomic_long_add(nr_pages, &num_poisoned_pages);
80128 + atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
80129
80130 /*
80131 * We need/can do nothing about count=0 pages.
80132 @@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80133 if (!PageHWPoison(hpage)
80134 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
80135 || (p != hpage && TestSetPageHWPoison(hpage))) {
80136 - atomic_long_sub(nr_pages, &num_poisoned_pages);
80137 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
80138 return 0;
80139 }
80140 set_page_hwpoison_huge_page(hpage);
80141 @@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80142 }
80143 if (hwpoison_filter(p)) {
80144 if (TestClearPageHWPoison(p))
80145 - atomic_long_sub(nr_pages, &num_poisoned_pages);
80146 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
80147 unlock_page(hpage);
80148 put_page(hpage);
80149 return 0;
80150 @@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
80151 return 0;
80152 }
80153 if (TestClearPageHWPoison(p))
80154 - atomic_long_sub(nr_pages, &num_poisoned_pages);
80155 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
80156 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
80157 return 0;
80158 }
80159 @@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
80160 */
80161 if (TestClearPageHWPoison(page)) {
80162 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
80163 - atomic_long_sub(nr_pages, &num_poisoned_pages);
80164 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
80165 freeit = 1;
80166 if (PageHuge(page))
80167 clear_page_hwpoison_huge_page(page);
80168 @@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
80169 } else {
80170 set_page_hwpoison_huge_page(hpage);
80171 dequeue_hwpoisoned_huge_page(hpage);
80172 - atomic_long_add(1 << compound_trans_order(hpage),
80173 + atomic_long_add_unchecked(1 << compound_trans_order(hpage),
80174 &num_poisoned_pages);
80175 }
80176 /* keep elevated page count for bad page */
80177 @@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
80178 if (PageHuge(page)) {
80179 set_page_hwpoison_huge_page(hpage);
80180 dequeue_hwpoisoned_huge_page(hpage);
80181 - atomic_long_add(1 << compound_trans_order(hpage),
80182 + atomic_long_add_unchecked(1 << compound_trans_order(hpage),
80183 &num_poisoned_pages);
80184 } else {
80185 SetPageHWPoison(page);
80186 - atomic_long_inc(&num_poisoned_pages);
80187 + atomic_long_inc_unchecked(&num_poisoned_pages);
80188 }
80189 }
80190 /* keep elevated page count for bad page */
80191 @@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
80192 put_page(page);
80193 pr_info("soft_offline: %#lx: invalidated\n", pfn);
80194 SetPageHWPoison(page);
80195 - atomic_long_inc(&num_poisoned_pages);
80196 + atomic_long_inc_unchecked(&num_poisoned_pages);
80197 return 0;
80198 }
80199
80200 @@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
80201 ret = -EIO;
80202 } else {
80203 SetPageHWPoison(page);
80204 - atomic_long_inc(&num_poisoned_pages);
80205 + atomic_long_inc_unchecked(&num_poisoned_pages);
80206 }
80207 } else {
80208 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
80209 diff --git a/mm/memory.c b/mm/memory.c
80210 index ba94dec..08ffe0d 100644
80211 --- a/mm/memory.c
80212 +++ b/mm/memory.c
80213 @@ -438,6 +438,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80214 free_pte_range(tlb, pmd, addr);
80215 } while (pmd++, addr = next, addr != end);
80216
80217 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
80218 start &= PUD_MASK;
80219 if (start < floor)
80220 return;
80221 @@ -452,6 +453,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80222 pmd = pmd_offset(pud, start);
80223 pud_clear(pud);
80224 pmd_free_tlb(tlb, pmd, start);
80225 +#endif
80226 +
80227 }
80228
80229 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80230 @@ -471,6 +474,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80231 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
80232 } while (pud++, addr = next, addr != end);
80233
80234 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
80235 start &= PGDIR_MASK;
80236 if (start < floor)
80237 return;
80238 @@ -485,6 +489,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80239 pud = pud_offset(pgd, start);
80240 pgd_clear(pgd);
80241 pud_free_tlb(tlb, pud, start);
80242 +#endif
80243 +
80244 }
80245
80246 /*
80247 @@ -1644,12 +1650,6 @@ no_page_table:
80248 return page;
80249 }
80250
80251 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
80252 -{
80253 - return stack_guard_page_start(vma, addr) ||
80254 - stack_guard_page_end(vma, addr+PAGE_SIZE);
80255 -}
80256 -
80257 /**
80258 * __get_user_pages() - pin user pages in memory
80259 * @tsk: task_struct of target task
80260 @@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80261
80262 i = 0;
80263
80264 - do {
80265 + while (nr_pages) {
80266 struct vm_area_struct *vma;
80267
80268 - vma = find_extend_vma(mm, start);
80269 + vma = find_vma(mm, start);
80270 if (!vma && in_gate_area(mm, start)) {
80271 unsigned long pg = start & PAGE_MASK;
80272 pgd_t *pgd;
80273 @@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80274 goto next_page;
80275 }
80276
80277 - if (!vma ||
80278 + if (!vma || start < vma->vm_start ||
80279 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
80280 !(vm_flags & vma->vm_flags))
80281 return i ? : -EFAULT;
80282 @@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80283 int ret;
80284 unsigned int fault_flags = 0;
80285
80286 - /* For mlock, just skip the stack guard page. */
80287 - if (foll_flags & FOLL_MLOCK) {
80288 - if (stack_guard_page(vma, start))
80289 - goto next_page;
80290 - }
80291 if (foll_flags & FOLL_WRITE)
80292 fault_flags |= FAULT_FLAG_WRITE;
80293 if (nonblocking)
80294 @@ -1901,7 +1896,7 @@ next_page:
80295 start += page_increm * PAGE_SIZE;
80296 nr_pages -= page_increm;
80297 } while (nr_pages && start < vma->vm_end);
80298 - } while (nr_pages);
80299 + }
80300 return i;
80301 }
80302 EXPORT_SYMBOL(__get_user_pages);
80303 @@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
80304 page_add_file_rmap(page);
80305 set_pte_at(mm, addr, pte, mk_pte(page, prot));
80306
80307 +#ifdef CONFIG_PAX_SEGMEXEC
80308 + pax_mirror_file_pte(vma, addr, page, ptl);
80309 +#endif
80310 +
80311 retval = 0;
80312 pte_unmap_unlock(pte, ptl);
80313 return retval;
80314 @@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
80315 if (!page_count(page))
80316 return -EINVAL;
80317 if (!(vma->vm_flags & VM_MIXEDMAP)) {
80318 +
80319 +#ifdef CONFIG_PAX_SEGMEXEC
80320 + struct vm_area_struct *vma_m;
80321 +#endif
80322 +
80323 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
80324 BUG_ON(vma->vm_flags & VM_PFNMAP);
80325 vma->vm_flags |= VM_MIXEDMAP;
80326 +
80327 +#ifdef CONFIG_PAX_SEGMEXEC
80328 + vma_m = pax_find_mirror_vma(vma);
80329 + if (vma_m)
80330 + vma_m->vm_flags |= VM_MIXEDMAP;
80331 +#endif
80332 +
80333 }
80334 return insert_page(vma, addr, page, vma->vm_page_prot);
80335 }
80336 @@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
80337 unsigned long pfn)
80338 {
80339 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
80340 + BUG_ON(vma->vm_mirror);
80341
80342 if (addr < vma->vm_start || addr >= vma->vm_end)
80343 return -EFAULT;
80344 @@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
80345
80346 BUG_ON(pud_huge(*pud));
80347
80348 - pmd = pmd_alloc(mm, pud, addr);
80349 + pmd = (mm == &init_mm) ?
80350 + pmd_alloc_kernel(mm, pud, addr) :
80351 + pmd_alloc(mm, pud, addr);
80352 if (!pmd)
80353 return -ENOMEM;
80354 do {
80355 @@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
80356 unsigned long next;
80357 int err;
80358
80359 - pud = pud_alloc(mm, pgd, addr);
80360 + pud = (mm == &init_mm) ?
80361 + pud_alloc_kernel(mm, pgd, addr) :
80362 + pud_alloc(mm, pgd, addr);
80363 if (!pud)
80364 return -ENOMEM;
80365 do {
80366 @@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
80367 copy_user_highpage(dst, src, va, vma);
80368 }
80369
80370 +#ifdef CONFIG_PAX_SEGMEXEC
80371 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
80372 +{
80373 + struct mm_struct *mm = vma->vm_mm;
80374 + spinlock_t *ptl;
80375 + pte_t *pte, entry;
80376 +
80377 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
80378 + entry = *pte;
80379 + if (!pte_present(entry)) {
80380 + if (!pte_none(entry)) {
80381 + BUG_ON(pte_file(entry));
80382 + free_swap_and_cache(pte_to_swp_entry(entry));
80383 + pte_clear_not_present_full(mm, address, pte, 0);
80384 + }
80385 + } else {
80386 + struct page *page;
80387 +
80388 + flush_cache_page(vma, address, pte_pfn(entry));
80389 + entry = ptep_clear_flush(vma, address, pte);
80390 + BUG_ON(pte_dirty(entry));
80391 + page = vm_normal_page(vma, address, entry);
80392 + if (page) {
80393 + update_hiwater_rss(mm);
80394 + if (PageAnon(page))
80395 + dec_mm_counter_fast(mm, MM_ANONPAGES);
80396 + else
80397 + dec_mm_counter_fast(mm, MM_FILEPAGES);
80398 + page_remove_rmap(page);
80399 + page_cache_release(page);
80400 + }
80401 + }
80402 + pte_unmap_unlock(pte, ptl);
80403 +}
80404 +
80405 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
80406 + *
80407 + * the ptl of the lower mapped page is held on entry and is not released on exit
80408 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
80409 + */
80410 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
80411 +{
80412 + struct mm_struct *mm = vma->vm_mm;
80413 + unsigned long address_m;
80414 + spinlock_t *ptl_m;
80415 + struct vm_area_struct *vma_m;
80416 + pmd_t *pmd_m;
80417 + pte_t *pte_m, entry_m;
80418 +
80419 + BUG_ON(!page_m || !PageAnon(page_m));
80420 +
80421 + vma_m = pax_find_mirror_vma(vma);
80422 + if (!vma_m)
80423 + return;
80424 +
80425 + BUG_ON(!PageLocked(page_m));
80426 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80427 + address_m = address + SEGMEXEC_TASK_SIZE;
80428 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
80429 + pte_m = pte_offset_map(pmd_m, address_m);
80430 + ptl_m = pte_lockptr(mm, pmd_m);
80431 + if (ptl != ptl_m) {
80432 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
80433 + if (!pte_none(*pte_m))
80434 + goto out;
80435 + }
80436 +
80437 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
80438 + page_cache_get(page_m);
80439 + page_add_anon_rmap(page_m, vma_m, address_m);
80440 + inc_mm_counter_fast(mm, MM_ANONPAGES);
80441 + set_pte_at(mm, address_m, pte_m, entry_m);
80442 + update_mmu_cache(vma_m, address_m, pte_m);
80443 +out:
80444 + if (ptl != ptl_m)
80445 + spin_unlock(ptl_m);
80446 + pte_unmap(pte_m);
80447 + unlock_page(page_m);
80448 +}
80449 +
80450 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
80451 +{
80452 + struct mm_struct *mm = vma->vm_mm;
80453 + unsigned long address_m;
80454 + spinlock_t *ptl_m;
80455 + struct vm_area_struct *vma_m;
80456 + pmd_t *pmd_m;
80457 + pte_t *pte_m, entry_m;
80458 +
80459 + BUG_ON(!page_m || PageAnon(page_m));
80460 +
80461 + vma_m = pax_find_mirror_vma(vma);
80462 + if (!vma_m)
80463 + return;
80464 +
80465 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80466 + address_m = address + SEGMEXEC_TASK_SIZE;
80467 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
80468 + pte_m = pte_offset_map(pmd_m, address_m);
80469 + ptl_m = pte_lockptr(mm, pmd_m);
80470 + if (ptl != ptl_m) {
80471 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
80472 + if (!pte_none(*pte_m))
80473 + goto out;
80474 + }
80475 +
80476 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
80477 + page_cache_get(page_m);
80478 + page_add_file_rmap(page_m);
80479 + inc_mm_counter_fast(mm, MM_FILEPAGES);
80480 + set_pte_at(mm, address_m, pte_m, entry_m);
80481 + update_mmu_cache(vma_m, address_m, pte_m);
80482 +out:
80483 + if (ptl != ptl_m)
80484 + spin_unlock(ptl_m);
80485 + pte_unmap(pte_m);
80486 +}
80487 +
80488 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
80489 +{
80490 + struct mm_struct *mm = vma->vm_mm;
80491 + unsigned long address_m;
80492 + spinlock_t *ptl_m;
80493 + struct vm_area_struct *vma_m;
80494 + pmd_t *pmd_m;
80495 + pte_t *pte_m, entry_m;
80496 +
80497 + vma_m = pax_find_mirror_vma(vma);
80498 + if (!vma_m)
80499 + return;
80500 +
80501 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80502 + address_m = address + SEGMEXEC_TASK_SIZE;
80503 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
80504 + pte_m = pte_offset_map(pmd_m, address_m);
80505 + ptl_m = pte_lockptr(mm, pmd_m);
80506 + if (ptl != ptl_m) {
80507 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
80508 + if (!pte_none(*pte_m))
80509 + goto out;
80510 + }
80511 +
80512 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
80513 + set_pte_at(mm, address_m, pte_m, entry_m);
80514 +out:
80515 + if (ptl != ptl_m)
80516 + spin_unlock(ptl_m);
80517 + pte_unmap(pte_m);
80518 +}
80519 +
80520 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
80521 +{
80522 + struct page *page_m;
80523 + pte_t entry;
80524 +
80525 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
80526 + goto out;
80527 +
80528 + entry = *pte;
80529 + page_m = vm_normal_page(vma, address, entry);
80530 + if (!page_m)
80531 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
80532 + else if (PageAnon(page_m)) {
80533 + if (pax_find_mirror_vma(vma)) {
80534 + pte_unmap_unlock(pte, ptl);
80535 + lock_page(page_m);
80536 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
80537 + if (pte_same(entry, *pte))
80538 + pax_mirror_anon_pte(vma, address, page_m, ptl);
80539 + else
80540 + unlock_page(page_m);
80541 + }
80542 + } else
80543 + pax_mirror_file_pte(vma, address, page_m, ptl);
80544 +
80545 +out:
80546 + pte_unmap_unlock(pte, ptl);
80547 +}
80548 +#endif
80549 +
80550 /*
80551 * This routine handles present pages, when users try to write
80552 * to a shared page. It is done by copying the page to a new address
80553 @@ -2808,6 +3004,12 @@ gotten:
80554 */
80555 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
80556 if (likely(pte_same(*page_table, orig_pte))) {
80557 +
80558 +#ifdef CONFIG_PAX_SEGMEXEC
80559 + if (pax_find_mirror_vma(vma))
80560 + BUG_ON(!trylock_page(new_page));
80561 +#endif
80562 +
80563 if (old_page) {
80564 if (!PageAnon(old_page)) {
80565 dec_mm_counter_fast(mm, MM_FILEPAGES);
80566 @@ -2859,6 +3061,10 @@ gotten:
80567 page_remove_rmap(old_page);
80568 }
80569
80570 +#ifdef CONFIG_PAX_SEGMEXEC
80571 + pax_mirror_anon_pte(vma, address, new_page, ptl);
80572 +#endif
80573 +
80574 /* Free the old page.. */
80575 new_page = old_page;
80576 ret |= VM_FAULT_WRITE;
80577 @@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
80578 swap_free(entry);
80579 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
80580 try_to_free_swap(page);
80581 +
80582 +#ifdef CONFIG_PAX_SEGMEXEC
80583 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
80584 +#endif
80585 +
80586 unlock_page(page);
80587 if (page != swapcache) {
80588 /*
80589 @@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
80590
80591 /* No need to invalidate - it was non-present before */
80592 update_mmu_cache(vma, address, page_table);
80593 +
80594 +#ifdef CONFIG_PAX_SEGMEXEC
80595 + pax_mirror_anon_pte(vma, address, page, ptl);
80596 +#endif
80597 +
80598 unlock:
80599 pte_unmap_unlock(page_table, ptl);
80600 out:
80601 @@ -3176,40 +3392,6 @@ out_release:
80602 }
80603
80604 /*
80605 - * This is like a special single-page "expand_{down|up}wards()",
80606 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
80607 - * doesn't hit another vma.
80608 - */
80609 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
80610 -{
80611 - address &= PAGE_MASK;
80612 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
80613 - struct vm_area_struct *prev = vma->vm_prev;
80614 -
80615 - /*
80616 - * Is there a mapping abutting this one below?
80617 - *
80618 - * That's only ok if it's the same stack mapping
80619 - * that has gotten split..
80620 - */
80621 - if (prev && prev->vm_end == address)
80622 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
80623 -
80624 - expand_downwards(vma, address - PAGE_SIZE);
80625 - }
80626 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
80627 - struct vm_area_struct *next = vma->vm_next;
80628 -
80629 - /* As VM_GROWSDOWN but s/below/above/ */
80630 - if (next && next->vm_start == address + PAGE_SIZE)
80631 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
80632 -
80633 - expand_upwards(vma, address + PAGE_SIZE);
80634 - }
80635 - return 0;
80636 -}
80637 -
80638 -/*
80639 * We enter with non-exclusive mmap_sem (to exclude vma changes,
80640 * but allow concurrent faults), and pte mapped but not yet locked.
80641 * We return with mmap_sem still held, but pte unmapped and unlocked.
80642 @@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
80643 unsigned long address, pte_t *page_table, pmd_t *pmd,
80644 unsigned int flags)
80645 {
80646 - struct page *page;
80647 + struct page *page = NULL;
80648 spinlock_t *ptl;
80649 pte_t entry;
80650
80651 - pte_unmap(page_table);
80652 -
80653 - /* Check if we need to add a guard page to the stack */
80654 - if (check_stack_guard_page(vma, address) < 0)
80655 - return VM_FAULT_SIGBUS;
80656 -
80657 - /* Use the zero-page for reads */
80658 if (!(flags & FAULT_FLAG_WRITE)) {
80659 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
80660 vma->vm_page_prot));
80661 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
80662 + ptl = pte_lockptr(mm, pmd);
80663 + spin_lock(ptl);
80664 if (!pte_none(*page_table))
80665 goto unlock;
80666 goto setpte;
80667 }
80668
80669 /* Allocate our own private page. */
80670 + pte_unmap(page_table);
80671 +
80672 if (unlikely(anon_vma_prepare(vma)))
80673 goto oom;
80674 page = alloc_zeroed_user_highpage_movable(vma, address);
80675 @@ -3257,6 +3435,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
80676 if (!pte_none(*page_table))
80677 goto release;
80678
80679 +#ifdef CONFIG_PAX_SEGMEXEC
80680 + if (pax_find_mirror_vma(vma))
80681 + BUG_ON(!trylock_page(page));
80682 +#endif
80683 +
80684 inc_mm_counter_fast(mm, MM_ANONPAGES);
80685 page_add_new_anon_rmap(page, vma, address);
80686 setpte:
80687 @@ -3264,6 +3447,12 @@ setpte:
80688
80689 /* No need to invalidate - it was non-present before */
80690 update_mmu_cache(vma, address, page_table);
80691 +
80692 +#ifdef CONFIG_PAX_SEGMEXEC
80693 + if (page)
80694 + pax_mirror_anon_pte(vma, address, page, ptl);
80695 +#endif
80696 +
80697 unlock:
80698 pte_unmap_unlock(page_table, ptl);
80699 return 0;
80700 @@ -3407,6 +3596,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80701 */
80702 /* Only go through if we didn't race with anybody else... */
80703 if (likely(pte_same(*page_table, orig_pte))) {
80704 +
80705 +#ifdef CONFIG_PAX_SEGMEXEC
80706 + if (anon && pax_find_mirror_vma(vma))
80707 + BUG_ON(!trylock_page(page));
80708 +#endif
80709 +
80710 flush_icache_page(vma, page);
80711 entry = mk_pte(page, vma->vm_page_prot);
80712 if (flags & FAULT_FLAG_WRITE)
80713 @@ -3426,6 +3621,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80714
80715 /* no need to invalidate: a not-present page won't be cached */
80716 update_mmu_cache(vma, address, page_table);
80717 +
80718 +#ifdef CONFIG_PAX_SEGMEXEC
80719 + if (anon)
80720 + pax_mirror_anon_pte(vma, address, page, ptl);
80721 + else
80722 + pax_mirror_file_pte(vma, address, page, ptl);
80723 +#endif
80724 +
80725 } else {
80726 if (cow_page)
80727 mem_cgroup_uncharge_page(cow_page);
80728 @@ -3747,6 +3950,12 @@ int handle_pte_fault(struct mm_struct *mm,
80729 if (flags & FAULT_FLAG_WRITE)
80730 flush_tlb_fix_spurious_fault(vma, address);
80731 }
80732 +
80733 +#ifdef CONFIG_PAX_SEGMEXEC
80734 + pax_mirror_pte(vma, address, pte, pmd, ptl);
80735 + return 0;
80736 +#endif
80737 +
80738 unlock:
80739 pte_unmap_unlock(pte, ptl);
80740 return 0;
80741 @@ -3763,6 +3972,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80742 pmd_t *pmd;
80743 pte_t *pte;
80744
80745 +#ifdef CONFIG_PAX_SEGMEXEC
80746 + struct vm_area_struct *vma_m;
80747 +#endif
80748 +
80749 __set_current_state(TASK_RUNNING);
80750
80751 count_vm_event(PGFAULT);
80752 @@ -3774,6 +3987,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80753 if (unlikely(is_vm_hugetlb_page(vma)))
80754 return hugetlb_fault(mm, vma, address, flags);
80755
80756 +#ifdef CONFIG_PAX_SEGMEXEC
80757 + vma_m = pax_find_mirror_vma(vma);
80758 + if (vma_m) {
80759 + unsigned long address_m;
80760 + pgd_t *pgd_m;
80761 + pud_t *pud_m;
80762 + pmd_t *pmd_m;
80763 +
80764 + if (vma->vm_start > vma_m->vm_start) {
80765 + address_m = address;
80766 + address -= SEGMEXEC_TASK_SIZE;
80767 + vma = vma_m;
80768 + } else
80769 + address_m = address + SEGMEXEC_TASK_SIZE;
80770 +
80771 + pgd_m = pgd_offset(mm, address_m);
80772 + pud_m = pud_alloc(mm, pgd_m, address_m);
80773 + if (!pud_m)
80774 + return VM_FAULT_OOM;
80775 + pmd_m = pmd_alloc(mm, pud_m, address_m);
80776 + if (!pmd_m)
80777 + return VM_FAULT_OOM;
80778 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
80779 + return VM_FAULT_OOM;
80780 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
80781 + }
80782 +#endif
80783 +
80784 retry:
80785 pgd = pgd_offset(mm, address);
80786 pud = pud_alloc(mm, pgd, address);
80787 @@ -3872,6 +4113,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
80788 spin_unlock(&mm->page_table_lock);
80789 return 0;
80790 }
80791 +
80792 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
80793 +{
80794 + pud_t *new = pud_alloc_one(mm, address);
80795 + if (!new)
80796 + return -ENOMEM;
80797 +
80798 + smp_wmb(); /* See comment in __pte_alloc */
80799 +
80800 + spin_lock(&mm->page_table_lock);
80801 + if (pgd_present(*pgd)) /* Another has populated it */
80802 + pud_free(mm, new);
80803 + else
80804 + pgd_populate_kernel(mm, pgd, new);
80805 + spin_unlock(&mm->page_table_lock);
80806 + return 0;
80807 +}
80808 #endif /* __PAGETABLE_PUD_FOLDED */
80809
80810 #ifndef __PAGETABLE_PMD_FOLDED
80811 @@ -3902,6 +4160,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
80812 spin_unlock(&mm->page_table_lock);
80813 return 0;
80814 }
80815 +
80816 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
80817 +{
80818 + pmd_t *new = pmd_alloc_one(mm, address);
80819 + if (!new)
80820 + return -ENOMEM;
80821 +
80822 + smp_wmb(); /* See comment in __pte_alloc */
80823 +
80824 + spin_lock(&mm->page_table_lock);
80825 +#ifndef __ARCH_HAS_4LEVEL_HACK
80826 + if (pud_present(*pud)) /* Another has populated it */
80827 + pmd_free(mm, new);
80828 + else
80829 + pud_populate_kernel(mm, pud, new);
80830 +#else
80831 + if (pgd_present(*pud)) /* Another has populated it */
80832 + pmd_free(mm, new);
80833 + else
80834 + pgd_populate_kernel(mm, pud, new);
80835 +#endif /* __ARCH_HAS_4LEVEL_HACK */
80836 + spin_unlock(&mm->page_table_lock);
80837 + return 0;
80838 +}
80839 #endif /* __PAGETABLE_PMD_FOLDED */
80840
80841 #if !defined(__HAVE_ARCH_GATE_AREA)
80842 @@ -3915,7 +4197,7 @@ static int __init gate_vma_init(void)
80843 gate_vma.vm_start = FIXADDR_USER_START;
80844 gate_vma.vm_end = FIXADDR_USER_END;
80845 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
80846 - gate_vma.vm_page_prot = __P101;
80847 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
80848
80849 return 0;
80850 }
80851 @@ -4049,8 +4331,8 @@ out:
80852 return ret;
80853 }
80854
80855 -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
80856 - void *buf, int len, int write)
80857 +ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
80858 + void *buf, size_t len, int write)
80859 {
80860 resource_size_t phys_addr;
80861 unsigned long prot = 0;
80862 @@ -4075,8 +4357,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
80863 * Access another process' address space as given in mm. If non-NULL, use the
80864 * given task for page fault accounting.
80865 */
80866 -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
80867 - unsigned long addr, void *buf, int len, int write)
80868 +static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
80869 + unsigned long addr, void *buf, size_t len, int write)
80870 {
80871 struct vm_area_struct *vma;
80872 void *old_buf = buf;
80873 @@ -4084,7 +4366,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
80874 down_read(&mm->mmap_sem);
80875 /* ignore errors, just check how much was successfully transferred */
80876 while (len) {
80877 - int bytes, ret, offset;
80878 + ssize_t bytes, ret, offset;
80879 void *maddr;
80880 struct page *page = NULL;
80881
80882 @@ -4143,8 +4425,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
80883 *
80884 * The caller must hold a reference on @mm.
80885 */
80886 -int access_remote_vm(struct mm_struct *mm, unsigned long addr,
80887 - void *buf, int len, int write)
80888 +ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
80889 + void *buf, size_t len, int write)
80890 {
80891 return __access_remote_vm(NULL, mm, addr, buf, len, write);
80892 }
80893 @@ -4154,11 +4436,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
80894 * Source/target buffer must be kernel space,
80895 * Do not walk the page table directly, use get_user_pages
80896 */
80897 -int access_process_vm(struct task_struct *tsk, unsigned long addr,
80898 - void *buf, int len, int write)
80899 +ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
80900 + void *buf, size_t len, int write)
80901 {
80902 struct mm_struct *mm;
80903 - int ret;
80904 + ssize_t ret;
80905
80906 mm = get_task_mm(tsk);
80907 if (!mm)
80908 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
80909 index 7431001..0f8344e 100644
80910 --- a/mm/mempolicy.c
80911 +++ b/mm/mempolicy.c
80912 @@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
80913 unsigned long vmstart;
80914 unsigned long vmend;
80915
80916 +#ifdef CONFIG_PAX_SEGMEXEC
80917 + struct vm_area_struct *vma_m;
80918 +#endif
80919 +
80920 vma = find_vma(mm, start);
80921 if (!vma || vma->vm_start > start)
80922 return -EFAULT;
80923 @@ -744,9 +748,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
80924 if (err)
80925 goto out;
80926 }
80927 +
80928 err = vma_replace_policy(vma, new_pol);
80929 if (err)
80930 goto out;
80931 +
80932 +#ifdef CONFIG_PAX_SEGMEXEC
80933 + vma_m = pax_find_mirror_vma(vma);
80934 + if (vma_m) {
80935 + err = vma_replace_policy(vma_m, new_pol);
80936 + if (err)
80937 + goto out;
80938 + }
80939 +#endif
80940 +
80941 }
80942
80943 out:
80944 @@ -1202,6 +1217,17 @@ static long do_mbind(unsigned long start, unsigned long len,
80945
80946 if (end < start)
80947 return -EINVAL;
80948 +
80949 +#ifdef CONFIG_PAX_SEGMEXEC
80950 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
80951 + if (end > SEGMEXEC_TASK_SIZE)
80952 + return -EINVAL;
80953 + } else
80954 +#endif
80955 +
80956 + if (end > TASK_SIZE)
80957 + return -EINVAL;
80958 +
80959 if (end == start)
80960 return 0;
80961
80962 @@ -1430,8 +1456,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
80963 */
80964 tcred = __task_cred(task);
80965 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
80966 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
80967 - !capable(CAP_SYS_NICE)) {
80968 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
80969 rcu_read_unlock();
80970 err = -EPERM;
80971 goto out_put;
80972 @@ -1462,6 +1487,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
80973 goto out;
80974 }
80975
80976 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80977 + if (mm != current->mm &&
80978 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
80979 + mmput(mm);
80980 + err = -EPERM;
80981 + goto out;
80982 + }
80983 +#endif
80984 +
80985 err = do_migrate_pages(mm, old, new,
80986 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
80987
80988 diff --git a/mm/migrate.c b/mm/migrate.c
80989 index 3bbaf5d..299b0e9 100644
80990 --- a/mm/migrate.c
80991 +++ b/mm/migrate.c
80992 @@ -1382,8 +1382,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
80993 */
80994 tcred = __task_cred(task);
80995 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
80996 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
80997 - !capable(CAP_SYS_NICE)) {
80998 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
80999 rcu_read_unlock();
81000 err = -EPERM;
81001 goto out;
81002 diff --git a/mm/mlock.c b/mm/mlock.c
81003 index 79b7cf7..c60424f 100644
81004 --- a/mm/mlock.c
81005 +++ b/mm/mlock.c
81006 @@ -13,6 +13,7 @@
81007 #include <linux/pagemap.h>
81008 #include <linux/mempolicy.h>
81009 #include <linux/syscalls.h>
81010 +#include <linux/security.h>
81011 #include <linux/sched.h>
81012 #include <linux/export.h>
81013 #include <linux/rmap.h>
81014 @@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
81015 {
81016 unsigned long nstart, end, tmp;
81017 struct vm_area_struct * vma, * prev;
81018 - int error;
81019 + int error = 0;
81020
81021 VM_BUG_ON(start & ~PAGE_MASK);
81022 VM_BUG_ON(len != PAGE_ALIGN(len));
81023 @@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
81024 return -EINVAL;
81025 if (end == start)
81026 return 0;
81027 + if (end > TASK_SIZE)
81028 + return -EINVAL;
81029 +
81030 vma = find_vma(current->mm, start);
81031 if (!vma || vma->vm_start > start)
81032 return -ENOMEM;
81033 @@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
81034 for (nstart = start ; ; ) {
81035 vm_flags_t newflags;
81036
81037 +#ifdef CONFIG_PAX_SEGMEXEC
81038 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81039 + break;
81040 +#endif
81041 +
81042 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
81043
81044 newflags = vma->vm_flags & ~VM_LOCKED;
81045 @@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
81046 lock_limit >>= PAGE_SHIFT;
81047
81048 /* check against resource limits */
81049 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
81050 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
81051 error = do_mlock(start, len, 1);
81052 up_write(&current->mm->mmap_sem);
81053 @@ -500,6 +510,12 @@ static int do_mlockall(int flags)
81054 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
81055 vm_flags_t newflags;
81056
81057 +#ifdef CONFIG_PAX_SEGMEXEC
81058 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81059 + break;
81060 +#endif
81061 +
81062 + BUG_ON(vma->vm_end > TASK_SIZE);
81063 newflags = vma->vm_flags & ~VM_LOCKED;
81064 if (flags & MCL_CURRENT)
81065 newflags |= VM_LOCKED;
81066 @@ -532,6 +548,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
81067 lock_limit >>= PAGE_SHIFT;
81068
81069 ret = -ENOMEM;
81070 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
81071 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
81072 capable(CAP_IPC_LOCK))
81073 ret = do_mlockall(flags);
81074 diff --git a/mm/mmap.c b/mm/mmap.c
81075 index e17fc06..72fc5fd 100644
81076 --- a/mm/mmap.c
81077 +++ b/mm/mmap.c
81078 @@ -33,6 +33,7 @@
81079 #include <linux/uprobes.h>
81080 #include <linux/rbtree_augmented.h>
81081 #include <linux/sched/sysctl.h>
81082 +#include <linux/random.h>
81083
81084 #include <asm/uaccess.h>
81085 #include <asm/cacheflush.h>
81086 @@ -49,6 +50,16 @@
81087 #define arch_rebalance_pgtables(addr, len) (addr)
81088 #endif
81089
81090 +static inline void verify_mm_writelocked(struct mm_struct *mm)
81091 +{
81092 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
81093 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81094 + up_read(&mm->mmap_sem);
81095 + BUG();
81096 + }
81097 +#endif
81098 +}
81099 +
81100 static void unmap_region(struct mm_struct *mm,
81101 struct vm_area_struct *vma, struct vm_area_struct *prev,
81102 unsigned long start, unsigned long end);
81103 @@ -68,22 +79,32 @@ static void unmap_region(struct mm_struct *mm,
81104 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
81105 *
81106 */
81107 -pgprot_t protection_map[16] = {
81108 +pgprot_t protection_map[16] __read_only = {
81109 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
81110 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
81111 };
81112
81113 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
81114 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
81115 {
81116 - return __pgprot(pgprot_val(protection_map[vm_flags &
81117 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
81118 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
81119 pgprot_val(arch_vm_get_page_prot(vm_flags)));
81120 +
81121 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81122 + if (!(__supported_pte_mask & _PAGE_NX) &&
81123 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
81124 + (vm_flags & (VM_READ | VM_WRITE)))
81125 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
81126 +#endif
81127 +
81128 + return prot;
81129 }
81130 EXPORT_SYMBOL(vm_get_page_prot);
81131
81132 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
81133 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
81134 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
81135 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
81136 /*
81137 * Make sure vm_committed_as in one cacheline and not cacheline shared with
81138 * other variables. It can be updated by several CPUs frequently.
81139 @@ -239,6 +260,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
81140 struct vm_area_struct *next = vma->vm_next;
81141
81142 might_sleep();
81143 + BUG_ON(vma->vm_mirror);
81144 if (vma->vm_ops && vma->vm_ops->close)
81145 vma->vm_ops->close(vma);
81146 if (vma->vm_file)
81147 @@ -283,6 +305,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
81148 * not page aligned -Ram Gupta
81149 */
81150 rlim = rlimit(RLIMIT_DATA);
81151 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
81152 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
81153 (mm->end_data - mm->start_data) > rlim)
81154 goto out;
81155 @@ -897,6 +920,12 @@ static int
81156 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
81157 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81158 {
81159 +
81160 +#ifdef CONFIG_PAX_SEGMEXEC
81161 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
81162 + return 0;
81163 +#endif
81164 +
81165 if (is_mergeable_vma(vma, file, vm_flags) &&
81166 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81167 if (vma->vm_pgoff == vm_pgoff)
81168 @@ -916,6 +945,12 @@ static int
81169 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81170 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81171 {
81172 +
81173 +#ifdef CONFIG_PAX_SEGMEXEC
81174 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
81175 + return 0;
81176 +#endif
81177 +
81178 if (is_mergeable_vma(vma, file, vm_flags) &&
81179 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81180 pgoff_t vm_pglen;
81181 @@ -958,13 +993,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81182 struct vm_area_struct *vma_merge(struct mm_struct *mm,
81183 struct vm_area_struct *prev, unsigned long addr,
81184 unsigned long end, unsigned long vm_flags,
81185 - struct anon_vma *anon_vma, struct file *file,
81186 + struct anon_vma *anon_vma, struct file *file,
81187 pgoff_t pgoff, struct mempolicy *policy)
81188 {
81189 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
81190 struct vm_area_struct *area, *next;
81191 int err;
81192
81193 +#ifdef CONFIG_PAX_SEGMEXEC
81194 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
81195 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
81196 +
81197 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
81198 +#endif
81199 +
81200 /*
81201 * We later require that vma->vm_flags == vm_flags,
81202 * so this tests vma->vm_flags & VM_SPECIAL, too.
81203 @@ -980,6 +1022,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81204 if (next && next->vm_end == end) /* cases 6, 7, 8 */
81205 next = next->vm_next;
81206
81207 +#ifdef CONFIG_PAX_SEGMEXEC
81208 + if (prev)
81209 + prev_m = pax_find_mirror_vma(prev);
81210 + if (area)
81211 + area_m = pax_find_mirror_vma(area);
81212 + if (next)
81213 + next_m = pax_find_mirror_vma(next);
81214 +#endif
81215 +
81216 /*
81217 * Can it merge with the predecessor?
81218 */
81219 @@ -999,9 +1050,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81220 /* cases 1, 6 */
81221 err = vma_adjust(prev, prev->vm_start,
81222 next->vm_end, prev->vm_pgoff, NULL);
81223 - } else /* cases 2, 5, 7 */
81224 +
81225 +#ifdef CONFIG_PAX_SEGMEXEC
81226 + if (!err && prev_m)
81227 + err = vma_adjust(prev_m, prev_m->vm_start,
81228 + next_m->vm_end, prev_m->vm_pgoff, NULL);
81229 +#endif
81230 +
81231 + } else { /* cases 2, 5, 7 */
81232 err = vma_adjust(prev, prev->vm_start,
81233 end, prev->vm_pgoff, NULL);
81234 +
81235 +#ifdef CONFIG_PAX_SEGMEXEC
81236 + if (!err && prev_m)
81237 + err = vma_adjust(prev_m, prev_m->vm_start,
81238 + end_m, prev_m->vm_pgoff, NULL);
81239 +#endif
81240 +
81241 + }
81242 if (err)
81243 return NULL;
81244 khugepaged_enter_vma_merge(prev);
81245 @@ -1015,12 +1081,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81246 mpol_equal(policy, vma_policy(next)) &&
81247 can_vma_merge_before(next, vm_flags,
81248 anon_vma, file, pgoff+pglen)) {
81249 - if (prev && addr < prev->vm_end) /* case 4 */
81250 + if (prev && addr < prev->vm_end) { /* case 4 */
81251 err = vma_adjust(prev, prev->vm_start,
81252 addr, prev->vm_pgoff, NULL);
81253 - else /* cases 3, 8 */
81254 +
81255 +#ifdef CONFIG_PAX_SEGMEXEC
81256 + if (!err && prev_m)
81257 + err = vma_adjust(prev_m, prev_m->vm_start,
81258 + addr_m, prev_m->vm_pgoff, NULL);
81259 +#endif
81260 +
81261 + } else { /* cases 3, 8 */
81262 err = vma_adjust(area, addr, next->vm_end,
81263 next->vm_pgoff - pglen, NULL);
81264 +
81265 +#ifdef CONFIG_PAX_SEGMEXEC
81266 + if (!err && area_m)
81267 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
81268 + next_m->vm_pgoff - pglen, NULL);
81269 +#endif
81270 +
81271 + }
81272 if (err)
81273 return NULL;
81274 khugepaged_enter_vma_merge(area);
81275 @@ -1129,8 +1210,10 @@ none:
81276 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81277 struct file *file, long pages)
81278 {
81279 - const unsigned long stack_flags
81280 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
81281 +
81282 +#ifdef CONFIG_PAX_RANDMMAP
81283 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81284 +#endif
81285
81286 mm->total_vm += pages;
81287
81288 @@ -1138,7 +1221,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81289 mm->shared_vm += pages;
81290 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
81291 mm->exec_vm += pages;
81292 - } else if (flags & stack_flags)
81293 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
81294 mm->stack_vm += pages;
81295 }
81296 #endif /* CONFIG_PROC_FS */
81297 @@ -1177,7 +1260,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81298 * (the exception is when the underlying filesystem is noexec
81299 * mounted, in which case we dont add PROT_EXEC.)
81300 */
81301 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
81302 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
81303 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
81304 prot |= PROT_EXEC;
81305
81306 @@ -1203,7 +1286,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81307 /* Obtain the address to map to. we verify (or select) it and ensure
81308 * that it represents a valid section of the address space.
81309 */
81310 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
81311 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
81312 if (addr & ~PAGE_MASK)
81313 return addr;
81314
81315 @@ -1214,6 +1297,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81316 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
81317 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
81318
81319 +#ifdef CONFIG_PAX_MPROTECT
81320 + if (mm->pax_flags & MF_PAX_MPROTECT) {
81321 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
81322 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
81323 + gr_log_rwxmmap(file);
81324 +
81325 +#ifdef CONFIG_PAX_EMUPLT
81326 + vm_flags &= ~VM_EXEC;
81327 +#else
81328 + return -EPERM;
81329 +#endif
81330 +
81331 + }
81332 +
81333 + if (!(vm_flags & VM_EXEC))
81334 + vm_flags &= ~VM_MAYEXEC;
81335 +#else
81336 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
81337 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
81338 +#endif
81339 + else
81340 + vm_flags &= ~VM_MAYWRITE;
81341 + }
81342 +#endif
81343 +
81344 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81345 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
81346 + vm_flags &= ~VM_PAGEEXEC;
81347 +#endif
81348 +
81349 if (flags & MAP_LOCKED)
81350 if (!can_do_mlock())
81351 return -EPERM;
81352 @@ -1225,6 +1338,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81353 locked += mm->locked_vm;
81354 lock_limit = rlimit(RLIMIT_MEMLOCK);
81355 lock_limit >>= PAGE_SHIFT;
81356 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
81357 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
81358 return -EAGAIN;
81359 }
81360 @@ -1305,6 +1419,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81361 vm_flags |= VM_NORESERVE;
81362 }
81363
81364 + if (!gr_acl_handle_mmap(file, prot))
81365 + return -EACCES;
81366 +
81367 addr = mmap_region(file, addr, len, vm_flags, pgoff);
81368 if (!IS_ERR_VALUE(addr) &&
81369 ((vm_flags & VM_LOCKED) ||
81370 @@ -1392,7 +1509,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
81371 vm_flags_t vm_flags = vma->vm_flags;
81372
81373 /* If it was private or non-writable, the write bit is already clear */
81374 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
81375 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
81376 return 0;
81377
81378 /* The backer wishes to know when pages are first written to? */
81379 @@ -1440,16 +1557,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
81380 unsigned long charged = 0;
81381 struct inode *inode = file ? file_inode(file) : NULL;
81382
81383 +#ifdef CONFIG_PAX_SEGMEXEC
81384 + struct vm_area_struct *vma_m = NULL;
81385 +#endif
81386 +
81387 + /*
81388 + * mm->mmap_sem is required to protect against another thread
81389 + * changing the mappings in case we sleep.
81390 + */
81391 + verify_mm_writelocked(mm);
81392 +
81393 /* Clear old maps */
81394 error = -ENOMEM;
81395 -munmap_back:
81396 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
81397 if (do_munmap(mm, addr, len))
81398 return -ENOMEM;
81399 - goto munmap_back;
81400 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
81401 }
81402
81403 /* Check against address space limit. */
81404 +
81405 +#ifdef CONFIG_PAX_RANDMMAP
81406 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81407 +#endif
81408 +
81409 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
81410 return -ENOMEM;
81411
81412 @@ -1481,6 +1612,16 @@ munmap_back:
81413 goto unacct_error;
81414 }
81415
81416 +#ifdef CONFIG_PAX_SEGMEXEC
81417 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
81418 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81419 + if (!vma_m) {
81420 + error = -ENOMEM;
81421 + goto free_vma;
81422 + }
81423 + }
81424 +#endif
81425 +
81426 vma->vm_mm = mm;
81427 vma->vm_start = addr;
81428 vma->vm_end = addr + len;
81429 @@ -1505,6 +1646,13 @@ munmap_back:
81430 if (error)
81431 goto unmap_and_free_vma;
81432
81433 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81434 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
81435 + vma->vm_flags |= VM_PAGEEXEC;
81436 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
81437 + }
81438 +#endif
81439 +
81440 /* Can addr have changed??
81441 *
81442 * Answer: Yes, several device drivers can do it in their
81443 @@ -1543,6 +1691,11 @@ munmap_back:
81444 vma_link(mm, vma, prev, rb_link, rb_parent);
81445 file = vma->vm_file;
81446
81447 +#ifdef CONFIG_PAX_SEGMEXEC
81448 + if (vma_m)
81449 + BUG_ON(pax_mirror_vma(vma_m, vma));
81450 +#endif
81451 +
81452 /* Once vma denies write, undo our temporary denial count */
81453 if (correct_wcount)
81454 atomic_inc(&inode->i_writecount);
81455 @@ -1550,6 +1703,7 @@ out:
81456 perf_event_mmap(vma);
81457
81458 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
81459 + track_exec_limit(mm, addr, addr + len, vm_flags);
81460 if (vm_flags & VM_LOCKED) {
81461 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
81462 vma == get_gate_vma(current->mm)))
81463 @@ -1573,6 +1727,12 @@ unmap_and_free_vma:
81464 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
81465 charged = 0;
81466 free_vma:
81467 +
81468 +#ifdef CONFIG_PAX_SEGMEXEC
81469 + if (vma_m)
81470 + kmem_cache_free(vm_area_cachep, vma_m);
81471 +#endif
81472 +
81473 kmem_cache_free(vm_area_cachep, vma);
81474 unacct_error:
81475 if (charged)
81476 @@ -1580,6 +1740,62 @@ unacct_error:
81477 return error;
81478 }
81479
81480 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
81481 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
81482 +{
81483 + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
81484 + return (random32() & 0xFF) << PAGE_SHIFT;
81485 +
81486 + return 0;
81487 +}
81488 +#endif
81489 +
81490 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
81491 +{
81492 + if (!vma) {
81493 +#ifdef CONFIG_STACK_GROWSUP
81494 + if (addr > sysctl_heap_stack_gap)
81495 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
81496 + else
81497 + vma = find_vma(current->mm, 0);
81498 + if (vma && (vma->vm_flags & VM_GROWSUP))
81499 + return false;
81500 +#endif
81501 + return true;
81502 + }
81503 +
81504 + if (addr + len > vma->vm_start)
81505 + return false;
81506 +
81507 + if (vma->vm_flags & VM_GROWSDOWN)
81508 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
81509 +#ifdef CONFIG_STACK_GROWSUP
81510 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
81511 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
81512 +#endif
81513 + else if (offset)
81514 + return offset <= vma->vm_start - addr - len;
81515 +
81516 + return true;
81517 +}
81518 +
81519 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
81520 +{
81521 + if (vma->vm_start < len)
81522 + return -ENOMEM;
81523 +
81524 + if (!(vma->vm_flags & VM_GROWSDOWN)) {
81525 + if (offset <= vma->vm_start - len)
81526 + return vma->vm_start - len - offset;
81527 + else
81528 + return -ENOMEM;
81529 + }
81530 +
81531 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
81532 + return vma->vm_start - len - sysctl_heap_stack_gap;
81533 + return -ENOMEM;
81534 +}
81535 +
81536 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
81537 {
81538 /*
81539 @@ -1799,6 +2015,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
81540 struct mm_struct *mm = current->mm;
81541 struct vm_area_struct *vma;
81542 struct vm_unmapped_area_info info;
81543 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
81544
81545 if (len > TASK_SIZE)
81546 return -ENOMEM;
81547 @@ -1806,29 +2023,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
81548 if (flags & MAP_FIXED)
81549 return addr;
81550
81551 +#ifdef CONFIG_PAX_RANDMMAP
81552 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
81553 +#endif
81554 +
81555 if (addr) {
81556 addr = PAGE_ALIGN(addr);
81557 vma = find_vma(mm, addr);
81558 - if (TASK_SIZE - len >= addr &&
81559 - (!vma || addr + len <= vma->vm_start))
81560 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
81561 return addr;
81562 }
81563
81564 info.flags = 0;
81565 info.length = len;
81566 info.low_limit = TASK_UNMAPPED_BASE;
81567 +
81568 +#ifdef CONFIG_PAX_RANDMMAP
81569 + if (mm->pax_flags & MF_PAX_RANDMMAP)
81570 + info.low_limit += mm->delta_mmap;
81571 +#endif
81572 +
81573 info.high_limit = TASK_SIZE;
81574 info.align_mask = 0;
81575 + info.threadstack_offset = offset;
81576 return vm_unmapped_area(&info);
81577 }
81578 #endif
81579
81580 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
81581 {
81582 +
81583 +#ifdef CONFIG_PAX_SEGMEXEC
81584 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
81585 + return;
81586 +#endif
81587 +
81588 /*
81589 * Is this a new hole at the lowest possible address?
81590 */
81591 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
81592 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
81593 mm->free_area_cache = addr;
81594 }
81595
81596 @@ -1846,6 +2079,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81597 struct mm_struct *mm = current->mm;
81598 unsigned long addr = addr0;
81599 struct vm_unmapped_area_info info;
81600 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
81601
81602 /* requested length too big for entire address space */
81603 if (len > TASK_SIZE)
81604 @@ -1854,12 +2088,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81605 if (flags & MAP_FIXED)
81606 return addr;
81607
81608 +#ifdef CONFIG_PAX_RANDMMAP
81609 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
81610 +#endif
81611 +
81612 /* requesting a specific address */
81613 if (addr) {
81614 addr = PAGE_ALIGN(addr);
81615 vma = find_vma(mm, addr);
81616 - if (TASK_SIZE - len >= addr &&
81617 - (!vma || addr + len <= vma->vm_start))
81618 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
81619 return addr;
81620 }
81621
81622 @@ -1868,6 +2105,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81623 info.low_limit = PAGE_SIZE;
81624 info.high_limit = mm->mmap_base;
81625 info.align_mask = 0;
81626 + info.threadstack_offset = offset;
81627 addr = vm_unmapped_area(&info);
81628
81629 /*
81630 @@ -1880,6 +2118,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81631 VM_BUG_ON(addr != -ENOMEM);
81632 info.flags = 0;
81633 info.low_limit = TASK_UNMAPPED_BASE;
81634 +
81635 +#ifdef CONFIG_PAX_RANDMMAP
81636 + if (mm->pax_flags & MF_PAX_RANDMMAP)
81637 + info.low_limit += mm->delta_mmap;
81638 +#endif
81639 +
81640 info.high_limit = TASK_SIZE;
81641 addr = vm_unmapped_area(&info);
81642 }
81643 @@ -1890,6 +2134,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
81644
81645 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
81646 {
81647 +
81648 +#ifdef CONFIG_PAX_SEGMEXEC
81649 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
81650 + return;
81651 +#endif
81652 +
81653 /*
81654 * Is this a new hole at the highest possible address?
81655 */
81656 @@ -1897,8 +2147,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
81657 mm->free_area_cache = addr;
81658
81659 /* dont allow allocations above current base */
81660 - if (mm->free_area_cache > mm->mmap_base)
81661 + if (mm->free_area_cache > mm->mmap_base) {
81662 mm->free_area_cache = mm->mmap_base;
81663 + mm->cached_hole_size = ~0UL;
81664 + }
81665 }
81666
81667 unsigned long
81668 @@ -1997,6 +2249,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
81669 return vma;
81670 }
81671
81672 +#ifdef CONFIG_PAX_SEGMEXEC
81673 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
81674 +{
81675 + struct vm_area_struct *vma_m;
81676 +
81677 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
81678 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
81679 + BUG_ON(vma->vm_mirror);
81680 + return NULL;
81681 + }
81682 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
81683 + vma_m = vma->vm_mirror;
81684 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
81685 + BUG_ON(vma->vm_file != vma_m->vm_file);
81686 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
81687 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
81688 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
81689 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
81690 + return vma_m;
81691 +}
81692 +#endif
81693 +
81694 /*
81695 * Verify that the stack growth is acceptable and
81696 * update accounting. This is shared with both the
81697 @@ -2013,6 +2287,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
81698 return -ENOMEM;
81699
81700 /* Stack limit test */
81701 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
81702 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
81703 return -ENOMEM;
81704
81705 @@ -2023,6 +2298,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
81706 locked = mm->locked_vm + grow;
81707 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
81708 limit >>= PAGE_SHIFT;
81709 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
81710 if (locked > limit && !capable(CAP_IPC_LOCK))
81711 return -ENOMEM;
81712 }
81713 @@ -2052,37 +2328,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
81714 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
81715 * vma is the last one with address > vma->vm_end. Have to extend vma.
81716 */
81717 +#ifndef CONFIG_IA64
81718 +static
81719 +#endif
81720 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
81721 {
81722 int error;
81723 + bool locknext;
81724
81725 if (!(vma->vm_flags & VM_GROWSUP))
81726 return -EFAULT;
81727
81728 + /* Also guard against wrapping around to address 0. */
81729 + if (address < PAGE_ALIGN(address+1))
81730 + address = PAGE_ALIGN(address+1);
81731 + else
81732 + return -ENOMEM;
81733 +
81734 /*
81735 * We must make sure the anon_vma is allocated
81736 * so that the anon_vma locking is not a noop.
81737 */
81738 if (unlikely(anon_vma_prepare(vma)))
81739 return -ENOMEM;
81740 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
81741 + if (locknext && anon_vma_prepare(vma->vm_next))
81742 + return -ENOMEM;
81743 vma_lock_anon_vma(vma);
81744 + if (locknext)
81745 + vma_lock_anon_vma(vma->vm_next);
81746
81747 /*
81748 * vma->vm_start/vm_end cannot change under us because the caller
81749 * is required to hold the mmap_sem in read mode. We need the
81750 - * anon_vma lock to serialize against concurrent expand_stacks.
81751 - * Also guard against wrapping around to address 0.
81752 + * anon_vma locks to serialize against concurrent expand_stacks
81753 + * and expand_upwards.
81754 */
81755 - if (address < PAGE_ALIGN(address+4))
81756 - address = PAGE_ALIGN(address+4);
81757 - else {
81758 - vma_unlock_anon_vma(vma);
81759 - return -ENOMEM;
81760 - }
81761 error = 0;
81762
81763 /* Somebody else might have raced and expanded it already */
81764 - if (address > vma->vm_end) {
81765 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
81766 + error = -ENOMEM;
81767 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
81768 unsigned long size, grow;
81769
81770 size = address - vma->vm_start;
81771 @@ -2117,6 +2404,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
81772 }
81773 }
81774 }
81775 + if (locknext)
81776 + vma_unlock_anon_vma(vma->vm_next);
81777 vma_unlock_anon_vma(vma);
81778 khugepaged_enter_vma_merge(vma);
81779 validate_mm(vma->vm_mm);
81780 @@ -2131,6 +2420,8 @@ int expand_downwards(struct vm_area_struct *vma,
81781 unsigned long address)
81782 {
81783 int error;
81784 + bool lockprev = false;
81785 + struct vm_area_struct *prev;
81786
81787 /*
81788 * We must make sure the anon_vma is allocated
81789 @@ -2144,6 +2435,15 @@ int expand_downwards(struct vm_area_struct *vma,
81790 if (error)
81791 return error;
81792
81793 + prev = vma->vm_prev;
81794 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
81795 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
81796 +#endif
81797 + if (lockprev && anon_vma_prepare(prev))
81798 + return -ENOMEM;
81799 + if (lockprev)
81800 + vma_lock_anon_vma(prev);
81801 +
81802 vma_lock_anon_vma(vma);
81803
81804 /*
81805 @@ -2153,9 +2453,17 @@ int expand_downwards(struct vm_area_struct *vma,
81806 */
81807
81808 /* Somebody else might have raced and expanded it already */
81809 - if (address < vma->vm_start) {
81810 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
81811 + error = -ENOMEM;
81812 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
81813 unsigned long size, grow;
81814
81815 +#ifdef CONFIG_PAX_SEGMEXEC
81816 + struct vm_area_struct *vma_m;
81817 +
81818 + vma_m = pax_find_mirror_vma(vma);
81819 +#endif
81820 +
81821 size = vma->vm_end - address;
81822 grow = (vma->vm_start - address) >> PAGE_SHIFT;
81823
81824 @@ -2180,6 +2488,18 @@ int expand_downwards(struct vm_area_struct *vma,
81825 vma->vm_pgoff -= grow;
81826 anon_vma_interval_tree_post_update_vma(vma);
81827 vma_gap_update(vma);
81828 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
81829 +
81830 +#ifdef CONFIG_PAX_SEGMEXEC
81831 + if (vma_m) {
81832 + anon_vma_interval_tree_pre_update_vma(vma_m);
81833 + vma_m->vm_start -= grow << PAGE_SHIFT;
81834 + vma_m->vm_pgoff -= grow;
81835 + anon_vma_interval_tree_post_update_vma(vma_m);
81836 + vma_gap_update(vma_m);
81837 + }
81838 +#endif
81839 +
81840 spin_unlock(&vma->vm_mm->page_table_lock);
81841
81842 perf_event_mmap(vma);
81843 @@ -2284,6 +2604,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
81844 do {
81845 long nrpages = vma_pages(vma);
81846
81847 +#ifdef CONFIG_PAX_SEGMEXEC
81848 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
81849 + vma = remove_vma(vma);
81850 + continue;
81851 + }
81852 +#endif
81853 +
81854 if (vma->vm_flags & VM_ACCOUNT)
81855 nr_accounted += nrpages;
81856 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
81857 @@ -2329,6 +2656,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
81858 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
81859 vma->vm_prev = NULL;
81860 do {
81861 +
81862 +#ifdef CONFIG_PAX_SEGMEXEC
81863 + if (vma->vm_mirror) {
81864 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
81865 + vma->vm_mirror->vm_mirror = NULL;
81866 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
81867 + vma->vm_mirror = NULL;
81868 + }
81869 +#endif
81870 +
81871 vma_rb_erase(vma, &mm->mm_rb);
81872 mm->map_count--;
81873 tail_vma = vma;
81874 @@ -2360,14 +2697,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81875 struct vm_area_struct *new;
81876 int err = -ENOMEM;
81877
81878 +#ifdef CONFIG_PAX_SEGMEXEC
81879 + struct vm_area_struct *vma_m, *new_m = NULL;
81880 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
81881 +#endif
81882 +
81883 if (is_vm_hugetlb_page(vma) && (addr &
81884 ~(huge_page_mask(hstate_vma(vma)))))
81885 return -EINVAL;
81886
81887 +#ifdef CONFIG_PAX_SEGMEXEC
81888 + vma_m = pax_find_mirror_vma(vma);
81889 +#endif
81890 +
81891 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
81892 if (!new)
81893 goto out_err;
81894
81895 +#ifdef CONFIG_PAX_SEGMEXEC
81896 + if (vma_m) {
81897 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
81898 + if (!new_m) {
81899 + kmem_cache_free(vm_area_cachep, new);
81900 + goto out_err;
81901 + }
81902 + }
81903 +#endif
81904 +
81905 /* most fields are the same, copy all, and then fixup */
81906 *new = *vma;
81907
81908 @@ -2380,6 +2736,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81909 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
81910 }
81911
81912 +#ifdef CONFIG_PAX_SEGMEXEC
81913 + if (vma_m) {
81914 + *new_m = *vma_m;
81915 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
81916 + new_m->vm_mirror = new;
81917 + new->vm_mirror = new_m;
81918 +
81919 + if (new_below)
81920 + new_m->vm_end = addr_m;
81921 + else {
81922 + new_m->vm_start = addr_m;
81923 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
81924 + }
81925 + }
81926 +#endif
81927 +
81928 pol = mpol_dup(vma_policy(vma));
81929 if (IS_ERR(pol)) {
81930 err = PTR_ERR(pol);
81931 @@ -2402,6 +2774,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81932 else
81933 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
81934
81935 +#ifdef CONFIG_PAX_SEGMEXEC
81936 + if (!err && vma_m) {
81937 + if (anon_vma_clone(new_m, vma_m))
81938 + goto out_free_mpol;
81939 +
81940 + mpol_get(pol);
81941 + vma_set_policy(new_m, pol);
81942 +
81943 + if (new_m->vm_file)
81944 + get_file(new_m->vm_file);
81945 +
81946 + if (new_m->vm_ops && new_m->vm_ops->open)
81947 + new_m->vm_ops->open(new_m);
81948 +
81949 + if (new_below)
81950 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
81951 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
81952 + else
81953 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
81954 +
81955 + if (err) {
81956 + if (new_m->vm_ops && new_m->vm_ops->close)
81957 + new_m->vm_ops->close(new_m);
81958 + if (new_m->vm_file)
81959 + fput(new_m->vm_file);
81960 + mpol_put(pol);
81961 + }
81962 + }
81963 +#endif
81964 +
81965 /* Success. */
81966 if (!err)
81967 return 0;
81968 @@ -2411,10 +2813,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81969 new->vm_ops->close(new);
81970 if (new->vm_file)
81971 fput(new->vm_file);
81972 - unlink_anon_vmas(new);
81973 out_free_mpol:
81974 mpol_put(pol);
81975 out_free_vma:
81976 +
81977 +#ifdef CONFIG_PAX_SEGMEXEC
81978 + if (new_m) {
81979 + unlink_anon_vmas(new_m);
81980 + kmem_cache_free(vm_area_cachep, new_m);
81981 + }
81982 +#endif
81983 +
81984 + unlink_anon_vmas(new);
81985 kmem_cache_free(vm_area_cachep, new);
81986 out_err:
81987 return err;
81988 @@ -2427,6 +2837,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81989 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81990 unsigned long addr, int new_below)
81991 {
81992 +
81993 +#ifdef CONFIG_PAX_SEGMEXEC
81994 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81995 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
81996 + if (mm->map_count >= sysctl_max_map_count-1)
81997 + return -ENOMEM;
81998 + } else
81999 +#endif
82000 +
82001 if (mm->map_count >= sysctl_max_map_count)
82002 return -ENOMEM;
82003
82004 @@ -2438,11 +2857,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82005 * work. This now handles partial unmappings.
82006 * Jeremy Fitzhardinge <jeremy@goop.org>
82007 */
82008 +#ifdef CONFIG_PAX_SEGMEXEC
82009 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82010 {
82011 + int ret = __do_munmap(mm, start, len);
82012 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
82013 + return ret;
82014 +
82015 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
82016 +}
82017 +
82018 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82019 +#else
82020 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82021 +#endif
82022 +{
82023 unsigned long end;
82024 struct vm_area_struct *vma, *prev, *last;
82025
82026 + /*
82027 + * mm->mmap_sem is required to protect against another thread
82028 + * changing the mappings in case we sleep.
82029 + */
82030 + verify_mm_writelocked(mm);
82031 +
82032 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
82033 return -EINVAL;
82034
82035 @@ -2517,6 +2955,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82036 /* Fix up all other VM information */
82037 remove_vma_list(mm, vma);
82038
82039 + track_exec_limit(mm, start, end, 0UL);
82040 +
82041 return 0;
82042 }
82043
82044 @@ -2525,6 +2965,13 @@ int vm_munmap(unsigned long start, size_t len)
82045 int ret;
82046 struct mm_struct *mm = current->mm;
82047
82048 +
82049 +#ifdef CONFIG_PAX_SEGMEXEC
82050 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
82051 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
82052 + return -EINVAL;
82053 +#endif
82054 +
82055 down_write(&mm->mmap_sem);
82056 ret = do_munmap(mm, start, len);
82057 up_write(&mm->mmap_sem);
82058 @@ -2538,16 +2985,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
82059 return vm_munmap(addr, len);
82060 }
82061
82062 -static inline void verify_mm_writelocked(struct mm_struct *mm)
82063 -{
82064 -#ifdef CONFIG_DEBUG_VM
82065 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82066 - WARN_ON(1);
82067 - up_read(&mm->mmap_sem);
82068 - }
82069 -#endif
82070 -}
82071 -
82072 /*
82073 * this is really a simplified "do_mmap". it only handles
82074 * anonymous maps. eventually we may be able to do some
82075 @@ -2561,6 +2998,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82076 struct rb_node ** rb_link, * rb_parent;
82077 pgoff_t pgoff = addr >> PAGE_SHIFT;
82078 int error;
82079 + unsigned long charged;
82080
82081 len = PAGE_ALIGN(len);
82082 if (!len)
82083 @@ -2568,16 +3006,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82084
82085 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
82086
82087 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
82088 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
82089 + flags &= ~VM_EXEC;
82090 +
82091 +#ifdef CONFIG_PAX_MPROTECT
82092 + if (mm->pax_flags & MF_PAX_MPROTECT)
82093 + flags &= ~VM_MAYEXEC;
82094 +#endif
82095 +
82096 + }
82097 +#endif
82098 +
82099 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
82100 if (error & ~PAGE_MASK)
82101 return error;
82102
82103 + charged = len >> PAGE_SHIFT;
82104 +
82105 /*
82106 * mlock MCL_FUTURE?
82107 */
82108 if (mm->def_flags & VM_LOCKED) {
82109 unsigned long locked, lock_limit;
82110 - locked = len >> PAGE_SHIFT;
82111 + locked = charged;
82112 locked += mm->locked_vm;
82113 lock_limit = rlimit(RLIMIT_MEMLOCK);
82114 lock_limit >>= PAGE_SHIFT;
82115 @@ -2594,21 +3046,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82116 /*
82117 * Clear old maps. this also does some error checking for us
82118 */
82119 - munmap_back:
82120 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82121 if (do_munmap(mm, addr, len))
82122 return -ENOMEM;
82123 - goto munmap_back;
82124 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82125 }
82126
82127 /* Check against address space limits *after* clearing old maps... */
82128 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82129 + if (!may_expand_vm(mm, charged))
82130 return -ENOMEM;
82131
82132 if (mm->map_count > sysctl_max_map_count)
82133 return -ENOMEM;
82134
82135 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
82136 + if (security_vm_enough_memory_mm(mm, charged))
82137 return -ENOMEM;
82138
82139 /* Can we just expand an old private anonymous mapping? */
82140 @@ -2622,7 +3073,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82141 */
82142 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82143 if (!vma) {
82144 - vm_unacct_memory(len >> PAGE_SHIFT);
82145 + vm_unacct_memory(charged);
82146 return -ENOMEM;
82147 }
82148
82149 @@ -2636,9 +3087,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82150 vma_link(mm, vma, prev, rb_link, rb_parent);
82151 out:
82152 perf_event_mmap(vma);
82153 - mm->total_vm += len >> PAGE_SHIFT;
82154 + mm->total_vm += charged;
82155 if (flags & VM_LOCKED)
82156 - mm->locked_vm += (len >> PAGE_SHIFT);
82157 + mm->locked_vm += charged;
82158 + track_exec_limit(mm, addr, addr + len, flags);
82159 return addr;
82160 }
82161
82162 @@ -2700,6 +3152,7 @@ void exit_mmap(struct mm_struct *mm)
82163 while (vma) {
82164 if (vma->vm_flags & VM_ACCOUNT)
82165 nr_accounted += vma_pages(vma);
82166 + vma->vm_mirror = NULL;
82167 vma = remove_vma(vma);
82168 }
82169 vm_unacct_memory(nr_accounted);
82170 @@ -2716,6 +3169,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82171 struct vm_area_struct *prev;
82172 struct rb_node **rb_link, *rb_parent;
82173
82174 +#ifdef CONFIG_PAX_SEGMEXEC
82175 + struct vm_area_struct *vma_m = NULL;
82176 +#endif
82177 +
82178 + if (security_mmap_addr(vma->vm_start))
82179 + return -EPERM;
82180 +
82181 /*
82182 * The vm_pgoff of a purely anonymous vma should be irrelevant
82183 * until its first write fault, when page's anon_vma and index
82184 @@ -2739,7 +3199,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82185 security_vm_enough_memory_mm(mm, vma_pages(vma)))
82186 return -ENOMEM;
82187
82188 +#ifdef CONFIG_PAX_SEGMEXEC
82189 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
82190 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82191 + if (!vma_m)
82192 + return -ENOMEM;
82193 + }
82194 +#endif
82195 +
82196 vma_link(mm, vma, prev, rb_link, rb_parent);
82197 +
82198 +#ifdef CONFIG_PAX_SEGMEXEC
82199 + if (vma_m)
82200 + BUG_ON(pax_mirror_vma(vma_m, vma));
82201 +#endif
82202 +
82203 return 0;
82204 }
82205
82206 @@ -2759,6 +3233,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82207 struct mempolicy *pol;
82208 bool faulted_in_anon_vma = true;
82209
82210 + BUG_ON(vma->vm_mirror);
82211 +
82212 /*
82213 * If anonymous vma has not yet been faulted, update new pgoff
82214 * to match new location, to increase its chance of merging.
82215 @@ -2825,6 +3301,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82216 return NULL;
82217 }
82218
82219 +#ifdef CONFIG_PAX_SEGMEXEC
82220 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
82221 +{
82222 + struct vm_area_struct *prev_m;
82223 + struct rb_node **rb_link_m, *rb_parent_m;
82224 + struct mempolicy *pol_m;
82225 +
82226 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
82227 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
82228 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
82229 + *vma_m = *vma;
82230 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
82231 + if (anon_vma_clone(vma_m, vma))
82232 + return -ENOMEM;
82233 + pol_m = vma_policy(vma_m);
82234 + mpol_get(pol_m);
82235 + vma_set_policy(vma_m, pol_m);
82236 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
82237 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
82238 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
82239 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
82240 + if (vma_m->vm_file)
82241 + get_file(vma_m->vm_file);
82242 + if (vma_m->vm_ops && vma_m->vm_ops->open)
82243 + vma_m->vm_ops->open(vma_m);
82244 + BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
82245 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
82246 + vma_m->vm_mirror = vma;
82247 + vma->vm_mirror = vma_m;
82248 + return 0;
82249 +}
82250 +#endif
82251 +
82252 /*
82253 * Return true if the calling process may expand its vm space by the passed
82254 * number of pages
82255 @@ -2836,6 +3345,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
82256
82257 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
82258
82259 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
82260 if (cur + npages > lim)
82261 return 0;
82262 return 1;
82263 @@ -2906,6 +3416,22 @@ int install_special_mapping(struct mm_struct *mm,
82264 vma->vm_start = addr;
82265 vma->vm_end = addr + len;
82266
82267 +#ifdef CONFIG_PAX_MPROTECT
82268 + if (mm->pax_flags & MF_PAX_MPROTECT) {
82269 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
82270 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
82271 + return -EPERM;
82272 + if (!(vm_flags & VM_EXEC))
82273 + vm_flags &= ~VM_MAYEXEC;
82274 +#else
82275 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82276 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82277 +#endif
82278 + else
82279 + vm_flags &= ~VM_MAYWRITE;
82280 + }
82281 +#endif
82282 +
82283 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
82284 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82285
82286 diff --git a/mm/mprotect.c b/mm/mprotect.c
82287 index 94722a4..07d9926 100644
82288 --- a/mm/mprotect.c
82289 +++ b/mm/mprotect.c
82290 @@ -23,10 +23,18 @@
82291 #include <linux/mmu_notifier.h>
82292 #include <linux/migrate.h>
82293 #include <linux/perf_event.h>
82294 +#include <linux/sched/sysctl.h>
82295 +
82296 +#ifdef CONFIG_PAX_MPROTECT
82297 +#include <linux/elf.h>
82298 +#include <linux/binfmts.h>
82299 +#endif
82300 +
82301 #include <asm/uaccess.h>
82302 #include <asm/pgtable.h>
82303 #include <asm/cacheflush.h>
82304 #include <asm/tlbflush.h>
82305 +#include <asm/mmu_context.h>
82306
82307 #ifndef pgprot_modify
82308 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
82309 @@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
82310 return pages;
82311 }
82312
82313 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
82314 +/* called while holding the mmap semaphor for writing except stack expansion */
82315 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
82316 +{
82317 + unsigned long oldlimit, newlimit = 0UL;
82318 +
82319 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
82320 + return;
82321 +
82322 + spin_lock(&mm->page_table_lock);
82323 + oldlimit = mm->context.user_cs_limit;
82324 + if ((prot & VM_EXEC) && oldlimit < end)
82325 + /* USER_CS limit moved up */
82326 + newlimit = end;
82327 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
82328 + /* USER_CS limit moved down */
82329 + newlimit = start;
82330 +
82331 + if (newlimit) {
82332 + mm->context.user_cs_limit = newlimit;
82333 +
82334 +#ifdef CONFIG_SMP
82335 + wmb();
82336 + cpus_clear(mm->context.cpu_user_cs_mask);
82337 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
82338 +#endif
82339 +
82340 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
82341 + }
82342 + spin_unlock(&mm->page_table_lock);
82343 + if (newlimit == end) {
82344 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
82345 +
82346 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
82347 + if (is_vm_hugetlb_page(vma))
82348 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
82349 + else
82350 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
82351 + }
82352 +}
82353 +#endif
82354 +
82355 int
82356 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82357 unsigned long start, unsigned long end, unsigned long newflags)
82358 @@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82359 int error;
82360 int dirty_accountable = 0;
82361
82362 +#ifdef CONFIG_PAX_SEGMEXEC
82363 + struct vm_area_struct *vma_m = NULL;
82364 + unsigned long start_m, end_m;
82365 +
82366 + start_m = start + SEGMEXEC_TASK_SIZE;
82367 + end_m = end + SEGMEXEC_TASK_SIZE;
82368 +#endif
82369 +
82370 if (newflags == oldflags) {
82371 *pprev = vma;
82372 return 0;
82373 }
82374
82375 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
82376 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
82377 +
82378 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
82379 + return -ENOMEM;
82380 +
82381 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
82382 + return -ENOMEM;
82383 + }
82384 +
82385 /*
82386 * If we make a private mapping writable we increase our commit;
82387 * but (without finer accounting) cannot reduce our commit if we
82388 @@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82389 }
82390 }
82391
82392 +#ifdef CONFIG_PAX_SEGMEXEC
82393 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
82394 + if (start != vma->vm_start) {
82395 + error = split_vma(mm, vma, start, 1);
82396 + if (error)
82397 + goto fail;
82398 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
82399 + *pprev = (*pprev)->vm_next;
82400 + }
82401 +
82402 + if (end != vma->vm_end) {
82403 + error = split_vma(mm, vma, end, 0);
82404 + if (error)
82405 + goto fail;
82406 + }
82407 +
82408 + if (pax_find_mirror_vma(vma)) {
82409 + error = __do_munmap(mm, start_m, end_m - start_m);
82410 + if (error)
82411 + goto fail;
82412 + } else {
82413 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82414 + if (!vma_m) {
82415 + error = -ENOMEM;
82416 + goto fail;
82417 + }
82418 + vma->vm_flags = newflags;
82419 + error = pax_mirror_vma(vma_m, vma);
82420 + if (error) {
82421 + vma->vm_flags = oldflags;
82422 + goto fail;
82423 + }
82424 + }
82425 + }
82426 +#endif
82427 +
82428 /*
82429 * First try to merge with previous and/or next vma.
82430 */
82431 @@ -296,9 +400,21 @@ success:
82432 * vm_flags and vm_page_prot are protected by the mmap_sem
82433 * held in write mode.
82434 */
82435 +
82436 +#ifdef CONFIG_PAX_SEGMEXEC
82437 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
82438 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
82439 +#endif
82440 +
82441 vma->vm_flags = newflags;
82442 +
82443 +#ifdef CONFIG_PAX_MPROTECT
82444 + if (mm->binfmt && mm->binfmt->handle_mprotect)
82445 + mm->binfmt->handle_mprotect(vma, newflags);
82446 +#endif
82447 +
82448 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
82449 - vm_get_page_prot(newflags));
82450 + vm_get_page_prot(vma->vm_flags));
82451
82452 if (vma_wants_writenotify(vma)) {
82453 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
82454 @@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82455 end = start + len;
82456 if (end <= start)
82457 return -ENOMEM;
82458 +
82459 +#ifdef CONFIG_PAX_SEGMEXEC
82460 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
82461 + if (end > SEGMEXEC_TASK_SIZE)
82462 + return -EINVAL;
82463 + } else
82464 +#endif
82465 +
82466 + if (end > TASK_SIZE)
82467 + return -EINVAL;
82468 +
82469 if (!arch_validate_prot(prot))
82470 return -EINVAL;
82471
82472 @@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82473 /*
82474 * Does the application expect PROT_READ to imply PROT_EXEC:
82475 */
82476 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82477 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82478 prot |= PROT_EXEC;
82479
82480 vm_flags = calc_vm_prot_bits(prot);
82481 @@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82482 if (start > vma->vm_start)
82483 prev = vma;
82484
82485 +#ifdef CONFIG_PAX_MPROTECT
82486 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
82487 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
82488 +#endif
82489 +
82490 for (nstart = start ; ; ) {
82491 unsigned long newflags;
82492
82493 @@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82494
82495 /* newflags >> 4 shift VM_MAY% in place of VM_% */
82496 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
82497 + if (prot & (PROT_WRITE | PROT_EXEC))
82498 + gr_log_rwxmprotect(vma->vm_file);
82499 +
82500 + error = -EACCES;
82501 + goto out;
82502 + }
82503 +
82504 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
82505 error = -EACCES;
82506 goto out;
82507 }
82508 @@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
82509 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
82510 if (error)
82511 goto out;
82512 +
82513 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
82514 +
82515 nstart = tmp;
82516
82517 if (nstart < prev->vm_end)
82518 diff --git a/mm/mremap.c b/mm/mremap.c
82519 index 463a257..c0c7a92 100644
82520 --- a/mm/mremap.c
82521 +++ b/mm/mremap.c
82522 @@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
82523 continue;
82524 pte = ptep_get_and_clear(mm, old_addr, old_pte);
82525 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
82526 +
82527 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
82528 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
82529 + pte = pte_exprotect(pte);
82530 +#endif
82531 +
82532 set_pte_at(mm, new_addr, new_pte, pte);
82533 }
82534
82535 @@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
82536 if (is_vm_hugetlb_page(vma))
82537 goto Einval;
82538
82539 +#ifdef CONFIG_PAX_SEGMEXEC
82540 + if (pax_find_mirror_vma(vma))
82541 + goto Einval;
82542 +#endif
82543 +
82544 /* We can't remap across vm area boundaries */
82545 if (old_len > vma->vm_end - addr)
82546 goto Efault;
82547 @@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
82548 unsigned long ret = -EINVAL;
82549 unsigned long charged = 0;
82550 unsigned long map_flags;
82551 + unsigned long pax_task_size = TASK_SIZE;
82552
82553 if (new_addr & ~PAGE_MASK)
82554 goto out;
82555
82556 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
82557 +#ifdef CONFIG_PAX_SEGMEXEC
82558 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
82559 + pax_task_size = SEGMEXEC_TASK_SIZE;
82560 +#endif
82561 +
82562 + pax_task_size -= PAGE_SIZE;
82563 +
82564 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
82565 goto out;
82566
82567 /* Check if the location we're moving into overlaps the
82568 * old location at all, and fail if it does.
82569 */
82570 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
82571 - goto out;
82572 -
82573 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
82574 + if (addr + old_len > new_addr && new_addr + new_len > addr)
82575 goto out;
82576
82577 ret = do_munmap(mm, new_addr, new_len);
82578 @@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82579 unsigned long ret = -EINVAL;
82580 unsigned long charged = 0;
82581 bool locked = false;
82582 + unsigned long pax_task_size = TASK_SIZE;
82583
82584 down_write(&current->mm->mmap_sem);
82585
82586 @@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82587 if (!new_len)
82588 goto out;
82589
82590 +#ifdef CONFIG_PAX_SEGMEXEC
82591 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
82592 + pax_task_size = SEGMEXEC_TASK_SIZE;
82593 +#endif
82594 +
82595 + pax_task_size -= PAGE_SIZE;
82596 +
82597 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
82598 + old_len > pax_task_size || addr > pax_task_size-old_len)
82599 + goto out;
82600 +
82601 if (flags & MREMAP_FIXED) {
82602 if (flags & MREMAP_MAYMOVE)
82603 ret = mremap_to(addr, old_len, new_addr, new_len,
82604 @@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82605 new_addr = addr;
82606 }
82607 ret = addr;
82608 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
82609 goto out;
82610 }
82611 }
82612 @@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
82613 goto out;
82614 }
82615
82616 + map_flags = vma->vm_flags;
82617 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
82618 + if (!(ret & ~PAGE_MASK)) {
82619 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
82620 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
82621 + }
82622 }
82623 out:
82624 if (ret & ~PAGE_MASK)
82625 diff --git a/mm/nommu.c b/mm/nommu.c
82626 index e001768..9b52b30 100644
82627 --- a/mm/nommu.c
82628 +++ b/mm/nommu.c
82629 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
82630 int sysctl_overcommit_ratio = 50; /* default is 50% */
82631 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
82632 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
82633 -int heap_stack_gap = 0;
82634
82635 atomic_long_t mmap_pages_allocated;
82636
82637 @@ -841,15 +840,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
82638 EXPORT_SYMBOL(find_vma);
82639
82640 /*
82641 - * find a VMA
82642 - * - we don't extend stack VMAs under NOMMU conditions
82643 - */
82644 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
82645 -{
82646 - return find_vma(mm, addr);
82647 -}
82648 -
82649 -/*
82650 * expand a stack to a given address
82651 * - not supported under NOMMU conditions
82652 */
82653 @@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82654
82655 /* most fields are the same, copy all, and then fixup */
82656 *new = *vma;
82657 + INIT_LIST_HEAD(&new->anon_vma_chain);
82658 *region = *vma->vm_region;
82659 new->vm_region = region;
82660
82661 @@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
82662 }
82663 EXPORT_SYMBOL(generic_file_remap_pages);
82664
82665 -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82666 - unsigned long addr, void *buf, int len, int write)
82667 +static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82668 + unsigned long addr, void *buf, size_t len, int write)
82669 {
82670 struct vm_area_struct *vma;
82671
82672 @@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82673 *
82674 * The caller must hold a reference on @mm.
82675 */
82676 -int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82677 - void *buf, int len, int write)
82678 +ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
82679 + void *buf, size_t len, int write)
82680 {
82681 return __access_remote_vm(NULL, mm, addr, buf, len, write);
82682 }
82683 @@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82684 * Access another process' address space.
82685 * - source/target buffer must be kernel space
82686 */
82687 -int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
82688 +ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
82689 {
82690 struct mm_struct *mm;
82691
82692 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
82693 index efe6814..64b4701 100644
82694 --- a/mm/page-writeback.c
82695 +++ b/mm/page-writeback.c
82696 @@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
82697 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
82698 * - the bdi dirty thresh drops quickly due to change of JBOD workload
82699 */
82700 -static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
82701 +static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
82702 unsigned long thresh,
82703 unsigned long bg_thresh,
82704 unsigned long dirty,
82705 @@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
82706 }
82707 }
82708
82709 -static struct notifier_block __cpuinitdata ratelimit_nb = {
82710 +static struct notifier_block ratelimit_nb = {
82711 .notifier_call = ratelimit_handler,
82712 .next = NULL,
82713 };
82714 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
82715 index 8fcced7..ebcd481 100644
82716 --- a/mm/page_alloc.c
82717 +++ b/mm/page_alloc.c
82718 @@ -59,6 +59,7 @@
82719 #include <linux/migrate.h>
82720 #include <linux/page-debug-flags.h>
82721 #include <linux/sched/rt.h>
82722 +#include <linux/random.h>
82723
82724 #include <asm/tlbflush.h>
82725 #include <asm/div64.h>
82726 @@ -344,7 +345,7 @@ out:
82727 * This usage means that zero-order pages may not be compound.
82728 */
82729
82730 -static void free_compound_page(struct page *page)
82731 +void free_compound_page(struct page *page)
82732 {
82733 __free_pages_ok(page, compound_order(page));
82734 }
82735 @@ -701,6 +702,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
82736 int i;
82737 int bad = 0;
82738
82739 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
82740 + unsigned long index = 1UL << order;
82741 +#endif
82742 +
82743 trace_mm_page_free(page, order);
82744 kmemcheck_free_shadow(page, order);
82745
82746 @@ -716,6 +721,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
82747 debug_check_no_obj_freed(page_address(page),
82748 PAGE_SIZE << order);
82749 }
82750 +
82751 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
82752 + for (; index; --index)
82753 + sanitize_highpage(page + index - 1);
82754 +#endif
82755 +
82756 arch_free_page(page, order);
82757 kernel_map_pages(page, 1 << order, 0);
82758
82759 @@ -738,6 +749,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
82760 local_irq_restore(flags);
82761 }
82762
82763 +#ifdef CONFIG_PAX_LATENT_ENTROPY
82764 +bool __meminitdata extra_latent_entropy;
82765 +
82766 +static int __init setup_pax_extra_latent_entropy(char *str)
82767 +{
82768 + extra_latent_entropy = true;
82769 + return 0;
82770 +}
82771 +early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
82772 +
82773 +volatile u64 latent_entropy;
82774 +#endif
82775 +
82776 /*
82777 * Read access to zone->managed_pages is safe because it's unsigned long,
82778 * but we still need to serialize writers. Currently all callers of
82779 @@ -760,6 +784,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
82780 set_page_count(p, 0);
82781 }
82782
82783 +#ifdef CONFIG_PAX_LATENT_ENTROPY
82784 + if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
82785 + u64 hash = 0;
82786 + size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
82787 + const u64 *data = lowmem_page_address(page);
82788 +
82789 + for (index = 0; index < end; index++)
82790 + hash ^= hash + data[index];
82791 + latent_entropy ^= hash;
82792 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
82793 + }
82794 +#endif
82795 +
82796 page_zone(page)->managed_pages += 1 << order;
82797 set_page_refcounted(page);
82798 __free_pages(page, order);
82799 @@ -869,8 +906,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
82800 arch_alloc_page(page, order);
82801 kernel_map_pages(page, 1 << order, 1);
82802
82803 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
82804 if (gfp_flags & __GFP_ZERO)
82805 prep_zero_page(page, order, gfp_flags);
82806 +#endif
82807
82808 if (order && (gfp_flags & __GFP_COMP))
82809 prep_compound_page(page, order);
82810 diff --git a/mm/percpu.c b/mm/percpu.c
82811 index 8c8e08f..73a5cda 100644
82812 --- a/mm/percpu.c
82813 +++ b/mm/percpu.c
82814 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
82815 static unsigned int pcpu_high_unit_cpu __read_mostly;
82816
82817 /* the address of the first chunk which starts with the kernel static area */
82818 -void *pcpu_base_addr __read_mostly;
82819 +void *pcpu_base_addr __read_only;
82820 EXPORT_SYMBOL_GPL(pcpu_base_addr);
82821
82822 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
82823 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
82824 index fd26d04..0cea1b0 100644
82825 --- a/mm/process_vm_access.c
82826 +++ b/mm/process_vm_access.c
82827 @@ -13,6 +13,7 @@
82828 #include <linux/uio.h>
82829 #include <linux/sched.h>
82830 #include <linux/highmem.h>
82831 +#include <linux/security.h>
82832 #include <linux/ptrace.h>
82833 #include <linux/slab.h>
82834 #include <linux/syscalls.h>
82835 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
82836 size_t iov_l_curr_offset = 0;
82837 ssize_t iov_len;
82838
82839 + return -ENOSYS; // PaX: until properly audited
82840 +
82841 /*
82842 * Work out how many pages of struct pages we're going to need
82843 * when eventually calling get_user_pages
82844 */
82845 for (i = 0; i < riovcnt; i++) {
82846 iov_len = rvec[i].iov_len;
82847 - if (iov_len > 0) {
82848 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
82849 - + iov_len)
82850 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
82851 - / PAGE_SIZE + 1;
82852 - nr_pages = max(nr_pages, nr_pages_iov);
82853 - }
82854 + if (iov_len <= 0)
82855 + continue;
82856 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
82857 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
82858 + nr_pages = max(nr_pages, nr_pages_iov);
82859 }
82860
82861 if (nr_pages == 0)
82862 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
82863 goto free_proc_pages;
82864 }
82865
82866 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
82867 + rc = -EPERM;
82868 + goto put_task_struct;
82869 + }
82870 +
82871 mm = mm_access(task, PTRACE_MODE_ATTACH);
82872 if (!mm || IS_ERR(mm)) {
82873 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
82874 diff --git a/mm/rmap.c b/mm/rmap.c
82875 index 807c96b..0e05279 100644
82876 --- a/mm/rmap.c
82877 +++ b/mm/rmap.c
82878 @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82879 struct anon_vma *anon_vma = vma->anon_vma;
82880 struct anon_vma_chain *avc;
82881
82882 +#ifdef CONFIG_PAX_SEGMEXEC
82883 + struct anon_vma_chain *avc_m = NULL;
82884 +#endif
82885 +
82886 might_sleep();
82887 if (unlikely(!anon_vma)) {
82888 struct mm_struct *mm = vma->vm_mm;
82889 @@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82890 if (!avc)
82891 goto out_enomem;
82892
82893 +#ifdef CONFIG_PAX_SEGMEXEC
82894 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
82895 + if (!avc_m)
82896 + goto out_enomem_free_avc;
82897 +#endif
82898 +
82899 anon_vma = find_mergeable_anon_vma(vma);
82900 allocated = NULL;
82901 if (!anon_vma) {
82902 @@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82903 /* page_table_lock to protect against threads */
82904 spin_lock(&mm->page_table_lock);
82905 if (likely(!vma->anon_vma)) {
82906 +
82907 +#ifdef CONFIG_PAX_SEGMEXEC
82908 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
82909 +
82910 + if (vma_m) {
82911 + BUG_ON(vma_m->anon_vma);
82912 + vma_m->anon_vma = anon_vma;
82913 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
82914 + avc_m = NULL;
82915 + }
82916 +#endif
82917 +
82918 vma->anon_vma = anon_vma;
82919 anon_vma_chain_link(vma, avc, anon_vma);
82920 allocated = NULL;
82921 @@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
82922
82923 if (unlikely(allocated))
82924 put_anon_vma(allocated);
82925 +
82926 +#ifdef CONFIG_PAX_SEGMEXEC
82927 + if (unlikely(avc_m))
82928 + anon_vma_chain_free(avc_m);
82929 +#endif
82930 +
82931 if (unlikely(avc))
82932 anon_vma_chain_free(avc);
82933 }
82934 return 0;
82935
82936 out_enomem_free_avc:
82937 +
82938 +#ifdef CONFIG_PAX_SEGMEXEC
82939 + if (avc_m)
82940 + anon_vma_chain_free(avc_m);
82941 +#endif
82942 +
82943 anon_vma_chain_free(avc);
82944 out_enomem:
82945 return -ENOMEM;
82946 @@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
82947 * Attach the anon_vmas from src to dst.
82948 * Returns 0 on success, -ENOMEM on failure.
82949 */
82950 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
82951 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
82952 {
82953 struct anon_vma_chain *avc, *pavc;
82954 struct anon_vma *root = NULL;
82955 @@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
82956 * the corresponding VMA in the parent process is attached to.
82957 * Returns 0 on success, non-zero on failure.
82958 */
82959 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
82960 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
82961 {
82962 struct anon_vma_chain *avc;
82963 struct anon_vma *anon_vma;
82964 diff --git a/mm/shmem.c b/mm/shmem.c
82965 index 1c44af7..cefe9a6 100644
82966 --- a/mm/shmem.c
82967 +++ b/mm/shmem.c
82968 @@ -31,7 +31,7 @@
82969 #include <linux/export.h>
82970 #include <linux/swap.h>
82971
82972 -static struct vfsmount *shm_mnt;
82973 +struct vfsmount *shm_mnt;
82974
82975 #ifdef CONFIG_SHMEM
82976 /*
82977 @@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
82978 #define BOGO_DIRENT_SIZE 20
82979
82980 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
82981 -#define SHORT_SYMLINK_LEN 128
82982 +#define SHORT_SYMLINK_LEN 64
82983
82984 /*
82985 * shmem_fallocate and shmem_writepage communicate via inode->i_private
82986 @@ -2201,6 +2201,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
82987 static int shmem_xattr_validate(const char *name)
82988 {
82989 struct { const char *prefix; size_t len; } arr[] = {
82990 +
82991 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
82992 + { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
82993 +#endif
82994 +
82995 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
82996 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
82997 };
82998 @@ -2256,6 +2261,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
82999 if (err)
83000 return err;
83001
83002 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83003 + if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
83004 + if (strcmp(name, XATTR_NAME_PAX_FLAGS))
83005 + return -EOPNOTSUPP;
83006 + if (size > 8)
83007 + return -EINVAL;
83008 + }
83009 +#endif
83010 +
83011 return simple_xattr_set(&info->xattrs, name, value, size, flags);
83012 }
83013
83014 @@ -2568,8 +2582,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
83015 int err = -ENOMEM;
83016
83017 /* Round up to L1_CACHE_BYTES to resist false sharing */
83018 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
83019 - L1_CACHE_BYTES), GFP_KERNEL);
83020 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
83021 if (!sbinfo)
83022 return -ENOMEM;
83023
83024 diff --git a/mm/slab.c b/mm/slab.c
83025 index 856e4a1..fafb820 100644
83026 --- a/mm/slab.c
83027 +++ b/mm/slab.c
83028 @@ -306,7 +306,7 @@ struct kmem_list3 {
83029 * Need this for bootstrapping a per node allocator.
83030 */
83031 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
83032 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
83033 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
83034 #define CACHE_CACHE 0
83035 #define SIZE_AC MAX_NUMNODES
83036 #define SIZE_L3 (2 * MAX_NUMNODES)
83037 @@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
83038 if ((x)->max_freeable < i) \
83039 (x)->max_freeable = i; \
83040 } while (0)
83041 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
83042 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
83043 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
83044 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
83045 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
83046 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
83047 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
83048 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
83049 #else
83050 #define STATS_INC_ACTIVE(x) do { } while (0)
83051 #define STATS_DEC_ACTIVE(x) do { } while (0)
83052 @@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
83053 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
83054 */
83055 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
83056 - const struct slab *slab, void *obj)
83057 + const struct slab *slab, const void *obj)
83058 {
83059 u32 offset = (obj - slab->s_mem);
83060 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
83061 @@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
83062 struct cache_names {
83063 char *name;
83064 char *name_dma;
83065 + char *name_usercopy;
83066 };
83067
83068 static struct cache_names __initdata cache_names[] = {
83069 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
83070 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
83071 #include <linux/kmalloc_sizes.h>
83072 - {NULL,}
83073 + {NULL}
83074 #undef CACHE
83075 };
83076
83077 @@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
83078 if (unlikely(gfpflags & GFP_DMA))
83079 return csizep->cs_dmacachep;
83080 #endif
83081 +
83082 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83083 + if (unlikely(gfpflags & GFP_USERCOPY))
83084 + return csizep->cs_usercopycachep;
83085 +#endif
83086 +
83087 return csizep->cs_cachep;
83088 }
83089
83090 @@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
83091 return notifier_from_errno(err);
83092 }
83093
83094 -static struct notifier_block __cpuinitdata cpucache_notifier = {
83095 +static struct notifier_block cpucache_notifier = {
83096 &cpuup_callback, NULL, 0
83097 };
83098
83099 @@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
83100 */
83101
83102 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
83103 - sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
83104 + sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83105
83106 if (INDEX_AC != INDEX_L3)
83107 sizes[INDEX_L3].cs_cachep =
83108 create_kmalloc_cache(names[INDEX_L3].name,
83109 - sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
83110 + sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83111
83112 slab_early_init = 0;
83113
83114 @@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
83115 */
83116 if (!sizes->cs_cachep)
83117 sizes->cs_cachep = create_kmalloc_cache(names->name,
83118 - sizes->cs_size, ARCH_KMALLOC_FLAGS);
83119 + sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83120
83121 #ifdef CONFIG_ZONE_DMA
83122 sizes->cs_dmacachep = create_kmalloc_cache(
83123 names->name_dma, sizes->cs_size,
83124 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
83125 #endif
83126 +
83127 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83128 + sizes->cs_usercopycachep = create_kmalloc_cache(
83129 + names->name_usercopy, sizes->cs_size,
83130 + ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83131 +#endif
83132 +
83133 sizes++;
83134 names++;
83135 }
83136 @@ -3924,6 +3938,7 @@ void kfree(const void *objp)
83137
83138 if (unlikely(ZERO_OR_NULL_PTR(objp)))
83139 return;
83140 + VM_BUG_ON(!virt_addr_valid(objp));
83141 local_irq_save(flags);
83142 kfree_debugcheck(objp);
83143 c = virt_to_cache(objp);
83144 @@ -4365,10 +4380,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
83145 }
83146 /* cpu stats */
83147 {
83148 - unsigned long allochit = atomic_read(&cachep->allochit);
83149 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
83150 - unsigned long freehit = atomic_read(&cachep->freehit);
83151 - unsigned long freemiss = atomic_read(&cachep->freemiss);
83152 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
83153 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
83154 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
83155 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
83156
83157 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
83158 allochit, allocmiss, freehit, freemiss);
83159 @@ -4600,13 +4615,71 @@ static const struct file_operations proc_slabstats_operations = {
83160 static int __init slab_proc_init(void)
83161 {
83162 #ifdef CONFIG_DEBUG_SLAB_LEAK
83163 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
83164 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
83165 #endif
83166 return 0;
83167 }
83168 module_init(slab_proc_init);
83169 #endif
83170
83171 +bool is_usercopy_object(const void *ptr)
83172 +{
83173 + struct page *page;
83174 + struct kmem_cache *cachep;
83175 +
83176 + if (ZERO_OR_NULL_PTR(ptr))
83177 + return false;
83178 +
83179 + if (!slab_is_available())
83180 + return false;
83181 +
83182 + if (!virt_addr_valid(ptr))
83183 + return false;
83184 +
83185 + page = virt_to_head_page(ptr);
83186 +
83187 + if (!PageSlab(page))
83188 + return false;
83189 +
83190 + cachep = page->slab_cache;
83191 + return cachep->flags & SLAB_USERCOPY;
83192 +}
83193 +
83194 +#ifdef CONFIG_PAX_USERCOPY
83195 +const char *check_heap_object(const void *ptr, unsigned long n)
83196 +{
83197 + struct page *page;
83198 + struct kmem_cache *cachep;
83199 + struct slab *slabp;
83200 + unsigned int objnr;
83201 + unsigned long offset;
83202 +
83203 + if (ZERO_OR_NULL_PTR(ptr))
83204 + return "<null>";
83205 +
83206 + if (!virt_addr_valid(ptr))
83207 + return NULL;
83208 +
83209 + page = virt_to_head_page(ptr);
83210 +
83211 + if (!PageSlab(page))
83212 + return NULL;
83213 +
83214 + cachep = page->slab_cache;
83215 + if (!(cachep->flags & SLAB_USERCOPY))
83216 + return cachep->name;
83217 +
83218 + slabp = page->slab_page;
83219 + objnr = obj_to_index(cachep, slabp, ptr);
83220 + BUG_ON(objnr >= cachep->num);
83221 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
83222 + if (offset <= cachep->object_size && n <= cachep->object_size - offset)
83223 + return NULL;
83224 +
83225 + return cachep->name;
83226 +}
83227 +#endif
83228 +
83229 /**
83230 * ksize - get the actual amount of memory allocated for a given object
83231 * @objp: Pointer to the object
83232 diff --git a/mm/slab.h b/mm/slab.h
83233 index 34a98d6..73633d1 100644
83234 --- a/mm/slab.h
83235 +++ b/mm/slab.h
83236 @@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83237
83238 /* Legal flag mask for kmem_cache_create(), for various configurations */
83239 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
83240 - SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
83241 + SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
83242
83243 #if defined(CONFIG_DEBUG_SLAB)
83244 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
83245 @@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
83246 return s;
83247
83248 page = virt_to_head_page(x);
83249 +
83250 + BUG_ON(!PageSlab(page));
83251 +
83252 cachep = page->slab_cache;
83253 if (slab_equal_or_root(cachep, s))
83254 return cachep;
83255 diff --git a/mm/slab_common.c b/mm/slab_common.c
83256 index 3f3cd97..93b0236 100644
83257 --- a/mm/slab_common.c
83258 +++ b/mm/slab_common.c
83259 @@ -22,7 +22,7 @@
83260
83261 #include "slab.h"
83262
83263 -enum slab_state slab_state;
83264 +enum slab_state slab_state __read_only;
83265 LIST_HEAD(slab_caches);
83266 DEFINE_MUTEX(slab_mutex);
83267 struct kmem_cache *kmem_cache;
83268 @@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
83269
83270 err = __kmem_cache_create(s, flags);
83271 if (!err) {
83272 - s->refcount = 1;
83273 + atomic_set(&s->refcount, 1);
83274 list_add(&s->list, &slab_caches);
83275 memcg_cache_list_add(memcg, s);
83276 } else {
83277 @@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
83278
83279 get_online_cpus();
83280 mutex_lock(&slab_mutex);
83281 - s->refcount--;
83282 - if (!s->refcount) {
83283 + if (atomic_dec_and_test(&s->refcount)) {
83284 list_del(&s->list);
83285
83286 if (!__kmem_cache_shutdown(s)) {
83287 @@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
83288 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
83289 name, size, err);
83290
83291 - s->refcount = -1; /* Exempt from merging for now */
83292 + atomic_set(&s->refcount, -1); /* Exempt from merging for now */
83293 }
83294
83295 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83296 @@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83297
83298 create_boot_cache(s, name, size, flags);
83299 list_add(&s->list, &slab_caches);
83300 - s->refcount = 1;
83301 + atomic_set(&s->refcount, 1);
83302 return s;
83303 }
83304
83305 diff --git a/mm/slob.c b/mm/slob.c
83306 index eeed4a0..6ee34ec 100644
83307 --- a/mm/slob.c
83308 +++ b/mm/slob.c
83309 @@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
83310 /*
83311 * Return the size of a slob block.
83312 */
83313 -static slobidx_t slob_units(slob_t *s)
83314 +static slobidx_t slob_units(const slob_t *s)
83315 {
83316 if (s->units > 0)
83317 return s->units;
83318 @@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
83319 /*
83320 * Return the next free slob block pointer after this one.
83321 */
83322 -static slob_t *slob_next(slob_t *s)
83323 +static slob_t *slob_next(const slob_t *s)
83324 {
83325 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
83326 slobidx_t next;
83327 @@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
83328 /*
83329 * Returns true if s is the last free block in its page.
83330 */
83331 -static int slob_last(slob_t *s)
83332 +static int slob_last(const slob_t *s)
83333 {
83334 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
83335 }
83336
83337 -static void *slob_new_pages(gfp_t gfp, int order, int node)
83338 +static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
83339 {
83340 - void *page;
83341 + struct page *page;
83342
83343 #ifdef CONFIG_NUMA
83344 if (node != NUMA_NO_NODE)
83345 @@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
83346 if (!page)
83347 return NULL;
83348
83349 - return page_address(page);
83350 + __SetPageSlab(page);
83351 + return page;
83352 }
83353
83354 -static void slob_free_pages(void *b, int order)
83355 +static void slob_free_pages(struct page *sp, int order)
83356 {
83357 if (current->reclaim_state)
83358 current->reclaim_state->reclaimed_slab += 1 << order;
83359 - free_pages((unsigned long)b, order);
83360 + __ClearPageSlab(sp);
83361 + reset_page_mapcount(sp);
83362 + sp->private = 0;
83363 + __free_pages(sp, order);
83364 }
83365
83366 /*
83367 @@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
83368
83369 /* Not enough space: must allocate a new page */
83370 if (!b) {
83371 - b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
83372 - if (!b)
83373 + sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
83374 + if (!sp)
83375 return NULL;
83376 - sp = virt_to_page(b);
83377 - __SetPageSlab(sp);
83378 + b = page_address(sp);
83379
83380 spin_lock_irqsave(&slob_lock, flags);
83381 sp->units = SLOB_UNITS(PAGE_SIZE);
83382 sp->freelist = b;
83383 + sp->private = 0;
83384 INIT_LIST_HEAD(&sp->list);
83385 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
83386 set_slob_page_free(sp, slob_list);
83387 @@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
83388 if (slob_page_free(sp))
83389 clear_slob_page_free(sp);
83390 spin_unlock_irqrestore(&slob_lock, flags);
83391 - __ClearPageSlab(sp);
83392 - page_mapcount_reset(sp);
83393 - slob_free_pages(b, 0);
83394 + slob_free_pages(sp, 0);
83395 return;
83396 }
83397
83398 @@ -424,11 +426,10 @@ out:
83399 */
83400
83401 static __always_inline void *
83402 -__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
83403 +__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
83404 {
83405 - unsigned int *m;
83406 - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83407 - void *ret;
83408 + slob_t *m;
83409 + void *ret = NULL;
83410
83411 gfp &= gfp_allowed_mask;
83412
83413 @@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
83414
83415 if (!m)
83416 return NULL;
83417 - *m = size;
83418 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
83419 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
83420 + m[0].units = size;
83421 + m[1].units = align;
83422 ret = (void *)m + align;
83423
83424 trace_kmalloc_node(caller, ret,
83425 size, size + align, gfp, node);
83426 } else {
83427 unsigned int order = get_order(size);
83428 + struct page *page;
83429
83430 if (likely(order))
83431 gfp |= __GFP_COMP;
83432 - ret = slob_new_pages(gfp, order, node);
83433 + page = slob_new_pages(gfp, order, node);
83434 + if (page) {
83435 + ret = page_address(page);
83436 + page->private = size;
83437 + }
83438
83439 trace_kmalloc_node(caller, ret,
83440 size, PAGE_SIZE << order, gfp, node);
83441 }
83442
83443 - kmemleak_alloc(ret, size, 1, gfp);
83444 + return ret;
83445 +}
83446 +
83447 +static __always_inline void *
83448 +__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
83449 +{
83450 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83451 + void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
83452 +
83453 + if (!ZERO_OR_NULL_PTR(ret))
83454 + kmemleak_alloc(ret, size, 1, gfp);
83455 return ret;
83456 }
83457
83458 @@ -493,34 +512,112 @@ void kfree(const void *block)
83459 return;
83460 kmemleak_free(block);
83461
83462 + VM_BUG_ON(!virt_addr_valid(block));
83463 sp = virt_to_page(block);
83464 - if (PageSlab(sp)) {
83465 + VM_BUG_ON(!PageSlab(sp));
83466 + if (!sp->private) {
83467 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83468 - unsigned int *m = (unsigned int *)(block - align);
83469 - slob_free(m, *m + align);
83470 - } else
83471 + slob_t *m = (slob_t *)(block - align);
83472 + slob_free(m, m[0].units + align);
83473 + } else {
83474 + __ClearPageSlab(sp);
83475 + reset_page_mapcount(sp);
83476 + sp->private = 0;
83477 __free_pages(sp, compound_order(sp));
83478 + }
83479 }
83480 EXPORT_SYMBOL(kfree);
83481
83482 +bool is_usercopy_object(const void *ptr)
83483 +{
83484 + if (!slab_is_available())
83485 + return false;
83486 +
83487 + // PAX: TODO
83488 +
83489 + return false;
83490 +}
83491 +
83492 +#ifdef CONFIG_PAX_USERCOPY
83493 +const char *check_heap_object(const void *ptr, unsigned long n)
83494 +{
83495 + struct page *page;
83496 + const slob_t *free;
83497 + const void *base;
83498 + unsigned long flags;
83499 +
83500 + if (ZERO_OR_NULL_PTR(ptr))
83501 + return "<null>";
83502 +
83503 + if (!virt_addr_valid(ptr))
83504 + return NULL;
83505 +
83506 + page = virt_to_head_page(ptr);
83507 + if (!PageSlab(page))
83508 + return NULL;
83509 +
83510 + if (page->private) {
83511 + base = page;
83512 + if (base <= ptr && n <= page->private - (ptr - base))
83513 + return NULL;
83514 + return "<slob>";
83515 + }
83516 +
83517 + /* some tricky double walking to find the chunk */
83518 + spin_lock_irqsave(&slob_lock, flags);
83519 + base = (void *)((unsigned long)ptr & PAGE_MASK);
83520 + free = page->freelist;
83521 +
83522 + while (!slob_last(free) && (void *)free <= ptr) {
83523 + base = free + slob_units(free);
83524 + free = slob_next(free);
83525 + }
83526 +
83527 + while (base < (void *)free) {
83528 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
83529 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
83530 + int offset;
83531 +
83532 + if (ptr < base + align)
83533 + break;
83534 +
83535 + offset = ptr - base - align;
83536 + if (offset >= m) {
83537 + base += size;
83538 + continue;
83539 + }
83540 +
83541 + if (n > m - offset)
83542 + break;
83543 +
83544 + spin_unlock_irqrestore(&slob_lock, flags);
83545 + return NULL;
83546 + }
83547 +
83548 + spin_unlock_irqrestore(&slob_lock, flags);
83549 + return "<slob>";
83550 +}
83551 +#endif
83552 +
83553 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
83554 size_t ksize(const void *block)
83555 {
83556 struct page *sp;
83557 int align;
83558 - unsigned int *m;
83559 + slob_t *m;
83560
83561 BUG_ON(!block);
83562 if (unlikely(block == ZERO_SIZE_PTR))
83563 return 0;
83564
83565 sp = virt_to_page(block);
83566 - if (unlikely(!PageSlab(sp)))
83567 - return PAGE_SIZE << compound_order(sp);
83568 + VM_BUG_ON(!PageSlab(sp));
83569 + if (sp->private)
83570 + return sp->private;
83571
83572 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
83573 - m = (unsigned int *)(block - align);
83574 - return SLOB_UNITS(*m) * SLOB_UNIT;
83575 + m = (slob_t *)(block - align);
83576 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
83577 }
83578 EXPORT_SYMBOL(ksize);
83579
83580 @@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
83581
83582 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
83583 {
83584 - void *b;
83585 + void *b = NULL;
83586
83587 flags &= gfp_allowed_mask;
83588
83589 lockdep_trace_alloc(flags);
83590
83591 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83592 + b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
83593 +#else
83594 if (c->size < PAGE_SIZE) {
83595 b = slob_alloc(c->size, flags, c->align, node);
83596 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
83597 SLOB_UNITS(c->size) * SLOB_UNIT,
83598 flags, node);
83599 } else {
83600 - b = slob_new_pages(flags, get_order(c->size), node);
83601 + struct page *sp;
83602 +
83603 + sp = slob_new_pages(flags, get_order(c->size), node);
83604 + if (sp) {
83605 + b = page_address(sp);
83606 + sp->private = c->size;
83607 + }
83608 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
83609 PAGE_SIZE << get_order(c->size),
83610 flags, node);
83611 }
83612 +#endif
83613
83614 if (c->ctor)
83615 c->ctor(b);
83616 @@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
83617
83618 static void __kmem_cache_free(void *b, int size)
83619 {
83620 - if (size < PAGE_SIZE)
83621 + struct page *sp;
83622 +
83623 + sp = virt_to_page(b);
83624 + BUG_ON(!PageSlab(sp));
83625 + if (!sp->private)
83626 slob_free(b, size);
83627 else
83628 - slob_free_pages(b, get_order(size));
83629 + slob_free_pages(sp, get_order(size));
83630 }
83631
83632 static void kmem_rcu_free(struct rcu_head *head)
83633 @@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
83634
83635 void kmem_cache_free(struct kmem_cache *c, void *b)
83636 {
83637 + int size = c->size;
83638 +
83639 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83640 + if (size + c->align < PAGE_SIZE) {
83641 + size += c->align;
83642 + b -= c->align;
83643 + }
83644 +#endif
83645 +
83646 kmemleak_free_recursive(b, c->flags);
83647 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
83648 struct slob_rcu *slob_rcu;
83649 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
83650 - slob_rcu->size = c->size;
83651 + slob_rcu = b + (size - sizeof(struct slob_rcu));
83652 + slob_rcu->size = size;
83653 call_rcu(&slob_rcu->head, kmem_rcu_free);
83654 } else {
83655 - __kmem_cache_free(b, c->size);
83656 + __kmem_cache_free(b, size);
83657 }
83658
83659 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83660 + trace_kfree(_RET_IP_, b);
83661 +#else
83662 trace_kmem_cache_free(_RET_IP_, b);
83663 +#endif
83664 +
83665 }
83666 EXPORT_SYMBOL(kmem_cache_free);
83667
83668 diff --git a/mm/slub.c b/mm/slub.c
83669 index 4aec537..a64753d 100644
83670 --- a/mm/slub.c
83671 +++ b/mm/slub.c
83672 @@ -197,7 +197,7 @@ struct track {
83673
83674 enum track_item { TRACK_ALLOC, TRACK_FREE };
83675
83676 -#ifdef CONFIG_SYSFS
83677 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83678 static int sysfs_slab_add(struct kmem_cache *);
83679 static int sysfs_slab_alias(struct kmem_cache *, const char *);
83680 static void sysfs_slab_remove(struct kmem_cache *);
83681 @@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
83682 if (!t->addr)
83683 return;
83684
83685 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
83686 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
83687 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
83688 #ifdef CONFIG_STACKTRACE
83689 {
83690 @@ -2653,7 +2653,7 @@ static int slub_min_objects;
83691 * Merge control. If this is set then no merging of slab caches will occur.
83692 * (Could be removed. This was introduced to pacify the merge skeptics.)
83693 */
83694 -static int slub_nomerge;
83695 +static int slub_nomerge = 1;
83696
83697 /*
83698 * Calculate the order of allocation given an slab object size.
83699 @@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
83700 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
83701 #endif
83702
83703 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83704 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
83705 +#endif
83706 +
83707 static int __init setup_slub_min_order(char *str)
83708 {
83709 get_option(&str, &slub_min_order);
83710 @@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
83711 return kmalloc_dma_caches[index];
83712
83713 #endif
83714 +
83715 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83716 + if (flags & SLAB_USERCOPY)
83717 + return kmalloc_usercopy_caches[index];
83718 +
83719 +#endif
83720 +
83721 return kmalloc_caches[index];
83722 }
83723
83724 @@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
83725 EXPORT_SYMBOL(__kmalloc_node);
83726 #endif
83727
83728 +bool is_usercopy_object(const void *ptr)
83729 +{
83730 + struct page *page;
83731 + struct kmem_cache *s;
83732 +
83733 + if (ZERO_OR_NULL_PTR(ptr))
83734 + return false;
83735 +
83736 + if (!slab_is_available())
83737 + return false;
83738 +
83739 + if (!virt_addr_valid(ptr))
83740 + return false;
83741 +
83742 + page = virt_to_head_page(ptr);
83743 +
83744 + if (!PageSlab(page))
83745 + return false;
83746 +
83747 + s = page->slab_cache;
83748 + return s->flags & SLAB_USERCOPY;
83749 +}
83750 +
83751 +#ifdef CONFIG_PAX_USERCOPY
83752 +const char *check_heap_object(const void *ptr, unsigned long n)
83753 +{
83754 + struct page *page;
83755 + struct kmem_cache *s;
83756 + unsigned long offset;
83757 +
83758 + if (ZERO_OR_NULL_PTR(ptr))
83759 + return "<null>";
83760 +
83761 + if (!virt_addr_valid(ptr))
83762 + return NULL;
83763 +
83764 + page = virt_to_head_page(ptr);
83765 +
83766 + if (!PageSlab(page))
83767 + return NULL;
83768 +
83769 + s = page->slab_cache;
83770 + if (!(s->flags & SLAB_USERCOPY))
83771 + return s->name;
83772 +
83773 + offset = (ptr - page_address(page)) % s->size;
83774 + if (offset <= s->object_size && n <= s->object_size - offset)
83775 + return NULL;
83776 +
83777 + return s->name;
83778 +}
83779 +#endif
83780 +
83781 size_t ksize(const void *object)
83782 {
83783 struct page *page;
83784 @@ -3404,6 +3468,7 @@ void kfree(const void *x)
83785 if (unlikely(ZERO_OR_NULL_PTR(x)))
83786 return;
83787
83788 + VM_BUG_ON(!virt_addr_valid(x));
83789 page = virt_to_head_page(x);
83790 if (unlikely(!PageSlab(page))) {
83791 BUG_ON(!PageCompound(page));
83792 @@ -3712,17 +3777,17 @@ void __init kmem_cache_init(void)
83793
83794 /* Caches that are not of the two-to-the-power-of size */
83795 if (KMALLOC_MIN_SIZE <= 32) {
83796 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
83797 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
83798 caches++;
83799 }
83800
83801 if (KMALLOC_MIN_SIZE <= 64) {
83802 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
83803 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
83804 caches++;
83805 }
83806
83807 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
83808 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
83809 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
83810 caches++;
83811 }
83812
83813 @@ -3764,6 +3829,22 @@ void __init kmem_cache_init(void)
83814 }
83815 }
83816 #endif
83817 +
83818 +#ifdef CONFIG_PAX_USERCOPY_SLABS
83819 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
83820 + struct kmem_cache *s = kmalloc_caches[i];
83821 +
83822 + if (s && s->size) {
83823 + char *name = kasprintf(GFP_NOWAIT,
83824 + "usercopy-kmalloc-%d", s->object_size);
83825 +
83826 + BUG_ON(!name);
83827 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
83828 + s->object_size, SLAB_USERCOPY);
83829 + }
83830 + }
83831 +#endif
83832 +
83833 printk(KERN_INFO
83834 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
83835 " CPUs=%d, Nodes=%d\n",
83836 @@ -3790,7 +3871,7 @@ static int slab_unmergeable(struct kmem_cache *s)
83837 /*
83838 * We may have set a slab to be unmergeable during bootstrap.
83839 */
83840 - if (s->refcount < 0)
83841 + if (atomic_read(&s->refcount) < 0)
83842 return 1;
83843
83844 return 0;
83845 @@ -3848,7 +3929,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83846
83847 s = find_mergeable(memcg, size, align, flags, name, ctor);
83848 if (s) {
83849 - s->refcount++;
83850 + atomic_inc(&s->refcount);
83851 /*
83852 * Adjust the object sizes so that we clear
83853 * the complete object on kzalloc.
83854 @@ -3857,7 +3938,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83855 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
83856
83857 if (sysfs_slab_alias(s, name)) {
83858 - s->refcount--;
83859 + atomic_dec(&s->refcount);
83860 s = NULL;
83861 }
83862 }
83863 @@ -3919,7 +4000,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
83864 return NOTIFY_OK;
83865 }
83866
83867 -static struct notifier_block __cpuinitdata slab_notifier = {
83868 +static struct notifier_block slab_notifier = {
83869 .notifier_call = slab_cpuup_callback
83870 };
83871
83872 @@ -3977,7 +4058,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
83873 }
83874 #endif
83875
83876 -#ifdef CONFIG_SYSFS
83877 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83878 static int count_inuse(struct page *page)
83879 {
83880 return page->inuse;
83881 @@ -4364,12 +4445,12 @@ static void resiliency_test(void)
83882 validate_slab_cache(kmalloc_caches[9]);
83883 }
83884 #else
83885 -#ifdef CONFIG_SYSFS
83886 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83887 static void resiliency_test(void) {};
83888 #endif
83889 #endif
83890
83891 -#ifdef CONFIG_SYSFS
83892 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83893 enum slab_stat_type {
83894 SL_ALL, /* All slabs */
83895 SL_PARTIAL, /* Only partially allocated slabs */
83896 @@ -4613,7 +4694,7 @@ SLAB_ATTR_RO(ctor);
83897
83898 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
83899 {
83900 - return sprintf(buf, "%d\n", s->refcount - 1);
83901 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
83902 }
83903 SLAB_ATTR_RO(aliases);
83904
83905 @@ -5266,6 +5347,7 @@ static char *create_unique_id(struct kmem_cache *s)
83906 return name;
83907 }
83908
83909 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83910 static int sysfs_slab_add(struct kmem_cache *s)
83911 {
83912 int err;
83913 @@ -5323,6 +5405,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
83914 kobject_del(&s->kobj);
83915 kobject_put(&s->kobj);
83916 }
83917 +#endif
83918
83919 /*
83920 * Need to buffer aliases during bootup until sysfs becomes
83921 @@ -5336,6 +5419,7 @@ struct saved_alias {
83922
83923 static struct saved_alias *alias_list;
83924
83925 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
83926 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
83927 {
83928 struct saved_alias *al;
83929 @@ -5358,6 +5442,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
83930 alias_list = al;
83931 return 0;
83932 }
83933 +#endif
83934
83935 static int __init slab_sysfs_init(void)
83936 {
83937 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
83938 index 1b7e22a..3fcd4f3 100644
83939 --- a/mm/sparse-vmemmap.c
83940 +++ b/mm/sparse-vmemmap.c
83941 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
83942 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
83943 if (!p)
83944 return NULL;
83945 - pud_populate(&init_mm, pud, p);
83946 + pud_populate_kernel(&init_mm, pud, p);
83947 }
83948 return pud;
83949 }
83950 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
83951 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
83952 if (!p)
83953 return NULL;
83954 - pgd_populate(&init_mm, pgd, p);
83955 + pgd_populate_kernel(&init_mm, pgd, p);
83956 }
83957 return pgd;
83958 }
83959 diff --git a/mm/sparse.c b/mm/sparse.c
83960 index 7ca6dc8..6472aa1 100644
83961 --- a/mm/sparse.c
83962 +++ b/mm/sparse.c
83963 @@ -783,7 +783,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
83964
83965 for (i = 0; i < PAGES_PER_SECTION; i++) {
83966 if (PageHWPoison(&memmap[i])) {
83967 - atomic_long_sub(1, &num_poisoned_pages);
83968 + atomic_long_sub_unchecked(1, &num_poisoned_pages);
83969 ClearPageHWPoison(&memmap[i]);
83970 }
83971 }
83972 diff --git a/mm/swap.c b/mm/swap.c
83973 index 8a529a0..154ef26 100644
83974 --- a/mm/swap.c
83975 +++ b/mm/swap.c
83976 @@ -30,6 +30,7 @@
83977 #include <linux/backing-dev.h>
83978 #include <linux/memcontrol.h>
83979 #include <linux/gfp.h>
83980 +#include <linux/hugetlb.h>
83981
83982 #include "internal.h"
83983
83984 @@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
83985
83986 __page_cache_release(page);
83987 dtor = get_compound_page_dtor(page);
83988 + if (!PageHuge(page))
83989 + BUG_ON(dtor != free_compound_page);
83990 (*dtor)(page);
83991 }
83992
83993 diff --git a/mm/swapfile.c b/mm/swapfile.c
83994 index a1f7772..9e982ac 100644
83995 --- a/mm/swapfile.c
83996 +++ b/mm/swapfile.c
83997 @@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
83998
83999 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
84000 /* Activity counter to indicate that a swapon or swapoff has occurred */
84001 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
84002 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
84003
84004 static inline unsigned char swap_count(unsigned char ent)
84005 {
84006 @@ -1683,7 +1683,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
84007 }
84008 filp_close(swap_file, NULL);
84009 err = 0;
84010 - atomic_inc(&proc_poll_event);
84011 + atomic_inc_unchecked(&proc_poll_event);
84012 wake_up_interruptible(&proc_poll_wait);
84013
84014 out_dput:
84015 @@ -1700,8 +1700,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
84016
84017 poll_wait(file, &proc_poll_wait, wait);
84018
84019 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
84020 - seq->poll_event = atomic_read(&proc_poll_event);
84021 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
84022 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84023 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
84024 }
84025
84026 @@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inode, struct file *file)
84027 return ret;
84028
84029 seq = file->private_data;
84030 - seq->poll_event = atomic_read(&proc_poll_event);
84031 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84032 return 0;
84033 }
84034
84035 @@ -2142,7 +2142,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
84036 (frontswap_map) ? "FS" : "");
84037
84038 mutex_unlock(&swapon_mutex);
84039 - atomic_inc(&proc_poll_event);
84040 + atomic_inc_unchecked(&proc_poll_event);
84041 wake_up_interruptible(&proc_poll_wait);
84042
84043 if (S_ISREG(inode->i_mode))
84044 diff --git a/mm/util.c b/mm/util.c
84045 index ab1424d..7c5bd5a 100644
84046 --- a/mm/util.c
84047 +++ b/mm/util.c
84048 @@ -294,6 +294,12 @@ done:
84049 void arch_pick_mmap_layout(struct mm_struct *mm)
84050 {
84051 mm->mmap_base = TASK_UNMAPPED_BASE;
84052 +
84053 +#ifdef CONFIG_PAX_RANDMMAP
84054 + if (mm->pax_flags & MF_PAX_RANDMMAP)
84055 + mm->mmap_base += mm->delta_mmap;
84056 +#endif
84057 +
84058 mm->get_unmapped_area = arch_get_unmapped_area;
84059 mm->unmap_area = arch_unmap_area;
84060 }
84061 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
84062 index 0f751f2..ef398a0 100644
84063 --- a/mm/vmalloc.c
84064 +++ b/mm/vmalloc.c
84065 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
84066
84067 pte = pte_offset_kernel(pmd, addr);
84068 do {
84069 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84070 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84071 +
84072 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84073 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
84074 + BUG_ON(!pte_exec(*pte));
84075 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
84076 + continue;
84077 + }
84078 +#endif
84079 +
84080 + {
84081 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84082 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84083 + }
84084 } while (pte++, addr += PAGE_SIZE, addr != end);
84085 }
84086
84087 @@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
84088 pte = pte_alloc_kernel(pmd, addr);
84089 if (!pte)
84090 return -ENOMEM;
84091 +
84092 + pax_open_kernel();
84093 do {
84094 struct page *page = pages[*nr];
84095
84096 - if (WARN_ON(!pte_none(*pte)))
84097 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84098 + if (pgprot_val(prot) & _PAGE_NX)
84099 +#endif
84100 +
84101 + if (!pte_none(*pte)) {
84102 + pax_close_kernel();
84103 + WARN_ON(1);
84104 return -EBUSY;
84105 - if (WARN_ON(!page))
84106 + }
84107 + if (!page) {
84108 + pax_close_kernel();
84109 + WARN_ON(1);
84110 return -ENOMEM;
84111 + }
84112 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
84113 (*nr)++;
84114 } while (pte++, addr += PAGE_SIZE, addr != end);
84115 + pax_close_kernel();
84116 return 0;
84117 }
84118
84119 @@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
84120 pmd_t *pmd;
84121 unsigned long next;
84122
84123 - pmd = pmd_alloc(&init_mm, pud, addr);
84124 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
84125 if (!pmd)
84126 return -ENOMEM;
84127 do {
84128 @@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
84129 pud_t *pud;
84130 unsigned long next;
84131
84132 - pud = pud_alloc(&init_mm, pgd, addr);
84133 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
84134 if (!pud)
84135 return -ENOMEM;
84136 do {
84137 @@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
84138 * and fall back on vmalloc() if that fails. Others
84139 * just put it in the vmalloc space.
84140 */
84141 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
84142 +#ifdef CONFIG_MODULES
84143 +#ifdef MODULES_VADDR
84144 unsigned long addr = (unsigned long)x;
84145 if (addr >= MODULES_VADDR && addr < MODULES_END)
84146 return 1;
84147 #endif
84148 +
84149 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84150 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
84151 + return 1;
84152 +#endif
84153 +
84154 +#endif
84155 +
84156 return is_vmalloc_addr(x);
84157 }
84158
84159 @@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
84160
84161 if (!pgd_none(*pgd)) {
84162 pud_t *pud = pud_offset(pgd, addr);
84163 +#ifdef CONFIG_X86
84164 + if (!pud_large(*pud))
84165 +#endif
84166 if (!pud_none(*pud)) {
84167 pmd_t *pmd = pmd_offset(pud, addr);
84168 +#ifdef CONFIG_X86
84169 + if (!pmd_large(*pmd))
84170 +#endif
84171 if (!pmd_none(*pmd)) {
84172 pte_t *ptep, pte;
84173
84174 @@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
84175 * Allocate a region of KVA of the specified size and alignment, within the
84176 * vstart and vend.
84177 */
84178 -static struct vmap_area *alloc_vmap_area(unsigned long size,
84179 +static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
84180 unsigned long align,
84181 unsigned long vstart, unsigned long vend,
84182 int node, gfp_t gfp_mask)
84183 @@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
84184 struct vm_struct *area;
84185
84186 BUG_ON(in_interrupt());
84187 +
84188 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84189 + if (flags & VM_KERNEXEC) {
84190 + if (start != VMALLOC_START || end != VMALLOC_END)
84191 + return NULL;
84192 + start = (unsigned long)MODULES_EXEC_VADDR;
84193 + end = (unsigned long)MODULES_EXEC_END;
84194 + }
84195 +#endif
84196 +
84197 if (flags & VM_IOREMAP) {
84198 int bit = fls(size);
84199
84200 @@ -1569,6 +1618,11 @@ void *vmap(struct page **pages, unsigned int count,
84201 if (count > totalram_pages)
84202 return NULL;
84203
84204 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84205 + if (!(pgprot_val(prot) & _PAGE_NX))
84206 + flags |= VM_KERNEXEC;
84207 +#endif
84208 +
84209 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
84210 __builtin_return_address(0));
84211 if (!area)
84212 @@ -1670,6 +1724,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
84213 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
84214 goto fail;
84215
84216 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84217 + if (!(pgprot_val(prot) & _PAGE_NX))
84218 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
84219 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
84220 + else
84221 +#endif
84222 +
84223 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
84224 start, end, node, gfp_mask, caller);
84225 if (!area)
84226 @@ -1845,10 +1906,9 @@ EXPORT_SYMBOL(vzalloc_node);
84227 * For tight control over page level allocator and protection flags
84228 * use __vmalloc() instead.
84229 */
84230 -
84231 void *vmalloc_exec(unsigned long size)
84232 {
84233 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
84234 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
84235 NUMA_NO_NODE, __builtin_return_address(0));
84236 }
84237
84238 @@ -2139,6 +2199,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
84239 unsigned long uaddr = vma->vm_start;
84240 unsigned long usize = vma->vm_end - vma->vm_start;
84241
84242 + BUG_ON(vma->vm_mirror);
84243 +
84244 if ((PAGE_SIZE-1) & (unsigned long)addr)
84245 return -EINVAL;
84246
84247 @@ -2578,7 +2640,11 @@ static int s_show(struct seq_file *m, void *p)
84248 v->addr, v->addr + v->size, v->size);
84249
84250 if (v->caller)
84251 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84252 + seq_printf(m, " %pK", v->caller);
84253 +#else
84254 seq_printf(m, " %pS", v->caller);
84255 +#endif
84256
84257 if (v->nr_pages)
84258 seq_printf(m, " pages=%d", v->nr_pages);
84259 diff --git a/mm/vmstat.c b/mm/vmstat.c
84260 index e1d8ed1..253fa3c 100644
84261 --- a/mm/vmstat.c
84262 +++ b/mm/vmstat.c
84263 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
84264 *
84265 * vm_stat contains the global counters
84266 */
84267 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84268 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84269 EXPORT_SYMBOL(vm_stat);
84270
84271 #ifdef CONFIG_SMP
84272 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
84273 v = p->vm_stat_diff[i];
84274 p->vm_stat_diff[i] = 0;
84275 local_irq_restore(flags);
84276 - atomic_long_add(v, &zone->vm_stat[i]);
84277 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84278 global_diff[i] += v;
84279 #ifdef CONFIG_NUMA
84280 /* 3 seconds idle till flush */
84281 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
84282
84283 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
84284 if (global_diff[i])
84285 - atomic_long_add(global_diff[i], &vm_stat[i]);
84286 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
84287 }
84288
84289 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84290 @@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84291 if (pset->vm_stat_diff[i]) {
84292 int v = pset->vm_stat_diff[i];
84293 pset->vm_stat_diff[i] = 0;
84294 - atomic_long_add(v, &zone->vm_stat[i]);
84295 - atomic_long_add(v, &vm_stat[i]);
84296 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84297 + atomic_long_add_unchecked(v, &vm_stat[i]);
84298 }
84299 }
84300 #endif
84301 @@ -1224,7 +1224,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
84302 return NOTIFY_OK;
84303 }
84304
84305 -static struct notifier_block __cpuinitdata vmstat_notifier =
84306 +static struct notifier_block vmstat_notifier =
84307 { &vmstat_cpuup_callback, NULL, 0 };
84308 #endif
84309
84310 @@ -1239,10 +1239,20 @@ static int __init setup_vmstat(void)
84311 start_cpu_timer(cpu);
84312 #endif
84313 #ifdef CONFIG_PROC_FS
84314 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
84315 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
84316 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
84317 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
84318 + {
84319 + mode_t gr_mode = S_IRUGO;
84320 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
84321 + gr_mode = S_IRUSR;
84322 +#endif
84323 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
84324 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
84325 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84326 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
84327 +#else
84328 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
84329 +#endif
84330 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
84331 + }
84332 #endif
84333 return 0;
84334 }
84335 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
84336 index 85addcd..c429a13 100644
84337 --- a/net/8021q/vlan.c
84338 +++ b/net/8021q/vlan.c
84339 @@ -114,6 +114,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
84340 if (vlan_id)
84341 vlan_vid_del(real_dev, vlan_id);
84342
84343 + /* Take it out of our own structures, but be sure to interlock with
84344 + * HW accelerating devices or SW vlan input packet processing if
84345 + * VLAN is not 0 (leave it there for 802.1p).
84346 + */
84347 + if (vlan_id)
84348 + vlan_vid_del(real_dev, vlan_id);
84349 +
84350 /* Get rid of the vlan's reference to real_dev */
84351 dev_put(real_dev);
84352 }
84353 @@ -496,7 +503,7 @@ out:
84354 return NOTIFY_DONE;
84355 }
84356
84357 -static struct notifier_block vlan_notifier_block __read_mostly = {
84358 +static struct notifier_block vlan_notifier_block = {
84359 .notifier_call = vlan_device_event,
84360 };
84361
84362 @@ -571,8 +578,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
84363 err = -EPERM;
84364 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
84365 break;
84366 - if ((args.u.name_type >= 0) &&
84367 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
84368 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
84369 struct vlan_net *vn;
84370
84371 vn = net_generic(net, vlan_net_id);
84372 diff --git a/net/9p/mod.c b/net/9p/mod.c
84373 index 6ab36ae..6f1841b 100644
84374 --- a/net/9p/mod.c
84375 +++ b/net/9p/mod.c
84376 @@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
84377 void v9fs_register_trans(struct p9_trans_module *m)
84378 {
84379 spin_lock(&v9fs_trans_lock);
84380 - list_add_tail(&m->list, &v9fs_trans_list);
84381 + pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
84382 spin_unlock(&v9fs_trans_lock);
84383 }
84384 EXPORT_SYMBOL(v9fs_register_trans);
84385 @@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
84386 void v9fs_unregister_trans(struct p9_trans_module *m)
84387 {
84388 spin_lock(&v9fs_trans_lock);
84389 - list_del_init(&m->list);
84390 + pax_list_del_init((struct list_head *)&m->list);
84391 spin_unlock(&v9fs_trans_lock);
84392 }
84393 EXPORT_SYMBOL(v9fs_unregister_trans);
84394 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
84395 index 02efb25..41541a9 100644
84396 --- a/net/9p/trans_fd.c
84397 +++ b/net/9p/trans_fd.c
84398 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
84399 oldfs = get_fs();
84400 set_fs(get_ds());
84401 /* The cast to a user pointer is valid due to the set_fs() */
84402 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
84403 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
84404 set_fs(oldfs);
84405
84406 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
84407 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
84408 index 876fbe8..8bbea9f 100644
84409 --- a/net/atm/atm_misc.c
84410 +++ b/net/atm/atm_misc.c
84411 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
84412 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
84413 return 1;
84414 atm_return(vcc, truesize);
84415 - atomic_inc(&vcc->stats->rx_drop);
84416 + atomic_inc_unchecked(&vcc->stats->rx_drop);
84417 return 0;
84418 }
84419 EXPORT_SYMBOL(atm_charge);
84420 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
84421 }
84422 }
84423 atm_return(vcc, guess);
84424 - atomic_inc(&vcc->stats->rx_drop);
84425 + atomic_inc_unchecked(&vcc->stats->rx_drop);
84426 return NULL;
84427 }
84428 EXPORT_SYMBOL(atm_alloc_charge);
84429 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
84430
84431 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
84432 {
84433 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
84434 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
84435 __SONET_ITEMS
84436 #undef __HANDLE_ITEM
84437 }
84438 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
84439
84440 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
84441 {
84442 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
84443 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
84444 __SONET_ITEMS
84445 #undef __HANDLE_ITEM
84446 }
84447 diff --git a/net/atm/lec.h b/net/atm/lec.h
84448 index a86aff9..3a0d6f6 100644
84449 --- a/net/atm/lec.h
84450 +++ b/net/atm/lec.h
84451 @@ -48,7 +48,7 @@ struct lane2_ops {
84452 const u8 *tlvs, u32 sizeoftlvs);
84453 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
84454 const u8 *tlvs, u32 sizeoftlvs);
84455 -};
84456 +} __no_const;
84457
84458 /*
84459 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
84460 diff --git a/net/atm/proc.c b/net/atm/proc.c
84461 index 6ac35ff..ac0e136 100644
84462 --- a/net/atm/proc.c
84463 +++ b/net/atm/proc.c
84464 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
84465 const struct k_atm_aal_stats *stats)
84466 {
84467 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
84468 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
84469 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
84470 - atomic_read(&stats->rx_drop));
84471 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
84472 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
84473 + atomic_read_unchecked(&stats->rx_drop));
84474 }
84475
84476 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
84477 diff --git a/net/atm/resources.c b/net/atm/resources.c
84478 index 0447d5d..3cf4728 100644
84479 --- a/net/atm/resources.c
84480 +++ b/net/atm/resources.c
84481 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
84482 static void copy_aal_stats(struct k_atm_aal_stats *from,
84483 struct atm_aal_stats *to)
84484 {
84485 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
84486 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
84487 __AAL_STAT_ITEMS
84488 #undef __HANDLE_ITEM
84489 }
84490 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
84491 static void subtract_aal_stats(struct k_atm_aal_stats *from,
84492 struct atm_aal_stats *to)
84493 {
84494 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
84495 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
84496 __AAL_STAT_ITEMS
84497 #undef __HANDLE_ITEM
84498 }
84499 diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
84500 index d5744b7..506bae3 100644
84501 --- a/net/ax25/sysctl_net_ax25.c
84502 +++ b/net/ax25/sysctl_net_ax25.c
84503 @@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
84504 {
84505 char path[sizeof("net/ax25/") + IFNAMSIZ];
84506 int k;
84507 - struct ctl_table *table;
84508 + ctl_table_no_const *table;
84509
84510 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
84511 if (!table)
84512 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
84513 index a5bb0a7..e1d8b97 100644
84514 --- a/net/batman-adv/bat_iv_ogm.c
84515 +++ b/net/batman-adv/bat_iv_ogm.c
84516 @@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
84517
84518 /* randomize initial seqno to avoid collision */
84519 get_random_bytes(&random_seqno, sizeof(random_seqno));
84520 - atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
84521 + atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
84522
84523 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
84524 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
84525 @@ -611,9 +611,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
84526 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
84527
84528 /* change sequence number to network order */
84529 - seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
84530 + seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
84531 batadv_ogm_packet->seqno = htonl(seqno);
84532 - atomic_inc(&hard_iface->bat_iv.ogm_seqno);
84533 + atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
84534
84535 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
84536 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
84537 @@ -1013,7 +1013,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
84538 return;
84539
84540 /* could be changed by schedule_own_packet() */
84541 - if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
84542 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
84543
84544 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
84545 has_directlink_flag = 1;
84546 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
84547 index 368219e..53f56f9 100644
84548 --- a/net/batman-adv/hard-interface.c
84549 +++ b/net/batman-adv/hard-interface.c
84550 @@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
84551 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
84552 dev_add_pack(&hard_iface->batman_adv_ptype);
84553
84554 - atomic_set(&hard_iface->frag_seqno, 1);
84555 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
84556 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
84557 hard_iface->net_dev->name);
84558
84559 @@ -514,7 +514,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
84560 /* This can't be called via a bat_priv callback because
84561 * we have no bat_priv yet.
84562 */
84563 - atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
84564 + atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
84565 hard_iface->bat_iv.ogm_buff = NULL;
84566
84567 return hard_iface;
84568 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
84569 index 2711e87..4ca48fa 100644
84570 --- a/net/batman-adv/soft-interface.c
84571 +++ b/net/batman-adv/soft-interface.c
84572 @@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
84573 primary_if->net_dev->dev_addr, ETH_ALEN);
84574
84575 /* set broadcast sequence number */
84576 - seqno = atomic_inc_return(&bat_priv->bcast_seqno);
84577 + seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
84578 bcast_packet->seqno = htonl(seqno);
84579
84580 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
84581 @@ -527,7 +527,7 @@ struct net_device *batadv_softif_create(const char *name)
84582 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
84583
84584 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
84585 - atomic_set(&bat_priv->bcast_seqno, 1);
84586 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
84587 atomic_set(&bat_priv->tt.vn, 0);
84588 atomic_set(&bat_priv->tt.local_changes, 0);
84589 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
84590 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
84591 index 4cd87a0..348e705 100644
84592 --- a/net/batman-adv/types.h
84593 +++ b/net/batman-adv/types.h
84594 @@ -51,7 +51,7 @@
84595 struct batadv_hard_iface_bat_iv {
84596 unsigned char *ogm_buff;
84597 int ogm_buff_len;
84598 - atomic_t ogm_seqno;
84599 + atomic_unchecked_t ogm_seqno;
84600 };
84601
84602 /**
84603 @@ -75,7 +75,7 @@ struct batadv_hard_iface {
84604 int16_t if_num;
84605 char if_status;
84606 struct net_device *net_dev;
84607 - atomic_t frag_seqno;
84608 + atomic_unchecked_t frag_seqno;
84609 struct kobject *hardif_obj;
84610 atomic_t refcount;
84611 struct packet_type batman_adv_ptype;
84612 @@ -495,7 +495,7 @@ struct batadv_priv {
84613 #ifdef CONFIG_BATMAN_ADV_DEBUG
84614 atomic_t log_level;
84615 #endif
84616 - atomic_t bcast_seqno;
84617 + atomic_unchecked_t bcast_seqno;
84618 atomic_t bcast_queue_left;
84619 atomic_t batman_queue_left;
84620 char num_ifaces;
84621 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
84622 index 50e079f..49ce2d2 100644
84623 --- a/net/batman-adv/unicast.c
84624 +++ b/net/batman-adv/unicast.c
84625 @@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
84626 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
84627 frag2->flags = large_tail;
84628
84629 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
84630 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
84631 frag1->seqno = htons(seqno - 1);
84632 frag2->seqno = htons(seqno);
84633
84634 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
84635 index 6a93614..1415549 100644
84636 --- a/net/bluetooth/hci_sock.c
84637 +++ b/net/bluetooth/hci_sock.c
84638 @@ -929,7 +929,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
84639 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
84640 }
84641
84642 - len = min_t(unsigned int, len, sizeof(uf));
84643 + len = min((size_t)len, sizeof(uf));
84644 if (copy_from_user(&uf, optval, len)) {
84645 err = -EFAULT;
84646 break;
84647 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
84648 index 7c7e932..7a7815d 100644
84649 --- a/net/bluetooth/l2cap_core.c
84650 +++ b/net/bluetooth/l2cap_core.c
84651 @@ -3395,8 +3395,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
84652 break;
84653
84654 case L2CAP_CONF_RFC:
84655 - if (olen == sizeof(rfc))
84656 - memcpy(&rfc, (void *)val, olen);
84657 + if (olen != sizeof(rfc))
84658 + break;
84659 +
84660 + memcpy(&rfc, (void *)val, olen);
84661
84662 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
84663 rfc.mode != chan->mode)
84664 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
84665 index 1bcfb84..dad9f98 100644
84666 --- a/net/bluetooth/l2cap_sock.c
84667 +++ b/net/bluetooth/l2cap_sock.c
84668 @@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
84669 struct sock *sk = sock->sk;
84670 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
84671 struct l2cap_options opts;
84672 - int len, err = 0;
84673 + int err = 0;
84674 + size_t len = optlen;
84675 u32 opt;
84676
84677 BT_DBG("sk %p", sk);
84678 @@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
84679 opts.max_tx = chan->max_tx;
84680 opts.txwin_size = chan->tx_win;
84681
84682 - len = min_t(unsigned int, sizeof(opts), optlen);
84683 + len = min(sizeof(opts), len);
84684 if (copy_from_user((char *) &opts, optval, len)) {
84685 err = -EFAULT;
84686 break;
84687 @@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
84688 struct bt_security sec;
84689 struct bt_power pwr;
84690 struct l2cap_conn *conn;
84691 - int len, err = 0;
84692 + int err = 0;
84693 + size_t len = optlen;
84694 u32 opt;
84695
84696 BT_DBG("sk %p", sk);
84697 @@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
84698
84699 sec.level = BT_SECURITY_LOW;
84700
84701 - len = min_t(unsigned int, sizeof(sec), optlen);
84702 + len = min(sizeof(sec), len);
84703 if (copy_from_user((char *) &sec, optval, len)) {
84704 err = -EFAULT;
84705 break;
84706 @@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
84707
84708 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
84709
84710 - len = min_t(unsigned int, sizeof(pwr), optlen);
84711 + len = min(sizeof(pwr), len);
84712 if (copy_from_user((char *) &pwr, optval, len)) {
84713 err = -EFAULT;
84714 break;
84715 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
84716 index 7c9224b..381009e 100644
84717 --- a/net/bluetooth/rfcomm/sock.c
84718 +++ b/net/bluetooth/rfcomm/sock.c
84719 @@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
84720 struct sock *sk = sock->sk;
84721 struct bt_security sec;
84722 int err = 0;
84723 - size_t len;
84724 + size_t len = optlen;
84725 u32 opt;
84726
84727 BT_DBG("sk %p", sk);
84728 @@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
84729
84730 sec.level = BT_SECURITY_LOW;
84731
84732 - len = min_t(unsigned int, sizeof(sec), optlen);
84733 + len = min(sizeof(sec), len);
84734 if (copy_from_user((char *) &sec, optval, len)) {
84735 err = -EFAULT;
84736 break;
84737 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
84738 index b6e44ad..5b0d514 100644
84739 --- a/net/bluetooth/rfcomm/tty.c
84740 +++ b/net/bluetooth/rfcomm/tty.c
84741 @@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
84742 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
84743
84744 spin_lock_irqsave(&dev->port.lock, flags);
84745 - if (dev->port.count > 0) {
84746 + if (atomic_read(&dev->port.count) > 0) {
84747 spin_unlock_irqrestore(&dev->port.lock, flags);
84748 return;
84749 }
84750 @@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
84751 return -ENODEV;
84752
84753 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
84754 - dev->channel, dev->port.count);
84755 + dev->channel, atomic_read(&dev->port.count));
84756
84757 spin_lock_irqsave(&dev->port.lock, flags);
84758 - if (++dev->port.count > 1) {
84759 + if (atomic_inc_return(&dev->port.count) > 1) {
84760 spin_unlock_irqrestore(&dev->port.lock, flags);
84761 return 0;
84762 }
84763 @@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
84764 return;
84765
84766 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
84767 - dev->port.count);
84768 + atomic_read(&dev->port.count));
84769
84770 spin_lock_irqsave(&dev->port.lock, flags);
84771 - if (!--dev->port.count) {
84772 + if (!atomic_dec_return(&dev->port.count)) {
84773 spin_unlock_irqrestore(&dev->port.lock, flags);
84774 if (dev->tty_dev->parent)
84775 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
84776 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
84777 index 8d493c9..3849e49 100644
84778 --- a/net/bridge/netfilter/ebtables.c
84779 +++ b/net/bridge/netfilter/ebtables.c
84780 @@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
84781 tmp.valid_hooks = t->table->valid_hooks;
84782 }
84783 mutex_unlock(&ebt_mutex);
84784 - if (copy_to_user(user, &tmp, *len) != 0){
84785 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
84786 BUGPRINT("c2u Didn't work\n");
84787 ret = -EFAULT;
84788 break;
84789 @@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
84790 goto out;
84791 tmp.valid_hooks = t->valid_hooks;
84792
84793 - if (copy_to_user(user, &tmp, *len) != 0) {
84794 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
84795 ret = -EFAULT;
84796 break;
84797 }
84798 @@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
84799 tmp.entries_size = t->table->entries_size;
84800 tmp.valid_hooks = t->table->valid_hooks;
84801
84802 - if (copy_to_user(user, &tmp, *len) != 0) {
84803 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
84804 ret = -EFAULT;
84805 break;
84806 }
84807 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
84808 index a376ec1..1fbd6be 100644
84809 --- a/net/caif/cfctrl.c
84810 +++ b/net/caif/cfctrl.c
84811 @@ -10,6 +10,7 @@
84812 #include <linux/spinlock.h>
84813 #include <linux/slab.h>
84814 #include <linux/pkt_sched.h>
84815 +#include <linux/sched.h>
84816 #include <net/caif/caif_layer.h>
84817 #include <net/caif/cfpkt.h>
84818 #include <net/caif/cfctrl.h>
84819 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
84820 memset(&dev_info, 0, sizeof(dev_info));
84821 dev_info.id = 0xff;
84822 cfsrvl_init(&this->serv, 0, &dev_info, false);
84823 - atomic_set(&this->req_seq_no, 1);
84824 - atomic_set(&this->rsp_seq_no, 1);
84825 + atomic_set_unchecked(&this->req_seq_no, 1);
84826 + atomic_set_unchecked(&this->rsp_seq_no, 1);
84827 this->serv.layer.receive = cfctrl_recv;
84828 sprintf(this->serv.layer.name, "ctrl");
84829 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
84830 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
84831 struct cfctrl_request_info *req)
84832 {
84833 spin_lock_bh(&ctrl->info_list_lock);
84834 - atomic_inc(&ctrl->req_seq_no);
84835 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
84836 + atomic_inc_unchecked(&ctrl->req_seq_no);
84837 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
84838 list_add_tail(&req->list, &ctrl->list);
84839 spin_unlock_bh(&ctrl->info_list_lock);
84840 }
84841 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
84842 if (p != first)
84843 pr_warn("Requests are not received in order\n");
84844
84845 - atomic_set(&ctrl->rsp_seq_no,
84846 + atomic_set_unchecked(&ctrl->rsp_seq_no,
84847 p->sequence_no);
84848 list_del(&p->list);
84849 goto out;
84850 diff --git a/net/can/af_can.c b/net/can/af_can.c
84851 index c48e522..1223690 100644
84852 --- a/net/can/af_can.c
84853 +++ b/net/can/af_can.c
84854 @@ -870,7 +870,7 @@ static const struct net_proto_family can_family_ops = {
84855 };
84856
84857 /* notifier block for netdevice event */
84858 -static struct notifier_block can_netdev_notifier __read_mostly = {
84859 +static struct notifier_block can_netdev_notifier = {
84860 .notifier_call = can_notifier,
84861 };
84862
84863 diff --git a/net/can/gw.c b/net/can/gw.c
84864 index 117814a..ad4fb73 100644
84865 --- a/net/can/gw.c
84866 +++ b/net/can/gw.c
84867 @@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
84868 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
84869
84870 static HLIST_HEAD(cgw_list);
84871 -static struct notifier_block notifier;
84872
84873 static struct kmem_cache *cgw_cache __read_mostly;
84874
84875 @@ -928,6 +927,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
84876 return err;
84877 }
84878
84879 +static struct notifier_block notifier = {
84880 + .notifier_call = cgw_notifier
84881 +};
84882 +
84883 static __init int cgw_module_init(void)
84884 {
84885 /* sanitize given module parameter */
84886 @@ -943,7 +946,6 @@ static __init int cgw_module_init(void)
84887 return -ENOMEM;
84888
84889 /* set notifier */
84890 - notifier.notifier_call = cgw_notifier;
84891 register_netdevice_notifier(&notifier);
84892
84893 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
84894 diff --git a/net/compat.c b/net/compat.c
84895 index 79ae884..17c5c09 100644
84896 --- a/net/compat.c
84897 +++ b/net/compat.c
84898 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
84899 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
84900 __get_user(kmsg->msg_flags, &umsg->msg_flags))
84901 return -EFAULT;
84902 - kmsg->msg_name = compat_ptr(tmp1);
84903 - kmsg->msg_iov = compat_ptr(tmp2);
84904 - kmsg->msg_control = compat_ptr(tmp3);
84905 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
84906 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
84907 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
84908 return 0;
84909 }
84910
84911 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
84912
84913 if (kern_msg->msg_namelen) {
84914 if (mode == VERIFY_READ) {
84915 - int err = move_addr_to_kernel(kern_msg->msg_name,
84916 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
84917 kern_msg->msg_namelen,
84918 kern_address);
84919 if (err < 0)
84920 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
84921 kern_msg->msg_name = NULL;
84922
84923 tot_len = iov_from_user_compat_to_kern(kern_iov,
84924 - (struct compat_iovec __user *)kern_msg->msg_iov,
84925 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
84926 kern_msg->msg_iovlen);
84927 if (tot_len >= 0)
84928 kern_msg->msg_iov = kern_iov;
84929 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
84930
84931 #define CMSG_COMPAT_FIRSTHDR(msg) \
84932 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
84933 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
84934 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
84935 (struct compat_cmsghdr __user *)NULL)
84936
84937 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
84938 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
84939 (ucmlen) <= (unsigned long) \
84940 ((mhdr)->msg_controllen - \
84941 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
84942 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
84943
84944 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
84945 struct compat_cmsghdr __user *cmsg, int cmsg_len)
84946 {
84947 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
84948 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
84949 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
84950 msg->msg_controllen)
84951 return NULL;
84952 return (struct compat_cmsghdr __user *)ptr;
84953 @@ -219,7 +219,7 @@ Efault:
84954
84955 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
84956 {
84957 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
84958 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
84959 struct compat_cmsghdr cmhdr;
84960 struct compat_timeval ctv;
84961 struct compat_timespec cts[3];
84962 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
84963
84964 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
84965 {
84966 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
84967 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
84968 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
84969 int fdnum = scm->fp->count;
84970 struct file **fp = scm->fp->fp;
84971 @@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
84972 return -EFAULT;
84973 old_fs = get_fs();
84974 set_fs(KERNEL_DS);
84975 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
84976 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
84977 set_fs(old_fs);
84978
84979 return err;
84980 @@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
84981 len = sizeof(ktime);
84982 old_fs = get_fs();
84983 set_fs(KERNEL_DS);
84984 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
84985 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
84986 set_fs(old_fs);
84987
84988 if (!err) {
84989 @@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
84990 case MCAST_JOIN_GROUP:
84991 case MCAST_LEAVE_GROUP:
84992 {
84993 - struct compat_group_req __user *gr32 = (void *)optval;
84994 + struct compat_group_req __user *gr32 = (void __user *)optval;
84995 struct group_req __user *kgr =
84996 compat_alloc_user_space(sizeof(struct group_req));
84997 u32 interface;
84998 @@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
84999 case MCAST_BLOCK_SOURCE:
85000 case MCAST_UNBLOCK_SOURCE:
85001 {
85002 - struct compat_group_source_req __user *gsr32 = (void *)optval;
85003 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
85004 struct group_source_req __user *kgsr = compat_alloc_user_space(
85005 sizeof(struct group_source_req));
85006 u32 interface;
85007 @@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85008 }
85009 case MCAST_MSFILTER:
85010 {
85011 - struct compat_group_filter __user *gf32 = (void *)optval;
85012 + struct compat_group_filter __user *gf32 = (void __user *)optval;
85013 struct group_filter __user *kgf;
85014 u32 interface, fmode, numsrc;
85015
85016 @@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
85017 char __user *optval, int __user *optlen,
85018 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
85019 {
85020 - struct compat_group_filter __user *gf32 = (void *)optval;
85021 + struct compat_group_filter __user *gf32 = (void __user *)optval;
85022 struct group_filter __user *kgf;
85023 int __user *koptlen;
85024 u32 interface, fmode, numsrc;
85025 @@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
85026
85027 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
85028 return -EINVAL;
85029 - if (copy_from_user(a, args, nas[call]))
85030 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
85031 return -EFAULT;
85032 a0 = a[0];
85033 a1 = a[1];
85034 diff --git a/net/core/datagram.c b/net/core/datagram.c
85035 index 368f9c3..f82d4a3 100644
85036 --- a/net/core/datagram.c
85037 +++ b/net/core/datagram.c
85038 @@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
85039 }
85040
85041 kfree_skb(skb);
85042 - atomic_inc(&sk->sk_drops);
85043 + atomic_inc_unchecked(&sk->sk_drops);
85044 sk_mem_reclaim_partial(sk);
85045
85046 return err;
85047 diff --git a/net/core/dev.c b/net/core/dev.c
85048 index b24ab0e9..1c424bc 100644
85049 --- a/net/core/dev.c
85050 +++ b/net/core/dev.c
85051 @@ -1617,7 +1617,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85052 {
85053 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
85054 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
85055 - atomic_long_inc(&dev->rx_dropped);
85056 + atomic_long_inc_unchecked(&dev->rx_dropped);
85057 kfree_skb(skb);
85058 return NET_RX_DROP;
85059 }
85060 @@ -1626,7 +1626,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85061 skb_orphan(skb);
85062
85063 if (unlikely(!is_skb_forwardable(dev, skb))) {
85064 - atomic_long_inc(&dev->rx_dropped);
85065 + atomic_long_inc_unchecked(&dev->rx_dropped);
85066 kfree_skb(skb);
85067 return NET_RX_DROP;
85068 }
85069 @@ -2351,7 +2351,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
85070
85071 struct dev_gso_cb {
85072 void (*destructor)(struct sk_buff *skb);
85073 -};
85074 +} __no_const;
85075
85076 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
85077
85078 @@ -3093,7 +3093,7 @@ enqueue:
85079
85080 local_irq_restore(flags);
85081
85082 - atomic_long_inc(&skb->dev->rx_dropped);
85083 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85084 kfree_skb(skb);
85085 return NET_RX_DROP;
85086 }
85087 @@ -3165,7 +3165,7 @@ int netif_rx_ni(struct sk_buff *skb)
85088 }
85089 EXPORT_SYMBOL(netif_rx_ni);
85090
85091 -static void net_tx_action(struct softirq_action *h)
85092 +static void net_tx_action(void)
85093 {
85094 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85095
85096 @@ -3490,7 +3490,7 @@ ncls:
85097 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
85098 } else {
85099 drop:
85100 - atomic_long_inc(&skb->dev->rx_dropped);
85101 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85102 kfree_skb(skb);
85103 /* Jamal, now you will not able to escape explaining
85104 * me how you were going to use this. :-)
85105 @@ -4095,7 +4095,7 @@ void netif_napi_del(struct napi_struct *napi)
85106 }
85107 EXPORT_SYMBOL(netif_napi_del);
85108
85109 -static void net_rx_action(struct softirq_action *h)
85110 +static void net_rx_action(void)
85111 {
85112 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85113 unsigned long time_limit = jiffies + 2;
85114 @@ -5522,7 +5522,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
85115 } else {
85116 netdev_stats_to_stats64(storage, &dev->stats);
85117 }
85118 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
85119 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
85120 return storage;
85121 }
85122 EXPORT_SYMBOL(dev_get_stats);
85123 diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
85124 index 6cc0481..59cfb00 100644
85125 --- a/net/core/dev_ioctl.c
85126 +++ b/net/core/dev_ioctl.c
85127 @@ -376,9 +376,13 @@ void dev_load(struct net *net, const char *name)
85128 if (no_module && capable(CAP_NET_ADMIN))
85129 no_module = request_module("netdev-%s", name);
85130 if (no_module && capable(CAP_SYS_MODULE)) {
85131 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
85132 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
85133 +#else
85134 if (!request_module("%s", name))
85135 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
85136 name);
85137 +#endif
85138 }
85139 }
85140 EXPORT_SYMBOL(dev_load);
85141 diff --git a/net/core/flow.c b/net/core/flow.c
85142 index 2bfd081..53c6058 100644
85143 --- a/net/core/flow.c
85144 +++ b/net/core/flow.c
85145 @@ -61,7 +61,7 @@ struct flow_cache {
85146 struct timer_list rnd_timer;
85147 };
85148
85149 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
85150 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
85151 EXPORT_SYMBOL(flow_cache_genid);
85152 static struct flow_cache flow_cache_global;
85153 static struct kmem_cache *flow_cachep __read_mostly;
85154 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
85155
85156 static int flow_entry_valid(struct flow_cache_entry *fle)
85157 {
85158 - if (atomic_read(&flow_cache_genid) != fle->genid)
85159 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
85160 return 0;
85161 if (fle->object && !fle->object->ops->check(fle->object))
85162 return 0;
85163 @@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
85164 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
85165 fcp->hash_count++;
85166 }
85167 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
85168 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
85169 flo = fle->object;
85170 if (!flo)
85171 goto ret_object;
85172 @@ -279,7 +279,7 @@ nocache:
85173 }
85174 flo = resolver(net, key, family, dir, flo, ctx);
85175 if (fle) {
85176 - fle->genid = atomic_read(&flow_cache_genid);
85177 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
85178 if (!IS_ERR(flo))
85179 fle->object = flo;
85180 else
85181 diff --git a/net/core/iovec.c b/net/core/iovec.c
85182 index 7e7aeb0..2a998cb 100644
85183 --- a/net/core/iovec.c
85184 +++ b/net/core/iovec.c
85185 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85186 if (m->msg_namelen) {
85187 if (mode == VERIFY_READ) {
85188 void __user *namep;
85189 - namep = (void __user __force *) m->msg_name;
85190 + namep = (void __force_user *) m->msg_name;
85191 err = move_addr_to_kernel(namep, m->msg_namelen,
85192 address);
85193 if (err < 0)
85194 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85195 }
85196
85197 size = m->msg_iovlen * sizeof(struct iovec);
85198 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
85199 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
85200 return -EFAULT;
85201
85202 m->msg_iov = iov;
85203 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
85204 index 3863b8f..85c99a6 100644
85205 --- a/net/core/neighbour.c
85206 +++ b/net/core/neighbour.c
85207 @@ -2778,7 +2778,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
85208 size_t *lenp, loff_t *ppos)
85209 {
85210 int size, ret;
85211 - ctl_table tmp = *ctl;
85212 + ctl_table_no_const tmp = *ctl;
85213
85214 tmp.extra1 = &zero;
85215 tmp.extra2 = &unres_qlen_max;
85216 diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
85217 index 3174f19..5810985 100644
85218 --- a/net/core/net-procfs.c
85219 +++ b/net/core/net-procfs.c
85220 @@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
85221 else
85222 seq_printf(seq, "%04x", ntohs(pt->type));
85223
85224 +#ifdef CONFIG_GRKERNSEC_HIDESYM
85225 + seq_printf(seq, " %-8s %pF\n",
85226 + pt->dev ? pt->dev->name : "", NULL);
85227 +#else
85228 seq_printf(seq, " %-8s %pF\n",
85229 pt->dev ? pt->dev->name : "", pt->func);
85230 +#endif
85231 }
85232
85233 return 0;
85234 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
85235 index 7427ab5..389f411 100644
85236 --- a/net/core/net-sysfs.c
85237 +++ b/net/core/net-sysfs.c
85238 @@ -1321,7 +1321,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
85239 }
85240 EXPORT_SYMBOL(netdev_class_remove_file);
85241
85242 -int netdev_kobject_init(void)
85243 +int __init netdev_kobject_init(void)
85244 {
85245 kobj_ns_type_register(&net_ns_type_operations);
85246 return class_register(&net_class);
85247 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
85248 index 80e271d..2980cc2 100644
85249 --- a/net/core/net_namespace.c
85250 +++ b/net/core/net_namespace.c
85251 @@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
85252 int error;
85253 LIST_HEAD(net_exit_list);
85254
85255 - list_add_tail(&ops->list, list);
85256 + pax_list_add_tail((struct list_head *)&ops->list, list);
85257 if (ops->init || (ops->id && ops->size)) {
85258 for_each_net(net) {
85259 error = ops_init(ops, net);
85260 @@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
85261
85262 out_undo:
85263 /* If I have an error cleanup all namespaces I initialized */
85264 - list_del(&ops->list);
85265 + pax_list_del((struct list_head *)&ops->list);
85266 ops_exit_list(ops, &net_exit_list);
85267 ops_free_list(ops, &net_exit_list);
85268 return error;
85269 @@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
85270 struct net *net;
85271 LIST_HEAD(net_exit_list);
85272
85273 - list_del(&ops->list);
85274 + pax_list_del((struct list_head *)&ops->list);
85275 for_each_net(net)
85276 list_add_tail(&net->exit_list, &net_exit_list);
85277 ops_exit_list(ops, &net_exit_list);
85278 @@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
85279 mutex_lock(&net_mutex);
85280 error = register_pernet_operations(&pernet_list, ops);
85281 if (!error && (first_device == &pernet_list))
85282 - first_device = &ops->list;
85283 + first_device = (struct list_head *)&ops->list;
85284 mutex_unlock(&net_mutex);
85285 return error;
85286 }
85287 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
85288 index 23854b5..ff4fda4 100644
85289 --- a/net/core/rtnetlink.c
85290 +++ b/net/core/rtnetlink.c
85291 @@ -58,7 +58,7 @@ struct rtnl_link {
85292 rtnl_doit_func doit;
85293 rtnl_dumpit_func dumpit;
85294 rtnl_calcit_func calcit;
85295 -};
85296 +} __no_const;
85297
85298 static DEFINE_MUTEX(rtnl_mutex);
85299
85300 @@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
85301 if (rtnl_link_ops_get(ops->kind))
85302 return -EEXIST;
85303
85304 - if (!ops->dellink)
85305 - ops->dellink = unregister_netdevice_queue;
85306 + if (!ops->dellink) {
85307 + pax_open_kernel();
85308 + *(void **)&ops->dellink = unregister_netdevice_queue;
85309 + pax_close_kernel();
85310 + }
85311
85312 - list_add_tail(&ops->list, &link_ops);
85313 + pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
85314 return 0;
85315 }
85316 EXPORT_SYMBOL_GPL(__rtnl_link_register);
85317 @@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
85318 for_each_net(net) {
85319 __rtnl_kill_links(net, ops);
85320 }
85321 - list_del(&ops->list);
85322 + pax_list_del((struct list_head *)&ops->list);
85323 }
85324 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
85325
85326 diff --git a/net/core/scm.c b/net/core/scm.c
85327 index 2dc6cda..2159524 100644
85328 --- a/net/core/scm.c
85329 +++ b/net/core/scm.c
85330 @@ -226,7 +226,7 @@ EXPORT_SYMBOL(__scm_send);
85331 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85332 {
85333 struct cmsghdr __user *cm
85334 - = (__force struct cmsghdr __user *)msg->msg_control;
85335 + = (struct cmsghdr __force_user *)msg->msg_control;
85336 struct cmsghdr cmhdr;
85337 int cmlen = CMSG_LEN(len);
85338 int err;
85339 @@ -249,7 +249,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85340 err = -EFAULT;
85341 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
85342 goto out;
85343 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
85344 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
85345 goto out;
85346 cmlen = CMSG_SPACE(len);
85347 if (msg->msg_controllen < cmlen)
85348 @@ -265,7 +265,7 @@ EXPORT_SYMBOL(put_cmsg);
85349 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
85350 {
85351 struct cmsghdr __user *cm
85352 - = (__force struct cmsghdr __user*)msg->msg_control;
85353 + = (struct cmsghdr __force_user *)msg->msg_control;
85354
85355 int fdmax = 0;
85356 int fdnum = scm->fp->count;
85357 @@ -285,7 +285,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
85358 if (fdnum < fdmax)
85359 fdmax = fdnum;
85360
85361 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
85362 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
85363 i++, cmfptr++)
85364 {
85365 struct socket *sock;
85366 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
85367 index e61a8bb..6a2f13c 100644
85368 --- a/net/core/secure_seq.c
85369 +++ b/net/core/secure_seq.c
85370 @@ -12,12 +12,10 @@
85371
85372 static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
85373
85374 -static int __init net_secret_init(void)
85375 +void net_secret_init(void)
85376 {
85377 get_random_bytes(net_secret, sizeof(net_secret));
85378 - return 0;
85379 }
85380 -late_initcall(net_secret_init);
85381
85382 #ifdef CONFIG_INET
85383 static u32 seq_scale(u32 seq)
85384 diff --git a/net/core/sock.c b/net/core/sock.c
85385 index b261a79..8fe17ab 100644
85386 --- a/net/core/sock.c
85387 +++ b/net/core/sock.c
85388 @@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85389 struct sk_buff_head *list = &sk->sk_receive_queue;
85390
85391 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
85392 - atomic_inc(&sk->sk_drops);
85393 + atomic_inc_unchecked(&sk->sk_drops);
85394 trace_sock_rcvqueue_full(sk, skb);
85395 return -ENOMEM;
85396 }
85397 @@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85398 return err;
85399
85400 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
85401 - atomic_inc(&sk->sk_drops);
85402 + atomic_inc_unchecked(&sk->sk_drops);
85403 return -ENOBUFS;
85404 }
85405
85406 @@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85407 skb_dst_force(skb);
85408
85409 spin_lock_irqsave(&list->lock, flags);
85410 - skb->dropcount = atomic_read(&sk->sk_drops);
85411 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
85412 __skb_queue_tail(list, skb);
85413 spin_unlock_irqrestore(&list->lock, flags);
85414
85415 @@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
85416 skb->dev = NULL;
85417
85418 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
85419 - atomic_inc(&sk->sk_drops);
85420 + atomic_inc_unchecked(&sk->sk_drops);
85421 goto discard_and_relse;
85422 }
85423 if (nested)
85424 @@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
85425 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
85426 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
85427 bh_unlock_sock(sk);
85428 - atomic_inc(&sk->sk_drops);
85429 + atomic_inc_unchecked(&sk->sk_drops);
85430 goto discard_and_relse;
85431 }
85432
85433 @@ -942,12 +942,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
85434 struct timeval tm;
85435 } v;
85436
85437 - int lv = sizeof(int);
85438 - int len;
85439 + unsigned int lv = sizeof(int);
85440 + unsigned int len;
85441
85442 if (get_user(len, optlen))
85443 return -EFAULT;
85444 - if (len < 0)
85445 + if (len > INT_MAX)
85446 return -EINVAL;
85447
85448 memset(&v, 0, sizeof(v));
85449 @@ -1099,11 +1099,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
85450
85451 case SO_PEERNAME:
85452 {
85453 - char address[128];
85454 + char address[_K_SS_MAXSIZE];
85455
85456 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
85457 return -ENOTCONN;
85458 - if (lv < len)
85459 + if (lv < len || sizeof address < len)
85460 return -EINVAL;
85461 if (copy_to_user(optval, address, len))
85462 return -EFAULT;
85463 @@ -1166,7 +1166,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
85464
85465 if (len > lv)
85466 len = lv;
85467 - if (copy_to_user(optval, &v, len))
85468 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
85469 return -EFAULT;
85470 lenout:
85471 if (put_user(len, optlen))
85472 @@ -2296,7 +2296,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
85473 */
85474 smp_wmb();
85475 atomic_set(&sk->sk_refcnt, 1);
85476 - atomic_set(&sk->sk_drops, 0);
85477 + atomic_set_unchecked(&sk->sk_drops, 0);
85478 }
85479 EXPORT_SYMBOL(sock_init_data);
85480
85481 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
85482 index a29e90c..922399c 100644
85483 --- a/net/core/sock_diag.c
85484 +++ b/net/core/sock_diag.c
85485 @@ -9,26 +9,33 @@
85486 #include <linux/inet_diag.h>
85487 #include <linux/sock_diag.h>
85488
85489 -static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
85490 +static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
85491 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
85492 static DEFINE_MUTEX(sock_diag_table_mutex);
85493
85494 int sock_diag_check_cookie(void *sk, __u32 *cookie)
85495 {
85496 +#ifndef CONFIG_GRKERNSEC_HIDESYM
85497 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
85498 cookie[1] != INET_DIAG_NOCOOKIE) &&
85499 ((u32)(unsigned long)sk != cookie[0] ||
85500 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
85501 return -ESTALE;
85502 else
85503 +#endif
85504 return 0;
85505 }
85506 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
85507
85508 void sock_diag_save_cookie(void *sk, __u32 *cookie)
85509 {
85510 +#ifdef CONFIG_GRKERNSEC_HIDESYM
85511 + cookie[0] = 0;
85512 + cookie[1] = 0;
85513 +#else
85514 cookie[0] = (u32)(unsigned long)sk;
85515 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
85516 +#endif
85517 }
85518 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
85519
85520 @@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
85521 mutex_lock(&sock_diag_table_mutex);
85522 if (sock_diag_handlers[hndl->family])
85523 err = -EBUSY;
85524 - else
85525 + else {
85526 + pax_open_kernel();
85527 sock_diag_handlers[hndl->family] = hndl;
85528 + pax_close_kernel();
85529 + }
85530 mutex_unlock(&sock_diag_table_mutex);
85531
85532 return err;
85533 @@ -92,7 +102,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
85534
85535 mutex_lock(&sock_diag_table_mutex);
85536 BUG_ON(sock_diag_handlers[family] != hnld);
85537 + pax_open_kernel();
85538 sock_diag_handlers[family] = NULL;
85539 + pax_close_kernel();
85540 mutex_unlock(&sock_diag_table_mutex);
85541 }
85542 EXPORT_SYMBOL_GPL(sock_diag_unregister);
85543 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
85544 index cfdb46a..cef55e1 100644
85545 --- a/net/core/sysctl_net_core.c
85546 +++ b/net/core/sysctl_net_core.c
85547 @@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
85548 {
85549 unsigned int orig_size, size;
85550 int ret, i;
85551 - ctl_table tmp = {
85552 + ctl_table_no_const tmp = {
85553 .data = &size,
85554 .maxlen = sizeof(size),
85555 .mode = table->mode
85556 @@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
85557
85558 static __net_init int sysctl_core_net_init(struct net *net)
85559 {
85560 - struct ctl_table *tbl;
85561 + ctl_table_no_const *tbl = NULL;
85562
85563 net->core.sysctl_somaxconn = SOMAXCONN;
85564
85565 - tbl = netns_core_table;
85566 if (!net_eq(net, &init_net)) {
85567 - tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
85568 + tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
85569 if (tbl == NULL)
85570 goto err_dup;
85571
85572 @@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
85573 if (net->user_ns != &init_user_ns) {
85574 tbl[0].procname = NULL;
85575 }
85576 - }
85577 -
85578 - net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
85579 + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
85580 + } else
85581 + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
85582 if (net->core.sysctl_hdr == NULL)
85583 goto err_reg;
85584
85585 return 0;
85586
85587 err_reg:
85588 - if (tbl != netns_core_table)
85589 - kfree(tbl);
85590 + kfree(tbl);
85591 err_dup:
85592 return -ENOMEM;
85593 }
85594 @@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
85595 kfree(tbl);
85596 }
85597
85598 -static __net_initdata struct pernet_operations sysctl_core_ops = {
85599 +static __net_initconst struct pernet_operations sysctl_core_ops = {
85600 .init = sysctl_core_net_init,
85601 .exit = sysctl_core_net_exit,
85602 };
85603 diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
85604 index c21f200..bc4565b 100644
85605 --- a/net/decnet/af_decnet.c
85606 +++ b/net/decnet/af_decnet.c
85607 @@ -465,6 +465,7 @@ static struct proto dn_proto = {
85608 .sysctl_rmem = sysctl_decnet_rmem,
85609 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
85610 .obj_size = sizeof(struct dn_sock),
85611 + .slab_flags = SLAB_USERCOPY,
85612 };
85613
85614 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
85615 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
85616 index a55eecc..dd8428c 100644
85617 --- a/net/decnet/sysctl_net_decnet.c
85618 +++ b/net/decnet/sysctl_net_decnet.c
85619 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
85620
85621 if (len > *lenp) len = *lenp;
85622
85623 - if (copy_to_user(buffer, addr, len))
85624 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
85625 return -EFAULT;
85626
85627 *lenp = len;
85628 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
85629
85630 if (len > *lenp) len = *lenp;
85631
85632 - if (copy_to_user(buffer, devname, len))
85633 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
85634 return -EFAULT;
85635
85636 *lenp = len;
85637 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
85638 index c929d9c..df10cde 100644
85639 --- a/net/ipv4/af_inet.c
85640 +++ b/net/ipv4/af_inet.c
85641 @@ -115,6 +115,7 @@
85642 #include <net/inet_common.h>
85643 #include <net/xfrm.h>
85644 #include <net/net_namespace.h>
85645 +#include <net/secure_seq.h>
85646 #ifdef CONFIG_IP_MROUTE
85647 #include <linux/mroute.h>
85648 #endif
85649 @@ -263,8 +264,10 @@ void build_ehash_secret(void)
85650 get_random_bytes(&rnd, sizeof(rnd));
85651 } while (rnd == 0);
85652
85653 - if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
85654 + if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
85655 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
85656 + net_secret_init();
85657 + }
85658 }
85659 EXPORT_SYMBOL(build_ehash_secret);
85660
85661 @@ -1699,13 +1702,9 @@ static int __init inet_init(void)
85662
85663 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
85664
85665 - sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
85666 - if (!sysctl_local_reserved_ports)
85667 - goto out;
85668 -
85669 rc = proto_register(&tcp_prot, 1);
85670 if (rc)
85671 - goto out_free_reserved_ports;
85672 + goto out;
85673
85674 rc = proto_register(&udp_prot, 1);
85675 if (rc)
85676 @@ -1814,8 +1813,6 @@ out_unregister_udp_proto:
85677 proto_unregister(&udp_prot);
85678 out_unregister_tcp_proto:
85679 proto_unregister(&tcp_prot);
85680 -out_free_reserved_ports:
85681 - kfree(sysctl_local_reserved_ports);
85682 goto out;
85683 }
85684
85685 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
85686 index 2e7f194..0fa4d6d 100644
85687 --- a/net/ipv4/ah4.c
85688 +++ b/net/ipv4/ah4.c
85689 @@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
85690 return;
85691
85692 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
85693 - atomic_inc(&flow_cache_genid);
85694 + atomic_inc_unchecked(&flow_cache_genid);
85695 rt_genid_bump(net);
85696
85697 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
85698 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
85699 index c6287cd..e9bc96a 100644
85700 --- a/net/ipv4/devinet.c
85701 +++ b/net/ipv4/devinet.c
85702 @@ -1992,7 +1992,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
85703 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
85704 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
85705
85706 -static struct devinet_sysctl_table {
85707 +static const struct devinet_sysctl_table {
85708 struct ctl_table_header *sysctl_header;
85709 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
85710 } devinet_sysctl = {
85711 @@ -2110,7 +2110,7 @@ static __net_init int devinet_init_net(struct net *net)
85712 int err;
85713 struct ipv4_devconf *all, *dflt;
85714 #ifdef CONFIG_SYSCTL
85715 - struct ctl_table *tbl = ctl_forward_entry;
85716 + ctl_table_no_const *tbl = NULL;
85717 struct ctl_table_header *forw_hdr;
85718 #endif
85719
85720 @@ -2128,7 +2128,7 @@ static __net_init int devinet_init_net(struct net *net)
85721 goto err_alloc_dflt;
85722
85723 #ifdef CONFIG_SYSCTL
85724 - tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
85725 + tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
85726 if (tbl == NULL)
85727 goto err_alloc_ctl;
85728
85729 @@ -2148,7 +2148,10 @@ static __net_init int devinet_init_net(struct net *net)
85730 goto err_reg_dflt;
85731
85732 err = -ENOMEM;
85733 - forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
85734 + if (!net_eq(net, &init_net))
85735 + forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
85736 + else
85737 + forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
85738 if (forw_hdr == NULL)
85739 goto err_reg_ctl;
85740 net->ipv4.forw_hdr = forw_hdr;
85741 @@ -2164,8 +2167,7 @@ err_reg_ctl:
85742 err_reg_dflt:
85743 __devinet_sysctl_unregister(all);
85744 err_reg_all:
85745 - if (tbl != ctl_forward_entry)
85746 - kfree(tbl);
85747 + kfree(tbl);
85748 err_alloc_ctl:
85749 #endif
85750 if (dflt != &ipv4_devconf_dflt)
85751 diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
85752 index 4cfe34d..a6ba66e 100644
85753 --- a/net/ipv4/esp4.c
85754 +++ b/net/ipv4/esp4.c
85755 @@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
85756 return;
85757
85758 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
85759 - atomic_inc(&flow_cache_genid);
85760 + atomic_inc_unchecked(&flow_cache_genid);
85761 rt_genid_bump(net);
85762
85763 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
85764 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
85765 index eb4bb12..ee4ec7d 100644
85766 --- a/net/ipv4/fib_frontend.c
85767 +++ b/net/ipv4/fib_frontend.c
85768 @@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
85769 #ifdef CONFIG_IP_ROUTE_MULTIPATH
85770 fib_sync_up(dev);
85771 #endif
85772 - atomic_inc(&net->ipv4.dev_addr_genid);
85773 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
85774 rt_cache_flush(dev_net(dev));
85775 break;
85776 case NETDEV_DOWN:
85777 fib_del_ifaddr(ifa, NULL);
85778 - atomic_inc(&net->ipv4.dev_addr_genid);
85779 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
85780 if (ifa->ifa_dev->ifa_list == NULL) {
85781 /* Last address was deleted from this interface.
85782 * Disable IP.
85783 @@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
85784 #ifdef CONFIG_IP_ROUTE_MULTIPATH
85785 fib_sync_up(dev);
85786 #endif
85787 - atomic_inc(&net->ipv4.dev_addr_genid);
85788 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
85789 rt_cache_flush(net);
85790 break;
85791 case NETDEV_DOWN:
85792 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
85793 index 8f6cb7a..34507f9 100644
85794 --- a/net/ipv4/fib_semantics.c
85795 +++ b/net/ipv4/fib_semantics.c
85796 @@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
85797 nh->nh_saddr = inet_select_addr(nh->nh_dev,
85798 nh->nh_gw,
85799 nh->nh_parent->fib_scope);
85800 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
85801 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
85802
85803 return nh->nh_saddr;
85804 }
85805 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
85806 index 786d97a..1889c0d 100644
85807 --- a/net/ipv4/inet_connection_sock.c
85808 +++ b/net/ipv4/inet_connection_sock.c
85809 @@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
85810 .range = { 32768, 61000 },
85811 };
85812
85813 -unsigned long *sysctl_local_reserved_ports;
85814 +unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
85815 EXPORT_SYMBOL(sysctl_local_reserved_ports);
85816
85817 void inet_get_local_port_range(int *low, int *high)
85818 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
85819 index 6af375a..c493c74 100644
85820 --- a/net/ipv4/inet_hashtables.c
85821 +++ b/net/ipv4/inet_hashtables.c
85822 @@ -18,12 +18,15 @@
85823 #include <linux/sched.h>
85824 #include <linux/slab.h>
85825 #include <linux/wait.h>
85826 +#include <linux/security.h>
85827
85828 #include <net/inet_connection_sock.h>
85829 #include <net/inet_hashtables.h>
85830 #include <net/secure_seq.h>
85831 #include <net/ip.h>
85832
85833 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
85834 +
85835 /*
85836 * Allocate and initialize a new local port bind bucket.
85837 * The bindhash mutex for snum's hash chain must be held here.
85838 @@ -554,6 +557,8 @@ ok:
85839 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
85840 spin_unlock(&head->lock);
85841
85842 + gr_update_task_in_ip_table(current, inet_sk(sk));
85843 +
85844 if (tw) {
85845 inet_twsk_deschedule(tw, death_row);
85846 while (twrefcnt) {
85847 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
85848 index 000e3d2..5472da3 100644
85849 --- a/net/ipv4/inetpeer.c
85850 +++ b/net/ipv4/inetpeer.c
85851 @@ -503,8 +503,8 @@ relookup:
85852 if (p) {
85853 p->daddr = *daddr;
85854 atomic_set(&p->refcnt, 1);
85855 - atomic_set(&p->rid, 0);
85856 - atomic_set(&p->ip_id_count,
85857 + atomic_set_unchecked(&p->rid, 0);
85858 + atomic_set_unchecked(&p->ip_id_count,
85859 (daddr->family == AF_INET) ?
85860 secure_ip_id(daddr->addr.a4) :
85861 secure_ipv6_id(daddr->addr.a6));
85862 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
85863 index 52c273e..579060b 100644
85864 --- a/net/ipv4/ip_fragment.c
85865 +++ b/net/ipv4/ip_fragment.c
85866 @@ -311,7 +311,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
85867 return 0;
85868
85869 start = qp->rid;
85870 - end = atomic_inc_return(&peer->rid);
85871 + end = atomic_inc_return_unchecked(&peer->rid);
85872 qp->rid = end;
85873
85874 rc = qp->q.fragments && (end - start) > max;
85875 @@ -788,12 +788,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
85876
85877 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85878 {
85879 - struct ctl_table *table;
85880 + ctl_table_no_const *table = NULL;
85881 struct ctl_table_header *hdr;
85882
85883 - table = ip4_frags_ns_ctl_table;
85884 if (!net_eq(net, &init_net)) {
85885 - table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
85886 + table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
85887 if (table == NULL)
85888 goto err_alloc;
85889
85890 @@ -804,9 +803,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85891 /* Don't export sysctls to unprivileged users */
85892 if (net->user_ns != &init_user_ns)
85893 table[0].procname = NULL;
85894 - }
85895 + hdr = register_net_sysctl(net, "net/ipv4", table);
85896 + } else
85897 + hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
85898
85899 - hdr = register_net_sysctl(net, "net/ipv4", table);
85900 if (hdr == NULL)
85901 goto err_reg;
85902
85903 @@ -814,8 +814,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85904 return 0;
85905
85906 err_reg:
85907 - if (!net_eq(net, &init_net))
85908 - kfree(table);
85909 + kfree(table);
85910 err_alloc:
85911 return -ENOMEM;
85912 }
85913 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
85914 index 91d66db..4af7d99 100644
85915 --- a/net/ipv4/ip_gre.c
85916 +++ b/net/ipv4/ip_gre.c
85917 @@ -124,7 +124,7 @@ static bool log_ecn_error = true;
85918 module_param(log_ecn_error, bool, 0644);
85919 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
85920
85921 -static struct rtnl_link_ops ipgre_link_ops __read_mostly;
85922 +static struct rtnl_link_ops ipgre_link_ops;
85923 static int ipgre_tunnel_init(struct net_device *dev);
85924 static void ipgre_tunnel_setup(struct net_device *dev);
85925 static int ipgre_tunnel_bind_dev(struct net_device *dev);
85926 @@ -1823,7 +1823,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
85927 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
85928 };
85929
85930 -static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
85931 +static struct rtnl_link_ops ipgre_link_ops = {
85932 .kind = "gre",
85933 .maxtype = IFLA_GRE_MAX,
85934 .policy = ipgre_policy,
85935 @@ -1836,7 +1836,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
85936 .fill_info = ipgre_fill_info,
85937 };
85938
85939 -static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
85940 +static struct rtnl_link_ops ipgre_tap_ops = {
85941 .kind = "gretap",
85942 .maxtype = IFLA_GRE_MAX,
85943 .policy = ipgre_policy,
85944 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
85945 index d9c4f11..02b82dbc 100644
85946 --- a/net/ipv4/ip_sockglue.c
85947 +++ b/net/ipv4/ip_sockglue.c
85948 @@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
85949 len = min_t(unsigned int, len, opt->optlen);
85950 if (put_user(len, optlen))
85951 return -EFAULT;
85952 - if (copy_to_user(optval, opt->__data, len))
85953 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
85954 + copy_to_user(optval, opt->__data, len))
85955 return -EFAULT;
85956 return 0;
85957 }
85958 @@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
85959 if (sk->sk_type != SOCK_STREAM)
85960 return -ENOPROTOOPT;
85961
85962 - msg.msg_control = optval;
85963 + msg.msg_control = (void __force_kernel *)optval;
85964 msg.msg_controllen = len;
85965 msg.msg_flags = flags;
85966
85967 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
85968 index c3a4233..1412161 100644
85969 --- a/net/ipv4/ip_vti.c
85970 +++ b/net/ipv4/ip_vti.c
85971 @@ -47,7 +47,7 @@
85972 #define HASH_SIZE 16
85973 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
85974
85975 -static struct rtnl_link_ops vti_link_ops __read_mostly;
85976 +static struct rtnl_link_ops vti_link_ops;
85977
85978 static int vti_net_id __read_mostly;
85979 struct vti_net {
85980 @@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
85981 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
85982 };
85983
85984 -static struct rtnl_link_ops vti_link_ops __read_mostly = {
85985 +static struct rtnl_link_ops vti_link_ops = {
85986 .kind = "vti",
85987 .maxtype = IFLA_VTI_MAX,
85988 .policy = vti_policy,
85989 diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
85990 index f01d1b1..8fe03ad 100644
85991 --- a/net/ipv4/ipcomp.c
85992 +++ b/net/ipv4/ipcomp.c
85993 @@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
85994 return;
85995
85996 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
85997 - atomic_inc(&flow_cache_genid);
85998 + atomic_inc_unchecked(&flow_cache_genid);
85999 rt_genid_bump(net);
86000
86001 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
86002 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
86003 index bf6c5cf..ab2e9c6 100644
86004 --- a/net/ipv4/ipconfig.c
86005 +++ b/net/ipv4/ipconfig.c
86006 @@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
86007
86008 mm_segment_t oldfs = get_fs();
86009 set_fs(get_ds());
86010 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86011 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86012 set_fs(oldfs);
86013 return res;
86014 }
86015 @@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
86016
86017 mm_segment_t oldfs = get_fs();
86018 set_fs(get_ds());
86019 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86020 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86021 set_fs(oldfs);
86022 return res;
86023 }
86024 @@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
86025
86026 mm_segment_t oldfs = get_fs();
86027 set_fs(get_ds());
86028 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
86029 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
86030 set_fs(oldfs);
86031 return res;
86032 }
86033 diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
86034 index 8f024d4..8b3500c 100644
86035 --- a/net/ipv4/ipip.c
86036 +++ b/net/ipv4/ipip.c
86037 @@ -138,7 +138,7 @@ struct ipip_net {
86038 static int ipip_tunnel_init(struct net_device *dev);
86039 static void ipip_tunnel_setup(struct net_device *dev);
86040 static void ipip_dev_free(struct net_device *dev);
86041 -static struct rtnl_link_ops ipip_link_ops __read_mostly;
86042 +static struct rtnl_link_ops ipip_link_ops;
86043
86044 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
86045 struct rtnl_link_stats64 *tot)
86046 @@ -974,7 +974,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
86047 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
86048 };
86049
86050 -static struct rtnl_link_ops ipip_link_ops __read_mostly = {
86051 +static struct rtnl_link_ops ipip_link_ops = {
86052 .kind = "ipip",
86053 .maxtype = IFLA_IPTUN_MAX,
86054 .policy = ipip_policy,
86055 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
86056 index 7dc6a97..229c61b 100644
86057 --- a/net/ipv4/netfilter/arp_tables.c
86058 +++ b/net/ipv4/netfilter/arp_tables.c
86059 @@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
86060 #endif
86061
86062 static int get_info(struct net *net, void __user *user,
86063 - const int *len, int compat)
86064 + int len, int compat)
86065 {
86066 char name[XT_TABLE_MAXNAMELEN];
86067 struct xt_table *t;
86068 int ret;
86069
86070 - if (*len != sizeof(struct arpt_getinfo)) {
86071 - duprintf("length %u != %Zu\n", *len,
86072 + if (len != sizeof(struct arpt_getinfo)) {
86073 + duprintf("length %u != %Zu\n", len,
86074 sizeof(struct arpt_getinfo));
86075 return -EINVAL;
86076 }
86077 @@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
86078 info.size = private->size;
86079 strcpy(info.name, name);
86080
86081 - if (copy_to_user(user, &info, *len) != 0)
86082 + if (copy_to_user(user, &info, len) != 0)
86083 ret = -EFAULT;
86084 else
86085 ret = 0;
86086 @@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
86087
86088 switch (cmd) {
86089 case ARPT_SO_GET_INFO:
86090 - ret = get_info(sock_net(sk), user, len, 1);
86091 + ret = get_info(sock_net(sk), user, *len, 1);
86092 break;
86093 case ARPT_SO_GET_ENTRIES:
86094 ret = compat_get_entries(sock_net(sk), user, len);
86095 @@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
86096
86097 switch (cmd) {
86098 case ARPT_SO_GET_INFO:
86099 - ret = get_info(sock_net(sk), user, len, 0);
86100 + ret = get_info(sock_net(sk), user, *len, 0);
86101 break;
86102
86103 case ARPT_SO_GET_ENTRIES:
86104 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
86105 index 3efcf87..5247916 100644
86106 --- a/net/ipv4/netfilter/ip_tables.c
86107 +++ b/net/ipv4/netfilter/ip_tables.c
86108 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
86109 #endif
86110
86111 static int get_info(struct net *net, void __user *user,
86112 - const int *len, int compat)
86113 + int len, int compat)
86114 {
86115 char name[XT_TABLE_MAXNAMELEN];
86116 struct xt_table *t;
86117 int ret;
86118
86119 - if (*len != sizeof(struct ipt_getinfo)) {
86120 - duprintf("length %u != %zu\n", *len,
86121 + if (len != sizeof(struct ipt_getinfo)) {
86122 + duprintf("length %u != %zu\n", len,
86123 sizeof(struct ipt_getinfo));
86124 return -EINVAL;
86125 }
86126 @@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
86127 info.size = private->size;
86128 strcpy(info.name, name);
86129
86130 - if (copy_to_user(user, &info, *len) != 0)
86131 + if (copy_to_user(user, &info, len) != 0)
86132 ret = -EFAULT;
86133 else
86134 ret = 0;
86135 @@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86136
86137 switch (cmd) {
86138 case IPT_SO_GET_INFO:
86139 - ret = get_info(sock_net(sk), user, len, 1);
86140 + ret = get_info(sock_net(sk), user, *len, 1);
86141 break;
86142 case IPT_SO_GET_ENTRIES:
86143 ret = compat_get_entries(sock_net(sk), user, len);
86144 @@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86145
86146 switch (cmd) {
86147 case IPT_SO_GET_INFO:
86148 - ret = get_info(sock_net(sk), user, len, 0);
86149 + ret = get_info(sock_net(sk), user, *len, 0);
86150 break;
86151
86152 case IPT_SO_GET_ENTRIES:
86153 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
86154 index 2e91006..f084394 100644
86155 --- a/net/ipv4/ping.c
86156 +++ b/net/ipv4/ping.c
86157 @@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
86158 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
86159 0, sock_i_ino(sp),
86160 atomic_read(&sp->sk_refcnt), sp,
86161 - atomic_read(&sp->sk_drops), len);
86162 + atomic_read_unchecked(&sp->sk_drops), len);
86163 }
86164
86165 static int ping_seq_show(struct seq_file *seq, void *v)
86166 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
86167 index dd44e0a..06dcca4 100644
86168 --- a/net/ipv4/raw.c
86169 +++ b/net/ipv4/raw.c
86170 @@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
86171 int raw_rcv(struct sock *sk, struct sk_buff *skb)
86172 {
86173 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
86174 - atomic_inc(&sk->sk_drops);
86175 + atomic_inc_unchecked(&sk->sk_drops);
86176 kfree_skb(skb);
86177 return NET_RX_DROP;
86178 }
86179 @@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
86180
86181 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
86182 {
86183 + struct icmp_filter filter;
86184 +
86185 if (optlen > sizeof(struct icmp_filter))
86186 optlen = sizeof(struct icmp_filter);
86187 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
86188 + if (copy_from_user(&filter, optval, optlen))
86189 return -EFAULT;
86190 + raw_sk(sk)->filter = filter;
86191 return 0;
86192 }
86193
86194 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
86195 {
86196 int len, ret = -EFAULT;
86197 + struct icmp_filter filter;
86198
86199 if (get_user(len, optlen))
86200 goto out;
86201 @@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
86202 if (len > sizeof(struct icmp_filter))
86203 len = sizeof(struct icmp_filter);
86204 ret = -EFAULT;
86205 - if (put_user(len, optlen) ||
86206 - copy_to_user(optval, &raw_sk(sk)->filter, len))
86207 + filter = raw_sk(sk)->filter;
86208 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
86209 goto out;
86210 ret = 0;
86211 out: return ret;
86212 @@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
86213 0, 0L, 0,
86214 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
86215 0, sock_i_ino(sp),
86216 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
86217 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
86218 }
86219
86220 static int raw_seq_show(struct seq_file *seq, void *v)
86221 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
86222 index 6e28514..5e1b055 100644
86223 --- a/net/ipv4/route.c
86224 +++ b/net/ipv4/route.c
86225 @@ -2553,34 +2553,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
86226 .maxlen = sizeof(int),
86227 .mode = 0200,
86228 .proc_handler = ipv4_sysctl_rtcache_flush,
86229 + .extra1 = &init_net,
86230 },
86231 { },
86232 };
86233
86234 static __net_init int sysctl_route_net_init(struct net *net)
86235 {
86236 - struct ctl_table *tbl;
86237 + ctl_table_no_const *tbl = NULL;
86238
86239 - tbl = ipv4_route_flush_table;
86240 if (!net_eq(net, &init_net)) {
86241 - tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86242 + tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86243 if (tbl == NULL)
86244 goto err_dup;
86245
86246 /* Don't export sysctls to unprivileged users */
86247 if (net->user_ns != &init_user_ns)
86248 tbl[0].procname = NULL;
86249 - }
86250 - tbl[0].extra1 = net;
86251 + tbl[0].extra1 = net;
86252 + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86253 + } else
86254 + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
86255
86256 - net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86257 if (net->ipv4.route_hdr == NULL)
86258 goto err_reg;
86259 return 0;
86260
86261 err_reg:
86262 - if (tbl != ipv4_route_flush_table)
86263 - kfree(tbl);
86264 + kfree(tbl);
86265 err_dup:
86266 return -ENOMEM;
86267 }
86268 @@ -2603,7 +2603,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
86269
86270 static __net_init int rt_genid_init(struct net *net)
86271 {
86272 - atomic_set(&net->rt_genid, 0);
86273 + atomic_set_unchecked(&net->rt_genid, 0);
86274 get_random_bytes(&net->ipv4.dev_addr_genid,
86275 sizeof(net->ipv4.dev_addr_genid));
86276 return 0;
86277 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
86278 index 960fd29..d55bf64 100644
86279 --- a/net/ipv4/sysctl_net_ipv4.c
86280 +++ b/net/ipv4/sysctl_net_ipv4.c
86281 @@ -55,7 +55,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
86282 {
86283 int ret;
86284 int range[2];
86285 - ctl_table tmp = {
86286 + ctl_table_no_const tmp = {
86287 .data = &range,
86288 .maxlen = sizeof(range),
86289 .mode = table->mode,
86290 @@ -108,7 +108,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
86291 int ret;
86292 gid_t urange[2];
86293 kgid_t low, high;
86294 - ctl_table tmp = {
86295 + ctl_table_no_const tmp = {
86296 .data = &urange,
86297 .maxlen = sizeof(urange),
86298 .mode = table->mode,
86299 @@ -139,7 +139,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
86300 void __user *buffer, size_t *lenp, loff_t *ppos)
86301 {
86302 char val[TCP_CA_NAME_MAX];
86303 - ctl_table tbl = {
86304 + ctl_table_no_const tbl = {
86305 .data = val,
86306 .maxlen = TCP_CA_NAME_MAX,
86307 };
86308 @@ -158,7 +158,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
86309 void __user *buffer, size_t *lenp,
86310 loff_t *ppos)
86311 {
86312 - ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
86313 + ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
86314 int ret;
86315
86316 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86317 @@ -175,7 +175,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
86318 void __user *buffer, size_t *lenp,
86319 loff_t *ppos)
86320 {
86321 - ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
86322 + ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
86323 int ret;
86324
86325 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86326 @@ -201,15 +201,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86327 struct mem_cgroup *memcg;
86328 #endif
86329
86330 - ctl_table tmp = {
86331 + ctl_table_no_const tmp = {
86332 .data = &vec,
86333 .maxlen = sizeof(vec),
86334 .mode = ctl->mode,
86335 };
86336
86337 if (!write) {
86338 - ctl->data = &net->ipv4.sysctl_tcp_mem;
86339 - return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
86340 + ctl_table_no_const tcp_mem = *ctl;
86341 +
86342 + tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
86343 + return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
86344 }
86345
86346 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
86347 @@ -236,7 +238,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86348 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
86349 size_t *lenp, loff_t *ppos)
86350 {
86351 - ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
86352 + ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
86353 struct tcp_fastopen_context *ctxt;
86354 int ret;
86355 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
86356 @@ -477,7 +479,7 @@ static struct ctl_table ipv4_table[] = {
86357 },
86358 {
86359 .procname = "ip_local_reserved_ports",
86360 - .data = NULL, /* initialized in sysctl_ipv4_init */
86361 + .data = sysctl_local_reserved_ports,
86362 .maxlen = 65536,
86363 .mode = 0644,
86364 .proc_handler = proc_do_large_bitmap,
86365 @@ -856,11 +858,10 @@ static struct ctl_table ipv4_net_table[] = {
86366
86367 static __net_init int ipv4_sysctl_init_net(struct net *net)
86368 {
86369 - struct ctl_table *table;
86370 + ctl_table_no_const *table = NULL;
86371
86372 - table = ipv4_net_table;
86373 if (!net_eq(net, &init_net)) {
86374 - table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
86375 + table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
86376 if (table == NULL)
86377 goto err_alloc;
86378
86379 @@ -895,15 +896,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
86380
86381 tcp_init_mem(net);
86382
86383 - net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
86384 + if (!net_eq(net, &init_net))
86385 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
86386 + else
86387 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
86388 if (net->ipv4.ipv4_hdr == NULL)
86389 goto err_reg;
86390
86391 return 0;
86392
86393 err_reg:
86394 - if (!net_eq(net, &init_net))
86395 - kfree(table);
86396 + kfree(table);
86397 err_alloc:
86398 return -ENOMEM;
86399 }
86400 @@ -925,16 +928,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
86401 static __init int sysctl_ipv4_init(void)
86402 {
86403 struct ctl_table_header *hdr;
86404 - struct ctl_table *i;
86405 -
86406 - for (i = ipv4_table; i->procname; i++) {
86407 - if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
86408 - i->data = sysctl_local_reserved_ports;
86409 - break;
86410 - }
86411 - }
86412 - if (!i->procname)
86413 - return -EINVAL;
86414
86415 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
86416 if (hdr == NULL)
86417 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
86418 index 13b9c08..d33a8d0 100644
86419 --- a/net/ipv4/tcp_input.c
86420 +++ b/net/ipv4/tcp_input.c
86421 @@ -4724,7 +4724,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
86422 * simplifies code)
86423 */
86424 static void
86425 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
86426 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
86427 struct sk_buff *head, struct sk_buff *tail,
86428 u32 start, u32 end)
86429 {
86430 @@ -5838,6 +5838,7 @@ discard:
86431 tcp_paws_reject(&tp->rx_opt, 0))
86432 goto discard_and_undo;
86433
86434 +#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
86435 if (th->syn) {
86436 /* We see SYN without ACK. It is attempt of
86437 * simultaneous connect with crossed SYNs.
86438 @@ -5888,6 +5889,7 @@ discard:
86439 goto discard;
86440 #endif
86441 }
86442 +#endif
86443 /* "fifth, if neither of the SYN or RST bits is set then
86444 * drop the segment and return."
86445 */
86446 @@ -5932,7 +5934,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
86447 goto discard;
86448
86449 if (th->syn) {
86450 - if (th->fin)
86451 + if (th->fin || th->urg || th->psh)
86452 goto discard;
86453 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
86454 return 1;
86455 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
86456 index d09203c..fd5cc91 100644
86457 --- a/net/ipv4/tcp_ipv4.c
86458 +++ b/net/ipv4/tcp_ipv4.c
86459 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
86460 EXPORT_SYMBOL(sysctl_tcp_low_latency);
86461
86462
86463 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86464 +extern int grsec_enable_blackhole;
86465 +#endif
86466 +
86467 #ifdef CONFIG_TCP_MD5SIG
86468 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
86469 __be32 daddr, __be32 saddr, const struct tcphdr *th);
86470 @@ -1897,6 +1901,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
86471 return 0;
86472
86473 reset:
86474 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86475 + if (!grsec_enable_blackhole)
86476 +#endif
86477 tcp_v4_send_reset(rsk, skb);
86478 discard:
86479 kfree_skb(skb);
86480 @@ -1996,12 +2003,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
86481 TCP_SKB_CB(skb)->sacked = 0;
86482
86483 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
86484 - if (!sk)
86485 + if (!sk) {
86486 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86487 + ret = 1;
86488 +#endif
86489 goto no_tcp_socket;
86490 -
86491 + }
86492 process:
86493 - if (sk->sk_state == TCP_TIME_WAIT)
86494 + if (sk->sk_state == TCP_TIME_WAIT) {
86495 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86496 + ret = 2;
86497 +#endif
86498 goto do_time_wait;
86499 + }
86500
86501 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
86502 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
86503 @@ -2052,6 +2066,10 @@ no_tcp_socket:
86504 bad_packet:
86505 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
86506 } else {
86507 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86508 + if (!grsec_enable_blackhole || (ret == 1 &&
86509 + (skb->dev->flags & IFF_LOOPBACK)))
86510 +#endif
86511 tcp_v4_send_reset(NULL, skb);
86512 }
86513
86514 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
86515 index b83a49c..6c562a7 100644
86516 --- a/net/ipv4/tcp_minisocks.c
86517 +++ b/net/ipv4/tcp_minisocks.c
86518 @@ -27,6 +27,10 @@
86519 #include <net/inet_common.h>
86520 #include <net/xfrm.h>
86521
86522 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86523 +extern int grsec_enable_blackhole;
86524 +#endif
86525 +
86526 int sysctl_tcp_syncookies __read_mostly = 1;
86527 EXPORT_SYMBOL(sysctl_tcp_syncookies);
86528
86529 @@ -744,7 +748,10 @@ embryonic_reset:
86530 * avoid becoming vulnerable to outside attack aiming at
86531 * resetting legit local connections.
86532 */
86533 - req->rsk_ops->send_reset(sk, skb);
86534 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86535 + if (!grsec_enable_blackhole)
86536 +#endif
86537 + req->rsk_ops->send_reset(sk, skb);
86538 } else if (fastopen) { /* received a valid RST pkt */
86539 reqsk_fastopen_remove(sk, req, true);
86540 tcp_reset(sk);
86541 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
86542 index d4943f6..e7a74a5 100644
86543 --- a/net/ipv4/tcp_probe.c
86544 +++ b/net/ipv4/tcp_probe.c
86545 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
86546 if (cnt + width >= len)
86547 break;
86548
86549 - if (copy_to_user(buf + cnt, tbuf, width))
86550 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
86551 return -EFAULT;
86552 cnt += width;
86553 }
86554 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
86555 index b78aac3..e18230b 100644
86556 --- a/net/ipv4/tcp_timer.c
86557 +++ b/net/ipv4/tcp_timer.c
86558 @@ -22,6 +22,10 @@
86559 #include <linux/gfp.h>
86560 #include <net/tcp.h>
86561
86562 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86563 +extern int grsec_lastack_retries;
86564 +#endif
86565 +
86566 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
86567 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
86568 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
86569 @@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
86570 }
86571 }
86572
86573 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86574 + if ((sk->sk_state == TCP_LAST_ACK) &&
86575 + (grsec_lastack_retries > 0) &&
86576 + (grsec_lastack_retries < retry_until))
86577 + retry_until = grsec_lastack_retries;
86578 +#endif
86579 +
86580 if (retransmits_timed_out(sk, retry_until,
86581 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
86582 /* Has it gone just too far? */
86583 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
86584 index 0a073a2..ddf6279 100644
86585 --- a/net/ipv4/udp.c
86586 +++ b/net/ipv4/udp.c
86587 @@ -87,6 +87,7 @@
86588 #include <linux/types.h>
86589 #include <linux/fcntl.h>
86590 #include <linux/module.h>
86591 +#include <linux/security.h>
86592 #include <linux/socket.h>
86593 #include <linux/sockios.h>
86594 #include <linux/igmp.h>
86595 @@ -111,6 +112,10 @@
86596 #include <trace/events/skb.h>
86597 #include "udp_impl.h"
86598
86599 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86600 +extern int grsec_enable_blackhole;
86601 +#endif
86602 +
86603 struct udp_table udp_table __read_mostly;
86604 EXPORT_SYMBOL(udp_table);
86605
86606 @@ -594,6 +599,9 @@ found:
86607 return s;
86608 }
86609
86610 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
86611 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
86612 +
86613 /*
86614 * This routine is called by the ICMP module when it gets some
86615 * sort of error condition. If err < 0 then the socket should
86616 @@ -889,9 +897,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
86617 dport = usin->sin_port;
86618 if (dport == 0)
86619 return -EINVAL;
86620 +
86621 + err = gr_search_udp_sendmsg(sk, usin);
86622 + if (err)
86623 + return err;
86624 } else {
86625 if (sk->sk_state != TCP_ESTABLISHED)
86626 return -EDESTADDRREQ;
86627 +
86628 + err = gr_search_udp_sendmsg(sk, NULL);
86629 + if (err)
86630 + return err;
86631 +
86632 daddr = inet->inet_daddr;
86633 dport = inet->inet_dport;
86634 /* Open fast path for connected socket.
86635 @@ -1133,7 +1150,7 @@ static unsigned int first_packet_length(struct sock *sk)
86636 udp_lib_checksum_complete(skb)) {
86637 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
86638 IS_UDPLITE(sk));
86639 - atomic_inc(&sk->sk_drops);
86640 + atomic_inc_unchecked(&sk->sk_drops);
86641 __skb_unlink(skb, rcvq);
86642 __skb_queue_tail(&list_kill, skb);
86643 }
86644 @@ -1219,6 +1236,10 @@ try_again:
86645 if (!skb)
86646 goto out;
86647
86648 + err = gr_search_udp_recvmsg(sk, skb);
86649 + if (err)
86650 + goto out_free;
86651 +
86652 ulen = skb->len - sizeof(struct udphdr);
86653 copied = len;
86654 if (copied > ulen)
86655 @@ -1252,7 +1273,7 @@ try_again:
86656 if (unlikely(err)) {
86657 trace_kfree_skb(skb, udp_recvmsg);
86658 if (!peeked) {
86659 - atomic_inc(&sk->sk_drops);
86660 + atomic_inc_unchecked(&sk->sk_drops);
86661 UDP_INC_STATS_USER(sock_net(sk),
86662 UDP_MIB_INERRORS, is_udplite);
86663 }
86664 @@ -1535,7 +1556,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86665
86666 drop:
86667 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
86668 - atomic_inc(&sk->sk_drops);
86669 + atomic_inc_unchecked(&sk->sk_drops);
86670 kfree_skb(skb);
86671 return -1;
86672 }
86673 @@ -1554,7 +1575,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
86674 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
86675
86676 if (!skb1) {
86677 - atomic_inc(&sk->sk_drops);
86678 + atomic_inc_unchecked(&sk->sk_drops);
86679 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
86680 IS_UDPLITE(sk));
86681 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
86682 @@ -1723,6 +1744,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
86683 goto csum_error;
86684
86685 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
86686 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86687 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
86688 +#endif
86689 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
86690
86691 /*
86692 @@ -2152,7 +2176,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
86693 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
86694 0, sock_i_ino(sp),
86695 atomic_read(&sp->sk_refcnt), sp,
86696 - atomic_read(&sp->sk_drops), len);
86697 + atomic_read_unchecked(&sp->sk_drops), len);
86698 }
86699
86700 int udp4_seq_show(struct seq_file *seq, void *v)
86701 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
86702 index 9a459be..086b866 100644
86703 --- a/net/ipv4/xfrm4_policy.c
86704 +++ b/net/ipv4/xfrm4_policy.c
86705 @@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
86706
86707 static int __net_init xfrm4_net_init(struct net *net)
86708 {
86709 - struct ctl_table *table;
86710 + ctl_table_no_const *table = NULL;
86711 struct ctl_table_header *hdr;
86712
86713 - table = xfrm4_policy_table;
86714 if (!net_eq(net, &init_net)) {
86715 - table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
86716 + table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
86717 if (!table)
86718 goto err_alloc;
86719
86720 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
86721 - }
86722 -
86723 - hdr = register_net_sysctl(net, "net/ipv4", table);
86724 + hdr = register_net_sysctl(net, "net/ipv4", table);
86725 + } else
86726 + hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
86727 if (!hdr)
86728 goto err_reg;
86729
86730 @@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
86731 return 0;
86732
86733 err_reg:
86734 - if (!net_eq(net, &init_net))
86735 - kfree(table);
86736 + kfree(table);
86737 err_alloc:
86738 return -ENOMEM;
86739 }
86740 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
86741 index dae802c..bfa4baa 100644
86742 --- a/net/ipv6/addrconf.c
86743 +++ b/net/ipv6/addrconf.c
86744 @@ -2274,7 +2274,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
86745 p.iph.ihl = 5;
86746 p.iph.protocol = IPPROTO_IPV6;
86747 p.iph.ttl = 64;
86748 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
86749 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
86750
86751 if (ops->ndo_do_ioctl) {
86752 mm_segment_t oldfs = get_fs();
86753 @@ -4410,7 +4410,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
86754 int *valp = ctl->data;
86755 int val = *valp;
86756 loff_t pos = *ppos;
86757 - ctl_table lctl;
86758 + ctl_table_no_const lctl;
86759 int ret;
86760
86761 /*
86762 @@ -4492,7 +4492,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
86763 int *valp = ctl->data;
86764 int val = *valp;
86765 loff_t pos = *ppos;
86766 - ctl_table lctl;
86767 + ctl_table_no_const lctl;
86768 int ret;
86769
86770 /*
86771 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
86772 index fff5bdd..15194fb 100644
86773 --- a/net/ipv6/icmp.c
86774 +++ b/net/ipv6/icmp.c
86775 @@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
86776
86777 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
86778 {
86779 - struct ctl_table *table;
86780 + ctl_table_no_const *table;
86781
86782 table = kmemdup(ipv6_icmp_table_template,
86783 sizeof(ipv6_icmp_table_template),
86784 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
86785 index e4efffe..791fe2f 100644
86786 --- a/net/ipv6/ip6_gre.c
86787 +++ b/net/ipv6/ip6_gre.c
86788 @@ -73,7 +73,7 @@ struct ip6gre_net {
86789 struct net_device *fb_tunnel_dev;
86790 };
86791
86792 -static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
86793 +static struct rtnl_link_ops ip6gre_link_ops;
86794 static int ip6gre_tunnel_init(struct net_device *dev);
86795 static void ip6gre_tunnel_setup(struct net_device *dev);
86796 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
86797 @@ -1135,6 +1135,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
86798 }
86799 if (t == NULL)
86800 t = netdev_priv(dev);
86801 + memset(&p, 0, sizeof(p));
86802 ip6gre_tnl_parm_to_user(&p, &t->parms);
86803 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
86804 err = -EFAULT;
86805 @@ -1182,6 +1183,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
86806 if (t) {
86807 err = 0;
86808
86809 + memset(&p, 0, sizeof(p));
86810 ip6gre_tnl_parm_to_user(&p, &t->parms);
86811 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
86812 err = -EFAULT;
86813 @@ -1335,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
86814 }
86815
86816
86817 -static struct inet6_protocol ip6gre_protocol __read_mostly = {
86818 +static struct inet6_protocol ip6gre_protocol = {
86819 .handler = ip6gre_rcv,
86820 .err_handler = ip6gre_err,
86821 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
86822 @@ -1669,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
86823 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
86824 };
86825
86826 -static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
86827 +static struct rtnl_link_ops ip6gre_link_ops = {
86828 .kind = "ip6gre",
86829 .maxtype = IFLA_GRE_MAX,
86830 .policy = ip6gre_policy,
86831 @@ -1682,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
86832 .fill_info = ip6gre_fill_info,
86833 };
86834
86835 -static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
86836 +static struct rtnl_link_ops ip6gre_tap_ops = {
86837 .kind = "ip6gretap",
86838 .maxtype = IFLA_GRE_MAX,
86839 .policy = ip6gre_policy,
86840 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
86841 index fff83cb..82d49dd 100644
86842 --- a/net/ipv6/ip6_tunnel.c
86843 +++ b/net/ipv6/ip6_tunnel.c
86844 @@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
86845
86846 static int ip6_tnl_dev_init(struct net_device *dev);
86847 static void ip6_tnl_dev_setup(struct net_device *dev);
86848 -static struct rtnl_link_ops ip6_link_ops __read_mostly;
86849 +static struct rtnl_link_ops ip6_link_ops;
86850
86851 static int ip6_tnl_net_id __read_mostly;
86852 struct ip6_tnl_net {
86853 @@ -1684,7 +1684,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
86854 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
86855 };
86856
86857 -static struct rtnl_link_ops ip6_link_ops __read_mostly = {
86858 +static struct rtnl_link_ops ip6_link_ops = {
86859 .kind = "ip6tnl",
86860 .maxtype = IFLA_IPTUN_MAX,
86861 .policy = ip6_tnl_policy,
86862 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
86863 index d1e2e8e..51c19ae 100644
86864 --- a/net/ipv6/ipv6_sockglue.c
86865 +++ b/net/ipv6/ipv6_sockglue.c
86866 @@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
86867 if (sk->sk_type != SOCK_STREAM)
86868 return -ENOPROTOOPT;
86869
86870 - msg.msg_control = optval;
86871 + msg.msg_control = (void __force_kernel *)optval;
86872 msg.msg_controllen = len;
86873 msg.msg_flags = flags;
86874
86875 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
86876 index 341b54a..591e8ed 100644
86877 --- a/net/ipv6/netfilter/ip6_tables.c
86878 +++ b/net/ipv6/netfilter/ip6_tables.c
86879 @@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
86880 #endif
86881
86882 static int get_info(struct net *net, void __user *user,
86883 - const int *len, int compat)
86884 + int len, int compat)
86885 {
86886 char name[XT_TABLE_MAXNAMELEN];
86887 struct xt_table *t;
86888 int ret;
86889
86890 - if (*len != sizeof(struct ip6t_getinfo)) {
86891 - duprintf("length %u != %zu\n", *len,
86892 + if (len != sizeof(struct ip6t_getinfo)) {
86893 + duprintf("length %u != %zu\n", len,
86894 sizeof(struct ip6t_getinfo));
86895 return -EINVAL;
86896 }
86897 @@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
86898 info.size = private->size;
86899 strcpy(info.name, name);
86900
86901 - if (copy_to_user(user, &info, *len) != 0)
86902 + if (copy_to_user(user, &info, len) != 0)
86903 ret = -EFAULT;
86904 else
86905 ret = 0;
86906 @@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86907
86908 switch (cmd) {
86909 case IP6T_SO_GET_INFO:
86910 - ret = get_info(sock_net(sk), user, len, 1);
86911 + ret = get_info(sock_net(sk), user, *len, 1);
86912 break;
86913 case IP6T_SO_GET_ENTRIES:
86914 ret = compat_get_entries(sock_net(sk), user, len);
86915 @@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86916
86917 switch (cmd) {
86918 case IP6T_SO_GET_INFO:
86919 - ret = get_info(sock_net(sk), user, len, 0);
86920 + ret = get_info(sock_net(sk), user, *len, 0);
86921 break;
86922
86923 case IP6T_SO_GET_ENTRIES:
86924 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
86925 index 6700069..1e50f42 100644
86926 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
86927 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
86928 @@ -89,12 +89,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
86929
86930 static int nf_ct_frag6_sysctl_register(struct net *net)
86931 {
86932 - struct ctl_table *table;
86933 + ctl_table_no_const *table = NULL;
86934 struct ctl_table_header *hdr;
86935
86936 - table = nf_ct_frag6_sysctl_table;
86937 if (!net_eq(net, &init_net)) {
86938 - table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
86939 + table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
86940 GFP_KERNEL);
86941 if (table == NULL)
86942 goto err_alloc;
86943 @@ -102,9 +101,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
86944 table[0].data = &net->nf_frag.frags.timeout;
86945 table[1].data = &net->nf_frag.frags.low_thresh;
86946 table[2].data = &net->nf_frag.frags.high_thresh;
86947 - }
86948 -
86949 - hdr = register_net_sysctl(net, "net/netfilter", table);
86950 + hdr = register_net_sysctl(net, "net/netfilter", table);
86951 + } else
86952 + hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
86953 if (hdr == NULL)
86954 goto err_reg;
86955
86956 @@ -112,8 +111,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
86957 return 0;
86958
86959 err_reg:
86960 - if (!net_eq(net, &init_net))
86961 - kfree(table);
86962 + kfree(table);
86963 err_alloc:
86964 return -ENOMEM;
86965 }
86966 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
86967 index 330b5e7..796fbf1 100644
86968 --- a/net/ipv6/raw.c
86969 +++ b/net/ipv6/raw.c
86970 @@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
86971 {
86972 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
86973 skb_checksum_complete(skb)) {
86974 - atomic_inc(&sk->sk_drops);
86975 + atomic_inc_unchecked(&sk->sk_drops);
86976 kfree_skb(skb);
86977 return NET_RX_DROP;
86978 }
86979 @@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
86980 struct raw6_sock *rp = raw6_sk(sk);
86981
86982 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
86983 - atomic_inc(&sk->sk_drops);
86984 + atomic_inc_unchecked(&sk->sk_drops);
86985 kfree_skb(skb);
86986 return NET_RX_DROP;
86987 }
86988 @@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
86989
86990 if (inet->hdrincl) {
86991 if (skb_checksum_complete(skb)) {
86992 - atomic_inc(&sk->sk_drops);
86993 + atomic_inc_unchecked(&sk->sk_drops);
86994 kfree_skb(skb);
86995 return NET_RX_DROP;
86996 }
86997 @@ -603,7 +603,7 @@ out:
86998 return err;
86999 }
87000
87001 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
87002 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
87003 struct flowi6 *fl6, struct dst_entry **dstp,
87004 unsigned int flags)
87005 {
87006 @@ -915,12 +915,15 @@ do_confirm:
87007 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
87008 char __user *optval, int optlen)
87009 {
87010 + struct icmp6_filter filter;
87011 +
87012 switch (optname) {
87013 case ICMPV6_FILTER:
87014 if (optlen > sizeof(struct icmp6_filter))
87015 optlen = sizeof(struct icmp6_filter);
87016 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
87017 + if (copy_from_user(&filter, optval, optlen))
87018 return -EFAULT;
87019 + raw6_sk(sk)->filter = filter;
87020 return 0;
87021 default:
87022 return -ENOPROTOOPT;
87023 @@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87024 char __user *optval, int __user *optlen)
87025 {
87026 int len;
87027 + struct icmp6_filter filter;
87028
87029 switch (optname) {
87030 case ICMPV6_FILTER:
87031 @@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87032 len = sizeof(struct icmp6_filter);
87033 if (put_user(len, optlen))
87034 return -EFAULT;
87035 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
87036 + filter = raw6_sk(sk)->filter;
87037 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
87038 return -EFAULT;
87039 return 0;
87040 default:
87041 @@ -1252,7 +1257,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87042 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87043 0,
87044 sock_i_ino(sp),
87045 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87046 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87047 }
87048
87049 static int raw6_seq_show(struct seq_file *seq, void *v)
87050 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
87051 index 0ba10e5..c14a4f6 100644
87052 --- a/net/ipv6/reassembly.c
87053 +++ b/net/ipv6/reassembly.c
87054 @@ -602,12 +602,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
87055
87056 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87057 {
87058 - struct ctl_table *table;
87059 + ctl_table_no_const *table = NULL;
87060 struct ctl_table_header *hdr;
87061
87062 - table = ip6_frags_ns_ctl_table;
87063 if (!net_eq(net, &init_net)) {
87064 - table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87065 + table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87066 if (table == NULL)
87067 goto err_alloc;
87068
87069 @@ -618,9 +617,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87070 /* Don't export sysctls to unprivileged users */
87071 if (net->user_ns != &init_user_ns)
87072 table[0].procname = NULL;
87073 - }
87074 + hdr = register_net_sysctl(net, "net/ipv6", table);
87075 + } else
87076 + hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
87077
87078 - hdr = register_net_sysctl(net, "net/ipv6", table);
87079 if (hdr == NULL)
87080 goto err_reg;
87081
87082 @@ -628,8 +628,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87083 return 0;
87084
87085 err_reg:
87086 - if (!net_eq(net, &init_net))
87087 - kfree(table);
87088 + kfree(table);
87089 err_alloc:
87090 return -ENOMEM;
87091 }
87092 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
87093 index e5fe004..9fe3e8e 100644
87094 --- a/net/ipv6/route.c
87095 +++ b/net/ipv6/route.c
87096 @@ -2881,7 +2881,7 @@ ctl_table ipv6_route_table_template[] = {
87097
87098 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
87099 {
87100 - struct ctl_table *table;
87101 + ctl_table_no_const *table;
87102
87103 table = kmemdup(ipv6_route_table_template,
87104 sizeof(ipv6_route_table_template),
87105 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
87106 index 02f96dc..4a5a6e5 100644
87107 --- a/net/ipv6/sit.c
87108 +++ b/net/ipv6/sit.c
87109 @@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
87110 static void ipip6_dev_free(struct net_device *dev);
87111 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
87112 __be32 *v4dst);
87113 -static struct rtnl_link_ops sit_link_ops __read_mostly;
87114 +static struct rtnl_link_ops sit_link_ops;
87115
87116 static int sit_net_id __read_mostly;
87117 struct sit_net {
87118 @@ -1486,7 +1486,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
87119 #endif
87120 };
87121
87122 -static struct rtnl_link_ops sit_link_ops __read_mostly = {
87123 +static struct rtnl_link_ops sit_link_ops = {
87124 .kind = "sit",
87125 .maxtype = IFLA_IPTUN_MAX,
87126 .policy = ipip6_policy,
87127 diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
87128 index e85c48b..b8268d3 100644
87129 --- a/net/ipv6/sysctl_net_ipv6.c
87130 +++ b/net/ipv6/sysctl_net_ipv6.c
87131 @@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
87132
87133 static int __net_init ipv6_sysctl_net_init(struct net *net)
87134 {
87135 - struct ctl_table *ipv6_table;
87136 + ctl_table_no_const *ipv6_table;
87137 struct ctl_table *ipv6_route_table;
87138 struct ctl_table *ipv6_icmp_table;
87139 int err;
87140 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
87141 index 46a5be8..415688d 100644
87142 --- a/net/ipv6/tcp_ipv6.c
87143 +++ b/net/ipv6/tcp_ipv6.c
87144 @@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
87145 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
87146 }
87147
87148 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87149 +extern int grsec_enable_blackhole;
87150 +#endif
87151 +
87152 static void tcp_v6_hash(struct sock *sk)
87153 {
87154 if (sk->sk_state != TCP_CLOSE) {
87155 @@ -1446,6 +1450,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
87156 return 0;
87157
87158 reset:
87159 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87160 + if (!grsec_enable_blackhole)
87161 +#endif
87162 tcp_v6_send_reset(sk, skb);
87163 discard:
87164 if (opt_skb)
87165 @@ -1527,12 +1534,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
87166 TCP_SKB_CB(skb)->sacked = 0;
87167
87168 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87169 - if (!sk)
87170 + if (!sk) {
87171 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87172 + ret = 1;
87173 +#endif
87174 goto no_tcp_socket;
87175 + }
87176
87177 process:
87178 - if (sk->sk_state == TCP_TIME_WAIT)
87179 + if (sk->sk_state == TCP_TIME_WAIT) {
87180 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87181 + ret = 2;
87182 +#endif
87183 goto do_time_wait;
87184 + }
87185
87186 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
87187 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87188 @@ -1581,6 +1596,10 @@ no_tcp_socket:
87189 bad_packet:
87190 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87191 } else {
87192 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87193 + if (!grsec_enable_blackhole || (ret == 1 &&
87194 + (skb->dev->flags & IFF_LOOPBACK)))
87195 +#endif
87196 tcp_v6_send_reset(NULL, skb);
87197 }
87198
87199 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
87200 index d8e5e85..5a447f4 100644
87201 --- a/net/ipv6/udp.c
87202 +++ b/net/ipv6/udp.c
87203 @@ -52,6 +52,10 @@
87204 #include <trace/events/skb.h>
87205 #include "udp_impl.h"
87206
87207 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87208 +extern int grsec_enable_blackhole;
87209 +#endif
87210 +
87211 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
87212 {
87213 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
87214 @@ -419,7 +423,7 @@ try_again:
87215 if (unlikely(err)) {
87216 trace_kfree_skb(skb, udpv6_recvmsg);
87217 if (!peeked) {
87218 - atomic_inc(&sk->sk_drops);
87219 + atomic_inc_unchecked(&sk->sk_drops);
87220 if (is_udp4)
87221 UDP_INC_STATS_USER(sock_net(sk),
87222 UDP_MIB_INERRORS,
87223 @@ -657,7 +661,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87224 return rc;
87225 drop:
87226 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87227 - atomic_inc(&sk->sk_drops);
87228 + atomic_inc_unchecked(&sk->sk_drops);
87229 kfree_skb(skb);
87230 return -1;
87231 }
87232 @@ -715,7 +719,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87233 if (likely(skb1 == NULL))
87234 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87235 if (!skb1) {
87236 - atomic_inc(&sk->sk_drops);
87237 + atomic_inc_unchecked(&sk->sk_drops);
87238 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87239 IS_UDPLITE(sk));
87240 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87241 @@ -852,6 +856,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87242 goto discard;
87243
87244 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87245 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87246 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87247 +#endif
87248 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
87249
87250 kfree_skb(skb);
87251 @@ -1377,7 +1384,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
87252 0,
87253 sock_i_ino(sp),
87254 atomic_read(&sp->sk_refcnt), sp,
87255 - atomic_read(&sp->sk_drops));
87256 + atomic_read_unchecked(&sp->sk_drops));
87257 }
87258
87259 int udp6_seq_show(struct seq_file *seq, void *v)
87260 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
87261 index 4ef7bdb..9e97017 100644
87262 --- a/net/ipv6/xfrm6_policy.c
87263 +++ b/net/ipv6/xfrm6_policy.c
87264 @@ -322,19 +322,19 @@ static struct ctl_table xfrm6_policy_table[] = {
87265
87266 static int __net_init xfrm6_net_init(struct net *net)
87267 {
87268 - struct ctl_table *table;
87269 + ctl_table_no_const *table = NULL;
87270 struct ctl_table_header *hdr;
87271
87272 - table = xfrm6_policy_table;
87273 if (!net_eq(net, &init_net)) {
87274 - table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
87275 + table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
87276 if (!table)
87277 goto err_alloc;
87278
87279 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
87280 - }
87281 + hdr = register_net_sysctl(net, "net/ipv6", table);
87282 + } else
87283 + hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
87284
87285 - hdr = register_net_sysctl(net, "net/ipv6", table);
87286 if (!hdr)
87287 goto err_reg;
87288
87289 @@ -342,8 +342,7 @@ static int __net_init xfrm6_net_init(struct net *net)
87290 return 0;
87291
87292 err_reg:
87293 - if (!net_eq(net, &init_net))
87294 - kfree(table);
87295 + kfree(table);
87296 err_alloc:
87297 return -ENOMEM;
87298 }
87299 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
87300 index 362ba47..66196f4 100644
87301 --- a/net/irda/ircomm/ircomm_tty.c
87302 +++ b/net/irda/ircomm/ircomm_tty.c
87303 @@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87304 add_wait_queue(&port->open_wait, &wait);
87305
87306 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
87307 - __FILE__, __LINE__, tty->driver->name, port->count);
87308 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87309
87310 spin_lock_irqsave(&port->lock, flags);
87311 if (!tty_hung_up_p(filp))
87312 - port->count--;
87313 + atomic_dec(&port->count);
87314 port->blocked_open++;
87315 spin_unlock_irqrestore(&port->lock, flags);
87316
87317 @@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87318 }
87319
87320 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
87321 - __FILE__, __LINE__, tty->driver->name, port->count);
87322 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87323
87324 schedule();
87325 }
87326 @@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87327
87328 spin_lock_irqsave(&port->lock, flags);
87329 if (!tty_hung_up_p(filp))
87330 - port->count++;
87331 + atomic_inc(&port->count);
87332 port->blocked_open--;
87333 spin_unlock_irqrestore(&port->lock, flags);
87334
87335 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
87336 - __FILE__, __LINE__, tty->driver->name, port->count);
87337 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87338
87339 if (!retval)
87340 port->flags |= ASYNC_NORMAL_ACTIVE;
87341 @@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
87342
87343 /* ++ is not atomic, so this should be protected - Jean II */
87344 spin_lock_irqsave(&self->port.lock, flags);
87345 - self->port.count++;
87346 + atomic_inc(&self->port.count);
87347 spin_unlock_irqrestore(&self->port.lock, flags);
87348 tty_port_tty_set(&self->port, tty);
87349
87350 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
87351 - self->line, self->port.count);
87352 + self->line, atomic_read(&self->port.count));
87353
87354 /* Not really used by us, but lets do it anyway */
87355 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
87356 @@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
87357 tty_kref_put(port->tty);
87358 }
87359 port->tty = NULL;
87360 - port->count = 0;
87361 + atomic_set(&port->count, 0);
87362 spin_unlock_irqrestore(&port->lock, flags);
87363
87364 wake_up_interruptible(&port->open_wait);
87365 @@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
87366 seq_putc(m, '\n');
87367
87368 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
87369 - seq_printf(m, "Open count: %d\n", self->port.count);
87370 + seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
87371 seq_printf(m, "Max data size: %d\n", self->max_data_size);
87372 seq_printf(m, "Max header size: %d\n", self->max_header_size);
87373
87374 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
87375 index 206ce6d..cfb27cd 100644
87376 --- a/net/iucv/af_iucv.c
87377 +++ b/net/iucv/af_iucv.c
87378 @@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
87379
87380 write_lock_bh(&iucv_sk_list.lock);
87381
87382 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
87383 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87384 while (__iucv_get_sock_by_name(name)) {
87385 sprintf(name, "%08x",
87386 - atomic_inc_return(&iucv_sk_list.autobind_name));
87387 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87388 }
87389
87390 write_unlock_bh(&iucv_sk_list.lock);
87391 diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
87392 index 4fe76ff..426a904 100644
87393 --- a/net/iucv/iucv.c
87394 +++ b/net/iucv/iucv.c
87395 @@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
87396 return NOTIFY_OK;
87397 }
87398
87399 -static struct notifier_block __refdata iucv_cpu_notifier = {
87400 +static struct notifier_block iucv_cpu_notifier = {
87401 .notifier_call = iucv_cpu_notify,
87402 };
87403
87404 diff --git a/net/key/af_key.c b/net/key/af_key.c
87405 index 5b1e5af..2358147 100644
87406 --- a/net/key/af_key.c
87407 +++ b/net/key/af_key.c
87408 @@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
87409 static u32 get_acqseq(void)
87410 {
87411 u32 res;
87412 - static atomic_t acqseq;
87413 + static atomic_unchecked_t acqseq;
87414
87415 do {
87416 - res = atomic_inc_return(&acqseq);
87417 + res = atomic_inc_return_unchecked(&acqseq);
87418 } while (!res);
87419 return res;
87420 }
87421 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
87422 index 843d8c4..cb04fa1 100644
87423 --- a/net/mac80211/cfg.c
87424 +++ b/net/mac80211/cfg.c
87425 @@ -799,7 +799,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
87426 ret = ieee80211_vif_use_channel(sdata, chandef,
87427 IEEE80211_CHANCTX_EXCLUSIVE);
87428 }
87429 - } else if (local->open_count == local->monitors) {
87430 + } else if (local_read(&local->open_count) == local->monitors) {
87431 local->_oper_channel = chandef->chan;
87432 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
87433 ieee80211_hw_config(local, 0);
87434 @@ -2834,7 +2834,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
87435 else
87436 local->probe_req_reg--;
87437
87438 - if (!local->open_count)
87439 + if (!local_read(&local->open_count))
87440 break;
87441
87442 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
87443 @@ -3297,8 +3297,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
87444 if (chanctx_conf) {
87445 *chandef = chanctx_conf->def;
87446 ret = 0;
87447 - } else if (local->open_count > 0 &&
87448 - local->open_count == local->monitors &&
87449 + } else if (local_read(&local->open_count) > 0 &&
87450 + local_read(&local->open_count) == local->monitors &&
87451 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
87452 if (local->use_chanctx)
87453 *chandef = local->monitor_chandef;
87454 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
87455 index 5672533..6738c93 100644
87456 --- a/net/mac80211/ieee80211_i.h
87457 +++ b/net/mac80211/ieee80211_i.h
87458 @@ -28,6 +28,7 @@
87459 #include <net/ieee80211_radiotap.h>
87460 #include <net/cfg80211.h>
87461 #include <net/mac80211.h>
87462 +#include <asm/local.h>
87463 #include "key.h"
87464 #include "sta_info.h"
87465 #include "debug.h"
87466 @@ -897,7 +898,7 @@ struct ieee80211_local {
87467 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
87468 spinlock_t queue_stop_reason_lock;
87469
87470 - int open_count;
87471 + local_t open_count;
87472 int monitors, cooked_mntrs;
87473 /* number of interfaces with corresponding FIF_ flags */
87474 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
87475 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
87476 index d51ca9d..042c35f 100644
87477 --- a/net/mac80211/iface.c
87478 +++ b/net/mac80211/iface.c
87479 @@ -495,7 +495,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87480 break;
87481 }
87482
87483 - if (local->open_count == 0) {
87484 + if (local_read(&local->open_count) == 0) {
87485 res = drv_start(local);
87486 if (res)
87487 goto err_del_bss;
87488 @@ -540,7 +540,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87489 break;
87490 }
87491
87492 - if (local->monitors == 0 && local->open_count == 0) {
87493 + if (local->monitors == 0 && local_read(&local->open_count) == 0) {
87494 res = ieee80211_add_virtual_monitor(local);
87495 if (res)
87496 goto err_stop;
87497 @@ -649,7 +649,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87498 atomic_inc(&local->iff_promiscs);
87499
87500 if (coming_up)
87501 - local->open_count++;
87502 + local_inc(&local->open_count);
87503
87504 if (hw_reconf_flags)
87505 ieee80211_hw_config(local, hw_reconf_flags);
87506 @@ -663,7 +663,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
87507 err_del_interface:
87508 drv_remove_interface(local, sdata);
87509 err_stop:
87510 - if (!local->open_count)
87511 + if (!local_read(&local->open_count))
87512 drv_stop(local);
87513 err_del_bss:
87514 sdata->bss = NULL;
87515 @@ -806,7 +806,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
87516 }
87517
87518 if (going_down)
87519 - local->open_count--;
87520 + local_dec(&local->open_count);
87521
87522 switch (sdata->vif.type) {
87523 case NL80211_IFTYPE_AP_VLAN:
87524 @@ -871,7 +871,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
87525
87526 ieee80211_recalc_ps(local, -1);
87527
87528 - if (local->open_count == 0) {
87529 + if (local_read(&local->open_count) == 0) {
87530 if (local->ops->napi_poll)
87531 napi_disable(&local->napi);
87532 ieee80211_clear_tx_pending(local);
87533 @@ -897,7 +897,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
87534 }
87535 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
87536
87537 - if (local->monitors == local->open_count && local->monitors > 0)
87538 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
87539 ieee80211_add_virtual_monitor(local);
87540 }
87541
87542 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
87543 index 1a8591b..ef5db54 100644
87544 --- a/net/mac80211/main.c
87545 +++ b/net/mac80211/main.c
87546 @@ -180,7 +180,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
87547 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
87548 IEEE80211_CONF_CHANGE_POWER);
87549
87550 - if (changed && local->open_count) {
87551 + if (changed && local_read(&local->open_count)) {
87552 ret = drv_config(local, changed);
87553 /*
87554 * Goal:
87555 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
87556 index 835584c..be46e67 100644
87557 --- a/net/mac80211/pm.c
87558 +++ b/net/mac80211/pm.c
87559 @@ -33,7 +33,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
87560 struct sta_info *sta;
87561 struct ieee80211_chanctx *ctx;
87562
87563 - if (!local->open_count)
87564 + if (!local_read(&local->open_count))
87565 goto suspend;
87566
87567 ieee80211_scan_cancel(local);
87568 @@ -75,7 +75,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
87569 cancel_work_sync(&local->dynamic_ps_enable_work);
87570 del_timer_sync(&local->dynamic_ps_timer);
87571
87572 - local->wowlan = wowlan && local->open_count;
87573 + local->wowlan = wowlan && local_read(&local->open_count);
87574 if (local->wowlan) {
87575 int err = drv_suspend(local, wowlan);
87576 if (err < 0) {
87577 @@ -214,7 +214,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
87578 mutex_unlock(&local->chanctx_mtx);
87579
87580 /* stop hardware - this must stop RX */
87581 - if (local->open_count)
87582 + if (local_read(&local->open_count))
87583 ieee80211_stop_device(local);
87584
87585 suspend:
87586 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
87587 index dd88381..eef4dd6 100644
87588 --- a/net/mac80211/rate.c
87589 +++ b/net/mac80211/rate.c
87590 @@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
87591
87592 ASSERT_RTNL();
87593
87594 - if (local->open_count)
87595 + if (local_read(&local->open_count))
87596 return -EBUSY;
87597
87598 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
87599 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
87600 index c97a065..ff61928 100644
87601 --- a/net/mac80211/rc80211_pid_debugfs.c
87602 +++ b/net/mac80211/rc80211_pid_debugfs.c
87603 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
87604
87605 spin_unlock_irqrestore(&events->lock, status);
87606
87607 - if (copy_to_user(buf, pb, p))
87608 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
87609 return -EFAULT;
87610
87611 return p;
87612 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
87613 index 0f38f43..e53d4a8 100644
87614 --- a/net/mac80211/util.c
87615 +++ b/net/mac80211/util.c
87616 @@ -1388,7 +1388,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
87617 }
87618 #endif
87619 /* everything else happens only if HW was up & running */
87620 - if (!local->open_count)
87621 + if (!local_read(&local->open_count))
87622 goto wake_up;
87623
87624 /*
87625 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
87626 index 56d22ca..87c778f 100644
87627 --- a/net/netfilter/Kconfig
87628 +++ b/net/netfilter/Kconfig
87629 @@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
87630
87631 To compile it as a module, choose M here. If unsure, say N.
87632
87633 +config NETFILTER_XT_MATCH_GRADM
87634 + tristate '"gradm" match support'
87635 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
87636 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
87637 + ---help---
87638 + The gradm match allows to match on grsecurity RBAC being enabled.
87639 + It is useful when iptables rules are applied early on bootup to
87640 + prevent connections to the machine (except from a trusted host)
87641 + while the RBAC system is disabled.
87642 +
87643 config NETFILTER_XT_MATCH_HASHLIMIT
87644 tristate '"hashlimit" match support'
87645 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
87646 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
87647 index a1abf87..dbcb7ee 100644
87648 --- a/net/netfilter/Makefile
87649 +++ b/net/netfilter/Makefile
87650 @@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
87651 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
87652 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
87653 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
87654 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
87655 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
87656 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
87657 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
87658 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
87659 index 1ba9dbc..e39f4ca 100644
87660 --- a/net/netfilter/ipset/ip_set_core.c
87661 +++ b/net/netfilter/ipset/ip_set_core.c
87662 @@ -1801,7 +1801,7 @@ done:
87663 return ret;
87664 }
87665
87666 -static struct nf_sockopt_ops so_set __read_mostly = {
87667 +static struct nf_sockopt_ops so_set = {
87668 .pf = PF_INET,
87669 .get_optmin = SO_IP_SET,
87670 .get_optmax = SO_IP_SET + 1,
87671 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
87672 index 704e514..d644cc2 100644
87673 --- a/net/netfilter/ipvs/ip_vs_conn.c
87674 +++ b/net/netfilter/ipvs/ip_vs_conn.c
87675 @@ -551,7 +551,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
87676 /* Increase the refcnt counter of the dest */
87677 atomic_inc(&dest->refcnt);
87678
87679 - conn_flags = atomic_read(&dest->conn_flags);
87680 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
87681 if (cp->protocol != IPPROTO_UDP)
87682 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
87683 flags = cp->flags;
87684 @@ -895,7 +895,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
87685 atomic_set(&cp->refcnt, 1);
87686
87687 atomic_set(&cp->n_control, 0);
87688 - atomic_set(&cp->in_pkts, 0);
87689 + atomic_set_unchecked(&cp->in_pkts, 0);
87690
87691 atomic_inc(&ipvs->conn_count);
87692 if (flags & IP_VS_CONN_F_NO_CPORT)
87693 @@ -1174,7 +1174,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
87694
87695 /* Don't drop the entry if its number of incoming packets is not
87696 located in [0, 8] */
87697 - i = atomic_read(&cp->in_pkts);
87698 + i = atomic_read_unchecked(&cp->in_pkts);
87699 if (i > 8 || i < 0) return 0;
87700
87701 if (!todrop_rate[i]) return 0;
87702 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
87703 index 61f49d2..6c8c5bc 100644
87704 --- a/net/netfilter/ipvs/ip_vs_core.c
87705 +++ b/net/netfilter/ipvs/ip_vs_core.c
87706 @@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
87707 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
87708 /* do not touch skb anymore */
87709
87710 - atomic_inc(&cp->in_pkts);
87711 + atomic_inc_unchecked(&cp->in_pkts);
87712 ip_vs_conn_put(cp);
87713 return ret;
87714 }
87715 @@ -1689,7 +1689,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
87716 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
87717 pkts = sysctl_sync_threshold(ipvs);
87718 else
87719 - pkts = atomic_add_return(1, &cp->in_pkts);
87720 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
87721
87722 if (ipvs->sync_state & IP_VS_STATE_MASTER)
87723 ip_vs_sync_conn(net, cp, pkts);
87724 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
87725 index 9e2d1cc..7f8f569 100644
87726 --- a/net/netfilter/ipvs/ip_vs_ctl.c
87727 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
87728 @@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
87729 ip_vs_rs_hash(ipvs, dest);
87730 write_unlock_bh(&ipvs->rs_lock);
87731 }
87732 - atomic_set(&dest->conn_flags, conn_flags);
87733 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
87734
87735 /* bind the service */
87736 if (!dest->svc) {
87737 @@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
87738 * align with netns init in ip_vs_control_net_init()
87739 */
87740
87741 -static struct ctl_table vs_vars[] = {
87742 +static ctl_table_no_const vs_vars[] __read_only = {
87743 {
87744 .procname = "amemthresh",
87745 .maxlen = sizeof(int),
87746 @@ -2087,7 +2087,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
87747 " %-7s %-6d %-10d %-10d\n",
87748 &dest->addr.in6,
87749 ntohs(dest->port),
87750 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
87751 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
87752 atomic_read(&dest->weight),
87753 atomic_read(&dest->activeconns),
87754 atomic_read(&dest->inactconns));
87755 @@ -2098,7 +2098,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
87756 "%-7s %-6d %-10d %-10d\n",
87757 ntohl(dest->addr.ip),
87758 ntohs(dest->port),
87759 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
87760 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
87761 atomic_read(&dest->weight),
87762 atomic_read(&dest->activeconns),
87763 atomic_read(&dest->inactconns));
87764 @@ -2568,7 +2568,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
87765
87766 entry.addr = dest->addr.ip;
87767 entry.port = dest->port;
87768 - entry.conn_flags = atomic_read(&dest->conn_flags);
87769 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
87770 entry.weight = atomic_read(&dest->weight);
87771 entry.u_threshold = dest->u_threshold;
87772 entry.l_threshold = dest->l_threshold;
87773 @@ -3104,7 +3104,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
87774 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
87775 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
87776 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
87777 - (atomic_read(&dest->conn_flags) &
87778 + (atomic_read_unchecked(&dest->conn_flags) &
87779 IP_VS_CONN_F_FWD_MASK)) ||
87780 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
87781 atomic_read(&dest->weight)) ||
87782 @@ -3694,7 +3694,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
87783 {
87784 int idx;
87785 struct netns_ipvs *ipvs = net_ipvs(net);
87786 - struct ctl_table *tbl;
87787 + ctl_table_no_const *tbl;
87788
87789 atomic_set(&ipvs->dropentry, 0);
87790 spin_lock_init(&ipvs->dropentry_lock);
87791 diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
87792 index fdd89b9..bd96aa9 100644
87793 --- a/net/netfilter/ipvs/ip_vs_lblc.c
87794 +++ b/net/netfilter/ipvs/ip_vs_lblc.c
87795 @@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
87796 * IPVS LBLC sysctl table
87797 */
87798 #ifdef CONFIG_SYSCTL
87799 -static ctl_table vs_vars_table[] = {
87800 +static ctl_table_no_const vs_vars_table[] __read_only = {
87801 {
87802 .procname = "lblc_expiration",
87803 .data = NULL,
87804 diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
87805 index c03b6a3..8ce3681 100644
87806 --- a/net/netfilter/ipvs/ip_vs_lblcr.c
87807 +++ b/net/netfilter/ipvs/ip_vs_lblcr.c
87808 @@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
87809 * IPVS LBLCR sysctl table
87810 */
87811
87812 -static ctl_table vs_vars_table[] = {
87813 +static ctl_table_no_const vs_vars_table[] __read_only = {
87814 {
87815 .procname = "lblcr_expiration",
87816 .data = NULL,
87817 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
87818 index 44fd10c..2a163b3 100644
87819 --- a/net/netfilter/ipvs/ip_vs_sync.c
87820 +++ b/net/netfilter/ipvs/ip_vs_sync.c
87821 @@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
87822 cp = cp->control;
87823 if (cp) {
87824 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
87825 - pkts = atomic_add_return(1, &cp->in_pkts);
87826 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
87827 else
87828 pkts = sysctl_sync_threshold(ipvs);
87829 ip_vs_sync_conn(net, cp->control, pkts);
87830 @@ -758,7 +758,7 @@ control:
87831 if (!cp)
87832 return;
87833 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
87834 - pkts = atomic_add_return(1, &cp->in_pkts);
87835 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
87836 else
87837 pkts = sysctl_sync_threshold(ipvs);
87838 goto sloop;
87839 @@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
87840
87841 if (opt)
87842 memcpy(&cp->in_seq, opt, sizeof(*opt));
87843 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
87844 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
87845 cp->state = state;
87846 cp->old_state = cp->state;
87847 /*
87848 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
87849 index ee6b7a9..f9a89f6 100644
87850 --- a/net/netfilter/ipvs/ip_vs_xmit.c
87851 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
87852 @@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
87853 else
87854 rc = NF_ACCEPT;
87855 /* do not touch skb anymore */
87856 - atomic_inc(&cp->in_pkts);
87857 + atomic_inc_unchecked(&cp->in_pkts);
87858 goto out;
87859 }
87860
87861 @@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
87862 else
87863 rc = NF_ACCEPT;
87864 /* do not touch skb anymore */
87865 - atomic_inc(&cp->in_pkts);
87866 + atomic_inc_unchecked(&cp->in_pkts);
87867 goto out;
87868 }
87869
87870 diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
87871 index 2d3030a..7ba1c0a 100644
87872 --- a/net/netfilter/nf_conntrack_acct.c
87873 +++ b/net/netfilter/nf_conntrack_acct.c
87874 @@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
87875 #ifdef CONFIG_SYSCTL
87876 static int nf_conntrack_acct_init_sysctl(struct net *net)
87877 {
87878 - struct ctl_table *table;
87879 + ctl_table_no_const *table;
87880
87881 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
87882 GFP_KERNEL);
87883 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
87884 index c8e001a..f842a8b 100644
87885 --- a/net/netfilter/nf_conntrack_core.c
87886 +++ b/net/netfilter/nf_conntrack_core.c
87887 @@ -1594,6 +1594,10 @@ void nf_conntrack_init_end(void)
87888 #define DYING_NULLS_VAL ((1<<30)+1)
87889 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
87890
87891 +#ifdef CONFIG_GRKERNSEC_HIDESYM
87892 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
87893 +#endif
87894 +
87895 int nf_conntrack_init_net(struct net *net)
87896 {
87897 int ret;
87898 @@ -1608,7 +1612,11 @@ int nf_conntrack_init_net(struct net *net)
87899 goto err_stat;
87900 }
87901
87902 +#ifdef CONFIG_GRKERNSEC_HIDESYM
87903 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
87904 +#else
87905 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
87906 +#endif
87907 if (!net->ct.slabname) {
87908 ret = -ENOMEM;
87909 goto err_slabname;
87910 diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
87911 index b5d2eb8..61ef19a 100644
87912 --- a/net/netfilter/nf_conntrack_ecache.c
87913 +++ b/net/netfilter/nf_conntrack_ecache.c
87914 @@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
87915 #ifdef CONFIG_SYSCTL
87916 static int nf_conntrack_event_init_sysctl(struct net *net)
87917 {
87918 - struct ctl_table *table;
87919 + ctl_table_no_const *table;
87920
87921 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
87922 GFP_KERNEL);
87923 diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
87924 index 94b4b98..97cf0ad 100644
87925 --- a/net/netfilter/nf_conntrack_helper.c
87926 +++ b/net/netfilter/nf_conntrack_helper.c
87927 @@ -56,7 +56,7 @@ static struct ctl_table helper_sysctl_table[] = {
87928
87929 static int nf_conntrack_helper_init_sysctl(struct net *net)
87930 {
87931 - struct ctl_table *table;
87932 + ctl_table_no_const *table;
87933
87934 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
87935 GFP_KERNEL);
87936 diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
87937 index 58ab405..50eb8d3 100644
87938 --- a/net/netfilter/nf_conntrack_proto.c
87939 +++ b/net/netfilter/nf_conntrack_proto.c
87940 @@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
87941
87942 static void
87943 nf_ct_unregister_sysctl(struct ctl_table_header **header,
87944 - struct ctl_table **table,
87945 + ctl_table_no_const **table,
87946 unsigned int users)
87947 {
87948 if (users > 0)
87949 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
87950 index fedee39..d62a93d 100644
87951 --- a/net/netfilter/nf_conntrack_standalone.c
87952 +++ b/net/netfilter/nf_conntrack_standalone.c
87953 @@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
87954
87955 static int nf_conntrack_standalone_init_sysctl(struct net *net)
87956 {
87957 - struct ctl_table *table;
87958 + ctl_table_no_const *table;
87959
87960 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
87961 GFP_KERNEL);
87962 diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
87963 index 902fb0a..87f7fdb 100644
87964 --- a/net/netfilter/nf_conntrack_timestamp.c
87965 +++ b/net/netfilter/nf_conntrack_timestamp.c
87966 @@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
87967 #ifdef CONFIG_SYSCTL
87968 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
87969 {
87970 - struct ctl_table *table;
87971 + ctl_table_no_const *table;
87972
87973 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
87974 GFP_KERNEL);
87975 diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
87976 index 9e31269..bc4c1b7 100644
87977 --- a/net/netfilter/nf_log.c
87978 +++ b/net/netfilter/nf_log.c
87979 @@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
87980
87981 #ifdef CONFIG_SYSCTL
87982 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
87983 -static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
87984 +static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
87985 static struct ctl_table_header *nf_log_dir_header;
87986
87987 static int nf_log_proc_dostring(ctl_table *table, int write,
87988 @@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
87989 rcu_assign_pointer(nf_loggers[tindex], logger);
87990 mutex_unlock(&nf_log_mutex);
87991 } else {
87992 + ctl_table_no_const nf_log_table = *table;
87993 +
87994 mutex_lock(&nf_log_mutex);
87995 logger = rcu_dereference_protected(nf_loggers[tindex],
87996 lockdep_is_held(&nf_log_mutex));
87997 if (!logger)
87998 - table->data = "NONE";
87999 + nf_log_table.data = "NONE";
88000 else
88001 - table->data = logger->name;
88002 - r = proc_dostring(table, write, buffer, lenp, ppos);
88003 + nf_log_table.data = logger->name;
88004 + r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
88005 mutex_unlock(&nf_log_mutex);
88006 }
88007
88008 diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
88009 index f042ae5..30ea486 100644
88010 --- a/net/netfilter/nf_sockopt.c
88011 +++ b/net/netfilter/nf_sockopt.c
88012 @@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
88013 }
88014 }
88015
88016 - list_add(&reg->list, &nf_sockopts);
88017 + pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
88018 out:
88019 mutex_unlock(&nf_sockopt_mutex);
88020 return ret;
88021 @@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
88022 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
88023 {
88024 mutex_lock(&nf_sockopt_mutex);
88025 - list_del(&reg->list);
88026 + pax_list_del((struct list_head *)&reg->list);
88027 mutex_unlock(&nf_sockopt_mutex);
88028 }
88029 EXPORT_SYMBOL(nf_unregister_sockopt);
88030 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
88031 index f248db5..3778ad9 100644
88032 --- a/net/netfilter/nfnetlink_log.c
88033 +++ b/net/netfilter/nfnetlink_log.c
88034 @@ -72,7 +72,7 @@ struct nfulnl_instance {
88035 };
88036
88037 static DEFINE_SPINLOCK(instances_lock);
88038 -static atomic_t global_seq;
88039 +static atomic_unchecked_t global_seq;
88040
88041 #define INSTANCE_BUCKETS 16
88042 static struct hlist_head instance_table[INSTANCE_BUCKETS];
88043 @@ -536,7 +536,7 @@ __build_packet_message(struct nfulnl_instance *inst,
88044 /* global sequence number */
88045 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
88046 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
88047 - htonl(atomic_inc_return(&global_seq))))
88048 + htonl(atomic_inc_return_unchecked(&global_seq))))
88049 goto nla_put_failure;
88050
88051 if (data_len) {
88052 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
88053 new file mode 100644
88054 index 0000000..c566332
88055 --- /dev/null
88056 +++ b/net/netfilter/xt_gradm.c
88057 @@ -0,0 +1,51 @@
88058 +/*
88059 + * gradm match for netfilter
88060 + * Copyright © Zbigniew Krzystolik, 2010
88061 + *
88062 + * This program is free software; you can redistribute it and/or modify
88063 + * it under the terms of the GNU General Public License; either version
88064 + * 2 or 3 as published by the Free Software Foundation.
88065 + */
88066 +#include <linux/module.h>
88067 +#include <linux/moduleparam.h>
88068 +#include <linux/skbuff.h>
88069 +#include <linux/netfilter/x_tables.h>
88070 +#include <linux/grsecurity.h>
88071 +#include <linux/netfilter/xt_gradm.h>
88072 +
88073 +static bool
88074 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
88075 +{
88076 + const struct xt_gradm_mtinfo *info = par->matchinfo;
88077 + bool retval = false;
88078 + if (gr_acl_is_enabled())
88079 + retval = true;
88080 + return retval ^ info->invflags;
88081 +}
88082 +
88083 +static struct xt_match gradm_mt_reg __read_mostly = {
88084 + .name = "gradm",
88085 + .revision = 0,
88086 + .family = NFPROTO_UNSPEC,
88087 + .match = gradm_mt,
88088 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
88089 + .me = THIS_MODULE,
88090 +};
88091 +
88092 +static int __init gradm_mt_init(void)
88093 +{
88094 + return xt_register_match(&gradm_mt_reg);
88095 +}
88096 +
88097 +static void __exit gradm_mt_exit(void)
88098 +{
88099 + xt_unregister_match(&gradm_mt_reg);
88100 +}
88101 +
88102 +module_init(gradm_mt_init);
88103 +module_exit(gradm_mt_exit);
88104 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
88105 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
88106 +MODULE_LICENSE("GPL");
88107 +MODULE_ALIAS("ipt_gradm");
88108 +MODULE_ALIAS("ip6t_gradm");
88109 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
88110 index 4fe4fb4..87a89e5 100644
88111 --- a/net/netfilter/xt_statistic.c
88112 +++ b/net/netfilter/xt_statistic.c
88113 @@ -19,7 +19,7 @@
88114 #include <linux/module.h>
88115
88116 struct xt_statistic_priv {
88117 - atomic_t count;
88118 + atomic_unchecked_t count;
88119 } ____cacheline_aligned_in_smp;
88120
88121 MODULE_LICENSE("GPL");
88122 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
88123 break;
88124 case XT_STATISTIC_MODE_NTH:
88125 do {
88126 - oval = atomic_read(&info->master->count);
88127 + oval = atomic_read_unchecked(&info->master->count);
88128 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
88129 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
88130 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
88131 if (nval == 0)
88132 ret = !ret;
88133 break;
88134 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
88135 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
88136 if (info->master == NULL)
88137 return -ENOMEM;
88138 - atomic_set(&info->master->count, info->u.nth.count);
88139 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
88140
88141 return 0;
88142 }
88143 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
88144 index 1e3fd5b..ad397ea 100644
88145 --- a/net/netlink/af_netlink.c
88146 +++ b/net/netlink/af_netlink.c
88147 @@ -781,7 +781,7 @@ static void netlink_overrun(struct sock *sk)
88148 sk->sk_error_report(sk);
88149 }
88150 }
88151 - atomic_inc(&sk->sk_drops);
88152 + atomic_inc_unchecked(&sk->sk_drops);
88153 }
88154
88155 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
88156 @@ -2063,7 +2063,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
88157 sk_wmem_alloc_get(s),
88158 nlk->cb,
88159 atomic_read(&s->sk_refcnt),
88160 - atomic_read(&s->sk_drops),
88161 + atomic_read_unchecked(&s->sk_drops),
88162 sock_i_ino(s)
88163 );
88164
88165 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
88166 index 5a55be3..7630745 100644
88167 --- a/net/netlink/genetlink.c
88168 +++ b/net/netlink/genetlink.c
88169 @@ -296,18 +296,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
88170 goto errout;
88171 }
88172
88173 + pax_open_kernel();
88174 if (ops->dumpit)
88175 - ops->flags |= GENL_CMD_CAP_DUMP;
88176 + *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DUMP;
88177 if (ops->doit)
88178 - ops->flags |= GENL_CMD_CAP_DO;
88179 + *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DO;
88180 if (ops->policy)
88181 - ops->flags |= GENL_CMD_CAP_HASPOL;
88182 + *(unsigned int *)&ops->flags |= GENL_CMD_CAP_HASPOL;
88183 + pax_close_kernel();
88184
88185 genl_lock();
88186 - list_add_tail(&ops->ops_list, &family->ops_list);
88187 + pax_list_add_tail((struct list_head *)&ops->ops_list, &family->ops_list);
88188 genl_unlock();
88189
88190 - genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
88191 + genl_ctrl_event(CTRL_CMD_NEWOPS, (void *)ops);
88192 err = 0;
88193 errout:
88194 return err;
88195 @@ -337,9 +339,9 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
88196 genl_lock();
88197 list_for_each_entry(rc, &family->ops_list, ops_list) {
88198 if (rc == ops) {
88199 - list_del(&ops->ops_list);
88200 + pax_list_del((struct list_head *)&ops->ops_list);
88201 genl_unlock();
88202 - genl_ctrl_event(CTRL_CMD_DELOPS, ops);
88203 + genl_ctrl_event(CTRL_CMD_DELOPS, (void *)ops);
88204 return 0;
88205 }
88206 }
88207 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
88208 index 103bd70..f21aad3 100644
88209 --- a/net/netrom/af_netrom.c
88210 +++ b/net/netrom/af_netrom.c
88211 @@ -834,6 +834,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
88212 struct sock *sk = sock->sk;
88213 struct nr_sock *nr = nr_sk(sk);
88214
88215 + memset(sax, 0, sizeof(*sax));
88216 lock_sock(sk);
88217 if (peer != 0) {
88218 if (sk->sk_state != TCP_ESTABLISHED) {
88219 @@ -848,7 +849,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
88220 *uaddr_len = sizeof(struct full_sockaddr_ax25);
88221 } else {
88222 sax->fsa_ax25.sax25_family = AF_NETROM;
88223 - sax->fsa_ax25.sax25_ndigis = 0;
88224 sax->fsa_ax25.sax25_call = nr->source_addr;
88225 *uaddr_len = sizeof(struct sockaddr_ax25);
88226 }
88227 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
88228 index 1d6793d..056b191 100644
88229 --- a/net/packet/af_packet.c
88230 +++ b/net/packet/af_packet.c
88231 @@ -1578,7 +1578,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
88232
88233 spin_lock(&sk->sk_receive_queue.lock);
88234 po->stats.tp_packets++;
88235 - skb->dropcount = atomic_read(&sk->sk_drops);
88236 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
88237 __skb_queue_tail(&sk->sk_receive_queue, skb);
88238 spin_unlock(&sk->sk_receive_queue.lock);
88239 sk->sk_data_ready(sk, skb->len);
88240 @@ -1587,7 +1587,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
88241 drop_n_acct:
88242 spin_lock(&sk->sk_receive_queue.lock);
88243 po->stats.tp_drops++;
88244 - atomic_inc(&sk->sk_drops);
88245 + atomic_inc_unchecked(&sk->sk_drops);
88246 spin_unlock(&sk->sk_receive_queue.lock);
88247
88248 drop_n_restore:
88249 @@ -2565,6 +2565,7 @@ out:
88250
88251 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
88252 {
88253 + struct sock_extended_err ee;
88254 struct sock_exterr_skb *serr;
88255 struct sk_buff *skb, *skb2;
88256 int copied, err;
88257 @@ -2586,8 +2587,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
88258 sock_recv_timestamp(msg, sk, skb);
88259
88260 serr = SKB_EXT_ERR(skb);
88261 + ee = serr->ee;
88262 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
88263 - sizeof(serr->ee), &serr->ee);
88264 + sizeof ee, &ee);
88265
88266 msg->msg_flags |= MSG_ERRQUEUE;
88267 err = copied;
88268 @@ -3212,7 +3214,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
88269 case PACKET_HDRLEN:
88270 if (len > sizeof(int))
88271 len = sizeof(int);
88272 - if (copy_from_user(&val, optval, len))
88273 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
88274 return -EFAULT;
88275 switch (val) {
88276 case TPACKET_V1:
88277 @@ -3254,7 +3256,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
88278 len = lv;
88279 if (put_user(len, optlen))
88280 return -EFAULT;
88281 - if (copy_to_user(optval, data, len))
88282 + if (len > sizeof(st) || copy_to_user(optval, data, len))
88283 return -EFAULT;
88284 return 0;
88285 }
88286 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
88287 index 5a940db..d6a502d 100644
88288 --- a/net/phonet/af_phonet.c
88289 +++ b/net/phonet/af_phonet.c
88290 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
88291 {
88292 int err = 0;
88293
88294 - if (protocol >= PHONET_NPROTO)
88295 + if (protocol < 0 || protocol >= PHONET_NPROTO)
88296 return -EINVAL;
88297
88298 err = proto_register(pp->prot, 1);
88299 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
88300 index e774117..900b8b7 100644
88301 --- a/net/phonet/pep.c
88302 +++ b/net/phonet/pep.c
88303 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
88304
88305 case PNS_PEP_CTRL_REQ:
88306 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
88307 - atomic_inc(&sk->sk_drops);
88308 + atomic_inc_unchecked(&sk->sk_drops);
88309 break;
88310 }
88311 __skb_pull(skb, 4);
88312 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
88313 }
88314
88315 if (pn->rx_credits == 0) {
88316 - atomic_inc(&sk->sk_drops);
88317 + atomic_inc_unchecked(&sk->sk_drops);
88318 err = -ENOBUFS;
88319 break;
88320 }
88321 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
88322 }
88323
88324 if (pn->rx_credits == 0) {
88325 - atomic_inc(&sk->sk_drops);
88326 + atomic_inc_unchecked(&sk->sk_drops);
88327 err = NET_RX_DROP;
88328 break;
88329 }
88330 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
88331 index 1afd138..0b42453 100644
88332 --- a/net/phonet/socket.c
88333 +++ b/net/phonet/socket.c
88334 @@ -612,7 +612,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
88335 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
88336 sock_i_ino(sk),
88337 atomic_read(&sk->sk_refcnt), sk,
88338 - atomic_read(&sk->sk_drops), &len);
88339 + atomic_read_unchecked(&sk->sk_drops), &len);
88340 }
88341 seq_printf(seq, "%*s\n", 127 - len, "");
88342 return 0;
88343 diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
88344 index d6bbbbd..61561e4 100644
88345 --- a/net/phonet/sysctl.c
88346 +++ b/net/phonet/sysctl.c
88347 @@ -67,7 +67,7 @@ static int proc_local_port_range(ctl_table *table, int write,
88348 {
88349 int ret;
88350 int range[2] = {local_port_range[0], local_port_range[1]};
88351 - ctl_table tmp = {
88352 + ctl_table_no_const tmp = {
88353 .data = &range,
88354 .maxlen = sizeof(range),
88355 .mode = table->mode,
88356 diff --git a/net/rds/cong.c b/net/rds/cong.c
88357 index e5b65ac..f3b6fb7 100644
88358 --- a/net/rds/cong.c
88359 +++ b/net/rds/cong.c
88360 @@ -78,7 +78,7 @@
88361 * finds that the saved generation number is smaller than the global generation
88362 * number, it wakes up the process.
88363 */
88364 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
88365 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
88366
88367 /*
88368 * Congestion monitoring
88369 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
88370 rdsdebug("waking map %p for %pI4\n",
88371 map, &map->m_addr);
88372 rds_stats_inc(s_cong_update_received);
88373 - atomic_inc(&rds_cong_generation);
88374 + atomic_inc_unchecked(&rds_cong_generation);
88375 if (waitqueue_active(&map->m_waitq))
88376 wake_up(&map->m_waitq);
88377 if (waitqueue_active(&rds_poll_waitq))
88378 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
88379
88380 int rds_cong_updated_since(unsigned long *recent)
88381 {
88382 - unsigned long gen = atomic_read(&rds_cong_generation);
88383 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
88384
88385 if (likely(*recent == gen))
88386 return 0;
88387 diff --git a/net/rds/ib.h b/net/rds/ib.h
88388 index 7280ab8..e04f4ea 100644
88389 --- a/net/rds/ib.h
88390 +++ b/net/rds/ib.h
88391 @@ -128,7 +128,7 @@ struct rds_ib_connection {
88392 /* sending acks */
88393 unsigned long i_ack_flags;
88394 #ifdef KERNEL_HAS_ATOMIC64
88395 - atomic64_t i_ack_next; /* next ACK to send */
88396 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
88397 #else
88398 spinlock_t i_ack_lock; /* protect i_ack_next */
88399 u64 i_ack_next; /* next ACK to send */
88400 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
88401 index 31b74f5..dc1fbfa 100644
88402 --- a/net/rds/ib_cm.c
88403 +++ b/net/rds/ib_cm.c
88404 @@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
88405 /* Clear the ACK state */
88406 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
88407 #ifdef KERNEL_HAS_ATOMIC64
88408 - atomic64_set(&ic->i_ack_next, 0);
88409 + atomic64_set_unchecked(&ic->i_ack_next, 0);
88410 #else
88411 ic->i_ack_next = 0;
88412 #endif
88413 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
88414 index 8eb9501..0c386ff 100644
88415 --- a/net/rds/ib_recv.c
88416 +++ b/net/rds/ib_recv.c
88417 @@ -597,7 +597,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
88418 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
88419 int ack_required)
88420 {
88421 - atomic64_set(&ic->i_ack_next, seq);
88422 + atomic64_set_unchecked(&ic->i_ack_next, seq);
88423 if (ack_required) {
88424 smp_mb__before_clear_bit();
88425 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
88426 @@ -609,7 +609,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
88427 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
88428 smp_mb__after_clear_bit();
88429
88430 - return atomic64_read(&ic->i_ack_next);
88431 + return atomic64_read_unchecked(&ic->i_ack_next);
88432 }
88433 #endif
88434
88435 diff --git a/net/rds/iw.h b/net/rds/iw.h
88436 index 04ce3b1..48119a6 100644
88437 --- a/net/rds/iw.h
88438 +++ b/net/rds/iw.h
88439 @@ -134,7 +134,7 @@ struct rds_iw_connection {
88440 /* sending acks */
88441 unsigned long i_ack_flags;
88442 #ifdef KERNEL_HAS_ATOMIC64
88443 - atomic64_t i_ack_next; /* next ACK to send */
88444 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
88445 #else
88446 spinlock_t i_ack_lock; /* protect i_ack_next */
88447 u64 i_ack_next; /* next ACK to send */
88448 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
88449 index a91e1db..cf3053f 100644
88450 --- a/net/rds/iw_cm.c
88451 +++ b/net/rds/iw_cm.c
88452 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
88453 /* Clear the ACK state */
88454 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
88455 #ifdef KERNEL_HAS_ATOMIC64
88456 - atomic64_set(&ic->i_ack_next, 0);
88457 + atomic64_set_unchecked(&ic->i_ack_next, 0);
88458 #else
88459 ic->i_ack_next = 0;
88460 #endif
88461 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
88462 index 4503335..db566b4 100644
88463 --- a/net/rds/iw_recv.c
88464 +++ b/net/rds/iw_recv.c
88465 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
88466 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
88467 int ack_required)
88468 {
88469 - atomic64_set(&ic->i_ack_next, seq);
88470 + atomic64_set_unchecked(&ic->i_ack_next, seq);
88471 if (ack_required) {
88472 smp_mb__before_clear_bit();
88473 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
88474 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
88475 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
88476 smp_mb__after_clear_bit();
88477
88478 - return atomic64_read(&ic->i_ack_next);
88479 + return atomic64_read_unchecked(&ic->i_ack_next);
88480 }
88481 #endif
88482
88483 diff --git a/net/rds/rds.h b/net/rds/rds.h
88484 index ec1d731..90a3a8d 100644
88485 --- a/net/rds/rds.h
88486 +++ b/net/rds/rds.h
88487 @@ -449,7 +449,7 @@ struct rds_transport {
88488 void (*sync_mr)(void *trans_private, int direction);
88489 void (*free_mr)(void *trans_private, int invalidate);
88490 void (*flush_mrs)(void);
88491 -};
88492 +} __do_const;
88493
88494 struct rds_sock {
88495 struct sock rs_sk;
88496 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
88497 index edac9ef..16bcb98 100644
88498 --- a/net/rds/tcp.c
88499 +++ b/net/rds/tcp.c
88500 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
88501 int val = 1;
88502
88503 set_fs(KERNEL_DS);
88504 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
88505 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
88506 sizeof(val));
88507 set_fs(oldfs);
88508 }
88509 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
88510 index 81cf5a4..b5826ff 100644
88511 --- a/net/rds/tcp_send.c
88512 +++ b/net/rds/tcp_send.c
88513 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
88514
88515 oldfs = get_fs();
88516 set_fs(KERNEL_DS);
88517 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
88518 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
88519 sizeof(val));
88520 set_fs(oldfs);
88521 }
88522 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
88523 index e61aa60..f07cc89 100644
88524 --- a/net/rxrpc/af_rxrpc.c
88525 +++ b/net/rxrpc/af_rxrpc.c
88526 @@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
88527 __be32 rxrpc_epoch;
88528
88529 /* current debugging ID */
88530 -atomic_t rxrpc_debug_id;
88531 +atomic_unchecked_t rxrpc_debug_id;
88532
88533 /* count of skbs currently in use */
88534 atomic_t rxrpc_n_skbs;
88535 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
88536 index e4d9cbc..b229649 100644
88537 --- a/net/rxrpc/ar-ack.c
88538 +++ b/net/rxrpc/ar-ack.c
88539 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
88540
88541 _enter("{%d,%d,%d,%d},",
88542 call->acks_hard, call->acks_unacked,
88543 - atomic_read(&call->sequence),
88544 + atomic_read_unchecked(&call->sequence),
88545 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
88546
88547 stop = 0;
88548 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
88549
88550 /* each Tx packet has a new serial number */
88551 sp->hdr.serial =
88552 - htonl(atomic_inc_return(&call->conn->serial));
88553 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
88554
88555 hdr = (struct rxrpc_header *) txb->head;
88556 hdr->serial = sp->hdr.serial;
88557 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
88558 */
88559 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
88560 {
88561 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
88562 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
88563 }
88564
88565 /*
88566 @@ -629,7 +629,7 @@ process_further:
88567
88568 latest = ntohl(sp->hdr.serial);
88569 hard = ntohl(ack.firstPacket);
88570 - tx = atomic_read(&call->sequence);
88571 + tx = atomic_read_unchecked(&call->sequence);
88572
88573 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
88574 latest,
88575 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
88576 goto maybe_reschedule;
88577
88578 send_ACK_with_skew:
88579 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
88580 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
88581 ntohl(ack.serial));
88582 send_ACK:
88583 mtu = call->conn->trans->peer->if_mtu;
88584 @@ -1173,7 +1173,7 @@ send_ACK:
88585 ackinfo.rxMTU = htonl(5692);
88586 ackinfo.jumbo_max = htonl(4);
88587
88588 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
88589 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
88590 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
88591 ntohl(hdr.serial),
88592 ntohs(ack.maxSkew),
88593 @@ -1191,7 +1191,7 @@ send_ACK:
88594 send_message:
88595 _debug("send message");
88596
88597 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
88598 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
88599 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
88600 send_message_2:
88601
88602 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
88603 index a3bbb36..3341fb9 100644
88604 --- a/net/rxrpc/ar-call.c
88605 +++ b/net/rxrpc/ar-call.c
88606 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
88607 spin_lock_init(&call->lock);
88608 rwlock_init(&call->state_lock);
88609 atomic_set(&call->usage, 1);
88610 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
88611 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
88612 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
88613
88614 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
88615 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
88616 index 4106ca9..a338d7a 100644
88617 --- a/net/rxrpc/ar-connection.c
88618 +++ b/net/rxrpc/ar-connection.c
88619 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
88620 rwlock_init(&conn->lock);
88621 spin_lock_init(&conn->state_lock);
88622 atomic_set(&conn->usage, 1);
88623 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
88624 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
88625 conn->avail_calls = RXRPC_MAXCALLS;
88626 conn->size_align = 4;
88627 conn->header_size = sizeof(struct rxrpc_header);
88628 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
88629 index e7ed43a..6afa140 100644
88630 --- a/net/rxrpc/ar-connevent.c
88631 +++ b/net/rxrpc/ar-connevent.c
88632 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
88633
88634 len = iov[0].iov_len + iov[1].iov_len;
88635
88636 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
88637 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
88638 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
88639
88640 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
88641 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
88642 index 529572f..c758ca7 100644
88643 --- a/net/rxrpc/ar-input.c
88644 +++ b/net/rxrpc/ar-input.c
88645 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
88646 /* track the latest serial number on this connection for ACK packet
88647 * information */
88648 serial = ntohl(sp->hdr.serial);
88649 - hi_serial = atomic_read(&call->conn->hi_serial);
88650 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
88651 while (serial > hi_serial)
88652 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
88653 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
88654 serial);
88655
88656 /* request ACK generation for any ACK or DATA packet that requests
88657 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
88658 index a693aca..81e7293 100644
88659 --- a/net/rxrpc/ar-internal.h
88660 +++ b/net/rxrpc/ar-internal.h
88661 @@ -272,8 +272,8 @@ struct rxrpc_connection {
88662 int error; /* error code for local abort */
88663 int debug_id; /* debug ID for printks */
88664 unsigned int call_counter; /* call ID counter */
88665 - atomic_t serial; /* packet serial number counter */
88666 - atomic_t hi_serial; /* highest serial number received */
88667 + atomic_unchecked_t serial; /* packet serial number counter */
88668 + atomic_unchecked_t hi_serial; /* highest serial number received */
88669 u8 avail_calls; /* number of calls available */
88670 u8 size_align; /* data size alignment (for security) */
88671 u8 header_size; /* rxrpc + security header size */
88672 @@ -346,7 +346,7 @@ struct rxrpc_call {
88673 spinlock_t lock;
88674 rwlock_t state_lock; /* lock for state transition */
88675 atomic_t usage;
88676 - atomic_t sequence; /* Tx data packet sequence counter */
88677 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
88678 u32 abort_code; /* local/remote abort code */
88679 enum { /* current state of call */
88680 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
88681 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
88682 */
88683 extern atomic_t rxrpc_n_skbs;
88684 extern __be32 rxrpc_epoch;
88685 -extern atomic_t rxrpc_debug_id;
88686 +extern atomic_unchecked_t rxrpc_debug_id;
88687 extern struct workqueue_struct *rxrpc_workqueue;
88688
88689 /*
88690 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
88691 index 87f7135..74d3703 100644
88692 --- a/net/rxrpc/ar-local.c
88693 +++ b/net/rxrpc/ar-local.c
88694 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
88695 spin_lock_init(&local->lock);
88696 rwlock_init(&local->services_lock);
88697 atomic_set(&local->usage, 1);
88698 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
88699 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
88700 memcpy(&local->srx, srx, sizeof(*srx));
88701 }
88702
88703 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
88704 index e1ac183..b43e10e 100644
88705 --- a/net/rxrpc/ar-output.c
88706 +++ b/net/rxrpc/ar-output.c
88707 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
88708 sp->hdr.cid = call->cid;
88709 sp->hdr.callNumber = call->call_id;
88710 sp->hdr.seq =
88711 - htonl(atomic_inc_return(&call->sequence));
88712 + htonl(atomic_inc_return_unchecked(&call->sequence));
88713 sp->hdr.serial =
88714 - htonl(atomic_inc_return(&conn->serial));
88715 + htonl(atomic_inc_return_unchecked(&conn->serial));
88716 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
88717 sp->hdr.userStatus = 0;
88718 sp->hdr.securityIndex = conn->security_ix;
88719 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
88720 index bebaa43..2644591 100644
88721 --- a/net/rxrpc/ar-peer.c
88722 +++ b/net/rxrpc/ar-peer.c
88723 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
88724 INIT_LIST_HEAD(&peer->error_targets);
88725 spin_lock_init(&peer->lock);
88726 atomic_set(&peer->usage, 1);
88727 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
88728 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
88729 memcpy(&peer->srx, srx, sizeof(*srx));
88730
88731 rxrpc_assess_MTU_size(peer);
88732 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
88733 index 38047f7..9f48511 100644
88734 --- a/net/rxrpc/ar-proc.c
88735 +++ b/net/rxrpc/ar-proc.c
88736 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
88737 atomic_read(&conn->usage),
88738 rxrpc_conn_states[conn->state],
88739 key_serial(conn->key),
88740 - atomic_read(&conn->serial),
88741 - atomic_read(&conn->hi_serial));
88742 + atomic_read_unchecked(&conn->serial),
88743 + atomic_read_unchecked(&conn->hi_serial));
88744
88745 return 0;
88746 }
88747 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
88748 index 92df566..87ec1bf 100644
88749 --- a/net/rxrpc/ar-transport.c
88750 +++ b/net/rxrpc/ar-transport.c
88751 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
88752 spin_lock_init(&trans->client_lock);
88753 rwlock_init(&trans->conn_lock);
88754 atomic_set(&trans->usage, 1);
88755 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
88756 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
88757
88758 if (peer->srx.transport.family == AF_INET) {
88759 switch (peer->srx.transport_type) {
88760 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
88761 index f226709..0e735a8 100644
88762 --- a/net/rxrpc/rxkad.c
88763 +++ b/net/rxrpc/rxkad.c
88764 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
88765
88766 len = iov[0].iov_len + iov[1].iov_len;
88767
88768 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
88769 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
88770 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
88771
88772 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
88773 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
88774
88775 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
88776
88777 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
88778 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
88779 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
88780
88781 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
88782 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
88783 index 391a245..296b3d7 100644
88784 --- a/net/sctp/ipv6.c
88785 +++ b/net/sctp/ipv6.c
88786 @@ -981,7 +981,7 @@ static const struct inet6_protocol sctpv6_protocol = {
88787 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
88788 };
88789
88790 -static struct sctp_af sctp_af_inet6 = {
88791 +static struct sctp_af sctp_af_inet6 __read_only = {
88792 .sa_family = AF_INET6,
88793 .sctp_xmit = sctp_v6_xmit,
88794 .setsockopt = ipv6_setsockopt,
88795 @@ -1013,7 +1013,7 @@ static struct sctp_af sctp_af_inet6 = {
88796 #endif
88797 };
88798
88799 -static struct sctp_pf sctp_pf_inet6 = {
88800 +static struct sctp_pf sctp_pf_inet6 __read_only = {
88801 .event_msgname = sctp_inet6_event_msgname,
88802 .skb_msgname = sctp_inet6_skb_msgname,
88803 .af_supported = sctp_inet6_af_supported,
88804 @@ -1038,7 +1038,7 @@ void sctp_v6_pf_init(void)
88805
88806 void sctp_v6_pf_exit(void)
88807 {
88808 - list_del(&sctp_af_inet6.list);
88809 + pax_list_del(&sctp_af_inet6.list);
88810 }
88811
88812 /* Initialize IPv6 support and register with socket layer. */
88813 diff --git a/net/sctp/probe.c b/net/sctp/probe.c
88814 index ad0dba8..e62c225 100644
88815 --- a/net/sctp/probe.c
88816 +++ b/net/sctp/probe.c
88817 @@ -63,7 +63,7 @@ static struct {
88818 struct timespec tstart;
88819 } sctpw;
88820
88821 -static void printl(const char *fmt, ...)
88822 +static __printf(1, 2) void printl(const char *fmt, ...)
88823 {
88824 va_list args;
88825 int len;
88826 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
88827 index ab3bba8..2fbab4e 100644
88828 --- a/net/sctp/proc.c
88829 +++ b/net/sctp/proc.c
88830 @@ -336,7 +336,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
88831 seq_printf(seq,
88832 "%8pK %8pK %-3d %-3d %-2d %-4d "
88833 "%4d %8d %8d %7d %5lu %-5d %5d ",
88834 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
88835 + assoc, sk,
88836 + sctp_sk(sk)->type, sk->sk_state,
88837 assoc->state, hash,
88838 assoc->assoc_id,
88839 assoc->sndbuf_used,
88840 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
88841 index 1c2e46c..f91cf5e 100644
88842 --- a/net/sctp/protocol.c
88843 +++ b/net/sctp/protocol.c
88844 @@ -834,8 +834,10 @@ int sctp_register_af(struct sctp_af *af)
88845 return 0;
88846 }
88847
88848 + pax_open_kernel();
88849 INIT_LIST_HEAD(&af->list);
88850 - list_add_tail(&af->list, &sctp_address_families);
88851 + pax_close_kernel();
88852 + pax_list_add_tail(&af->list, &sctp_address_families);
88853 return 1;
88854 }
88855
88856 @@ -966,7 +968,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
88857
88858 static struct sctp_af sctp_af_inet;
88859
88860 -static struct sctp_pf sctp_pf_inet = {
88861 +static struct sctp_pf sctp_pf_inet __read_only = {
88862 .event_msgname = sctp_inet_event_msgname,
88863 .skb_msgname = sctp_inet_skb_msgname,
88864 .af_supported = sctp_inet_af_supported,
88865 @@ -1037,7 +1039,7 @@ static const struct net_protocol sctp_protocol = {
88866 };
88867
88868 /* IPv4 address related functions. */
88869 -static struct sctp_af sctp_af_inet = {
88870 +static struct sctp_af sctp_af_inet __read_only = {
88871 .sa_family = AF_INET,
88872 .sctp_xmit = sctp_v4_xmit,
88873 .setsockopt = ip_setsockopt,
88874 @@ -1122,7 +1124,7 @@ static void sctp_v4_pf_init(void)
88875
88876 static void sctp_v4_pf_exit(void)
88877 {
88878 - list_del(&sctp_af_inet.list);
88879 + pax_list_del(&sctp_af_inet.list);
88880 }
88881
88882 static int sctp_v4_protosw_init(void)
88883 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
88884 index 8aab894..f6b7e7d 100644
88885 --- a/net/sctp/sm_sideeffect.c
88886 +++ b/net/sctp/sm_sideeffect.c
88887 @@ -447,7 +447,7 @@ static void sctp_generate_sack_event(unsigned long data)
88888 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
88889 }
88890
88891 -sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
88892 +sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
88893 NULL,
88894 sctp_generate_t1_cookie_event,
88895 sctp_generate_t1_init_event,
88896 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
88897 index b907073..57fef6c 100644
88898 --- a/net/sctp/socket.c
88899 +++ b/net/sctp/socket.c
88900 @@ -2166,11 +2166,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
88901 {
88902 struct sctp_association *asoc;
88903 struct sctp_ulpevent *event;
88904 + struct sctp_event_subscribe subscribe;
88905
88906 if (optlen > sizeof(struct sctp_event_subscribe))
88907 return -EINVAL;
88908 - if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
88909 + if (copy_from_user(&subscribe, optval, optlen))
88910 return -EFAULT;
88911 + sctp_sk(sk)->subscribe = subscribe;
88912
88913 /*
88914 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
88915 @@ -4215,13 +4217,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
88916 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
88917 int __user *optlen)
88918 {
88919 + struct sctp_event_subscribe subscribe;
88920 +
88921 if (len <= 0)
88922 return -EINVAL;
88923 if (len > sizeof(struct sctp_event_subscribe))
88924 len = sizeof(struct sctp_event_subscribe);
88925 if (put_user(len, optlen))
88926 return -EFAULT;
88927 - if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
88928 + subscribe = sctp_sk(sk)->subscribe;
88929 + if (copy_to_user(optval, &subscribe, len))
88930 return -EFAULT;
88931 return 0;
88932 }
88933 @@ -4239,6 +4244,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
88934 */
88935 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
88936 {
88937 + __u32 autoclose;
88938 +
88939 /* Applicable to UDP-style socket only */
88940 if (sctp_style(sk, TCP))
88941 return -EOPNOTSUPP;
88942 @@ -4247,7 +4254,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
88943 len = sizeof(int);
88944 if (put_user(len, optlen))
88945 return -EFAULT;
88946 - if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
88947 + autoclose = sctp_sk(sk)->autoclose;
88948 + if (copy_to_user(optval, &autoclose, sizeof(int)))
88949 return -EFAULT;
88950 return 0;
88951 }
88952 @@ -4619,12 +4627,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
88953 */
88954 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
88955 {
88956 + struct sctp_initmsg initmsg;
88957 +
88958 if (len < sizeof(struct sctp_initmsg))
88959 return -EINVAL;
88960 len = sizeof(struct sctp_initmsg);
88961 if (put_user(len, optlen))
88962 return -EFAULT;
88963 - if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
88964 + initmsg = sctp_sk(sk)->initmsg;
88965 + if (copy_to_user(optval, &initmsg, len))
88966 return -EFAULT;
88967 return 0;
88968 }
88969 @@ -4665,6 +4676,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
88970 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
88971 if (space_left < addrlen)
88972 return -ENOMEM;
88973 + if (addrlen > sizeof(temp) || addrlen < 0)
88974 + return -EFAULT;
88975 if (copy_to_user(to, &temp, addrlen))
88976 return -EFAULT;
88977 to += addrlen;
88978 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
88979 index bf3c6e8..376d8d0 100644
88980 --- a/net/sctp/sysctl.c
88981 +++ b/net/sctp/sysctl.c
88982 @@ -307,7 +307,7 @@ static int proc_sctp_do_hmac_alg(ctl_table *ctl,
88983 {
88984 struct net *net = current->nsproxy->net_ns;
88985 char tmp[8];
88986 - ctl_table tbl;
88987 + ctl_table_no_const tbl;
88988 int ret;
88989 int changed = 0;
88990 char *none = "none";
88991 @@ -350,7 +350,7 @@ static int proc_sctp_do_hmac_alg(ctl_table *ctl,
88992
88993 int sctp_sysctl_net_register(struct net *net)
88994 {
88995 - struct ctl_table *table;
88996 + ctl_table_no_const *table;
88997 int i;
88998
88999 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
89000 diff --git a/net/socket.c b/net/socket.c
89001 index 88f759a..c6933de 100644
89002 --- a/net/socket.c
89003 +++ b/net/socket.c
89004 @@ -88,6 +88,7 @@
89005 #include <linux/magic.h>
89006 #include <linux/slab.h>
89007 #include <linux/xattr.h>
89008 +#include <linux/in.h>
89009
89010 #include <asm/uaccess.h>
89011 #include <asm/unistd.h>
89012 @@ -105,6 +106,8 @@
89013 #include <linux/sockios.h>
89014 #include <linux/atalk.h>
89015
89016 +#include <linux/grsock.h>
89017 +
89018 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
89019 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
89020 unsigned long nr_segs, loff_t pos);
89021 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
89022 &sockfs_dentry_operations, SOCKFS_MAGIC);
89023 }
89024
89025 -static struct vfsmount *sock_mnt __read_mostly;
89026 +struct vfsmount *sock_mnt __read_mostly;
89027
89028 static struct file_system_type sock_fs_type = {
89029 .name = "sockfs",
89030 @@ -1268,6 +1271,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
89031 return -EAFNOSUPPORT;
89032 if (type < 0 || type >= SOCK_MAX)
89033 return -EINVAL;
89034 + if (protocol < 0)
89035 + return -EINVAL;
89036
89037 /* Compatibility.
89038
89039 @@ -1399,6 +1404,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
89040 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
89041 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
89042
89043 + if(!gr_search_socket(family, type, protocol)) {
89044 + retval = -EACCES;
89045 + goto out;
89046 + }
89047 +
89048 + if (gr_handle_sock_all(family, type, protocol)) {
89049 + retval = -EACCES;
89050 + goto out;
89051 + }
89052 +
89053 retval = sock_create(family, type, protocol, &sock);
89054 if (retval < 0)
89055 goto out;
89056 @@ -1526,6 +1541,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
89057 if (sock) {
89058 err = move_addr_to_kernel(umyaddr, addrlen, &address);
89059 if (err >= 0) {
89060 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
89061 + err = -EACCES;
89062 + goto error;
89063 + }
89064 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
89065 + if (err)
89066 + goto error;
89067 +
89068 err = security_socket_bind(sock,
89069 (struct sockaddr *)&address,
89070 addrlen);
89071 @@ -1534,6 +1557,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
89072 (struct sockaddr *)
89073 &address, addrlen);
89074 }
89075 +error:
89076 fput_light(sock->file, fput_needed);
89077 }
89078 return err;
89079 @@ -1557,10 +1581,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
89080 if ((unsigned int)backlog > somaxconn)
89081 backlog = somaxconn;
89082
89083 + if (gr_handle_sock_server_other(sock->sk)) {
89084 + err = -EPERM;
89085 + goto error;
89086 + }
89087 +
89088 + err = gr_search_listen(sock);
89089 + if (err)
89090 + goto error;
89091 +
89092 err = security_socket_listen(sock, backlog);
89093 if (!err)
89094 err = sock->ops->listen(sock, backlog);
89095
89096 +error:
89097 fput_light(sock->file, fput_needed);
89098 }
89099 return err;
89100 @@ -1604,6 +1638,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
89101 newsock->type = sock->type;
89102 newsock->ops = sock->ops;
89103
89104 + if (gr_handle_sock_server_other(sock->sk)) {
89105 + err = -EPERM;
89106 + sock_release(newsock);
89107 + goto out_put;
89108 + }
89109 +
89110 + err = gr_search_accept(sock);
89111 + if (err) {
89112 + sock_release(newsock);
89113 + goto out_put;
89114 + }
89115 +
89116 /*
89117 * We don't need try_module_get here, as the listening socket (sock)
89118 * has the protocol module (sock->ops->owner) held.
89119 @@ -1649,6 +1695,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
89120 fd_install(newfd, newfile);
89121 err = newfd;
89122
89123 + gr_attach_curr_ip(newsock->sk);
89124 +
89125 out_put:
89126 fput_light(sock->file, fput_needed);
89127 out:
89128 @@ -1681,6 +1729,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
89129 int, addrlen)
89130 {
89131 struct socket *sock;
89132 + struct sockaddr *sck;
89133 struct sockaddr_storage address;
89134 int err, fput_needed;
89135
89136 @@ -1691,6 +1740,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
89137 if (err < 0)
89138 goto out_put;
89139
89140 + sck = (struct sockaddr *)&address;
89141 +
89142 + if (gr_handle_sock_client(sck)) {
89143 + err = -EACCES;
89144 + goto out_put;
89145 + }
89146 +
89147 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
89148 + if (err)
89149 + goto out_put;
89150 +
89151 err =
89152 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
89153 if (err)
89154 @@ -1772,6 +1832,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
89155 * the protocol.
89156 */
89157
89158 +asmlinkage long sys_sendto(int, void *, size_t, unsigned, struct sockaddr *, int);
89159 +
89160 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
89161 unsigned int, flags, struct sockaddr __user *, addr,
89162 int, addr_len)
89163 @@ -1838,7 +1900,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
89164 struct socket *sock;
89165 struct iovec iov;
89166 struct msghdr msg;
89167 - struct sockaddr_storage address;
89168 + struct sockaddr_storage address = { };
89169 int err, err2;
89170 int fput_needed;
89171
89172 @@ -2045,7 +2107,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
89173 * checking falls down on this.
89174 */
89175 if (copy_from_user(ctl_buf,
89176 - (void __user __force *)msg_sys->msg_control,
89177 + (void __force_user *)msg_sys->msg_control,
89178 ctl_len))
89179 goto out_freectl;
89180 msg_sys->msg_control = ctl_buf;
89181 @@ -2185,7 +2247,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
89182 int err, total_len, len;
89183
89184 /* kernel mode address */
89185 - struct sockaddr_storage addr;
89186 + struct sockaddr_storage addr = { };
89187
89188 /* user mode address pointers */
89189 struct sockaddr __user *uaddr;
89190 @@ -2213,7 +2275,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
89191 * kernel msghdr to use the kernel address space)
89192 */
89193
89194 - uaddr = (__force void __user *)msg_sys->msg_name;
89195 + uaddr = (void __force_user *)msg_sys->msg_name;
89196 uaddr_len = COMPAT_NAMELEN(msg);
89197 if (MSG_CMSG_COMPAT & flags) {
89198 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
89199 @@ -2952,7 +3014,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
89200 old_fs = get_fs();
89201 set_fs(KERNEL_DS);
89202 err = dev_ioctl(net, cmd,
89203 - (struct ifreq __user __force *) &kifr);
89204 + (struct ifreq __force_user *) &kifr);
89205 set_fs(old_fs);
89206
89207 return err;
89208 @@ -3061,7 +3123,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
89209
89210 old_fs = get_fs();
89211 set_fs(KERNEL_DS);
89212 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
89213 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
89214 set_fs(old_fs);
89215
89216 if (cmd == SIOCGIFMAP && !err) {
89217 @@ -3166,7 +3228,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
89218 ret |= __get_user(rtdev, &(ur4->rt_dev));
89219 if (rtdev) {
89220 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
89221 - r4.rt_dev = (char __user __force *)devname;
89222 + r4.rt_dev = (char __force_user *)devname;
89223 devname[15] = 0;
89224 } else
89225 r4.rt_dev = NULL;
89226 @@ -3392,8 +3454,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
89227 int __user *uoptlen;
89228 int err;
89229
89230 - uoptval = (char __user __force *) optval;
89231 - uoptlen = (int __user __force *) optlen;
89232 + uoptval = (char __force_user *) optval;
89233 + uoptlen = (int __force_user *) optlen;
89234
89235 set_fs(KERNEL_DS);
89236 if (level == SOL_SOCKET)
89237 @@ -3413,7 +3475,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
89238 char __user *uoptval;
89239 int err;
89240
89241 - uoptval = (char __user __force *) optval;
89242 + uoptval = (char __force_user *) optval;
89243
89244 set_fs(KERNEL_DS);
89245 if (level == SOL_SOCKET)
89246 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
89247 index d5f35f1..da2680b5 100644
89248 --- a/net/sunrpc/clnt.c
89249 +++ b/net/sunrpc/clnt.c
89250 @@ -1283,7 +1283,9 @@ call_start(struct rpc_task *task)
89251 (RPC_IS_ASYNC(task) ? "async" : "sync"));
89252
89253 /* Increment call count */
89254 - task->tk_msg.rpc_proc->p_count++;
89255 + pax_open_kernel();
89256 + (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
89257 + pax_close_kernel();
89258 clnt->cl_stats->rpccnt++;
89259 task->tk_action = call_reserve;
89260 }
89261 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
89262 index f8529fc..ce8c643 100644
89263 --- a/net/sunrpc/sched.c
89264 +++ b/net/sunrpc/sched.c
89265 @@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(void *word)
89266 #ifdef RPC_DEBUG
89267 static void rpc_task_set_debuginfo(struct rpc_task *task)
89268 {
89269 - static atomic_t rpc_pid;
89270 + static atomic_unchecked_t rpc_pid;
89271
89272 - task->tk_pid = atomic_inc_return(&rpc_pid);
89273 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
89274 }
89275 #else
89276 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
89277 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
89278 index 89a588b..ba2cef8 100644
89279 --- a/net/sunrpc/svc.c
89280 +++ b/net/sunrpc/svc.c
89281 @@ -1160,7 +1160,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
89282 svc_putnl(resv, RPC_SUCCESS);
89283
89284 /* Bump per-procedure stats counter */
89285 - procp->pc_count++;
89286 + pax_open_kernel();
89287 + (*(unsigned int *)&procp->pc_count)++;
89288 + pax_close_kernel();
89289
89290 /* Initialize storage for argp and resp */
89291 memset(rqstp->rq_argp, 0, procp->pc_argsize);
89292 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
89293 index 8343737..677025e 100644
89294 --- a/net/sunrpc/xprtrdma/svc_rdma.c
89295 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
89296 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
89297 static unsigned int min_max_inline = 4096;
89298 static unsigned int max_max_inline = 65536;
89299
89300 -atomic_t rdma_stat_recv;
89301 -atomic_t rdma_stat_read;
89302 -atomic_t rdma_stat_write;
89303 -atomic_t rdma_stat_sq_starve;
89304 -atomic_t rdma_stat_rq_starve;
89305 -atomic_t rdma_stat_rq_poll;
89306 -atomic_t rdma_stat_rq_prod;
89307 -atomic_t rdma_stat_sq_poll;
89308 -atomic_t rdma_stat_sq_prod;
89309 +atomic_unchecked_t rdma_stat_recv;
89310 +atomic_unchecked_t rdma_stat_read;
89311 +atomic_unchecked_t rdma_stat_write;
89312 +atomic_unchecked_t rdma_stat_sq_starve;
89313 +atomic_unchecked_t rdma_stat_rq_starve;
89314 +atomic_unchecked_t rdma_stat_rq_poll;
89315 +atomic_unchecked_t rdma_stat_rq_prod;
89316 +atomic_unchecked_t rdma_stat_sq_poll;
89317 +atomic_unchecked_t rdma_stat_sq_prod;
89318
89319 /* Temporary NFS request map and context caches */
89320 struct kmem_cache *svc_rdma_map_cachep;
89321 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
89322 len -= *ppos;
89323 if (len > *lenp)
89324 len = *lenp;
89325 - if (len && copy_to_user(buffer, str_buf, len))
89326 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
89327 return -EFAULT;
89328 *lenp = len;
89329 *ppos += len;
89330 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
89331 {
89332 .procname = "rdma_stat_read",
89333 .data = &rdma_stat_read,
89334 - .maxlen = sizeof(atomic_t),
89335 + .maxlen = sizeof(atomic_unchecked_t),
89336 .mode = 0644,
89337 .proc_handler = read_reset_stat,
89338 },
89339 {
89340 .procname = "rdma_stat_recv",
89341 .data = &rdma_stat_recv,
89342 - .maxlen = sizeof(atomic_t),
89343 + .maxlen = sizeof(atomic_unchecked_t),
89344 .mode = 0644,
89345 .proc_handler = read_reset_stat,
89346 },
89347 {
89348 .procname = "rdma_stat_write",
89349 .data = &rdma_stat_write,
89350 - .maxlen = sizeof(atomic_t),
89351 + .maxlen = sizeof(atomic_unchecked_t),
89352 .mode = 0644,
89353 .proc_handler = read_reset_stat,
89354 },
89355 {
89356 .procname = "rdma_stat_sq_starve",
89357 .data = &rdma_stat_sq_starve,
89358 - .maxlen = sizeof(atomic_t),
89359 + .maxlen = sizeof(atomic_unchecked_t),
89360 .mode = 0644,
89361 .proc_handler = read_reset_stat,
89362 },
89363 {
89364 .procname = "rdma_stat_rq_starve",
89365 .data = &rdma_stat_rq_starve,
89366 - .maxlen = sizeof(atomic_t),
89367 + .maxlen = sizeof(atomic_unchecked_t),
89368 .mode = 0644,
89369 .proc_handler = read_reset_stat,
89370 },
89371 {
89372 .procname = "rdma_stat_rq_poll",
89373 .data = &rdma_stat_rq_poll,
89374 - .maxlen = sizeof(atomic_t),
89375 + .maxlen = sizeof(atomic_unchecked_t),
89376 .mode = 0644,
89377 .proc_handler = read_reset_stat,
89378 },
89379 {
89380 .procname = "rdma_stat_rq_prod",
89381 .data = &rdma_stat_rq_prod,
89382 - .maxlen = sizeof(atomic_t),
89383 + .maxlen = sizeof(atomic_unchecked_t),
89384 .mode = 0644,
89385 .proc_handler = read_reset_stat,
89386 },
89387 {
89388 .procname = "rdma_stat_sq_poll",
89389 .data = &rdma_stat_sq_poll,
89390 - .maxlen = sizeof(atomic_t),
89391 + .maxlen = sizeof(atomic_unchecked_t),
89392 .mode = 0644,
89393 .proc_handler = read_reset_stat,
89394 },
89395 {
89396 .procname = "rdma_stat_sq_prod",
89397 .data = &rdma_stat_sq_prod,
89398 - .maxlen = sizeof(atomic_t),
89399 + .maxlen = sizeof(atomic_unchecked_t),
89400 .mode = 0644,
89401 .proc_handler = read_reset_stat,
89402 },
89403 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
89404 index 0ce7552..d074459 100644
89405 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
89406 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
89407 @@ -501,7 +501,7 @@ next_sge:
89408 svc_rdma_put_context(ctxt, 0);
89409 goto out;
89410 }
89411 - atomic_inc(&rdma_stat_read);
89412 + atomic_inc_unchecked(&rdma_stat_read);
89413
89414 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
89415 chl_map->ch[ch_no].count -= read_wr.num_sge;
89416 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
89417 dto_q);
89418 list_del_init(&ctxt->dto_q);
89419 } else {
89420 - atomic_inc(&rdma_stat_rq_starve);
89421 + atomic_inc_unchecked(&rdma_stat_rq_starve);
89422 clear_bit(XPT_DATA, &xprt->xpt_flags);
89423 ctxt = NULL;
89424 }
89425 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
89426 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
89427 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
89428 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
89429 - atomic_inc(&rdma_stat_recv);
89430 + atomic_inc_unchecked(&rdma_stat_recv);
89431
89432 /* Build up the XDR from the receive buffers. */
89433 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
89434 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
89435 index c1d124d..acfc59e 100644
89436 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
89437 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
89438 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
89439 write_wr.wr.rdma.remote_addr = to;
89440
89441 /* Post It */
89442 - atomic_inc(&rdma_stat_write);
89443 + atomic_inc_unchecked(&rdma_stat_write);
89444 if (svc_rdma_send(xprt, &write_wr))
89445 goto err;
89446 return 0;
89447 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
89448 index 62e4f9b..dd3f2d7 100644
89449 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
89450 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
89451 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
89452 return;
89453
89454 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
89455 - atomic_inc(&rdma_stat_rq_poll);
89456 + atomic_inc_unchecked(&rdma_stat_rq_poll);
89457
89458 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
89459 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
89460 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
89461 }
89462
89463 if (ctxt)
89464 - atomic_inc(&rdma_stat_rq_prod);
89465 + atomic_inc_unchecked(&rdma_stat_rq_prod);
89466
89467 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
89468 /*
89469 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
89470 return;
89471
89472 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
89473 - atomic_inc(&rdma_stat_sq_poll);
89474 + atomic_inc_unchecked(&rdma_stat_sq_poll);
89475 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
89476 if (wc.status != IB_WC_SUCCESS)
89477 /* Close the transport */
89478 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
89479 }
89480
89481 if (ctxt)
89482 - atomic_inc(&rdma_stat_sq_prod);
89483 + atomic_inc_unchecked(&rdma_stat_sq_prod);
89484 }
89485
89486 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
89487 @@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
89488 spin_lock_bh(&xprt->sc_lock);
89489 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
89490 spin_unlock_bh(&xprt->sc_lock);
89491 - atomic_inc(&rdma_stat_sq_starve);
89492 + atomic_inc_unchecked(&rdma_stat_sq_starve);
89493
89494 /* See if we can opportunistically reap SQ WR to make room */
89495 sq_cq_reap(xprt);
89496 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
89497 index 9bc6db0..47ac8c0 100644
89498 --- a/net/sysctl_net.c
89499 +++ b/net/sysctl_net.c
89500 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
89501 kgid_t root_gid = make_kgid(net->user_ns, 0);
89502
89503 /* Allow network administrator to have same access as root. */
89504 - if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
89505 + if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
89506 uid_eq(root_uid, current_uid())) {
89507 int mode = (table->mode >> 6) & 7;
89508 return (mode << 6) | (mode << 3) | mode;
89509 diff --git a/net/tipc/link.c b/net/tipc/link.c
89510 index daa6080..2bbbe70 100644
89511 --- a/net/tipc/link.c
89512 +++ b/net/tipc/link.c
89513 @@ -1201,7 +1201,7 @@ static int link_send_sections_long(struct tipc_port *sender,
89514 struct tipc_msg fragm_hdr;
89515 struct sk_buff *buf, *buf_chain, *prev;
89516 u32 fragm_crs, fragm_rest, hsz, sect_rest;
89517 - const unchar *sect_crs;
89518 + const unchar __user *sect_crs;
89519 int curr_sect;
89520 u32 fragm_no;
89521
89522 @@ -1242,7 +1242,7 @@ again:
89523
89524 if (!sect_rest) {
89525 sect_rest = msg_sect[++curr_sect].iov_len;
89526 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
89527 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
89528 }
89529
89530 if (sect_rest < fragm_rest)
89531 @@ -1261,7 +1261,7 @@ error:
89532 }
89533 } else
89534 skb_copy_to_linear_data_offset(buf, fragm_crs,
89535 - sect_crs, sz);
89536 + (const void __force_kernel *)sect_crs, sz);
89537 sect_crs += sz;
89538 sect_rest -= sz;
89539 fragm_crs += sz;
89540 @@ -2306,8 +2306,11 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
89541 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
89542 u32 msg_typ = msg_type(tunnel_msg);
89543 u32 msg_count = msg_msgcnt(tunnel_msg);
89544 + u32 bearer_id = msg_bearer_id(tunnel_msg);
89545
89546 - dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
89547 + if (bearer_id >= MAX_BEARERS)
89548 + goto exit;
89549 + dest_link = (*l_ptr)->owner->links[bearer_id];
89550 if (!dest_link)
89551 goto exit;
89552 if (dest_link == *l_ptr) {
89553 @@ -2521,14 +2524,16 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
89554 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
89555 u32 msg_sz = msg_size(imsg);
89556 u32 fragm_sz = msg_data_sz(fragm);
89557 - u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
89558 + u32 exp_fragm_cnt;
89559 u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
89560 +
89561 if (msg_type(imsg) == TIPC_MCAST_MSG)
89562 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
89563 - if (msg_size(imsg) > max) {
89564 + if (fragm_sz == 0 || msg_size(imsg) > max) {
89565 kfree_skb(fbuf);
89566 return 0;
89567 }
89568 + exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
89569 pbuf = tipc_buf_acquire(msg_size(imsg));
89570 if (pbuf != NULL) {
89571 pbuf->next = *pending;
89572 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
89573 index f2db8a8..9245aa4 100644
89574 --- a/net/tipc/msg.c
89575 +++ b/net/tipc/msg.c
89576 @@ -98,7 +98,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
89577 msg_sect[cnt].iov_len);
89578 else
89579 skb_copy_to_linear_data_offset(*buf, pos,
89580 - msg_sect[cnt].iov_base,
89581 + (const void __force_kernel *)msg_sect[cnt].iov_base,
89582 msg_sect[cnt].iov_len);
89583 pos += msg_sect[cnt].iov_len;
89584 }
89585 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
89586 index 6b42d47..2ac24d5 100644
89587 --- a/net/tipc/subscr.c
89588 +++ b/net/tipc/subscr.c
89589 @@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
89590 {
89591 struct iovec msg_sect;
89592
89593 - msg_sect.iov_base = (void *)&sub->evt;
89594 + msg_sect.iov_base = (void __force_user *)&sub->evt;
89595 msg_sect.iov_len = sizeof(struct tipc_event);
89596
89597 sub->evt.event = htohl(event, sub->swap);
89598 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
89599 index 2db702d..09a77488 100644
89600 --- a/net/unix/af_unix.c
89601 +++ b/net/unix/af_unix.c
89602 @@ -783,6 +783,12 @@ static struct sock *unix_find_other(struct net *net,
89603 err = -ECONNREFUSED;
89604 if (!S_ISSOCK(inode->i_mode))
89605 goto put_fail;
89606 +
89607 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
89608 + err = -EACCES;
89609 + goto put_fail;
89610 + }
89611 +
89612 u = unix_find_socket_byinode(inode);
89613 if (!u)
89614 goto put_fail;
89615 @@ -803,6 +809,13 @@ static struct sock *unix_find_other(struct net *net,
89616 if (u) {
89617 struct dentry *dentry;
89618 dentry = unix_sk(u)->path.dentry;
89619 +
89620 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
89621 + err = -EPERM;
89622 + sock_put(u);
89623 + goto fail;
89624 + }
89625 +
89626 if (dentry)
89627 touch_atime(&unix_sk(u)->path);
89628 } else
89629 @@ -836,12 +849,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
89630 */
89631 err = security_path_mknod(&path, dentry, mode, 0);
89632 if (!err) {
89633 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
89634 + err = -EACCES;
89635 + goto out;
89636 + }
89637 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
89638 if (!err) {
89639 res->mnt = mntget(path.mnt);
89640 res->dentry = dget(dentry);
89641 + gr_handle_create(dentry, path.mnt);
89642 }
89643 }
89644 +out:
89645 done_path_create(&path, dentry);
89646 return err;
89647 }
89648 @@ -2323,9 +2342,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
89649 seq_puts(seq, "Num RefCount Protocol Flags Type St "
89650 "Inode Path\n");
89651 else {
89652 - struct sock *s = v;
89653 + struct sock *s = v, *peer;
89654 struct unix_sock *u = unix_sk(s);
89655 unix_state_lock(s);
89656 + peer = unix_peer(s);
89657 + unix_state_unlock(s);
89658 +
89659 + unix_state_double_lock(s, peer);
89660
89661 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
89662 s,
89663 @@ -2352,8 +2375,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
89664 }
89665 for ( ; i < len; i++)
89666 seq_putc(seq, u->addr->name->sun_path[i]);
89667 - }
89668 - unix_state_unlock(s);
89669 + } else if (peer)
89670 + seq_printf(seq, " P%lu", sock_i_ino(peer));
89671 +
89672 + unix_state_double_unlock(s, peer);
89673 seq_putc(seq, '\n');
89674 }
89675
89676 diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
89677 index 8800604..0526440 100644
89678 --- a/net/unix/sysctl_net_unix.c
89679 +++ b/net/unix/sysctl_net_unix.c
89680 @@ -28,7 +28,7 @@ static ctl_table unix_table[] = {
89681
89682 int __net_init unix_sysctl_register(struct net *net)
89683 {
89684 - struct ctl_table *table;
89685 + ctl_table_no_const *table;
89686
89687 table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
89688 if (table == NULL)
89689 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
89690 index c8717c1..08539f5 100644
89691 --- a/net/wireless/wext-core.c
89692 +++ b/net/wireless/wext-core.c
89693 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
89694 */
89695
89696 /* Support for very large requests */
89697 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
89698 - (user_length > descr->max_tokens)) {
89699 + if (user_length > descr->max_tokens) {
89700 /* Allow userspace to GET more than max so
89701 * we can support any size GET requests.
89702 * There is still a limit : -ENOMEM.
89703 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
89704 }
89705 }
89706
89707 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
89708 - /*
89709 - * If this is a GET, but not NOMAX, it means that the extra
89710 - * data is not bounded by userspace, but by max_tokens. Thus
89711 - * set the length to max_tokens. This matches the extra data
89712 - * allocation.
89713 - * The driver should fill it with the number of tokens it
89714 - * provided, and it may check iwp->length rather than having
89715 - * knowledge of max_tokens. If the driver doesn't change the
89716 - * iwp->length, this ioctl just copies back max_token tokens
89717 - * filled with zeroes. Hopefully the driver isn't claiming
89718 - * them to be valid data.
89719 - */
89720 - iwp->length = descr->max_tokens;
89721 - }
89722 -
89723 err = handler(dev, info, (union iwreq_data *) iwp, extra);
89724
89725 iwp->length += essid_compat;
89726 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
89727 index 167c67d..3f2ae427 100644
89728 --- a/net/xfrm/xfrm_policy.c
89729 +++ b/net/xfrm/xfrm_policy.c
89730 @@ -334,7 +334,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
89731 {
89732 policy->walk.dead = 1;
89733
89734 - atomic_inc(&policy->genid);
89735 + atomic_inc_unchecked(&policy->genid);
89736
89737 del_timer(&policy->polq.hold_timer);
89738 xfrm_queue_purge(&policy->polq.hold_queue);
89739 @@ -659,7 +659,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
89740 hlist_add_head(&policy->bydst, chain);
89741 xfrm_pol_hold(policy);
89742 net->xfrm.policy_count[dir]++;
89743 - atomic_inc(&flow_cache_genid);
89744 + atomic_inc_unchecked(&flow_cache_genid);
89745 rt_genid_bump(net);
89746 if (delpol) {
89747 xfrm_policy_requeue(delpol, policy);
89748 @@ -1611,7 +1611,7 @@ free_dst:
89749 goto out;
89750 }
89751
89752 -static int inline
89753 +static inline int
89754 xfrm_dst_alloc_copy(void **target, const void *src, int size)
89755 {
89756 if (!*target) {
89757 @@ -1623,7 +1623,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
89758 return 0;
89759 }
89760
89761 -static int inline
89762 +static inline int
89763 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
89764 {
89765 #ifdef CONFIG_XFRM_SUB_POLICY
89766 @@ -1635,7 +1635,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
89767 #endif
89768 }
89769
89770 -static int inline
89771 +static inline int
89772 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
89773 {
89774 #ifdef CONFIG_XFRM_SUB_POLICY
89775 @@ -1729,7 +1729,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
89776
89777 xdst->num_pols = num_pols;
89778 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
89779 - xdst->policy_genid = atomic_read(&pols[0]->genid);
89780 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
89781
89782 return xdst;
89783 }
89784 @@ -2598,7 +2598,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
89785 if (xdst->xfrm_genid != dst->xfrm->genid)
89786 return 0;
89787 if (xdst->num_pols > 0 &&
89788 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
89789 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
89790 return 0;
89791
89792 mtu = dst_mtu(dst->child);
89793 @@ -2686,8 +2686,11 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
89794 dst_ops->link_failure = xfrm_link_failure;
89795 if (likely(dst_ops->neigh_lookup == NULL))
89796 dst_ops->neigh_lookup = xfrm_neigh_lookup;
89797 - if (likely(afinfo->garbage_collect == NULL))
89798 - afinfo->garbage_collect = xfrm_garbage_collect_deferred;
89799 + if (likely(afinfo->garbage_collect == NULL)) {
89800 + pax_open_kernel();
89801 + *(void **)&afinfo->garbage_collect = xfrm_garbage_collect_deferred;
89802 + pax_close_kernel();
89803 + }
89804 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
89805 }
89806 spin_unlock(&xfrm_policy_afinfo_lock);
89807 @@ -2741,7 +2744,9 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
89808 dst_ops->check = NULL;
89809 dst_ops->negative_advice = NULL;
89810 dst_ops->link_failure = NULL;
89811 - afinfo->garbage_collect = NULL;
89812 + pax_open_kernel();
89813 + *(void **)&afinfo->garbage_collect = NULL;
89814 + pax_close_kernel();
89815 }
89816 return err;
89817 }
89818 @@ -3124,7 +3129,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
89819 sizeof(pol->xfrm_vec[i].saddr));
89820 pol->xfrm_vec[i].encap_family = mp->new_family;
89821 /* flush bundles */
89822 - atomic_inc(&pol->genid);
89823 + atomic_inc_unchecked(&pol->genid);
89824 }
89825 }
89826
89827 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
89828 index 2c341bd..4404211 100644
89829 --- a/net/xfrm/xfrm_state.c
89830 +++ b/net/xfrm/xfrm_state.c
89831 @@ -177,12 +177,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
89832
89833 if (unlikely(afinfo == NULL))
89834 return -EAFNOSUPPORT;
89835 - typemap = afinfo->type_map;
89836 + typemap = (const struct xfrm_type **)afinfo->type_map;
89837 spin_lock_bh(&xfrm_type_lock);
89838
89839 - if (likely(typemap[type->proto] == NULL))
89840 + if (likely(typemap[type->proto] == NULL)) {
89841 + pax_open_kernel();
89842 typemap[type->proto] = type;
89843 - else
89844 + pax_close_kernel();
89845 + } else
89846 err = -EEXIST;
89847 spin_unlock_bh(&xfrm_type_lock);
89848 xfrm_state_put_afinfo(afinfo);
89849 @@ -198,13 +200,16 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
89850
89851 if (unlikely(afinfo == NULL))
89852 return -EAFNOSUPPORT;
89853 - typemap = afinfo->type_map;
89854 + typemap = (const struct xfrm_type **)afinfo->type_map;
89855 spin_lock_bh(&xfrm_type_lock);
89856
89857 if (unlikely(typemap[type->proto] != type))
89858 err = -ENOENT;
89859 - else
89860 + else {
89861 + pax_open_kernel();
89862 typemap[type->proto] = NULL;
89863 + pax_close_kernel();
89864 + }
89865 spin_unlock_bh(&xfrm_type_lock);
89866 xfrm_state_put_afinfo(afinfo);
89867 return err;
89868 @@ -214,7 +219,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
89869 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
89870 {
89871 struct xfrm_state_afinfo *afinfo;
89872 - const struct xfrm_type **typemap;
89873 const struct xfrm_type *type;
89874 int modload_attempted = 0;
89875
89876 @@ -222,9 +226,8 @@ retry:
89877 afinfo = xfrm_state_get_afinfo(family);
89878 if (unlikely(afinfo == NULL))
89879 return NULL;
89880 - typemap = afinfo->type_map;
89881
89882 - type = typemap[proto];
89883 + type = afinfo->type_map[proto];
89884 if (unlikely(type && !try_module_get(type->owner)))
89885 type = NULL;
89886 if (!type && !modload_attempted) {
89887 @@ -258,7 +261,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
89888 return -EAFNOSUPPORT;
89889
89890 err = -EEXIST;
89891 - modemap = afinfo->mode_map;
89892 + modemap = (struct xfrm_mode **)afinfo->mode_map;
89893 spin_lock_bh(&xfrm_mode_lock);
89894 if (modemap[mode->encap])
89895 goto out;
89896 @@ -267,8 +270,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
89897 if (!try_module_get(afinfo->owner))
89898 goto out;
89899
89900 - mode->afinfo = afinfo;
89901 + pax_open_kernel();
89902 + *(const void **)&mode->afinfo = afinfo;
89903 modemap[mode->encap] = mode;
89904 + pax_close_kernel();
89905 err = 0;
89906
89907 out:
89908 @@ -292,10 +297,12 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
89909 return -EAFNOSUPPORT;
89910
89911 err = -ENOENT;
89912 - modemap = afinfo->mode_map;
89913 + modemap = (struct xfrm_mode **)afinfo->mode_map;
89914 spin_lock_bh(&xfrm_mode_lock);
89915 if (likely(modemap[mode->encap] == mode)) {
89916 + pax_open_kernel();
89917 modemap[mode->encap] = NULL;
89918 + pax_close_kernel();
89919 module_put(mode->afinfo->owner);
89920 err = 0;
89921 }
89922 diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
89923 index 05a6e3d..6716ec9 100644
89924 --- a/net/xfrm/xfrm_sysctl.c
89925 +++ b/net/xfrm/xfrm_sysctl.c
89926 @@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
89927
89928 int __net_init xfrm_sysctl_init(struct net *net)
89929 {
89930 - struct ctl_table *table;
89931 + ctl_table_no_const *table;
89932
89933 __xfrm_sysctl_init(net);
89934
89935 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
89936 index 0e801c3..5c8ad3b 100644
89937 --- a/scripts/Makefile.build
89938 +++ b/scripts/Makefile.build
89939 @@ -111,7 +111,7 @@ endif
89940 endif
89941
89942 # Do not include host rules unless needed
89943 -ifneq ($(hostprogs-y)$(hostprogs-m),)
89944 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
89945 include scripts/Makefile.host
89946 endif
89947
89948 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
89949 index 686cb0d..9d653bf 100644
89950 --- a/scripts/Makefile.clean
89951 +++ b/scripts/Makefile.clean
89952 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
89953 __clean-files := $(extra-y) $(always) \
89954 $(targets) $(clean-files) \
89955 $(host-progs) \
89956 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
89957 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
89958 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
89959
89960 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
89961
89962 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
89963 index 1ac414f..38575f7 100644
89964 --- a/scripts/Makefile.host
89965 +++ b/scripts/Makefile.host
89966 @@ -31,6 +31,8 @@
89967 # Note: Shared libraries consisting of C++ files are not supported
89968
89969 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
89970 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
89971 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
89972
89973 # C code
89974 # Executables compiled from a single .c file
89975 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
89976 # Shared libaries (only .c supported)
89977 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
89978 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
89979 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
89980 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
89981 # Remove .so files from "xxx-objs"
89982 host-cobjs := $(filter-out %.so,$(host-cobjs))
89983 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
89984
89985 -#Object (.o) files used by the shared libaries
89986 +# Object (.o) files used by the shared libaries
89987 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
89988 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
89989
89990 # output directory for programs/.o files
89991 # hostprogs-y := tools/build may have been specified. Retrieve directory
89992 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
89993 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
89994 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
89995 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
89996 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
89997 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
89998 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
89999 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
90000
90001 obj-dirs += $(host-objdirs)
90002 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
90003 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
90004 $(call if_changed_dep,host-cshobjs)
90005
90006 +# Compile .c file, create position independent .o file
90007 +# host-cxxshobjs -> .o
90008 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
90009 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
90010 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
90011 + $(call if_changed_dep,host-cxxshobjs)
90012 +
90013 # Link a shared library, based on position independent .o files
90014 # *.o -> .so shared library (host-cshlib)
90015 quiet_cmd_host-cshlib = HOSTLLD -shared $@
90016 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
90017 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
90018 $(call if_changed,host-cshlib)
90019
90020 +# Link a shared library, based on position independent .o files
90021 +# *.o -> .so shared library (host-cxxshlib)
90022 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
90023 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
90024 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
90025 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
90026 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
90027 + $(call if_changed,host-cxxshlib)
90028 +
90029 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
90030 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
90031 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
90032
90033 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
90034 index 7f6425e..9864506 100644
90035 --- a/scripts/basic/fixdep.c
90036 +++ b/scripts/basic/fixdep.c
90037 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
90038 /*
90039 * Lookup a value in the configuration string.
90040 */
90041 -static int is_defined_config(const char *name, int len, unsigned int hash)
90042 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
90043 {
90044 struct item *aux;
90045
90046 @@ -211,10 +211,10 @@ static void clear_config(void)
90047 /*
90048 * Record the use of a CONFIG_* word.
90049 */
90050 -static void use_config(const char *m, int slen)
90051 +static void use_config(const char *m, unsigned int slen)
90052 {
90053 unsigned int hash = strhash(m, slen);
90054 - int c, i;
90055 + unsigned int c, i;
90056
90057 if (is_defined_config(m, slen, hash))
90058 return;
90059 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
90060
90061 static void parse_config_file(const char *map, size_t len)
90062 {
90063 - const int *end = (const int *) (map + len);
90064 + const unsigned int *end = (const unsigned int *) (map + len);
90065 /* start at +1, so that p can never be < map */
90066 - const int *m = (const int *) map + 1;
90067 + const unsigned int *m = (const unsigned int *) map + 1;
90068 const char *p, *q;
90069
90070 for (; m < end; m++) {
90071 @@ -406,7 +406,7 @@ static void print_deps(void)
90072 static void traps(void)
90073 {
90074 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
90075 - int *p = (int *)test;
90076 + unsigned int *p = (unsigned int *)test;
90077
90078 if (*p != INT_CONF) {
90079 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
90080 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
90081 new file mode 100644
90082 index 0000000..5e0222d
90083 --- /dev/null
90084 +++ b/scripts/gcc-plugin.sh
90085 @@ -0,0 +1,17 @@
90086 +#!/bin/bash
90087 +plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
90088 +#include "gcc-plugin.h"
90089 +#include "tree.h"
90090 +#include "tm.h"
90091 +#include "rtl.h"
90092 +#ifdef ENABLE_BUILD_WITH_CXX
90093 +#warning $2
90094 +#else
90095 +#warning $1
90096 +#endif
90097 +EOF`
90098 +if [ $? -eq 0 ]
90099 +then
90100 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
90101 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
90102 +fi
90103 diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
90104 index 581ca99..a6ff02e 100644
90105 --- a/scripts/headers_install.pl
90106 +++ b/scripts/headers_install.pl
90107 @@ -35,6 +35,7 @@ foreach my $filename (@files) {
90108 $line =~ s/([\s(])__user\s/$1/g;
90109 $line =~ s/([\s(])__force\s/$1/g;
90110 $line =~ s/([\s(])__iomem\s/$1/g;
90111 + $line =~ s/(\s?)__intentional_overflow\([-\d\s,]*\)\s?/$1/g;
90112 $line =~ s/\s__attribute_const__\s/ /g;
90113 $line =~ s/\s__attribute_const__$//g;
90114 $line =~ s/\b__packed\b/__attribute__((packed))/g;
90115 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
90116 index 3d569d6..0c09522 100644
90117 --- a/scripts/link-vmlinux.sh
90118 +++ b/scripts/link-vmlinux.sh
90119 @@ -159,7 +159,7 @@ else
90120 fi;
90121
90122 # final build of init/
90123 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
90124 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
90125
90126 kallsymso=""
90127 kallsyms_vmlinux=""
90128 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
90129 index 771ac17..9f0d3ee 100644
90130 --- a/scripts/mod/file2alias.c
90131 +++ b/scripts/mod/file2alias.c
90132 @@ -140,7 +140,7 @@ static void device_id_check(const char *modname, const char *device_id,
90133 unsigned long size, unsigned long id_size,
90134 void *symval)
90135 {
90136 - int i;
90137 + unsigned int i;
90138
90139 if (size % id_size || size < id_size) {
90140 fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
90141 @@ -168,7 +168,7 @@ static void device_id_check(const char *modname, const char *device_id,
90142 /* USB is special because the bcdDevice can be matched against a numeric range */
90143 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
90144 static void do_usb_entry(void *symval,
90145 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
90146 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
90147 unsigned char range_lo, unsigned char range_hi,
90148 unsigned char max, struct module *mod)
90149 {
90150 @@ -278,7 +278,7 @@ static void do_usb_entry_multi(void *symval, struct module *mod)
90151 {
90152 unsigned int devlo, devhi;
90153 unsigned char chi, clo, max;
90154 - int ndigits;
90155 + unsigned int ndigits;
90156
90157 DEF_FIELD(symval, usb_device_id, match_flags);
90158 DEF_FIELD(symval, usb_device_id, idVendor);
90159 @@ -531,7 +531,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
90160 for (i = 0; i < count; i++) {
90161 DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
90162 char acpi_id[sizeof(*id)];
90163 - int j;
90164 + unsigned int j;
90165
90166 buf_printf(&mod->dev_table_buf,
90167 "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
90168 @@ -560,7 +560,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
90169
90170 for (j = 0; j < PNP_MAX_DEVICES; j++) {
90171 const char *id = (char *)(*devs)[j].id;
90172 - int i2, j2;
90173 + unsigned int i2, j2;
90174 int dup = 0;
90175
90176 if (!id[0])
90177 @@ -586,7 +586,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
90178 /* add an individual alias for every device entry */
90179 if (!dup) {
90180 char acpi_id[PNP_ID_LEN];
90181 - int k;
90182 + unsigned int k;
90183
90184 buf_printf(&mod->dev_table_buf,
90185 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
90186 @@ -938,7 +938,7 @@ static void dmi_ascii_filter(char *d, const char *s)
90187 static int do_dmi_entry(const char *filename, void *symval,
90188 char *alias)
90189 {
90190 - int i, j;
90191 + unsigned int i, j;
90192 DEF_FIELD_ADDR(symval, dmi_system_id, matches);
90193 sprintf(alias, "dmi*");
90194
90195 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
90196 index 78b30c1..536850d 100644
90197 --- a/scripts/mod/modpost.c
90198 +++ b/scripts/mod/modpost.c
90199 @@ -931,6 +931,7 @@ enum mismatch {
90200 ANY_INIT_TO_ANY_EXIT,
90201 ANY_EXIT_TO_ANY_INIT,
90202 EXPORT_TO_INIT_EXIT,
90203 + DATA_TO_TEXT
90204 };
90205
90206 struct sectioncheck {
90207 @@ -1045,6 +1046,12 @@ const struct sectioncheck sectioncheck[] = {
90208 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
90209 .mismatch = EXPORT_TO_INIT_EXIT,
90210 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
90211 +},
90212 +/* Do not reference code from writable data */
90213 +{
90214 + .fromsec = { DATA_SECTIONS, NULL },
90215 + .tosec = { TEXT_SECTIONS, NULL },
90216 + .mismatch = DATA_TO_TEXT
90217 }
90218 };
90219
90220 @@ -1167,10 +1174,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
90221 continue;
90222 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
90223 continue;
90224 - if (sym->st_value == addr)
90225 - return sym;
90226 /* Find a symbol nearby - addr are maybe negative */
90227 d = sym->st_value - addr;
90228 + if (d == 0)
90229 + return sym;
90230 if (d < 0)
90231 d = addr - sym->st_value;
90232 if (d < distance) {
90233 @@ -1449,6 +1456,14 @@ static void report_sec_mismatch(const char *modname,
90234 tosym, prl_to, prl_to, tosym);
90235 free(prl_to);
90236 break;
90237 + case DATA_TO_TEXT:
90238 +#if 0
90239 + fprintf(stderr,
90240 + "The %s %s:%s references\n"
90241 + "the %s %s:%s%s\n",
90242 + from, fromsec, fromsym, to, tosec, tosym, to_p);
90243 +#endif
90244 + break;
90245 }
90246 fprintf(stderr, "\n");
90247 }
90248 @@ -1683,7 +1698,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
90249 static void check_sec_ref(struct module *mod, const char *modname,
90250 struct elf_info *elf)
90251 {
90252 - int i;
90253 + unsigned int i;
90254 Elf_Shdr *sechdrs = elf->sechdrs;
90255
90256 /* Walk through all sections */
90257 @@ -1781,7 +1796,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
90258 va_end(ap);
90259 }
90260
90261 -void buf_write(struct buffer *buf, const char *s, int len)
90262 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
90263 {
90264 if (buf->size - buf->pos < len) {
90265 buf->size += len + SZ;
90266 @@ -1999,7 +2014,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
90267 if (fstat(fileno(file), &st) < 0)
90268 goto close_write;
90269
90270 - if (st.st_size != b->pos)
90271 + if (st.st_size != (off_t)b->pos)
90272 goto close_write;
90273
90274 tmp = NOFAIL(malloc(b->pos));
90275 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
90276 index 51207e4..f7d603d 100644
90277 --- a/scripts/mod/modpost.h
90278 +++ b/scripts/mod/modpost.h
90279 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
90280
90281 struct buffer {
90282 char *p;
90283 - int pos;
90284 - int size;
90285 + unsigned int pos;
90286 + unsigned int size;
90287 };
90288
90289 void __attribute__((format(printf, 2, 3)))
90290 buf_printf(struct buffer *buf, const char *fmt, ...);
90291
90292 void
90293 -buf_write(struct buffer *buf, const char *s, int len);
90294 +buf_write(struct buffer *buf, const char *s, unsigned int len);
90295
90296 struct module {
90297 struct module *next;
90298 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
90299 index 9dfcd6d..099068e 100644
90300 --- a/scripts/mod/sumversion.c
90301 +++ b/scripts/mod/sumversion.c
90302 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
90303 goto out;
90304 }
90305
90306 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
90307 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
90308 warn("writing sum in %s failed: %s\n",
90309 filename, strerror(errno));
90310 goto out;
90311 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
90312 index acb8650..b8c5f02 100644
90313 --- a/scripts/package/builddeb
90314 +++ b/scripts/package/builddeb
90315 @@ -246,6 +246,7 @@ fi
90316 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
90317 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
90318 (cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
90319 +(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
90320 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
90321 mkdir -p "$destdir"
90322 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
90323 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
90324 index 68bb4ef..2f419e1 100644
90325 --- a/scripts/pnmtologo.c
90326 +++ b/scripts/pnmtologo.c
90327 @@ -244,14 +244,14 @@ static void write_header(void)
90328 fprintf(out, " * Linux logo %s\n", logoname);
90329 fputs(" */\n\n", out);
90330 fputs("#include <linux/linux_logo.h>\n\n", out);
90331 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
90332 + fprintf(out, "static unsigned char %s_data[] = {\n",
90333 logoname);
90334 }
90335
90336 static void write_footer(void)
90337 {
90338 fputs("\n};\n\n", out);
90339 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
90340 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
90341 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
90342 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
90343 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
90344 @@ -381,7 +381,7 @@ static void write_logo_clut224(void)
90345 fputs("\n};\n\n", out);
90346
90347 /* write logo clut */
90348 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
90349 + fprintf(out, "static unsigned char %s_clut[] = {\n",
90350 logoname);
90351 write_hex_cnt = 0;
90352 for (i = 0; i < logo_clutsize; i++) {
90353 diff --git a/scripts/sortextable.h b/scripts/sortextable.h
90354 index f5eb43d..1814de8 100644
90355 --- a/scripts/sortextable.h
90356 +++ b/scripts/sortextable.h
90357 @@ -106,9 +106,9 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
90358 const char *secstrtab;
90359 const char *strtab;
90360 char *extab_image;
90361 - int extab_index = 0;
90362 - int i;
90363 - int idx;
90364 + unsigned int extab_index = 0;
90365 + unsigned int i;
90366 + unsigned int idx;
90367
90368 shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
90369 shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
90370 diff --git a/security/Kconfig b/security/Kconfig
90371 index e9c6ac7..e6254cf 100644
90372 --- a/security/Kconfig
90373 +++ b/security/Kconfig
90374 @@ -4,6 +4,944 @@
90375
90376 menu "Security options"
90377
90378 +menu "Grsecurity"
90379 +
90380 + config ARCH_TRACK_EXEC_LIMIT
90381 + bool
90382 +
90383 + config PAX_KERNEXEC_PLUGIN
90384 + bool
90385 +
90386 + config PAX_PER_CPU_PGD
90387 + bool
90388 +
90389 + config TASK_SIZE_MAX_SHIFT
90390 + int
90391 + depends on X86_64
90392 + default 47 if !PAX_PER_CPU_PGD
90393 + default 42 if PAX_PER_CPU_PGD
90394 +
90395 + config PAX_ENABLE_PAE
90396 + bool
90397 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
90398 +
90399 + config PAX_USERCOPY_SLABS
90400 + bool
90401 +
90402 +config GRKERNSEC
90403 + bool "Grsecurity"
90404 + select CRYPTO
90405 + select CRYPTO_SHA256
90406 + select PROC_FS
90407 + select STOP_MACHINE
90408 + select TTY
90409 + help
90410 + If you say Y here, you will be able to configure many features
90411 + that will enhance the security of your system. It is highly
90412 + recommended that you say Y here and read through the help
90413 + for each option so that you fully understand the features and
90414 + can evaluate their usefulness for your machine.
90415 +
90416 +choice
90417 + prompt "Configuration Method"
90418 + depends on GRKERNSEC
90419 + default GRKERNSEC_CONFIG_CUSTOM
90420 + help
90421 +
90422 +config GRKERNSEC_CONFIG_AUTO
90423 + bool "Automatic"
90424 + help
90425 + If you choose this configuration method, you'll be able to answer a small
90426 + number of simple questions about how you plan to use this kernel.
90427 + The settings of grsecurity and PaX will be automatically configured for
90428 + the highest commonly-used settings within the provided constraints.
90429 +
90430 + If you require additional configuration, custom changes can still be made
90431 + from the "custom configuration" menu.
90432 +
90433 +config GRKERNSEC_CONFIG_CUSTOM
90434 + bool "Custom"
90435 + help
90436 + If you choose this configuration method, you'll be able to configure all
90437 + grsecurity and PaX settings manually. Via this method, no options are
90438 + automatically enabled.
90439 +
90440 +endchoice
90441 +
90442 +choice
90443 + prompt "Usage Type"
90444 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
90445 + default GRKERNSEC_CONFIG_SERVER
90446 + help
90447 +
90448 +config GRKERNSEC_CONFIG_SERVER
90449 + bool "Server"
90450 + help
90451 + Choose this option if you plan to use this kernel on a server.
90452 +
90453 +config GRKERNSEC_CONFIG_DESKTOP
90454 + bool "Desktop"
90455 + help
90456 + Choose this option if you plan to use this kernel on a desktop.
90457 +
90458 +endchoice
90459 +
90460 +choice
90461 + prompt "Virtualization Type"
90462 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
90463 + default GRKERNSEC_CONFIG_VIRT_NONE
90464 + help
90465 +
90466 +config GRKERNSEC_CONFIG_VIRT_NONE
90467 + bool "None"
90468 + help
90469 + Choose this option if this kernel will be run on bare metal.
90470 +
90471 +config GRKERNSEC_CONFIG_VIRT_GUEST
90472 + bool "Guest"
90473 + help
90474 + Choose this option if this kernel will be run as a VM guest.
90475 +
90476 +config GRKERNSEC_CONFIG_VIRT_HOST
90477 + bool "Host"
90478 + help
90479 + Choose this option if this kernel will be run as a VM host.
90480 +
90481 +endchoice
90482 +
90483 +choice
90484 + prompt "Virtualization Hardware"
90485 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
90486 + help
90487 +
90488 +config GRKERNSEC_CONFIG_VIRT_EPT
90489 + bool "EPT/RVI Processor Support"
90490 + depends on X86
90491 + help
90492 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
90493 + hardware virtualization. This allows for additional kernel hardening protections
90494 + to operate without additional performance impact.
90495 +
90496 + To see if your Intel processor supports EPT, see:
90497 + http://ark.intel.com/Products/VirtualizationTechnology
90498 + (Most Core i3/5/7 support EPT)
90499 +
90500 + To see if your AMD processor supports RVI, see:
90501 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
90502 +
90503 +config GRKERNSEC_CONFIG_VIRT_SOFT
90504 + bool "First-gen/No Hardware Virtualization"
90505 + help
90506 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
90507 + support hardware virtualization or doesn't support the EPT/RVI extensions.
90508 +
90509 +endchoice
90510 +
90511 +choice
90512 + prompt "Virtualization Software"
90513 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
90514 + help
90515 +
90516 +config GRKERNSEC_CONFIG_VIRT_XEN
90517 + bool "Xen"
90518 + help
90519 + Choose this option if this kernel is running as a Xen guest or host.
90520 +
90521 +config GRKERNSEC_CONFIG_VIRT_VMWARE
90522 + bool "VMWare"
90523 + help
90524 + Choose this option if this kernel is running as a VMWare guest or host.
90525 +
90526 +config GRKERNSEC_CONFIG_VIRT_KVM
90527 + bool "KVM"
90528 + help
90529 + Choose this option if this kernel is running as a KVM guest or host.
90530 +
90531 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
90532 + bool "VirtualBox"
90533 + help
90534 + Choose this option if this kernel is running as a VirtualBox guest or host.
90535 +
90536 +endchoice
90537 +
90538 +choice
90539 + prompt "Required Priorities"
90540 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
90541 + default GRKERNSEC_CONFIG_PRIORITY_PERF
90542 + help
90543 +
90544 +config GRKERNSEC_CONFIG_PRIORITY_PERF
90545 + bool "Performance"
90546 + help
90547 + Choose this option if performance is of highest priority for this deployment
90548 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
90549 + clearing of structures intended for userland, and freed memory sanitizing will
90550 + be disabled.
90551 +
90552 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
90553 + bool "Security"
90554 + help
90555 + Choose this option if security is of highest priority for this deployment of
90556 + grsecurity. UDEREF, kernel stack clearing, clearing of structures intended
90557 + for userland, and freed memory sanitizing will be enabled for this kernel.
90558 + In a worst-case scenario, these features can introduce a 20% performance hit
90559 + (UDEREF on x64 contributing half of this hit).
90560 +
90561 +endchoice
90562 +
90563 +menu "Default Special Groups"
90564 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
90565 +
90566 +config GRKERNSEC_PROC_GID
90567 + int "GID exempted from /proc restrictions"
90568 + default 1001
90569 + help
90570 + Setting this GID determines which group will be exempted from
90571 + grsecurity's /proc restrictions, allowing users of the specified
90572 + group to view network statistics and the existence of other users'
90573 + processes on the system. This GID may also be chosen at boot time
90574 + via "grsec_proc_gid=" on the kernel commandline.
90575 +
90576 +config GRKERNSEC_TPE_UNTRUSTED_GID
90577 + int "GID for TPE-untrusted users"
90578 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
90579 + default 1005
90580 + help
90581 + Setting this GID determines which group untrusted users should
90582 + be added to. These users will be placed under grsecurity's Trusted Path
90583 + Execution mechanism, preventing them from executing their own binaries.
90584 + The users will only be able to execute binaries in directories owned and
90585 + writable only by the root user. If the sysctl option is enabled, a sysctl
90586 + option with name "tpe_gid" is created.
90587 +
90588 +config GRKERNSEC_TPE_TRUSTED_GID
90589 + int "GID for TPE-trusted users"
90590 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
90591 + default 1005
90592 + help
90593 + Setting this GID determines what group TPE restrictions will be
90594 + *disabled* for. If the sysctl option is enabled, a sysctl option
90595 + with name "tpe_gid" is created.
90596 +
90597 +config GRKERNSEC_SYMLINKOWN_GID
90598 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
90599 + depends on GRKERNSEC_CONFIG_SERVER
90600 + default 1006
90601 + help
90602 + Setting this GID determines what group kernel-enforced
90603 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
90604 + is enabled, a sysctl option with name "symlinkown_gid" is created.
90605 +
90606 +
90607 +endmenu
90608 +
90609 +menu "Customize Configuration"
90610 +depends on GRKERNSEC
90611 +
90612 +menu "PaX"
90613 +
90614 +config PAX
90615 + bool "Enable various PaX features"
90616 + default y if GRKERNSEC_CONFIG_AUTO
90617 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
90618 + help
90619 + This allows you to enable various PaX features. PaX adds
90620 + intrusion prevention mechanisms to the kernel that reduce
90621 + the risks posed by exploitable memory corruption bugs.
90622 +
90623 +menu "PaX Control"
90624 + depends on PAX
90625 +
90626 +config PAX_SOFTMODE
90627 + bool 'Support soft mode'
90628 + help
90629 + Enabling this option will allow you to run PaX in soft mode, that
90630 + is, PaX features will not be enforced by default, only on executables
90631 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
90632 + support as they are the only way to mark executables for soft mode use.
90633 +
90634 + Soft mode can be activated by using the "pax_softmode=1" kernel command
90635 + line option on boot. Furthermore you can control various PaX features
90636 + at runtime via the entries in /proc/sys/kernel/pax.
90637 +
90638 +config PAX_EI_PAX
90639 + bool 'Use legacy ELF header marking'
90640 + default y if GRKERNSEC_CONFIG_AUTO
90641 + help
90642 + Enabling this option will allow you to control PaX features on
90643 + a per executable basis via the 'chpax' utility available at
90644 + http://pax.grsecurity.net/. The control flags will be read from
90645 + an otherwise reserved part of the ELF header. This marking has
90646 + numerous drawbacks (no support for soft-mode, toolchain does not
90647 + know about the non-standard use of the ELF header) therefore it
90648 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
90649 + support.
90650 +
90651 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
90652 + support as well, they will override the legacy EI_PAX marks.
90653 +
90654 + If you enable none of the marking options then all applications
90655 + will run with PaX enabled on them by default.
90656 +
90657 +config PAX_PT_PAX_FLAGS
90658 + bool 'Use ELF program header marking'
90659 + default y if GRKERNSEC_CONFIG_AUTO
90660 + help
90661 + Enabling this option will allow you to control PaX features on
90662 + a per executable basis via the 'paxctl' utility available at
90663 + http://pax.grsecurity.net/. The control flags will be read from
90664 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
90665 + has the benefits of supporting both soft mode and being fully
90666 + integrated into the toolchain (the binutils patch is available
90667 + from http://pax.grsecurity.net).
90668 +
90669 + Note that if you enable the legacy EI_PAX marking support as well,
90670 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
90671 +
90672 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
90673 + must make sure that the marks are the same if a binary has both marks.
90674 +
90675 + If you enable none of the marking options then all applications
90676 + will run with PaX enabled on them by default.
90677 +
90678 +config PAX_XATTR_PAX_FLAGS
90679 + bool 'Use filesystem extended attributes marking'
90680 + default y if GRKERNSEC_CONFIG_AUTO
90681 + select CIFS_XATTR if CIFS
90682 + select EXT2_FS_XATTR if EXT2_FS
90683 + select EXT3_FS_XATTR if EXT3_FS
90684 + select EXT4_FS_XATTR if EXT4_FS
90685 + select JFFS2_FS_XATTR if JFFS2_FS
90686 + select REISERFS_FS_XATTR if REISERFS_FS
90687 + select SQUASHFS_XATTR if SQUASHFS
90688 + select TMPFS_XATTR if TMPFS
90689 + select UBIFS_FS_XATTR if UBIFS_FS
90690 + help
90691 + Enabling this option will allow you to control PaX features on
90692 + a per executable basis via the 'setfattr' utility. The control
90693 + flags will be read from the user.pax.flags extended attribute of
90694 + the file. This marking has the benefit of supporting binary-only
90695 + applications that self-check themselves (e.g., skype) and would
90696 + not tolerate chpax/paxctl changes. The main drawback is that
90697 + extended attributes are not supported by some filesystems (e.g.,
90698 + isofs, udf, vfat) so copying files through such filesystems will
90699 + lose the extended attributes and these PaX markings.
90700 +
90701 + Note that if you enable the legacy EI_PAX marking support as well,
90702 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
90703 +
90704 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
90705 + must make sure that the marks are the same if a binary has both marks.
90706 +
90707 + If you enable none of the marking options then all applications
90708 + will run with PaX enabled on them by default.
90709 +
90710 +choice
90711 + prompt 'MAC system integration'
90712 + default PAX_HAVE_ACL_FLAGS
90713 + help
90714 + Mandatory Access Control systems have the option of controlling
90715 + PaX flags on a per executable basis, choose the method supported
90716 + by your particular system.
90717 +
90718 + - "none": if your MAC system does not interact with PaX,
90719 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
90720 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
90721 +
90722 + NOTE: this option is for developers/integrators only.
90723 +
90724 + config PAX_NO_ACL_FLAGS
90725 + bool 'none'
90726 +
90727 + config PAX_HAVE_ACL_FLAGS
90728 + bool 'direct'
90729 +
90730 + config PAX_HOOK_ACL_FLAGS
90731 + bool 'hook'
90732 +endchoice
90733 +
90734 +endmenu
90735 +
90736 +menu "Non-executable pages"
90737 + depends on PAX
90738 +
90739 +config PAX_NOEXEC
90740 + bool "Enforce non-executable pages"
90741 + default y if GRKERNSEC_CONFIG_AUTO
90742 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
90743 + help
90744 + By design some architectures do not allow for protecting memory
90745 + pages against execution or even if they do, Linux does not make
90746 + use of this feature. In practice this means that if a page is
90747 + readable (such as the stack or heap) it is also executable.
90748 +
90749 + There is a well known exploit technique that makes use of this
90750 + fact and a common programming mistake where an attacker can
90751 + introduce code of his choice somewhere in the attacked program's
90752 + memory (typically the stack or the heap) and then execute it.
90753 +
90754 + If the attacked program was running with different (typically
90755 + higher) privileges than that of the attacker, then he can elevate
90756 + his own privilege level (e.g. get a root shell, write to files for
90757 + which he does not have write access to, etc).
90758 +
90759 + Enabling this option will let you choose from various features
90760 + that prevent the injection and execution of 'foreign' code in
90761 + a program.
90762 +
90763 + This will also break programs that rely on the old behaviour and
90764 + expect that dynamically allocated memory via the malloc() family
90765 + of functions is executable (which it is not). Notable examples
90766 + are the XFree86 4.x server, the java runtime and wine.
90767 +
90768 +config PAX_PAGEEXEC
90769 + bool "Paging based non-executable pages"
90770 + default y if GRKERNSEC_CONFIG_AUTO
90771 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
90772 + select S390_SWITCH_AMODE if S390
90773 + select S390_EXEC_PROTECT if S390
90774 + select ARCH_TRACK_EXEC_LIMIT if X86_32
90775 + help
90776 + This implementation is based on the paging feature of the CPU.
90777 + On i386 without hardware non-executable bit support there is a
90778 + variable but usually low performance impact, however on Intel's
90779 + P4 core based CPUs it is very high so you should not enable this
90780 + for kernels meant to be used on such CPUs.
90781 +
90782 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
90783 + with hardware non-executable bit support there is no performance
90784 + impact, on ppc the impact is negligible.
90785 +
90786 + Note that several architectures require various emulations due to
90787 + badly designed userland ABIs, this will cause a performance impact
90788 + but will disappear as soon as userland is fixed. For example, ppc
90789 + userland MUST have been built with secure-plt by a recent toolchain.
90790 +
90791 +config PAX_SEGMEXEC
90792 + bool "Segmentation based non-executable pages"
90793 + default y if GRKERNSEC_CONFIG_AUTO
90794 + depends on PAX_NOEXEC && X86_32
90795 + help
90796 + This implementation is based on the segmentation feature of the
90797 + CPU and has a very small performance impact, however applications
90798 + will be limited to a 1.5 GB address space instead of the normal
90799 + 3 GB.
90800 +
90801 +config PAX_EMUTRAMP
90802 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
90803 + default y if PARISC
90804 + help
90805 + There are some programs and libraries that for one reason or
90806 + another attempt to execute special small code snippets from
90807 + non-executable memory pages. Most notable examples are the
90808 + signal handler return code generated by the kernel itself and
90809 + the GCC trampolines.
90810 +
90811 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
90812 + such programs will no longer work under your kernel.
90813 +
90814 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
90815 + utilities to enable trampoline emulation for the affected programs
90816 + yet still have the protection provided by the non-executable pages.
90817 +
90818 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
90819 + your system will not even boot.
90820 +
90821 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
90822 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
90823 + for the affected files.
90824 +
90825 + NOTE: enabling this feature *may* open up a loophole in the
90826 + protection provided by non-executable pages that an attacker
90827 + could abuse. Therefore the best solution is to not have any
90828 + files on your system that would require this option. This can
90829 + be achieved by not using libc5 (which relies on the kernel
90830 + signal handler return code) and not using or rewriting programs
90831 + that make use of the nested function implementation of GCC.
90832 + Skilled users can just fix GCC itself so that it implements
90833 + nested function calls in a way that does not interfere with PaX.
90834 +
90835 +config PAX_EMUSIGRT
90836 + bool "Automatically emulate sigreturn trampolines"
90837 + depends on PAX_EMUTRAMP && PARISC
90838 + default y
90839 + help
90840 + Enabling this option will have the kernel automatically detect
90841 + and emulate signal return trampolines executing on the stack
90842 + that would otherwise lead to task termination.
90843 +
90844 + This solution is intended as a temporary one for users with
90845 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
90846 + Modula-3 runtime, etc) or executables linked to such, basically
90847 + everything that does not specify its own SA_RESTORER function in
90848 + normal executable memory like glibc 2.1+ does.
90849 +
90850 + On parisc you MUST enable this option, otherwise your system will
90851 + not even boot.
90852 +
90853 + NOTE: this feature cannot be disabled on a per executable basis
90854 + and since it *does* open up a loophole in the protection provided
90855 + by non-executable pages, the best solution is to not have any
90856 + files on your system that would require this option.
90857 +
90858 +config PAX_MPROTECT
90859 + bool "Restrict mprotect()"
90860 + default y if GRKERNSEC_CONFIG_AUTO
90861 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
90862 + help
90863 + Enabling this option will prevent programs from
90864 + - changing the executable status of memory pages that were
90865 + not originally created as executable,
90866 + - making read-only executable pages writable again,
90867 + - creating executable pages from anonymous memory,
90868 + - making read-only-after-relocations (RELRO) data pages writable again.
90869 +
90870 + You should say Y here to complete the protection provided by
90871 + the enforcement of non-executable pages.
90872 +
90873 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
90874 + this feature on a per file basis.
90875 +
90876 +config PAX_MPROTECT_COMPAT
90877 + bool "Use legacy/compat protection demoting (read help)"
90878 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
90879 + depends on PAX_MPROTECT
90880 + help
90881 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
90882 + by sending the proper error code to the application. For some broken
90883 + userland, this can cause problems with Python or other applications. The
90884 + current implementation however allows for applications like clamav to
90885 + detect if JIT compilation/execution is allowed and to fall back gracefully
90886 + to an interpreter-based mode if it does not. While we encourage everyone
90887 + to use the current implementation as-is and push upstream to fix broken
90888 + userland (note that the RWX logging option can assist with this), in some
90889 + environments this may not be possible. Having to disable MPROTECT
90890 + completely on certain binaries reduces the security benefit of PaX,
90891 + so this option is provided for those environments to revert to the old
90892 + behavior.
90893 +
90894 +config PAX_ELFRELOCS
90895 + bool "Allow ELF text relocations (read help)"
90896 + depends on PAX_MPROTECT
90897 + default n
90898 + help
90899 + Non-executable pages and mprotect() restrictions are effective
90900 + in preventing the introduction of new executable code into an
90901 + attacked task's address space. There remain only two venues
90902 + for this kind of attack: if the attacker can execute already
90903 + existing code in the attacked task then he can either have it
90904 + create and mmap() a file containing his code or have it mmap()
90905 + an already existing ELF library that does not have position
90906 + independent code in it and use mprotect() on it to make it
90907 + writable and copy his code there. While protecting against
90908 + the former approach is beyond PaX, the latter can be prevented
90909 + by having only PIC ELF libraries on one's system (which do not
90910 + need to relocate their code). If you are sure this is your case,
90911 + as is the case with all modern Linux distributions, then leave
90912 + this option disabled. You should say 'n' here.
90913 +
90914 +config PAX_ETEXECRELOCS
90915 + bool "Allow ELF ET_EXEC text relocations"
90916 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
90917 + select PAX_ELFRELOCS
90918 + default y
90919 + help
90920 + On some architectures there are incorrectly created applications
90921 + that require text relocations and would not work without enabling
90922 + this option. If you are an alpha, ia64 or parisc user, you should
90923 + enable this option and disable it once you have made sure that
90924 + none of your applications need it.
90925 +
90926 +config PAX_EMUPLT
90927 + bool "Automatically emulate ELF PLT"
90928 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
90929 + default y
90930 + help
90931 + Enabling this option will have the kernel automatically detect
90932 + and emulate the Procedure Linkage Table entries in ELF files.
90933 + On some architectures such entries are in writable memory, and
90934 + become non-executable leading to task termination. Therefore
90935 + it is mandatory that you enable this option on alpha, parisc,
90936 + sparc and sparc64, otherwise your system would not even boot.
90937 +
90938 + NOTE: this feature *does* open up a loophole in the protection
90939 + provided by the non-executable pages, therefore the proper
90940 + solution is to modify the toolchain to produce a PLT that does
90941 + not need to be writable.
90942 +
90943 +config PAX_DLRESOLVE
90944 + bool 'Emulate old glibc resolver stub'
90945 + depends on PAX_EMUPLT && SPARC
90946 + default n
90947 + help
90948 + This option is needed if userland has an old glibc (before 2.4)
90949 + that puts a 'save' instruction into the runtime generated resolver
90950 + stub that needs special emulation.
90951 +
90952 +config PAX_KERNEXEC
90953 + bool "Enforce non-executable kernel pages"
90954 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
90955 + depends on (X86 || (ARM && (CPU_V6 || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
90956 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
90957 + select PAX_KERNEXEC_PLUGIN if X86_64
90958 + help
90959 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
90960 + that is, enabling this option will make it harder to inject
90961 + and execute 'foreign' code in kernel memory itself.
90962 +
90963 +choice
90964 + prompt "Return Address Instrumentation Method"
90965 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
90966 + depends on PAX_KERNEXEC_PLUGIN
90967 + help
90968 + Select the method used to instrument function pointer dereferences.
90969 + Note that binary modules cannot be instrumented by this approach.
90970 +
90971 + Note that the implementation requires a gcc with plugin support,
90972 + i.e., gcc 4.5 or newer. You may need to install the supporting
90973 + headers explicitly in addition to the normal gcc package.
90974 +
90975 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
90976 + bool "bts"
90977 + help
90978 + This method is compatible with binary only modules but has
90979 + a higher runtime overhead.
90980 +
90981 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
90982 + bool "or"
90983 + depends on !PARAVIRT
90984 + help
90985 + This method is incompatible with binary only modules but has
90986 + a lower runtime overhead.
90987 +endchoice
90988 +
90989 +config PAX_KERNEXEC_PLUGIN_METHOD
90990 + string
90991 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
90992 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
90993 + default ""
90994 +
90995 +config PAX_KERNEXEC_MODULE_TEXT
90996 + int "Minimum amount of memory reserved for module code"
90997 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
90998 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
90999 + depends on PAX_KERNEXEC && X86_32 && MODULES
91000 + help
91001 + Due to implementation details the kernel must reserve a fixed
91002 + amount of memory for module code at compile time that cannot be
91003 + changed at runtime. Here you can specify the minimum amount
91004 + in MB that will be reserved. Due to the same implementation
91005 + details this size will always be rounded up to the next 2/4 MB
91006 + boundary (depends on PAE) so the actually available memory for
91007 + module code will usually be more than this minimum.
91008 +
91009 + The default 4 MB should be enough for most users but if you have
91010 + an excessive number of modules (e.g., most distribution configs
91011 + compile many drivers as modules) or use huge modules such as
91012 + nvidia's kernel driver, you will need to adjust this amount.
91013 + A good rule of thumb is to look at your currently loaded kernel
91014 + modules and add up their sizes.
91015 +
91016 +endmenu
91017 +
91018 +menu "Address Space Layout Randomization"
91019 + depends on PAX
91020 +
91021 +config PAX_ASLR
91022 + bool "Address Space Layout Randomization"
91023 + default y if GRKERNSEC_CONFIG_AUTO
91024 + help
91025 + Many if not most exploit techniques rely on the knowledge of
91026 + certain addresses in the attacked program. The following options
91027 + will allow the kernel to apply a certain amount of randomization
91028 + to specific parts of the program thereby forcing an attacker to
91029 + guess them in most cases. Any failed guess will most likely crash
91030 + the attacked program which allows the kernel to detect such attempts
91031 + and react on them. PaX itself provides no reaction mechanisms,
91032 + instead it is strongly encouraged that you make use of Nergal's
91033 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
91034 + (http://www.grsecurity.net/) built-in crash detection features or
91035 + develop one yourself.
91036 +
91037 + By saying Y here you can choose to randomize the following areas:
91038 + - top of the task's kernel stack
91039 + - top of the task's userland stack
91040 + - base address for mmap() requests that do not specify one
91041 + (this includes all libraries)
91042 + - base address of the main executable
91043 +
91044 + It is strongly recommended to say Y here as address space layout
91045 + randomization has negligible impact on performance yet it provides
91046 + a very effective protection.
91047 +
91048 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
91049 + this feature on a per file basis.
91050 +
91051 +config PAX_RANDKSTACK
91052 + bool "Randomize kernel stack base"
91053 + default y if GRKERNSEC_CONFIG_AUTO
91054 + depends on X86_TSC && X86
91055 + help
91056 + By saying Y here the kernel will randomize every task's kernel
91057 + stack on every system call. This will not only force an attacker
91058 + to guess it but also prevent him from making use of possible
91059 + leaked information about it.
91060 +
91061 + Since the kernel stack is a rather scarce resource, randomization
91062 + may cause unexpected stack overflows, therefore you should very
91063 + carefully test your system. Note that once enabled in the kernel
91064 + configuration, this feature cannot be disabled on a per file basis.
91065 +
91066 +config PAX_RANDUSTACK
91067 + bool "Randomize user stack base"
91068 + default y if GRKERNSEC_CONFIG_AUTO
91069 + depends on PAX_ASLR
91070 + help
91071 + By saying Y here the kernel will randomize every task's userland
91072 + stack. The randomization is done in two steps where the second
91073 + one may apply a big amount of shift to the top of the stack and
91074 + cause problems for programs that want to use lots of memory (more
91075 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
91076 + For this reason the second step can be controlled by 'chpax' or
91077 + 'paxctl' on a per file basis.
91078 +
91079 +config PAX_RANDMMAP
91080 + bool "Randomize mmap() base"
91081 + default y if GRKERNSEC_CONFIG_AUTO
91082 + depends on PAX_ASLR
91083 + help
91084 + By saying Y here the kernel will use a randomized base address for
91085 + mmap() requests that do not specify one themselves. As a result
91086 + all dynamically loaded libraries will appear at random addresses
91087 + and therefore be harder to exploit by a technique where an attacker
91088 + attempts to execute library code for his purposes (e.g. spawn a
91089 + shell from an exploited program that is running at an elevated
91090 + privilege level).
91091 +
91092 + Furthermore, if a program is relinked as a dynamic ELF file, its
91093 + base address will be randomized as well, completing the full
91094 + randomization of the address space layout. Attacking such programs
91095 + becomes a guess game. You can find an example of doing this at
91096 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
91097 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
91098 +
91099 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
91100 + feature on a per file basis.
91101 +
91102 +endmenu
91103 +
91104 +menu "Miscellaneous hardening features"
91105 +
91106 +config PAX_MEMORY_SANITIZE
91107 + bool "Sanitize all freed memory"
91108 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
91109 + depends on !HIBERNATION
91110 + help
91111 + By saying Y here the kernel will erase memory pages as soon as they
91112 + are freed. This in turn reduces the lifetime of data stored in the
91113 + pages, making it less likely that sensitive information such as
91114 + passwords, cryptographic secrets, etc stay in memory for too long.
91115 +
91116 + This is especially useful for programs whose runtime is short, long
91117 + lived processes and the kernel itself benefit from this as long as
91118 + they operate on whole memory pages and ensure timely freeing of pages
91119 + that may hold sensitive information.
91120 +
91121 + The tradeoff is performance impact, on a single CPU system kernel
91122 + compilation sees a 3% slowdown, other systems and workloads may vary
91123 + and you are advised to test this feature on your expected workload
91124 + before deploying it.
91125 +
91126 + Note that this feature does not protect data stored in live pages,
91127 + e.g., process memory swapped to disk may stay there for a long time.
91128 +
91129 +config PAX_MEMORY_STACKLEAK
91130 + bool "Sanitize kernel stack"
91131 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
91132 + depends on X86
91133 + help
91134 + By saying Y here the kernel will erase the kernel stack before it
91135 + returns from a system call. This in turn reduces the information
91136 + that a kernel stack leak bug can reveal.
91137 +
91138 + Note that such a bug can still leak information that was put on
91139 + the stack by the current system call (the one eventually triggering
91140 + the bug) but traces of earlier system calls on the kernel stack
91141 + cannot leak anymore.
91142 +
91143 + The tradeoff is performance impact: on a single CPU system kernel
91144 + compilation sees a 1% slowdown, other systems and workloads may vary
91145 + and you are advised to test this feature on your expected workload
91146 + before deploying it.
91147 +
91148 + Note that the full feature requires a gcc with plugin support,
91149 + i.e., gcc 4.5 or newer. You may need to install the supporting
91150 + headers explicitly in addition to the normal gcc package. Using
91151 + older gcc versions means that functions with large enough stack
91152 + frames may leave uninitialized memory behind that may be exposed
91153 + to a later syscall leaking the stack.
91154 +
91155 +config PAX_MEMORY_STRUCTLEAK
91156 + bool "Forcibly initialize local variables copied to userland"
91157 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
91158 + help
91159 + By saying Y here the kernel will zero initialize some local
91160 + variables that are going to be copied to userland. This in
91161 + turn prevents unintended information leakage from the kernel
91162 + stack should later code forget to explicitly set all parts of
91163 + the copied variable.
91164 +
91165 + The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
91166 + at a much smaller coverage.
91167 +
91168 + Note that the implementation requires a gcc with plugin support,
91169 + i.e., gcc 4.5 or newer. You may need to install the supporting
91170 + headers explicitly in addition to the normal gcc package.
91171 +
91172 +config PAX_MEMORY_UDEREF
91173 + bool "Prevent invalid userland pointer dereference"
91174 + default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
91175 + depends on (X86 || (ARM && (CPU_V6 || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
91176 + select PAX_PER_CPU_PGD if X86_64
91177 + help
91178 + By saying Y here the kernel will be prevented from dereferencing
91179 + userland pointers in contexts where the kernel expects only kernel
91180 + pointers. This is both a useful runtime debugging feature and a
91181 + security measure that prevents exploiting a class of kernel bugs.
91182 +
91183 + The tradeoff is that some virtualization solutions may experience
91184 + a huge slowdown and therefore you should not enable this feature
91185 + for kernels meant to run in such environments. Whether a given VM
91186 + solution is affected or not is best determined by simply trying it
91187 + out, the performance impact will be obvious right on boot as this
91188 + mechanism engages from very early on. A good rule of thumb is that
91189 + VMs running on CPUs without hardware virtualization support (i.e.,
91190 + the majority of IA-32 CPUs) will likely experience the slowdown.
91191 +
91192 +config PAX_REFCOUNT
91193 + bool "Prevent various kernel object reference counter overflows"
91194 + default y if GRKERNSEC_CONFIG_AUTO
91195 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
91196 + help
91197 + By saying Y here the kernel will detect and prevent overflowing
91198 + various (but not all) kinds of object reference counters. Such
91199 + overflows can normally occur due to bugs only and are often, if
91200 + not always, exploitable.
91201 +
91202 + The tradeoff is that data structures protected by an overflowed
91203 + refcount will never be freed and therefore will leak memory. Note
91204 + that this leak also happens even without this protection but in
91205 + that case the overflow can eventually trigger the freeing of the
91206 + data structure while it is still being used elsewhere, resulting
91207 + in the exploitable situation that this feature prevents.
91208 +
91209 + Since this has a negligible performance impact, you should enable
91210 + this feature.
91211 +
91212 +config PAX_CONSTIFY_PLUGIN
91213 + bool "Automatically constify eligible structures"
91214 + default y
91215 + depends on !UML && PAX_KERNEXEC
91216 + help
91217 + By saying Y here the compiler will automatically constify a class
91218 + of types that contain only function pointers. This reduces the
91219 + kernel's attack surface and also produces a better memory layout.
91220 +
91221 + Note that the implementation requires a gcc with plugin support,
91222 + i.e., gcc 4.5 or newer. You may need to install the supporting
91223 + headers explicitly in addition to the normal gcc package.
91224 +
91225 + Note that if some code really has to modify constified variables
91226 + then the source code will have to be patched to allow it. Examples
91227 + can be found in PaX itself (the no_const attribute) and for some
91228 + out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
91229 +
91230 +config PAX_USERCOPY
91231 + bool "Harden heap object copies between kernel and userland"
91232 + default y if GRKERNSEC_CONFIG_AUTO
91233 + depends on ARM || IA64 || PPC || SPARC || X86
91234 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
91235 + select PAX_USERCOPY_SLABS
91236 + help
91237 + By saying Y here the kernel will enforce the size of heap objects
91238 + when they are copied in either direction between the kernel and
91239 + userland, even if only a part of the heap object is copied.
91240 +
91241 + Specifically, this checking prevents information leaking from the
91242 + kernel heap during kernel to userland copies (if the kernel heap
91243 + object is otherwise fully initialized) and prevents kernel heap
91244 + overflows during userland to kernel copies.
91245 +
91246 + Note that the current implementation provides the strictest bounds
91247 + checks for the SLUB allocator.
91248 +
91249 + Enabling this option also enables per-slab cache protection against
91250 + data in a given cache being copied into/out of via userland
91251 + accessors. Though the whitelist of regions will be reduced over
91252 + time, it notably protects important data structures like task structs.
91253 +
91254 + If frame pointers are enabled on x86, this option will also restrict
91255 + copies into and out of the kernel stack to local variables within a
91256 + single frame.
91257 +
91258 + Since this has a negligible performance impact, you should enable
91259 + this feature.
91260 +
91261 +config PAX_USERCOPY_DEBUG
91262 + bool
91263 + depends on X86 && PAX_USERCOPY
91264 + default n
91265 +
91266 +config PAX_SIZE_OVERFLOW
91267 + bool "Prevent various integer overflows in function size parameters"
91268 + default y if GRKERNSEC_CONFIG_AUTO
91269 + depends on X86
91270 + help
91271 + By saying Y here the kernel recomputes expressions of function
91272 + arguments marked by a size_overflow attribute with double integer
91273 + precision (DImode/TImode for 32/64 bit integer types).
91274 +
91275 + The recomputed argument is checked against TYPE_MAX and an event
91276 + is logged on overflow and the triggering process is killed.
91277 +
91278 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
91279 +
91280 + Note that the implementation requires a gcc with plugin support,
91281 + i.e., gcc 4.5 or newer. You may need to install the supporting
91282 + headers explicitly in addition to the normal gcc package.
91283 +
91284 +config PAX_LATENT_ENTROPY
91285 + bool "Generate some entropy during boot"
91286 + default y if GRKERNSEC_CONFIG_AUTO
91287 + help
91288 + By saying Y here the kernel will instrument early boot code to
91289 + extract some entropy from both original and artificially created
91290 + program state. This will help especially embedded systems where
91291 + there is little 'natural' source of entropy normally. The cost
91292 + is some slowdown of the boot process.
91293 +
91294 + When pax_extra_latent_entropy is passed on the kernel command line,
91295 + entropy will be extracted from up to the first 4GB of RAM while the
91296 + runtime memory allocator is being initialized. This costs even more
91297 + slowdown of the boot process.
91298 +
91299 + Note that the implementation requires a gcc with plugin support,
91300 + i.e., gcc 4.5 or newer. You may need to install the supporting
91301 + headers explicitly in addition to the normal gcc package.
91302 +
91303 + Note that entropy extracted this way is not cryptographically
91304 + secure!
91305 +
91306 +endmenu
91307 +
91308 +endmenu
91309 +
91310 +source grsecurity/Kconfig
91311 +
91312 +endmenu
91313 +
91314 +endmenu
91315 +
91316 source security/keys/Kconfig
91317
91318 config SECURITY_DMESG_RESTRICT
91319 @@ -103,7 +1041,7 @@ config INTEL_TXT
91320 config LSM_MMAP_MIN_ADDR
91321 int "Low address space for LSM to protect from user allocation"
91322 depends on SECURITY && SECURITY_SELINUX
91323 - default 32768 if ARM
91324 + default 32768 if ALPHA || ARM || PARISC || SPARC32
91325 default 65536
91326 help
91327 This is the portion of low virtual memory which should be protected
91328 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
91329 index b21830e..a7d1a17 100644
91330 --- a/security/apparmor/lsm.c
91331 +++ b/security/apparmor/lsm.c
91332 @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
91333 return error;
91334 }
91335
91336 -static struct security_operations apparmor_ops = {
91337 +static struct security_operations apparmor_ops __read_only = {
91338 .name = "apparmor",
91339
91340 .ptrace_access_check = apparmor_ptrace_access_check,
91341 diff --git a/security/commoncap.c b/security/commoncap.c
91342 index c44b6fe..932df30 100644
91343 --- a/security/commoncap.c
91344 +++ b/security/commoncap.c
91345 @@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
91346 return 0;
91347 }
91348
91349 +/* returns:
91350 + 1 for suid privilege
91351 + 2 for sgid privilege
91352 + 3 for fscap privilege
91353 +*/
91354 +int is_privileged_binary(const struct dentry *dentry)
91355 +{
91356 + struct cpu_vfs_cap_data capdata;
91357 + struct inode *inode = dentry->d_inode;
91358 +
91359 + if (!inode || S_ISDIR(inode->i_mode))
91360 + return 0;
91361 +
91362 + if (inode->i_mode & S_ISUID)
91363 + return 1;
91364 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
91365 + return 2;
91366 +
91367 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
91368 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
91369 + return 3;
91370 + }
91371 +
91372 + return 0;
91373 +}
91374 +
91375 /*
91376 * Attempt to get the on-exec apply capability sets for an executable file from
91377 * its xattrs and, if present, apply them to the proposed credentials being
91378 @@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
91379 const struct cred *cred = current_cred();
91380 kuid_t root_uid = make_kuid(cred->user_ns, 0);
91381
91382 + if (gr_acl_enable_at_secure())
91383 + return 1;
91384 +
91385 if (!uid_eq(cred->uid, root_uid)) {
91386 if (bprm->cap_effective)
91387 return 1;
91388 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
91389 index a41c9c1..83da6dd 100644
91390 --- a/security/integrity/ima/ima.h
91391 +++ b/security/integrity/ima/ima.h
91392 @@ -97,8 +97,8 @@ int ima_init_crypto(void);
91393 extern spinlock_t ima_queue_lock;
91394
91395 struct ima_h_table {
91396 - atomic_long_t len; /* number of stored measurements in the list */
91397 - atomic_long_t violations;
91398 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
91399 + atomic_long_unchecked_t violations;
91400 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
91401 };
91402 extern struct ima_h_table ima_htable;
91403 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
91404 index 1c03e8f1..398a941 100644
91405 --- a/security/integrity/ima/ima_api.c
91406 +++ b/security/integrity/ima/ima_api.c
91407 @@ -79,7 +79,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
91408 int result;
91409
91410 /* can overflow, only indicator */
91411 - atomic_long_inc(&ima_htable.violations);
91412 + atomic_long_inc_unchecked(&ima_htable.violations);
91413
91414 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
91415 if (!entry) {
91416 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
91417 index 38477c9..87a60c7 100644
91418 --- a/security/integrity/ima/ima_fs.c
91419 +++ b/security/integrity/ima/ima_fs.c
91420 @@ -28,12 +28,12 @@
91421 static int valid_policy = 1;
91422 #define TMPBUFLEN 12
91423 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
91424 - loff_t *ppos, atomic_long_t *val)
91425 + loff_t *ppos, atomic_long_unchecked_t *val)
91426 {
91427 char tmpbuf[TMPBUFLEN];
91428 ssize_t len;
91429
91430 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
91431 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
91432 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
91433 }
91434
91435 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
91436 index ff63fe0..809cd96 100644
91437 --- a/security/integrity/ima/ima_queue.c
91438 +++ b/security/integrity/ima/ima_queue.c
91439 @@ -80,7 +80,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
91440 INIT_LIST_HEAD(&qe->later);
91441 list_add_tail_rcu(&qe->later, &ima_measurements);
91442
91443 - atomic_long_inc(&ima_htable.len);
91444 + atomic_long_inc_unchecked(&ima_htable.len);
91445 key = ima_hash_key(entry->digest);
91446 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
91447 return 0;
91448 diff --git a/security/keys/compat.c b/security/keys/compat.c
91449 index d65fa7f..cbfe366 100644
91450 --- a/security/keys/compat.c
91451 +++ b/security/keys/compat.c
91452 @@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
91453 if (ret == 0)
91454 goto no_payload_free;
91455
91456 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
91457 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
91458 err:
91459 if (iov != iovstack)
91460 kfree(iov);
91461 diff --git a/security/keys/key.c b/security/keys/key.c
91462 index 8fb7c7b..ba3610d 100644
91463 --- a/security/keys/key.c
91464 +++ b/security/keys/key.c
91465 @@ -284,7 +284,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
91466
91467 atomic_set(&key->usage, 1);
91468 init_rwsem(&key->sem);
91469 - lockdep_set_class(&key->sem, &type->lock_class);
91470 + lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class);
91471 key->type = type;
91472 key->user = user;
91473 key->quotalen = quotalen;
91474 @@ -1032,7 +1032,9 @@ int register_key_type(struct key_type *ktype)
91475 struct key_type *p;
91476 int ret;
91477
91478 - memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
91479 + pax_open_kernel();
91480 + memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class));
91481 + pax_close_kernel();
91482
91483 ret = -EEXIST;
91484 down_write(&key_types_sem);
91485 @@ -1044,7 +1046,7 @@ int register_key_type(struct key_type *ktype)
91486 }
91487
91488 /* store the type */
91489 - list_add(&ktype->link, &key_types_list);
91490 + pax_list_add((struct list_head *)&ktype->link, &key_types_list);
91491
91492 pr_notice("Key type %s registered\n", ktype->name);
91493 ret = 0;
91494 @@ -1066,7 +1068,7 @@ EXPORT_SYMBOL(register_key_type);
91495 void unregister_key_type(struct key_type *ktype)
91496 {
91497 down_write(&key_types_sem);
91498 - list_del_init(&ktype->link);
91499 + pax_list_del_init((struct list_head *)&ktype->link);
91500 downgrade_write(&key_types_sem);
91501 key_gc_keytype(ktype);
91502 pr_notice("Key type %s unregistered\n", ktype->name);
91503 @@ -1084,10 +1086,10 @@ void __init key_init(void)
91504 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
91505
91506 /* add the special key types */
91507 - list_add_tail(&key_type_keyring.link, &key_types_list);
91508 - list_add_tail(&key_type_dead.link, &key_types_list);
91509 - list_add_tail(&key_type_user.link, &key_types_list);
91510 - list_add_tail(&key_type_logon.link, &key_types_list);
91511 + pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
91512 + pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
91513 + pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
91514 + pax_list_add_tail((struct list_head *)&key_type_logon.link, &key_types_list);
91515
91516 /* record the root user tracking */
91517 rb_link_node(&root_key_user.node,
91518 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
91519 index 4b5c948..2054dc1 100644
91520 --- a/security/keys/keyctl.c
91521 +++ b/security/keys/keyctl.c
91522 @@ -986,7 +986,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
91523 /*
91524 * Copy the iovec data from userspace
91525 */
91526 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
91527 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
91528 unsigned ioc)
91529 {
91530 for (; ioc > 0; ioc--) {
91531 @@ -1008,7 +1008,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
91532 * If successful, 0 will be returned.
91533 */
91534 long keyctl_instantiate_key_common(key_serial_t id,
91535 - const struct iovec *payload_iov,
91536 + const struct iovec __user *payload_iov,
91537 unsigned ioc,
91538 size_t plen,
91539 key_serial_t ringid)
91540 @@ -1103,7 +1103,7 @@ long keyctl_instantiate_key(key_serial_t id,
91541 [0].iov_len = plen
91542 };
91543
91544 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
91545 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
91546 }
91547
91548 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
91549 @@ -1136,7 +1136,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
91550 if (ret == 0)
91551 goto no_payload_free;
91552
91553 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
91554 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
91555 err:
91556 if (iov != iovstack)
91557 kfree(iov);
91558 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
91559 index 6ece7f2..ecdb55c 100644
91560 --- a/security/keys/keyring.c
91561 +++ b/security/keys/keyring.c
91562 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
91563 ret = -EFAULT;
91564
91565 for (loop = 0; loop < klist->nkeys; loop++) {
91566 + key_serial_t serial;
91567 key = rcu_deref_link_locked(klist, loop,
91568 keyring);
91569 + serial = key->serial;
91570
91571 tmp = sizeof(key_serial_t);
91572 if (tmp > buflen)
91573 tmp = buflen;
91574
91575 - if (copy_to_user(buffer,
91576 - &key->serial,
91577 - tmp) != 0)
91578 + if (copy_to_user(buffer, &serial, tmp))
91579 goto error;
91580
91581 buflen -= tmp;
91582 diff --git a/security/min_addr.c b/security/min_addr.c
91583 index f728728..6457a0c 100644
91584 --- a/security/min_addr.c
91585 +++ b/security/min_addr.c
91586 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
91587 */
91588 static void update_mmap_min_addr(void)
91589 {
91590 +#ifndef SPARC
91591 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
91592 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
91593 mmap_min_addr = dac_mmap_min_addr;
91594 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
91595 #else
91596 mmap_min_addr = dac_mmap_min_addr;
91597 #endif
91598 +#endif
91599 }
91600
91601 /*
91602 diff --git a/security/security.c b/security/security.c
91603 index 03f248b..5710c33 100644
91604 --- a/security/security.c
91605 +++ b/security/security.c
91606 @@ -20,6 +20,7 @@
91607 #include <linux/ima.h>
91608 #include <linux/evm.h>
91609 #include <linux/fsnotify.h>
91610 +#include <linux/mm.h>
91611 #include <linux/mman.h>
91612 #include <linux/mount.h>
91613 #include <linux/personality.h>
91614 @@ -32,8 +33,8 @@
91615 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
91616 CONFIG_DEFAULT_SECURITY;
91617
91618 -static struct security_operations *security_ops;
91619 -static struct security_operations default_security_ops = {
91620 +static struct security_operations *security_ops __read_only;
91621 +static struct security_operations default_security_ops __read_only = {
91622 .name = "default",
91623 };
91624
91625 @@ -74,7 +75,9 @@ int __init security_init(void)
91626
91627 void reset_security_ops(void)
91628 {
91629 + pax_open_kernel();
91630 security_ops = &default_security_ops;
91631 + pax_close_kernel();
91632 }
91633
91634 /* Save user chosen LSM */
91635 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
91636 index 7171a95..c35e879 100644
91637 --- a/security/selinux/hooks.c
91638 +++ b/security/selinux/hooks.c
91639 @@ -96,8 +96,6 @@
91640
91641 #define NUM_SEL_MNT_OPTS 5
91642
91643 -extern struct security_operations *security_ops;
91644 -
91645 /* SECMARK reference count */
91646 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
91647
91648 @@ -5498,7 +5496,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
91649
91650 #endif
91651
91652 -static struct security_operations selinux_ops = {
91653 +static struct security_operations selinux_ops __read_only = {
91654 .name = "selinux",
91655
91656 .ptrace_access_check = selinux_ptrace_access_check,
91657 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
91658 index 65f67cb..3f141ef 100644
91659 --- a/security/selinux/include/xfrm.h
91660 +++ b/security/selinux/include/xfrm.h
91661 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
91662
91663 static inline void selinux_xfrm_notify_policyload(void)
91664 {
91665 - atomic_inc(&flow_cache_genid);
91666 + atomic_inc_unchecked(&flow_cache_genid);
91667 rt_genid_bump(&init_net);
91668 }
91669 #else
91670 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
91671 index fa64740..bc95b74 100644
91672 --- a/security/smack/smack_lsm.c
91673 +++ b/security/smack/smack_lsm.c
91674 @@ -3392,7 +3392,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
91675 return 0;
91676 }
91677
91678 -struct security_operations smack_ops = {
91679 +struct security_operations smack_ops __read_only = {
91680 .name = "smack",
91681
91682 .ptrace_access_check = smack_ptrace_access_check,
91683 diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
91684 index 390c646..f2f8db3 100644
91685 --- a/security/tomoyo/mount.c
91686 +++ b/security/tomoyo/mount.c
91687 @@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
91688 type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
91689 need_dev = -1; /* dev_name is a directory */
91690 } else {
91691 + if (!capable(CAP_SYS_ADMIN)) {
91692 + error = -EPERM;
91693 + goto out;
91694 + }
91695 fstype = get_fs_type(type);
91696 if (!fstype) {
91697 error = -ENODEV;
91698 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
91699 index a2ee362..5754f34 100644
91700 --- a/security/tomoyo/tomoyo.c
91701 +++ b/security/tomoyo/tomoyo.c
91702 @@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
91703 * tomoyo_security_ops is a "struct security_operations" which is used for
91704 * registering TOMOYO.
91705 */
91706 -static struct security_operations tomoyo_security_ops = {
91707 +static struct security_operations tomoyo_security_ops __read_only = {
91708 .name = "tomoyo",
91709 .cred_alloc_blank = tomoyo_cred_alloc_blank,
91710 .cred_prepare = tomoyo_cred_prepare,
91711 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
91712 index 20ef514..4182bed 100644
91713 --- a/security/yama/Kconfig
91714 +++ b/security/yama/Kconfig
91715 @@ -1,6 +1,6 @@
91716 config SECURITY_YAMA
91717 bool "Yama support"
91718 - depends on SECURITY
91719 + depends on SECURITY && !GRKERNSEC
91720 select SECURITYFS
91721 select SECURITY_PATH
91722 default n
91723 diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
91724 index 13c88fbc..f8c115e 100644
91725 --- a/security/yama/yama_lsm.c
91726 +++ b/security/yama/yama_lsm.c
91727 @@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_struct *parent)
91728 }
91729
91730 #ifndef CONFIG_SECURITY_YAMA_STACKED
91731 -static struct security_operations yama_ops = {
91732 +static struct security_operations yama_ops __read_only = {
91733 .name = "yama",
91734
91735 .ptrace_access_check = yama_ptrace_access_check,
91736 @@ -376,28 +376,24 @@ static struct security_operations yama_ops = {
91737 #endif
91738
91739 #ifdef CONFIG_SYSCTL
91740 +static int zero __read_only;
91741 +static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH;
91742 +
91743 static int yama_dointvec_minmax(struct ctl_table *table, int write,
91744 void __user *buffer, size_t *lenp, loff_t *ppos)
91745 {
91746 - int rc;
91747 + ctl_table_no_const yama_table;
91748
91749 if (write && !capable(CAP_SYS_PTRACE))
91750 return -EPERM;
91751
91752 - rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
91753 - if (rc)
91754 - return rc;
91755 -
91756 + yama_table = *table;
91757 /* Lock the max value if it ever gets set. */
91758 - if (write && *(int *)table->data == *(int *)table->extra2)
91759 - table->extra1 = table->extra2;
91760 -
91761 - return rc;
91762 + if (ptrace_scope == max_scope)
91763 + yama_table.extra1 = &max_scope;
91764 + return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos);
91765 }
91766
91767 -static int zero;
91768 -static int max_scope = YAMA_SCOPE_NO_ATTACH;
91769 -
91770 struct ctl_path yama_sysctl_path[] = {
91771 { .procname = "kernel", },
91772 { .procname = "yama", },
91773 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
91774 index 4cedc69..e59d8a3 100644
91775 --- a/sound/aoa/codecs/onyx.c
91776 +++ b/sound/aoa/codecs/onyx.c
91777 @@ -54,7 +54,7 @@ struct onyx {
91778 spdif_locked:1,
91779 analog_locked:1,
91780 original_mute:2;
91781 - int open_count;
91782 + local_t open_count;
91783 struct codec_info *codec_info;
91784
91785 /* mutex serializes concurrent access to the device
91786 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
91787 struct onyx *onyx = cii->codec_data;
91788
91789 mutex_lock(&onyx->mutex);
91790 - onyx->open_count++;
91791 + local_inc(&onyx->open_count);
91792 mutex_unlock(&onyx->mutex);
91793
91794 return 0;
91795 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
91796 struct onyx *onyx = cii->codec_data;
91797
91798 mutex_lock(&onyx->mutex);
91799 - onyx->open_count--;
91800 - if (!onyx->open_count)
91801 + if (local_dec_and_test(&onyx->open_count))
91802 onyx->spdif_locked = onyx->analog_locked = 0;
91803 mutex_unlock(&onyx->mutex);
91804
91805 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
91806 index ffd2025..df062c9 100644
91807 --- a/sound/aoa/codecs/onyx.h
91808 +++ b/sound/aoa/codecs/onyx.h
91809 @@ -11,6 +11,7 @@
91810 #include <linux/i2c.h>
91811 #include <asm/pmac_low_i2c.h>
91812 #include <asm/prom.h>
91813 +#include <asm/local.h>
91814
91815 /* PCM3052 register definitions */
91816
91817 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
91818 index 4c1cc51..16040040 100644
91819 --- a/sound/core/oss/pcm_oss.c
91820 +++ b/sound/core/oss/pcm_oss.c
91821 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
91822 if (in_kernel) {
91823 mm_segment_t fs;
91824 fs = snd_enter_user();
91825 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
91826 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
91827 snd_leave_user(fs);
91828 } else {
91829 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
91830 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
91831 }
91832 if (ret != -EPIPE && ret != -ESTRPIPE)
91833 break;
91834 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
91835 if (in_kernel) {
91836 mm_segment_t fs;
91837 fs = snd_enter_user();
91838 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
91839 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
91840 snd_leave_user(fs);
91841 } else {
91842 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
91843 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
91844 }
91845 if (ret == -EPIPE) {
91846 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
91847 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
91848 struct snd_pcm_plugin_channel *channels;
91849 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
91850 if (!in_kernel) {
91851 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
91852 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
91853 return -EFAULT;
91854 buf = runtime->oss.buffer;
91855 }
91856 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
91857 }
91858 } else {
91859 tmp = snd_pcm_oss_write2(substream,
91860 - (const char __force *)buf,
91861 + (const char __force_kernel *)buf,
91862 runtime->oss.period_bytes, 0);
91863 if (tmp <= 0)
91864 goto err;
91865 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
91866 struct snd_pcm_runtime *runtime = substream->runtime;
91867 snd_pcm_sframes_t frames, frames1;
91868 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
91869 - char __user *final_dst = (char __force __user *)buf;
91870 + char __user *final_dst = (char __force_user *)buf;
91871 if (runtime->oss.plugin_first) {
91872 struct snd_pcm_plugin_channel *channels;
91873 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
91874 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
91875 xfer += tmp;
91876 runtime->oss.buffer_used -= tmp;
91877 } else {
91878 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
91879 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
91880 runtime->oss.period_bytes, 0);
91881 if (tmp <= 0)
91882 goto err;
91883 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
91884 size1);
91885 size1 /= runtime->channels; /* frames */
91886 fs = snd_enter_user();
91887 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
91888 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
91889 snd_leave_user(fs);
91890 }
91891 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
91892 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
91893 index af49721..e85058e 100644
91894 --- a/sound/core/pcm_compat.c
91895 +++ b/sound/core/pcm_compat.c
91896 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
91897 int err;
91898
91899 fs = snd_enter_user();
91900 - err = snd_pcm_delay(substream, &delay);
91901 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
91902 snd_leave_user(fs);
91903 if (err < 0)
91904 return err;
91905 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
91906 index eb560fa..69a4995 100644
91907 --- a/sound/core/pcm_native.c
91908 +++ b/sound/core/pcm_native.c
91909 @@ -2806,11 +2806,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
91910 switch (substream->stream) {
91911 case SNDRV_PCM_STREAM_PLAYBACK:
91912 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
91913 - (void __user *)arg);
91914 + (void __force_user *)arg);
91915 break;
91916 case SNDRV_PCM_STREAM_CAPTURE:
91917 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
91918 - (void __user *)arg);
91919 + (void __force_user *)arg);
91920 break;
91921 default:
91922 result = -EINVAL;
91923 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
91924 index 040c60e..989a19a 100644
91925 --- a/sound/core/seq/seq_device.c
91926 +++ b/sound/core/seq/seq_device.c
91927 @@ -64,7 +64,7 @@ struct ops_list {
91928 int argsize; /* argument size */
91929
91930 /* operators */
91931 - struct snd_seq_dev_ops ops;
91932 + struct snd_seq_dev_ops *ops;
91933
91934 /* registered devices */
91935 struct list_head dev_list; /* list of devices */
91936 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
91937
91938 mutex_lock(&ops->reg_mutex);
91939 /* copy driver operators */
91940 - ops->ops = *entry;
91941 + ops->ops = entry;
91942 ops->driver |= DRIVER_LOADED;
91943 ops->argsize = argsize;
91944
91945 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
91946 dev->name, ops->id, ops->argsize, dev->argsize);
91947 return -EINVAL;
91948 }
91949 - if (ops->ops.init_device(dev) >= 0) {
91950 + if (ops->ops->init_device(dev) >= 0) {
91951 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
91952 ops->num_init_devices++;
91953 } else {
91954 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
91955 dev->name, ops->id, ops->argsize, dev->argsize);
91956 return -EINVAL;
91957 }
91958 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
91959 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
91960 dev->status = SNDRV_SEQ_DEVICE_FREE;
91961 dev->driver_data = NULL;
91962 ops->num_init_devices--;
91963 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
91964 index 4e0dd22..7a1f32c 100644
91965 --- a/sound/drivers/mts64.c
91966 +++ b/sound/drivers/mts64.c
91967 @@ -29,6 +29,7 @@
91968 #include <sound/initval.h>
91969 #include <sound/rawmidi.h>
91970 #include <sound/control.h>
91971 +#include <asm/local.h>
91972
91973 #define CARD_NAME "Miditerminal 4140"
91974 #define DRIVER_NAME "MTS64"
91975 @@ -67,7 +68,7 @@ struct mts64 {
91976 struct pardevice *pardev;
91977 int pardev_claimed;
91978
91979 - int open_count;
91980 + local_t open_count;
91981 int current_midi_output_port;
91982 int current_midi_input_port;
91983 u8 mode[MTS64_NUM_INPUT_PORTS];
91984 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
91985 {
91986 struct mts64 *mts = substream->rmidi->private_data;
91987
91988 - if (mts->open_count == 0) {
91989 + if (local_read(&mts->open_count) == 0) {
91990 /* We don't need a spinlock here, because this is just called
91991 if the device has not been opened before.
91992 So there aren't any IRQs from the device */
91993 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
91994
91995 msleep(50);
91996 }
91997 - ++(mts->open_count);
91998 + local_inc(&mts->open_count);
91999
92000 return 0;
92001 }
92002 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
92003 struct mts64 *mts = substream->rmidi->private_data;
92004 unsigned long flags;
92005
92006 - --(mts->open_count);
92007 - if (mts->open_count == 0) {
92008 + if (local_dec_return(&mts->open_count) == 0) {
92009 /* We need the spinlock_irqsave here because we can still
92010 have IRQs at this point */
92011 spin_lock_irqsave(&mts->lock, flags);
92012 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
92013
92014 msleep(500);
92015
92016 - } else if (mts->open_count < 0)
92017 - mts->open_count = 0;
92018 + } else if (local_read(&mts->open_count) < 0)
92019 + local_set(&mts->open_count, 0);
92020
92021 return 0;
92022 }
92023 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
92024 index b953fb4..1999c01 100644
92025 --- a/sound/drivers/opl4/opl4_lib.c
92026 +++ b/sound/drivers/opl4/opl4_lib.c
92027 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
92028 MODULE_DESCRIPTION("OPL4 driver");
92029 MODULE_LICENSE("GPL");
92030
92031 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
92032 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
92033 {
92034 int timeout = 10;
92035 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
92036 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
92037 index 991018d..8984740 100644
92038 --- a/sound/drivers/portman2x4.c
92039 +++ b/sound/drivers/portman2x4.c
92040 @@ -48,6 +48,7 @@
92041 #include <sound/initval.h>
92042 #include <sound/rawmidi.h>
92043 #include <sound/control.h>
92044 +#include <asm/local.h>
92045
92046 #define CARD_NAME "Portman 2x4"
92047 #define DRIVER_NAME "portman"
92048 @@ -85,7 +86,7 @@ struct portman {
92049 struct pardevice *pardev;
92050 int pardev_claimed;
92051
92052 - int open_count;
92053 + local_t open_count;
92054 int mode[PORTMAN_NUM_INPUT_PORTS];
92055 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
92056 };
92057 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
92058 index ea995af..f1bfa37 100644
92059 --- a/sound/firewire/amdtp.c
92060 +++ b/sound/firewire/amdtp.c
92061 @@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
92062 ptr = s->pcm_buffer_pointer + data_blocks;
92063 if (ptr >= pcm->runtime->buffer_size)
92064 ptr -= pcm->runtime->buffer_size;
92065 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
92066 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
92067
92068 s->pcm_period_pointer += data_blocks;
92069 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
92070 @@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
92071 */
92072 void amdtp_out_stream_update(struct amdtp_out_stream *s)
92073 {
92074 - ACCESS_ONCE(s->source_node_id_field) =
92075 + ACCESS_ONCE_RW(s->source_node_id_field) =
92076 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
92077 }
92078 EXPORT_SYMBOL(amdtp_out_stream_update);
92079 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
92080 index b680c5e..061b7a0 100644
92081 --- a/sound/firewire/amdtp.h
92082 +++ b/sound/firewire/amdtp.h
92083 @@ -139,7 +139,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
92084 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
92085 struct snd_pcm_substream *pcm)
92086 {
92087 - ACCESS_ONCE(s->pcm) = pcm;
92088 + ACCESS_ONCE_RW(s->pcm) = pcm;
92089 }
92090
92091 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
92092 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
92093 index d428ffe..751ef78 100644
92094 --- a/sound/firewire/isight.c
92095 +++ b/sound/firewire/isight.c
92096 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
92097 ptr += count;
92098 if (ptr >= runtime->buffer_size)
92099 ptr -= runtime->buffer_size;
92100 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
92101 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
92102
92103 isight->period_counter += count;
92104 if (isight->period_counter >= runtime->period_size) {
92105 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
92106 if (err < 0)
92107 return err;
92108
92109 - ACCESS_ONCE(isight->pcm_active) = true;
92110 + ACCESS_ONCE_RW(isight->pcm_active) = true;
92111
92112 return 0;
92113 }
92114 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
92115 {
92116 struct isight *isight = substream->private_data;
92117
92118 - ACCESS_ONCE(isight->pcm_active) = false;
92119 + ACCESS_ONCE_RW(isight->pcm_active) = false;
92120
92121 mutex_lock(&isight->mutex);
92122 isight_stop_streaming(isight);
92123 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
92124
92125 switch (cmd) {
92126 case SNDRV_PCM_TRIGGER_START:
92127 - ACCESS_ONCE(isight->pcm_running) = true;
92128 + ACCESS_ONCE_RW(isight->pcm_running) = true;
92129 break;
92130 case SNDRV_PCM_TRIGGER_STOP:
92131 - ACCESS_ONCE(isight->pcm_running) = false;
92132 + ACCESS_ONCE_RW(isight->pcm_running) = false;
92133 break;
92134 default:
92135 return -EINVAL;
92136 diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
92137 index 844a555..985ab83 100644
92138 --- a/sound/firewire/scs1x.c
92139 +++ b/sound/firewire/scs1x.c
92140 @@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
92141 {
92142 struct scs *scs = stream->rmidi->private_data;
92143
92144 - ACCESS_ONCE(scs->output) = up ? stream : NULL;
92145 + ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
92146 if (up) {
92147 scs->output_idle = false;
92148 tasklet_schedule(&scs->tasklet);
92149 @@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
92150 {
92151 struct scs *scs = stream->rmidi->private_data;
92152
92153 - ACCESS_ONCE(scs->input) = up ? stream : NULL;
92154 + ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
92155 }
92156
92157 static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
92158 @@ -457,8 +457,8 @@ static int scs_remove(struct device *dev)
92159
92160 snd_card_disconnect(scs->card);
92161
92162 - ACCESS_ONCE(scs->output) = NULL;
92163 - ACCESS_ONCE(scs->input) = NULL;
92164 + ACCESS_ONCE_RW(scs->output) = NULL;
92165 + ACCESS_ONCE_RW(scs->input) = NULL;
92166
92167 wait_event(scs->idle_wait, scs->output_idle);
92168
92169 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
92170 index 048439a..3be9f6f 100644
92171 --- a/sound/oss/sb_audio.c
92172 +++ b/sound/oss/sb_audio.c
92173 @@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
92174 buf16 = (signed short *)(localbuf + localoffs);
92175 while (c)
92176 {
92177 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
92178 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
92179 if (copy_from_user(lbuf8,
92180 userbuf+useroffs + p,
92181 locallen))
92182 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
92183 index 7d8803a..559f8d0 100644
92184 --- a/sound/oss/swarm_cs4297a.c
92185 +++ b/sound/oss/swarm_cs4297a.c
92186 @@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
92187 {
92188 struct cs4297a_state *s;
92189 u32 pwr, id;
92190 - mm_segment_t fs;
92191 int rval;
92192 #ifndef CONFIG_BCM_CS4297A_CSWARM
92193 u64 cfg;
92194 @@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
92195 if (!rval) {
92196 char *sb1250_duart_present;
92197
92198 +#if 0
92199 + mm_segment_t fs;
92200 fs = get_fs();
92201 set_fs(KERNEL_DS);
92202 -#if 0
92203 val = SOUND_MASK_LINE;
92204 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
92205 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
92206 val = initvol[i].vol;
92207 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
92208 }
92209 + set_fs(fs);
92210 // cs4297a_write_ac97(s, 0x18, 0x0808);
92211 #else
92212 // cs4297a_write_ac97(s, 0x5e, 0x180);
92213 cs4297a_write_ac97(s, 0x02, 0x0808);
92214 cs4297a_write_ac97(s, 0x18, 0x0808);
92215 #endif
92216 - set_fs(fs);
92217
92218 list_add(&s->list, &cs4297a_devs);
92219
92220 diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
92221 index 4631a23..001ae57 100644
92222 --- a/sound/pci/ymfpci/ymfpci.h
92223 +++ b/sound/pci/ymfpci/ymfpci.h
92224 @@ -358,7 +358,7 @@ struct snd_ymfpci {
92225 spinlock_t reg_lock;
92226 spinlock_t voice_lock;
92227 wait_queue_head_t interrupt_sleep;
92228 - atomic_t interrupt_sleep_count;
92229 + atomic_unchecked_t interrupt_sleep_count;
92230 struct snd_info_entry *proc_entry;
92231 const struct firmware *dsp_microcode;
92232 const struct firmware *controller_microcode;
92233 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
92234 index 22056c5..25d3244 100644
92235 --- a/sound/pci/ymfpci/ymfpci_main.c
92236 +++ b/sound/pci/ymfpci/ymfpci_main.c
92237 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
92238 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
92239 break;
92240 }
92241 - if (atomic_read(&chip->interrupt_sleep_count)) {
92242 - atomic_set(&chip->interrupt_sleep_count, 0);
92243 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
92244 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
92245 wake_up(&chip->interrupt_sleep);
92246 }
92247 __end:
92248 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
92249 continue;
92250 init_waitqueue_entry(&wait, current);
92251 add_wait_queue(&chip->interrupt_sleep, &wait);
92252 - atomic_inc(&chip->interrupt_sleep_count);
92253 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
92254 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
92255 remove_wait_queue(&chip->interrupt_sleep, &wait);
92256 }
92257 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
92258 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
92259 spin_unlock(&chip->reg_lock);
92260
92261 - if (atomic_read(&chip->interrupt_sleep_count)) {
92262 - atomic_set(&chip->interrupt_sleep_count, 0);
92263 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
92264 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
92265 wake_up(&chip->interrupt_sleep);
92266 }
92267 }
92268 @@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card,
92269 spin_lock_init(&chip->reg_lock);
92270 spin_lock_init(&chip->voice_lock);
92271 init_waitqueue_head(&chip->interrupt_sleep);
92272 - atomic_set(&chip->interrupt_sleep_count, 0);
92273 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
92274 chip->card = card;
92275 chip->pci = pci;
92276 chip->irq = -1;
92277 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
92278 index 7decbd9..d17d9d0 100644
92279 --- a/sound/soc/fsl/fsl_ssi.c
92280 +++ b/sound/soc/fsl/fsl_ssi.c
92281 @@ -643,7 +643,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
92282 {
92283 struct fsl_ssi_private *ssi_private;
92284 int ret = 0;
92285 - struct device_attribute *dev_attr = NULL;
92286 + device_attribute_no_const *dev_attr = NULL;
92287 struct device_node *np = pdev->dev.of_node;
92288 const char *p, *sprop;
92289 const uint32_t *iprop;
92290 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
92291 new file mode 100644
92292 index 0000000..50f2f2f
92293 --- /dev/null
92294 +++ b/tools/gcc/.gitignore
92295 @@ -0,0 +1 @@
92296 +size_overflow_hash.h
92297 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
92298 new file mode 100644
92299 index 0000000..144dbee
92300 --- /dev/null
92301 +++ b/tools/gcc/Makefile
92302 @@ -0,0 +1,45 @@
92303 +#CC := gcc
92304 +#PLUGIN_SOURCE_FILES := pax_plugin.c
92305 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
92306 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
92307 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
92308 +
92309 +ifeq ($(PLUGINCC),$(HOSTCC))
92310 +HOSTLIBS := hostlibs
92311 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
92312 +else
92313 +HOSTLIBS := hostcxxlibs
92314 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -ggdb -Wno-unused-parameter
92315 +endif
92316 +
92317 +$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
92318 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
92319 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
92320 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
92321 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
92322 +$(HOSTLIBS)-y += colorize_plugin.so
92323 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
92324 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
92325 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
92326 +
92327 +always := $($(HOSTLIBS)-y)
92328 +
92329 +constify_plugin-objs := constify_plugin.o
92330 +stackleak_plugin-objs := stackleak_plugin.o
92331 +kallocstat_plugin-objs := kallocstat_plugin.o
92332 +kernexec_plugin-objs := kernexec_plugin.o
92333 +checker_plugin-objs := checker_plugin.o
92334 +colorize_plugin-objs := colorize_plugin.o
92335 +size_overflow_plugin-objs := size_overflow_plugin.o
92336 +latent_entropy_plugin-objs := latent_entropy_plugin.o
92337 +structleak_plugin-objs := structleak_plugin.o
92338 +
92339 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
92340 +
92341 +quiet_cmd_build_size_overflow_hash = GENHASH $@
92342 + cmd_build_size_overflow_hash = \
92343 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
92344 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
92345 + $(call if_changed,build_size_overflow_hash)
92346 +
92347 +targets += size_overflow_hash.h
92348 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
92349 new file mode 100644
92350 index 0000000..d41b5af
92351 --- /dev/null
92352 +++ b/tools/gcc/checker_plugin.c
92353 @@ -0,0 +1,171 @@
92354 +/*
92355 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
92356 + * Licensed under the GPL v2
92357 + *
92358 + * Note: the choice of the license means that the compilation process is
92359 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
92360 + * but for the kernel it doesn't matter since it doesn't link against
92361 + * any of the gcc libraries
92362 + *
92363 + * gcc plugin to implement various sparse (source code checker) features
92364 + *
92365 + * TODO:
92366 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
92367 + *
92368 + * BUGS:
92369 + * - none known
92370 + */
92371 +#include "gcc-plugin.h"
92372 +#include "config.h"
92373 +#include "system.h"
92374 +#include "coretypes.h"
92375 +#include "tree.h"
92376 +#include "tree-pass.h"
92377 +#include "flags.h"
92378 +#include "intl.h"
92379 +#include "toplev.h"
92380 +#include "plugin.h"
92381 +//#include "expr.h" where are you...
92382 +#include "diagnostic.h"
92383 +#include "plugin-version.h"
92384 +#include "tm.h"
92385 +#include "function.h"
92386 +#include "basic-block.h"
92387 +#include "gimple.h"
92388 +#include "rtl.h"
92389 +#include "emit-rtl.h"
92390 +#include "tree-flow.h"
92391 +#include "target.h"
92392 +
92393 +extern void c_register_addr_space (const char *str, addr_space_t as);
92394 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
92395 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
92396 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
92397 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
92398 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
92399 +
92400 +extern void print_gimple_stmt(FILE *, gimple, int, int);
92401 +extern rtx emit_move_insn(rtx x, rtx y);
92402 +
92403 +int plugin_is_GPL_compatible;
92404 +
92405 +static struct plugin_info checker_plugin_info = {
92406 + .version = "201111150100",
92407 +};
92408 +
92409 +#define ADDR_SPACE_KERNEL 0
92410 +#define ADDR_SPACE_FORCE_KERNEL 1
92411 +#define ADDR_SPACE_USER 2
92412 +#define ADDR_SPACE_FORCE_USER 3
92413 +#define ADDR_SPACE_IOMEM 0
92414 +#define ADDR_SPACE_FORCE_IOMEM 0
92415 +#define ADDR_SPACE_PERCPU 0
92416 +#define ADDR_SPACE_FORCE_PERCPU 0
92417 +#define ADDR_SPACE_RCU 0
92418 +#define ADDR_SPACE_FORCE_RCU 0
92419 +
92420 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
92421 +{
92422 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
92423 +}
92424 +
92425 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
92426 +{
92427 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
92428 +}
92429 +
92430 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
92431 +{
92432 + return default_addr_space_valid_pointer_mode(mode, as);
92433 +}
92434 +
92435 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
92436 +{
92437 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
92438 +}
92439 +
92440 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
92441 +{
92442 + return default_addr_space_legitimize_address(x, oldx, mode, as);
92443 +}
92444 +
92445 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
92446 +{
92447 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
92448 + return true;
92449 +
92450 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
92451 + return true;
92452 +
92453 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
92454 + return true;
92455 +
92456 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
92457 + return true;
92458 +
92459 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
92460 + return true;
92461 +
92462 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
92463 + return true;
92464 +
92465 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
92466 + return true;
92467 +
92468 + return subset == superset;
92469 +}
92470 +
92471 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
92472 +{
92473 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
92474 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
92475 +
92476 + return op;
92477 +}
92478 +
92479 +static void register_checker_address_spaces(void *event_data, void *data)
92480 +{
92481 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
92482 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
92483 + c_register_addr_space("__user", ADDR_SPACE_USER);
92484 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
92485 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
92486 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
92487 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
92488 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
92489 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
92490 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
92491 +
92492 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
92493 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
92494 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
92495 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
92496 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
92497 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
92498 + targetm.addr_space.convert = checker_addr_space_convert;
92499 +}
92500 +
92501 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
92502 +{
92503 + const char * const plugin_name = plugin_info->base_name;
92504 + const int argc = plugin_info->argc;
92505 + const struct plugin_argument * const argv = plugin_info->argv;
92506 + int i;
92507 +
92508 + if (!plugin_default_version_check(version, &gcc_version)) {
92509 + error(G_("incompatible gcc/plugin versions"));
92510 + return 1;
92511 + }
92512 +
92513 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
92514 +
92515 + for (i = 0; i < argc; ++i)
92516 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
92517 +
92518 + if (TARGET_64BIT == 0)
92519 + return 0;
92520 +
92521 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
92522 +
92523 + return 0;
92524 +}
92525 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
92526 new file mode 100644
92527 index 0000000..414fe5e
92528 --- /dev/null
92529 +++ b/tools/gcc/colorize_plugin.c
92530 @@ -0,0 +1,151 @@
92531 +/*
92532 + * Copyright 2012-2013 by PaX Team <pageexec@freemail.hu>
92533 + * Licensed under the GPL v2
92534 + *
92535 + * Note: the choice of the license means that the compilation process is
92536 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
92537 + * but for the kernel it doesn't matter since it doesn't link against
92538 + * any of the gcc libraries
92539 + *
92540 + * gcc plugin to colorize diagnostic output
92541 + *
92542 + */
92543 +
92544 +#include "gcc-plugin.h"
92545 +#include "config.h"
92546 +#include "system.h"
92547 +#include "coretypes.h"
92548 +#include "tree.h"
92549 +#include "tree-pass.h"
92550 +#include "flags.h"
92551 +#include "intl.h"
92552 +#include "toplev.h"
92553 +#include "plugin.h"
92554 +#include "diagnostic.h"
92555 +#include "plugin-version.h"
92556 +#include "tm.h"
92557 +
92558 +int plugin_is_GPL_compatible;
92559 +
92560 +static struct plugin_info colorize_plugin_info = {
92561 + .version = "201302112000",
92562 + .help = NULL,
92563 +};
92564 +
92565 +#define GREEN "\033[32m\033[2m"
92566 +#define LIGHTGREEN "\033[32m\033[1m"
92567 +#define YELLOW "\033[33m\033[2m"
92568 +#define LIGHTYELLOW "\033[33m\033[1m"
92569 +#define RED "\033[31m\033[2m"
92570 +#define LIGHTRED "\033[31m\033[1m"
92571 +#define BLUE "\033[34m\033[2m"
92572 +#define LIGHTBLUE "\033[34m\033[1m"
92573 +#define BRIGHT "\033[m\033[1m"
92574 +#define NORMAL "\033[m"
92575 +
92576 +static diagnostic_starter_fn old_starter;
92577 +static diagnostic_finalizer_fn old_finalizer;
92578 +
92579 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
92580 +{
92581 + const char *color;
92582 + char *newprefix;
92583 +
92584 + switch (diagnostic->kind) {
92585 + case DK_NOTE:
92586 + color = LIGHTBLUE;
92587 + break;
92588 +
92589 + case DK_PEDWARN:
92590 + case DK_WARNING:
92591 + color = LIGHTYELLOW;
92592 + break;
92593 +
92594 + case DK_ERROR:
92595 + case DK_FATAL:
92596 + case DK_ICE:
92597 + case DK_PERMERROR:
92598 + case DK_SORRY:
92599 + color = LIGHTRED;
92600 + break;
92601 +
92602 + default:
92603 + color = NORMAL;
92604 + }
92605 +
92606 + old_starter(context, diagnostic);
92607 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
92608 + return;
92609 + pp_destroy_prefix(context->printer);
92610 + pp_set_prefix(context->printer, newprefix);
92611 +}
92612 +
92613 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
92614 +{
92615 + old_finalizer(context, diagnostic);
92616 +}
92617 +
92618 +static void colorize_arm(void)
92619 +{
92620 + old_starter = diagnostic_starter(global_dc);
92621 + old_finalizer = diagnostic_finalizer(global_dc);
92622 +
92623 + diagnostic_starter(global_dc) = start_colorize;
92624 + diagnostic_finalizer(global_dc) = finalize_colorize;
92625 +}
92626 +
92627 +static unsigned int execute_colorize_rearm(void)
92628 +{
92629 + if (diagnostic_starter(global_dc) == start_colorize)
92630 + return 0;
92631 +
92632 + colorize_arm();
92633 + return 0;
92634 +}
92635 +
92636 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
92637 + .pass = {
92638 + .type = SIMPLE_IPA_PASS,
92639 + .name = "colorize_rearm",
92640 +#if BUILDING_GCC_VERSION >= 4008
92641 + .optinfo_flags = OPTGROUP_NONE,
92642 +#endif
92643 + .gate = NULL,
92644 + .execute = execute_colorize_rearm,
92645 + .sub = NULL,
92646 + .next = NULL,
92647 + .static_pass_number = 0,
92648 + .tv_id = TV_NONE,
92649 + .properties_required = 0,
92650 + .properties_provided = 0,
92651 + .properties_destroyed = 0,
92652 + .todo_flags_start = 0,
92653 + .todo_flags_finish = 0
92654 + }
92655 +};
92656 +
92657 +static void colorize_start_unit(void *gcc_data, void *user_data)
92658 +{
92659 + colorize_arm();
92660 +}
92661 +
92662 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
92663 +{
92664 + const char * const plugin_name = plugin_info->base_name;
92665 + struct register_pass_info colorize_rearm_pass_info = {
92666 + .pass = &pass_ipa_colorize_rearm.pass,
92667 + .reference_pass_name = "*free_lang_data",
92668 + .ref_pass_instance_number = 1,
92669 + .pos_op = PASS_POS_INSERT_AFTER
92670 + };
92671 +
92672 + if (!plugin_default_version_check(version, &gcc_version)) {
92673 + error(G_("incompatible gcc/plugin versions"));
92674 + return 1;
92675 + }
92676 +
92677 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
92678 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
92679 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
92680 + return 0;
92681 +}
92682 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
92683 new file mode 100644
92684 index 0000000..bee0acb
92685 --- /dev/null
92686 +++ b/tools/gcc/constify_plugin.c
92687 @@ -0,0 +1,518 @@
92688 +/*
92689 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
92690 + * Copyright 2011-2013 by PaX Team <pageexec@freemail.hu>
92691 + * Licensed under the GPL v2, or (at your option) v3
92692 + *
92693 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
92694 + *
92695 + * Homepage:
92696 + * http://www.grsecurity.net/~ephox/const_plugin/
92697 + *
92698 + * Usage:
92699 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
92700 + * $ gcc -fplugin=constify_plugin.so test.c -O2
92701 + */
92702 +
92703 +#include "gcc-plugin.h"
92704 +#include "config.h"
92705 +#include "system.h"
92706 +#include "coretypes.h"
92707 +#include "tree.h"
92708 +#include "tree-pass.h"
92709 +#include "flags.h"
92710 +#include "intl.h"
92711 +#include "toplev.h"
92712 +#include "plugin.h"
92713 +#include "diagnostic.h"
92714 +#include "plugin-version.h"
92715 +#include "tm.h"
92716 +#include "function.h"
92717 +#include "basic-block.h"
92718 +#include "gimple.h"
92719 +#include "rtl.h"
92720 +#include "emit-rtl.h"
92721 +#include "tree-flow.h"
92722 +#include "target.h"
92723 +#include "langhooks.h"
92724 +
92725 +// should come from c-tree.h if only it were installed for gcc 4.5...
92726 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
92727 +
92728 +// unused type flag in all versions 4.5-4.8
92729 +#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
92730 +
92731 +int plugin_is_GPL_compatible;
92732 +
92733 +static struct plugin_info const_plugin_info = {
92734 + .version = "201303270300",
92735 + .help = "no-constify\tturn off constification\n",
92736 +};
92737 +
92738 +typedef struct {
92739 + bool has_fptr_field;
92740 + bool has_writable_field;
92741 + bool has_do_const_field;
92742 + bool has_no_const_field;
92743 +} constify_info;
92744 +
92745 +static const_tree get_field_type(const_tree field)
92746 +{
92747 + return strip_array_types(TREE_TYPE(field));
92748 +}
92749 +
92750 +static bool is_fptr(const_tree field)
92751 +{
92752 + const_tree ptr = get_field_type(field);
92753 +
92754 + if (TREE_CODE(ptr) != POINTER_TYPE)
92755 + return false;
92756 +
92757 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
92758 +}
92759 +
92760 +/*
92761 + * determine whether the given structure type meets the requirements for automatic constification,
92762 + * including the constification attributes on nested structure types
92763 + */
92764 +static void constifiable(const_tree node, constify_info *cinfo)
92765 +{
92766 + const_tree field;
92767 +
92768 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
92769 +
92770 + // e.g., pointer to structure fields while still constructing the structure type
92771 + if (TYPE_FIELDS(node) == NULL_TREE)
92772 + return;
92773 +
92774 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
92775 + const_tree type = get_field_type(field);
92776 + enum tree_code code = TREE_CODE(type);
92777 +
92778 + if (node == type)
92779 + continue;
92780 +
92781 + if (is_fptr(field))
92782 + cinfo->has_fptr_field = true;
92783 + else if (!TREE_READONLY(field))
92784 + cinfo->has_writable_field = true;
92785 +
92786 + if (code == RECORD_TYPE || code == UNION_TYPE) {
92787 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
92788 + cinfo->has_do_const_field = true;
92789 + else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
92790 + cinfo->has_no_const_field = true;
92791 + else
92792 + constifiable(type, cinfo);
92793 + }
92794 + }
92795 +}
92796 +
92797 +static bool constified(const_tree node)
92798 +{
92799 + constify_info cinfo = {
92800 + .has_fptr_field = false,
92801 + .has_writable_field = false,
92802 + .has_do_const_field = false,
92803 + .has_no_const_field = false
92804 + };
92805 +
92806 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
92807 +
92808 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
92809 + gcc_assert(!TYPE_READONLY(node));
92810 + return false;
92811 + }
92812 +
92813 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
92814 + gcc_assert(TYPE_READONLY(node));
92815 + return true;
92816 + }
92817 +
92818 + constifiable(node, &cinfo);
92819 + if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
92820 + return false;
92821 +
92822 + return TYPE_READONLY(node);
92823 +}
92824 +
92825 +static void deconstify_tree(tree node);
92826 +
92827 +static void deconstify_type(tree type)
92828 +{
92829 + tree field;
92830 +
92831 + gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
92832 +
92833 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
92834 + const_tree fieldtype = get_field_type(field);
92835 +
92836 + // special case handling of simple ptr-to-same-array-type members
92837 + if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
92838 + const_tree ptrtype = TREE_TYPE(TREE_TYPE(field));
92839 +
92840 + if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
92841 + continue;
92842 + if (TREE_TYPE(TREE_TYPE(field)) == type)
92843 + continue;
92844 + if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
92845 + TREE_TYPE(field) = copy_node(TREE_TYPE(field));
92846 + TREE_TYPE(TREE_TYPE(field)) = type;
92847 + }
92848 + continue;
92849 + }
92850 + if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
92851 + continue;
92852 + if (!constified(fieldtype))
92853 + continue;
92854 +
92855 + deconstify_tree(field);
92856 + TREE_READONLY(field) = 0;
92857 + }
92858 + TYPE_READONLY(type) = 0;
92859 + C_TYPE_FIELDS_READONLY(type) = 0;
92860 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
92861 + TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
92862 +}
92863 +
92864 +static void deconstify_tree(tree node)
92865 +{
92866 + tree old_type, new_type, field;
92867 +
92868 + old_type = TREE_TYPE(node);
92869 + while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
92870 + node = TREE_TYPE(node) = copy_node(old_type);
92871 + old_type = TREE_TYPE(old_type);
92872 + }
92873 +
92874 + gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
92875 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
92876 +
92877 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
92878 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
92879 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
92880 + DECL_FIELD_CONTEXT(field) = new_type;
92881 +
92882 + deconstify_type(new_type);
92883 +
92884 + TREE_TYPE(node) = new_type;
92885 +}
92886 +
92887 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
92888 +{
92889 + tree type;
92890 + constify_info cinfo = {
92891 + .has_fptr_field = false,
92892 + .has_writable_field = false,
92893 + .has_do_const_field = false,
92894 + .has_no_const_field = false
92895 + };
92896 +
92897 + *no_add_attrs = true;
92898 + if (TREE_CODE(*node) == FUNCTION_DECL) {
92899 + error("%qE attribute does not apply to functions", name);
92900 + return NULL_TREE;
92901 + }
92902 +
92903 + if (TREE_CODE(*node) == PARM_DECL) {
92904 + error("%qE attribute does not apply to function parameters", name);
92905 + return NULL_TREE;
92906 + }
92907 +
92908 + if (TREE_CODE(*node) == VAR_DECL) {
92909 + error("%qE attribute does not apply to variables", name);
92910 + return NULL_TREE;
92911 + }
92912 +
92913 + if (TYPE_P(*node)) {
92914 + *no_add_attrs = false;
92915 + type = *node;
92916 + } else {
92917 + gcc_assert(TREE_CODE(*node) == TYPE_DECL);
92918 + type = TREE_TYPE(*node);
92919 + }
92920 +
92921 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
92922 + error("%qE attribute applies to struct and union types only", name);
92923 + return NULL_TREE;
92924 + }
92925 +
92926 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
92927 + error("%qE attribute is already applied to the type", name);
92928 + return NULL_TREE;
92929 + }
92930 +
92931 + if (TYPE_P(*node)) {
92932 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
92933 + error("%qE attribute is incompatible with 'do_const'", name);
92934 + return NULL_TREE;
92935 + }
92936 +
92937 + constifiable(type, &cinfo);
92938 + if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
92939 + deconstify_tree(*node);
92940 + TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
92941 + return NULL_TREE;
92942 + }
92943 +
92944 + error("%qE attribute used on type that is not constified", name);
92945 + return NULL_TREE;
92946 +}
92947 +
92948 +static void constify_type(tree type)
92949 +{
92950 + TYPE_READONLY(type) = 1;
92951 + C_TYPE_FIELDS_READONLY(type) = 1;
92952 + TYPE_CONSTIFY_VISITED(type) = 1;
92953 +// TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
92954 +}
92955 +
92956 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
92957 +{
92958 + *no_add_attrs = true;
92959 + if (!TYPE_P(*node)) {
92960 + error("%qE attribute applies to types only", name);
92961 + return NULL_TREE;
92962 + }
92963 +
92964 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
92965 + error("%qE attribute applies to struct and union types only", name);
92966 + return NULL_TREE;
92967 + }
92968 +
92969 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
92970 + error("%qE attribute is already applied to the type", name);
92971 + return NULL_TREE;
92972 + }
92973 +
92974 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
92975 + error("%qE attribute is incompatible with 'no_const'", name);
92976 + return NULL_TREE;
92977 + }
92978 +
92979 + *no_add_attrs = false;
92980 + return NULL_TREE;
92981 +}
92982 +
92983 +static struct attribute_spec no_const_attr = {
92984 + .name = "no_const",
92985 + .min_length = 0,
92986 + .max_length = 0,
92987 + .decl_required = false,
92988 + .type_required = false,
92989 + .function_type_required = false,
92990 + .handler = handle_no_const_attribute,
92991 +#if BUILDING_GCC_VERSION >= 4007
92992 + .affects_type_identity = true
92993 +#endif
92994 +};
92995 +
92996 +static struct attribute_spec do_const_attr = {
92997 + .name = "do_const",
92998 + .min_length = 0,
92999 + .max_length = 0,
93000 + .decl_required = false,
93001 + .type_required = false,
93002 + .function_type_required = false,
93003 + .handler = handle_do_const_attribute,
93004 +#if BUILDING_GCC_VERSION >= 4007
93005 + .affects_type_identity = true
93006 +#endif
93007 +};
93008 +
93009 +static void register_attributes(void *event_data, void *data)
93010 +{
93011 + register_attribute(&no_const_attr);
93012 + register_attribute(&do_const_attr);
93013 +}
93014 +
93015 +static void finish_type(void *event_data, void *data)
93016 +{
93017 + tree type = (tree)event_data;
93018 + constify_info cinfo = {
93019 + .has_fptr_field = false,
93020 + .has_writable_field = false,
93021 + .has_do_const_field = false,
93022 + .has_no_const_field = false
93023 + };
93024 +
93025 + if (type == NULL_TREE || type == error_mark_node)
93026 + return;
93027 +
93028 + if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
93029 + return;
93030 +
93031 + constifiable(type, &cinfo);
93032 +
93033 + if (TYPE_READONLY(type) && C_TYPE_FIELDS_READONLY(type)) {
93034 + if (!lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
93035 + return;
93036 + if (cinfo.has_writable_field)
93037 + return;
93038 + error("'do_const' attribute used on type that is%sconstified", cinfo.has_fptr_field ? " " : " not ");
93039 + return;
93040 + }
93041 +
93042 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
93043 + if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
93044 + deconstify_type(type);
93045 + TYPE_CONSTIFY_VISITED(type) = 1;
93046 + } else
93047 + error("'no_const' attribute used on type that is not constified");
93048 + return;
93049 + }
93050 +
93051 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
93052 + constify_type(type);
93053 + return;
93054 + }
93055 +
93056 + if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
93057 + constify_type(type);
93058 + return;
93059 + }
93060 +
93061 + deconstify_type(type);
93062 + TYPE_CONSTIFY_VISITED(type) = 1;
93063 +}
93064 +
93065 +static unsigned int check_local_variables(void)
93066 +{
93067 + unsigned int ret = 0;
93068 + tree var;
93069 +
93070 +#if BUILDING_GCC_VERSION == 4005
93071 + tree vars;
93072 +#else
93073 + unsigned int i;
93074 +#endif
93075 +
93076 +#if BUILDING_GCC_VERSION == 4005
93077 + for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
93078 + var = TREE_VALUE(vars);
93079 +#else
93080 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
93081 +#endif
93082 + tree type = TREE_TYPE(var);
93083 +
93084 + gcc_assert(DECL_P(var));
93085 + if (is_global_var(var))
93086 + continue;
93087 +
93088 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
93089 + continue;
93090 +
93091 + if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
93092 + continue;
93093 +
93094 + if (!TYPE_CONSTIFY_VISITED(type))
93095 + continue;
93096 +
93097 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
93098 + ret = 1;
93099 + }
93100 + return ret;
93101 +}
93102 +
93103 +static struct gimple_opt_pass pass_local_variable = {
93104 + {
93105 + .type = GIMPLE_PASS,
93106 + .name = "check_local_variables",
93107 +#if BUILDING_GCC_VERSION >= 4008
93108 + .optinfo_flags = OPTGROUP_NONE,
93109 +#endif
93110 + .gate = NULL,
93111 + .execute = check_local_variables,
93112 + .sub = NULL,
93113 + .next = NULL,
93114 + .static_pass_number = 0,
93115 + .tv_id = TV_NONE,
93116 + .properties_required = 0,
93117 + .properties_provided = 0,
93118 + .properties_destroyed = 0,
93119 + .todo_flags_start = 0,
93120 + .todo_flags_finish = 0
93121 + }
93122 +};
93123 +
93124 +static struct {
93125 + const char *name;
93126 + const char *asm_op;
93127 +} sections[] = {
93128 + {".init.rodata", "\t.section\t.init.rodata,\"a\""},
93129 + {".ref.rodata", "\t.section\t.ref.rodata,\"a\""},
93130 + {".devinit.rodata", "\t.section\t.devinit.rodata,\"a\""},
93131 + {".devexit.rodata", "\t.section\t.devexit.rodata,\"a\""},
93132 + {".cpuinit.rodata", "\t.section\t.cpuinit.rodata,\"a\""},
93133 + {".cpuexit.rodata", "\t.section\t.cpuexit.rodata,\"a\""},
93134 + {".meminit.rodata", "\t.section\t.meminit.rodata,\"a\""},
93135 + {".memexit.rodata", "\t.section\t.memexit.rodata,\"a\""},
93136 + {".data..read_only", "\t.section\t.data..read_only,\"a\""},
93137 +};
93138 +
93139 +static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
93140 +
93141 +static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
93142 +{
93143 + size_t i;
93144 +
93145 + for (i = 0; i < ARRAY_SIZE(sections); i++)
93146 + if (!strcmp(sections[i].name, name))
93147 + return 0;
93148 + return old_section_type_flags(decl, name, reloc);
93149 +}
93150 +
93151 +static void constify_start_unit(void *gcc_data, void *user_data)
93152 +{
93153 +// size_t i;
93154 +
93155 +// for (i = 0; i < ARRAY_SIZE(sections); i++)
93156 +// sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
93157 +// sections[i].section = get_section(sections[i].name, 0, NULL);
93158 +
93159 + old_section_type_flags = targetm.section_type_flags;
93160 + targetm.section_type_flags = constify_section_type_flags;
93161 +}
93162 +
93163 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
93164 +{
93165 + const char * const plugin_name = plugin_info->base_name;
93166 + const int argc = plugin_info->argc;
93167 + const struct plugin_argument * const argv = plugin_info->argv;
93168 + int i;
93169 + bool constify = true;
93170 +
93171 + struct register_pass_info local_variable_pass_info = {
93172 + .pass = &pass_local_variable.pass,
93173 + .reference_pass_name = "ssa",
93174 + .ref_pass_instance_number = 1,
93175 + .pos_op = PASS_POS_INSERT_BEFORE
93176 + };
93177 +
93178 + if (!plugin_default_version_check(version, &gcc_version)) {
93179 + error(G_("incompatible gcc/plugin versions"));
93180 + return 1;
93181 + }
93182 +
93183 + for (i = 0; i < argc; ++i) {
93184 + if (!(strcmp(argv[i].key, "no-constify"))) {
93185 + constify = false;
93186 + continue;
93187 + }
93188 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
93189 + }
93190 +
93191 + if (strcmp(lang_hooks.name, "GNU C")) {
93192 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
93193 + constify = false;
93194 + }
93195 +
93196 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
93197 + if (constify) {
93198 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
93199 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
93200 + register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
93201 + }
93202 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
93203 +
93204 + return 0;
93205 +}
93206 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
93207 new file mode 100644
93208 index 0000000..e518932
93209 --- /dev/null
93210 +++ b/tools/gcc/generate_size_overflow_hash.sh
93211 @@ -0,0 +1,94 @@
93212 +#!/bin/bash
93213 +
93214 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
93215 +
93216 +header1="size_overflow_hash.h"
93217 +database="size_overflow_hash.data"
93218 +n=65536
93219 +
93220 +usage() {
93221 +cat <<EOF
93222 +usage: $0 options
93223 +OPTIONS:
93224 + -h|--help help
93225 + -o header file
93226 + -d database file
93227 + -n hash array size
93228 +EOF
93229 + return 0
93230 +}
93231 +
93232 +while true
93233 +do
93234 + case "$1" in
93235 + -h|--help) usage && exit 0;;
93236 + -n) n=$2; shift 2;;
93237 + -o) header1="$2"; shift 2;;
93238 + -d) database="$2"; shift 2;;
93239 + --) shift 1; break ;;
93240 + *) break ;;
93241 + esac
93242 +done
93243 +
93244 +create_defines() {
93245 + for i in `seq 0 31`
93246 + do
93247 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
93248 + done
93249 + echo >> "$header1"
93250 +}
93251 +
93252 +create_structs() {
93253 + rm -f "$header1"
93254 +
93255 + create_defines
93256 +
93257 + cat "$database" | while read data
93258 + do
93259 + data_array=($data)
93260 + struct_hash_name="${data_array[0]}"
93261 + funcn="${data_array[1]}"
93262 + params="${data_array[2]}"
93263 + next="${data_array[4]}"
93264 +
93265 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
93266 +
93267 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
93268 + echo -en "\t.param\t= " >> "$header1"
93269 + line=
93270 + for param_num in ${params//-/ };
93271 + do
93272 + line="${line}PARAM"$param_num"|"
93273 + done
93274 +
93275 + echo -e "${line%?},\n};\n" >> "$header1"
93276 + done
93277 +}
93278 +
93279 +create_headers() {
93280 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
93281 +}
93282 +
93283 +create_array_elements() {
93284 + index=0
93285 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
93286 + do
93287 + data_array=($data)
93288 + i="${data_array[3]}"
93289 + hash="${data_array[0]}"
93290 + while [[ $index -lt $i ]]
93291 + do
93292 + echo -e "\t["$index"]\t= NULL," >> "$header1"
93293 + index=$(($index + 1))
93294 + done
93295 + index=$(($index + 1))
93296 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
93297 + done
93298 + echo '};' >> $header1
93299 +}
93300 +
93301 +create_structs
93302 +create_headers
93303 +create_array_elements
93304 +
93305 +exit 0
93306 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
93307 new file mode 100644
93308 index 0000000..568b360
93309 --- /dev/null
93310 +++ b/tools/gcc/kallocstat_plugin.c
93311 @@ -0,0 +1,170 @@
93312 +/*
93313 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
93314 + * Licensed under the GPL v2
93315 + *
93316 + * Note: the choice of the license means that the compilation process is
93317 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
93318 + * but for the kernel it doesn't matter since it doesn't link against
93319 + * any of the gcc libraries
93320 + *
93321 + * gcc plugin to find the distribution of k*alloc sizes
93322 + *
93323 + * TODO:
93324 + *
93325 + * BUGS:
93326 + * - none known
93327 + */
93328 +#include "gcc-plugin.h"
93329 +#include "config.h"
93330 +#include "system.h"
93331 +#include "coretypes.h"
93332 +#include "tree.h"
93333 +#include "tree-pass.h"
93334 +#include "flags.h"
93335 +#include "intl.h"
93336 +#include "toplev.h"
93337 +#include "plugin.h"
93338 +//#include "expr.h" where are you...
93339 +#include "diagnostic.h"
93340 +#include "plugin-version.h"
93341 +#include "tm.h"
93342 +#include "function.h"
93343 +#include "basic-block.h"
93344 +#include "gimple.h"
93345 +#include "rtl.h"
93346 +#include "emit-rtl.h"
93347 +
93348 +extern void print_gimple_stmt(FILE *, gimple, int, int);
93349 +
93350 +int plugin_is_GPL_compatible;
93351 +
93352 +static const char * const kalloc_functions[] = {
93353 + "__kmalloc",
93354 + "kmalloc",
93355 + "kmalloc_large",
93356 + "kmalloc_node",
93357 + "kmalloc_order",
93358 + "kmalloc_order_trace",
93359 + "kmalloc_slab",
93360 + "kzalloc",
93361 + "kzalloc_node",
93362 +};
93363 +
93364 +static struct plugin_info kallocstat_plugin_info = {
93365 + .version = "201302112000",
93366 +};
93367 +
93368 +static unsigned int execute_kallocstat(void);
93369 +
93370 +static struct gimple_opt_pass kallocstat_pass = {
93371 + .pass = {
93372 + .type = GIMPLE_PASS,
93373 + .name = "kallocstat",
93374 +#if BUILDING_GCC_VERSION >= 4008
93375 + .optinfo_flags = OPTGROUP_NONE,
93376 +#endif
93377 + .gate = NULL,
93378 + .execute = execute_kallocstat,
93379 + .sub = NULL,
93380 + .next = NULL,
93381 + .static_pass_number = 0,
93382 + .tv_id = TV_NONE,
93383 + .properties_required = 0,
93384 + .properties_provided = 0,
93385 + .properties_destroyed = 0,
93386 + .todo_flags_start = 0,
93387 + .todo_flags_finish = 0
93388 + }
93389 +};
93390 +
93391 +static bool is_kalloc(const char *fnname)
93392 +{
93393 + size_t i;
93394 +
93395 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
93396 + if (!strcmp(fnname, kalloc_functions[i]))
93397 + return true;
93398 + return false;
93399 +}
93400 +
93401 +static unsigned int execute_kallocstat(void)
93402 +{
93403 + basic_block bb;
93404 +
93405 + // 1. loop through BBs and GIMPLE statements
93406 + FOR_EACH_BB(bb) {
93407 + gimple_stmt_iterator gsi;
93408 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
93409 + // gimple match:
93410 + tree fndecl, size;
93411 + gimple call_stmt;
93412 + const char *fnname;
93413 +
93414 + // is it a call
93415 + call_stmt = gsi_stmt(gsi);
93416 + if (!is_gimple_call(call_stmt))
93417 + continue;
93418 + fndecl = gimple_call_fndecl(call_stmt);
93419 + if (fndecl == NULL_TREE)
93420 + continue;
93421 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
93422 + continue;
93423 +
93424 + // is it a call to k*alloc
93425 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
93426 + if (!is_kalloc(fnname))
93427 + continue;
93428 +
93429 + // is the size arg the result of a simple const assignment
93430 + size = gimple_call_arg(call_stmt, 0);
93431 + while (true) {
93432 + gimple def_stmt;
93433 + expanded_location xloc;
93434 + size_t size_val;
93435 +
93436 + if (TREE_CODE(size) != SSA_NAME)
93437 + break;
93438 + def_stmt = SSA_NAME_DEF_STMT(size);
93439 + if (!def_stmt || !is_gimple_assign(def_stmt))
93440 + break;
93441 + if (gimple_num_ops(def_stmt) != 2)
93442 + break;
93443 + size = gimple_assign_rhs1(def_stmt);
93444 + if (!TREE_CONSTANT(size))
93445 + continue;
93446 + xloc = expand_location(gimple_location(def_stmt));
93447 + if (!xloc.file)
93448 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
93449 + size_val = TREE_INT_CST_LOW(size);
93450 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
93451 + break;
93452 + }
93453 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
93454 +//debug_tree(gimple_call_fn(call_stmt));
93455 +//print_node(stderr, "pax", fndecl, 4);
93456 + }
93457 + }
93458 +
93459 + return 0;
93460 +}
93461 +
93462 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
93463 +{
93464 + const char * const plugin_name = plugin_info->base_name;
93465 + struct register_pass_info kallocstat_pass_info = {
93466 + .pass = &kallocstat_pass.pass,
93467 + .reference_pass_name = "ssa",
93468 + .ref_pass_instance_number = 1,
93469 + .pos_op = PASS_POS_INSERT_AFTER
93470 + };
93471 +
93472 + if (!plugin_default_version_check(version, &gcc_version)) {
93473 + error(G_("incompatible gcc/plugin versions"));
93474 + return 1;
93475 + }
93476 +
93477 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
93478 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
93479 +
93480 + return 0;
93481 +}
93482 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
93483 new file mode 100644
93484 index 0000000..0408e06
93485 --- /dev/null
93486 +++ b/tools/gcc/kernexec_plugin.c
93487 @@ -0,0 +1,465 @@
93488 +/*
93489 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
93490 + * Licensed under the GPL v2
93491 + *
93492 + * Note: the choice of the license means that the compilation process is
93493 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
93494 + * but for the kernel it doesn't matter since it doesn't link against
93495 + * any of the gcc libraries
93496 + *
93497 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
93498 + *
93499 + * TODO:
93500 + *
93501 + * BUGS:
93502 + * - none known
93503 + */
93504 +#include "gcc-plugin.h"
93505 +#include "config.h"
93506 +#include "system.h"
93507 +#include "coretypes.h"
93508 +#include "tree.h"
93509 +#include "tree-pass.h"
93510 +#include "flags.h"
93511 +#include "intl.h"
93512 +#include "toplev.h"
93513 +#include "plugin.h"
93514 +//#include "expr.h" where are you...
93515 +#include "diagnostic.h"
93516 +#include "plugin-version.h"
93517 +#include "tm.h"
93518 +#include "function.h"
93519 +#include "basic-block.h"
93520 +#include "gimple.h"
93521 +#include "rtl.h"
93522 +#include "emit-rtl.h"
93523 +#include "tree-flow.h"
93524 +
93525 +extern void print_gimple_stmt(FILE *, gimple, int, int);
93526 +extern rtx emit_move_insn(rtx x, rtx y);
93527 +
93528 +#if BUILDING_GCC_VERSION <= 4006
93529 +#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
93530 +#endif
93531 +
93532 +#if BUILDING_GCC_VERSION >= 4008
93533 +#define TODO_dump_func 0
93534 +#endif
93535 +
93536 +int plugin_is_GPL_compatible;
93537 +
93538 +static struct plugin_info kernexec_plugin_info = {
93539 + .version = "201302112000",
93540 + .help = "method=[bts|or]\tinstrumentation method\n"
93541 +};
93542 +
93543 +static unsigned int execute_kernexec_reload(void);
93544 +static unsigned int execute_kernexec_fptr(void);
93545 +static unsigned int execute_kernexec_retaddr(void);
93546 +static bool kernexec_cmodel_check(void);
93547 +
93548 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
93549 +static void (*kernexec_instrument_retaddr)(rtx);
93550 +
93551 +static struct gimple_opt_pass kernexec_reload_pass = {
93552 + .pass = {
93553 + .type = GIMPLE_PASS,
93554 + .name = "kernexec_reload",
93555 +#if BUILDING_GCC_VERSION >= 4008
93556 + .optinfo_flags = OPTGROUP_NONE,
93557 +#endif
93558 + .gate = kernexec_cmodel_check,
93559 + .execute = execute_kernexec_reload,
93560 + .sub = NULL,
93561 + .next = NULL,
93562 + .static_pass_number = 0,
93563 + .tv_id = TV_NONE,
93564 + .properties_required = 0,
93565 + .properties_provided = 0,
93566 + .properties_destroyed = 0,
93567 + .todo_flags_start = 0,
93568 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
93569 + }
93570 +};
93571 +
93572 +static struct gimple_opt_pass kernexec_fptr_pass = {
93573 + .pass = {
93574 + .type = GIMPLE_PASS,
93575 + .name = "kernexec_fptr",
93576 +#if BUILDING_GCC_VERSION >= 4008
93577 + .optinfo_flags = OPTGROUP_NONE,
93578 +#endif
93579 + .gate = kernexec_cmodel_check,
93580 + .execute = execute_kernexec_fptr,
93581 + .sub = NULL,
93582 + .next = NULL,
93583 + .static_pass_number = 0,
93584 + .tv_id = TV_NONE,
93585 + .properties_required = 0,
93586 + .properties_provided = 0,
93587 + .properties_destroyed = 0,
93588 + .todo_flags_start = 0,
93589 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
93590 + }
93591 +};
93592 +
93593 +static struct rtl_opt_pass kernexec_retaddr_pass = {
93594 + .pass = {
93595 + .type = RTL_PASS,
93596 + .name = "kernexec_retaddr",
93597 +#if BUILDING_GCC_VERSION >= 4008
93598 + .optinfo_flags = OPTGROUP_NONE,
93599 +#endif
93600 + .gate = kernexec_cmodel_check,
93601 + .execute = execute_kernexec_retaddr,
93602 + .sub = NULL,
93603 + .next = NULL,
93604 + .static_pass_number = 0,
93605 + .tv_id = TV_NONE,
93606 + .properties_required = 0,
93607 + .properties_provided = 0,
93608 + .properties_destroyed = 0,
93609 + .todo_flags_start = 0,
93610 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
93611 + }
93612 +};
93613 +
93614 +static bool kernexec_cmodel_check(void)
93615 +{
93616 + tree section;
93617 +
93618 + if (ix86_cmodel != CM_KERNEL)
93619 + return false;
93620 +
93621 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
93622 + if (!section || !TREE_VALUE(section))
93623 + return true;
93624 +
93625 + section = TREE_VALUE(TREE_VALUE(section));
93626 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
93627 + return true;
93628 +
93629 + return false;
93630 +}
93631 +
93632 +/*
93633 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
93634 + */
93635 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
93636 +{
93637 + gimple asm_movabs_stmt;
93638 +
93639 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
93640 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
93641 + gimple_asm_set_volatile(asm_movabs_stmt, true);
93642 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
93643 + update_stmt(asm_movabs_stmt);
93644 +}
93645 +
93646 +/*
93647 + * find all asm() stmts that clobber r10 and add a reload of r10
93648 + */
93649 +static unsigned int execute_kernexec_reload(void)
93650 +{
93651 + basic_block bb;
93652 +
93653 + // 1. loop through BBs and GIMPLE statements
93654 + FOR_EACH_BB(bb) {
93655 + gimple_stmt_iterator gsi;
93656 +
93657 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
93658 + // gimple match: __asm__ ("" : : : "r10");
93659 + gimple asm_stmt;
93660 + size_t nclobbers;
93661 +
93662 + // is it an asm ...
93663 + asm_stmt = gsi_stmt(gsi);
93664 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
93665 + continue;
93666 +
93667 + // ... clobbering r10
93668 + nclobbers = gimple_asm_nclobbers(asm_stmt);
93669 + while (nclobbers--) {
93670 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
93671 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
93672 + continue;
93673 + kernexec_reload_fptr_mask(&gsi);
93674 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
93675 + break;
93676 + }
93677 + }
93678 + }
93679 +
93680 + return 0;
93681 +}
93682 +
93683 +/*
93684 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
93685 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
93686 + */
93687 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
93688 +{
93689 + gimple assign_intptr, assign_new_fptr, call_stmt;
93690 + tree intptr, old_fptr, new_fptr, kernexec_mask;
93691 +
93692 + call_stmt = gsi_stmt(*gsi);
93693 + old_fptr = gimple_call_fn(call_stmt);
93694 +
93695 + // create temporary unsigned long variable used for bitops and cast fptr to it
93696 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
93697 +#if BUILDING_GCC_VERSION <= 4007
93698 + add_referenced_var(intptr);
93699 + mark_sym_for_renaming(intptr);
93700 +#endif
93701 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
93702 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
93703 + update_stmt(assign_intptr);
93704 +
93705 + // apply logical or to temporary unsigned long and bitmask
93706 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
93707 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
93708 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
93709 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
93710 + update_stmt(assign_intptr);
93711 +
93712 + // cast temporary unsigned long back to a temporary fptr variable
93713 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
93714 +#if BUILDING_GCC_VERSION <= 4007
93715 + add_referenced_var(new_fptr);
93716 + mark_sym_for_renaming(new_fptr);
93717 +#endif
93718 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
93719 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
93720 + update_stmt(assign_new_fptr);
93721 +
93722 + // replace call stmt fn with the new fptr
93723 + gimple_call_set_fn(call_stmt, new_fptr);
93724 + update_stmt(call_stmt);
93725 +}
93726 +
93727 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
93728 +{
93729 + gimple asm_or_stmt, call_stmt;
93730 + tree old_fptr, new_fptr, input, output;
93731 +#if BUILDING_GCC_VERSION <= 4007
93732 + VEC(tree, gc) *inputs = NULL;
93733 + VEC(tree, gc) *outputs = NULL;
93734 +#else
93735 + vec<tree, va_gc> *inputs = NULL;
93736 + vec<tree, va_gc> *outputs = NULL;
93737 +#endif
93738 +
93739 + call_stmt = gsi_stmt(*gsi);
93740 + old_fptr = gimple_call_fn(call_stmt);
93741 +
93742 + // create temporary fptr variable
93743 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
93744 +#if BUILDING_GCC_VERSION <= 4007
93745 + add_referenced_var(new_fptr);
93746 + mark_sym_for_renaming(new_fptr);
93747 +#endif
93748 +
93749 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
93750 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
93751 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
93752 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
93753 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
93754 +#if BUILDING_GCC_VERSION <= 4007
93755 + VEC_safe_push(tree, gc, inputs, input);
93756 + VEC_safe_push(tree, gc, outputs, output);
93757 +#else
93758 + vec_safe_push(inputs, input);
93759 + vec_safe_push(outputs, output);
93760 +#endif
93761 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
93762 + gimple_asm_set_volatile(asm_or_stmt, true);
93763 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
93764 + update_stmt(asm_or_stmt);
93765 +
93766 + // replace call stmt fn with the new fptr
93767 + gimple_call_set_fn(call_stmt, new_fptr);
93768 + update_stmt(call_stmt);
93769 +}
93770 +
93771 +/*
93772 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
93773 + */
93774 +static unsigned int execute_kernexec_fptr(void)
93775 +{
93776 + basic_block bb;
93777 +
93778 + // 1. loop through BBs and GIMPLE statements
93779 + FOR_EACH_BB(bb) {
93780 + gimple_stmt_iterator gsi;
93781 +
93782 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
93783 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
93784 + tree fn;
93785 + gimple call_stmt;
93786 +
93787 + // is it a call ...
93788 + call_stmt = gsi_stmt(gsi);
93789 + if (!is_gimple_call(call_stmt))
93790 + continue;
93791 + fn = gimple_call_fn(call_stmt);
93792 + if (TREE_CODE(fn) == ADDR_EXPR)
93793 + continue;
93794 + if (TREE_CODE(fn) != SSA_NAME)
93795 + gcc_unreachable();
93796 +
93797 + // ... through a function pointer
93798 + if (SSA_NAME_VAR(fn) != NULL_TREE) {
93799 + fn = SSA_NAME_VAR(fn);
93800 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
93801 + debug_tree(fn);
93802 + gcc_unreachable();
93803 + }
93804 + }
93805 + fn = TREE_TYPE(fn);
93806 + if (TREE_CODE(fn) != POINTER_TYPE)
93807 + continue;
93808 + fn = TREE_TYPE(fn);
93809 + if (TREE_CODE(fn) != FUNCTION_TYPE)
93810 + continue;
93811 +
93812 + kernexec_instrument_fptr(&gsi);
93813 +
93814 +//debug_tree(gimple_call_fn(call_stmt));
93815 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
93816 + }
93817 + }
93818 +
93819 + return 0;
93820 +}
93821 +
93822 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
93823 +static void kernexec_instrument_retaddr_bts(rtx insn)
93824 +{
93825 + rtx btsq;
93826 + rtvec argvec, constraintvec, labelvec;
93827 + int line;
93828 +
93829 + // create asm volatile("btsq $63,(%%rsp)":::)
93830 + argvec = rtvec_alloc(0);
93831 + constraintvec = rtvec_alloc(0);
93832 + labelvec = rtvec_alloc(0);
93833 + line = expand_location(RTL_LOCATION(insn)).line;
93834 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
93835 + MEM_VOLATILE_P(btsq) = 1;
93836 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
93837 + emit_insn_before(btsq, insn);
93838 +}
93839 +
93840 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
93841 +static void kernexec_instrument_retaddr_or(rtx insn)
93842 +{
93843 + rtx orq;
93844 + rtvec argvec, constraintvec, labelvec;
93845 + int line;
93846 +
93847 + // create asm volatile("orq %%r10,(%%rsp)":::)
93848 + argvec = rtvec_alloc(0);
93849 + constraintvec = rtvec_alloc(0);
93850 + labelvec = rtvec_alloc(0);
93851 + line = expand_location(RTL_LOCATION(insn)).line;
93852 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
93853 + MEM_VOLATILE_P(orq) = 1;
93854 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
93855 + emit_insn_before(orq, insn);
93856 +}
93857 +
93858 +/*
93859 + * find all asm level function returns and forcibly set the highest bit of the return address
93860 + */
93861 +static unsigned int execute_kernexec_retaddr(void)
93862 +{
93863 + rtx insn;
93864 +
93865 + // 1. find function returns
93866 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
93867 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
93868 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
93869 + // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
93870 + rtx body;
93871 +
93872 + // is it a retn
93873 + if (!JUMP_P(insn))
93874 + continue;
93875 + body = PATTERN(insn);
93876 + if (GET_CODE(body) == PARALLEL)
93877 + body = XVECEXP(body, 0, 0);
93878 + if (!ANY_RETURN_P(body))
93879 + continue;
93880 + kernexec_instrument_retaddr(insn);
93881 + }
93882 +
93883 +// print_simple_rtl(stderr, get_insns());
93884 +// print_rtl(stderr, get_insns());
93885 +
93886 + return 0;
93887 +}
93888 +
93889 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
93890 +{
93891 + const char * const plugin_name = plugin_info->base_name;
93892 + const int argc = plugin_info->argc;
93893 + const struct plugin_argument * const argv = plugin_info->argv;
93894 + int i;
93895 + struct register_pass_info kernexec_reload_pass_info = {
93896 + .pass = &kernexec_reload_pass.pass,
93897 + .reference_pass_name = "ssa",
93898 + .ref_pass_instance_number = 1,
93899 + .pos_op = PASS_POS_INSERT_AFTER
93900 + };
93901 + struct register_pass_info kernexec_fptr_pass_info = {
93902 + .pass = &kernexec_fptr_pass.pass,
93903 + .reference_pass_name = "ssa",
93904 + .ref_pass_instance_number = 1,
93905 + .pos_op = PASS_POS_INSERT_AFTER
93906 + };
93907 + struct register_pass_info kernexec_retaddr_pass_info = {
93908 + .pass = &kernexec_retaddr_pass.pass,
93909 + .reference_pass_name = "pro_and_epilogue",
93910 + .ref_pass_instance_number = 1,
93911 + .pos_op = PASS_POS_INSERT_AFTER
93912 + };
93913 +
93914 + if (!plugin_default_version_check(version, &gcc_version)) {
93915 + error(G_("incompatible gcc/plugin versions"));
93916 + return 1;
93917 + }
93918 +
93919 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
93920 +
93921 + if (TARGET_64BIT == 0)
93922 + return 0;
93923 +
93924 + for (i = 0; i < argc; ++i) {
93925 + if (!strcmp(argv[i].key, "method")) {
93926 + if (!argv[i].value) {
93927 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
93928 + continue;
93929 + }
93930 + if (!strcmp(argv[i].value, "bts")) {
93931 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
93932 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
93933 + } else if (!strcmp(argv[i].value, "or")) {
93934 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
93935 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
93936 + fix_register("r10", 1, 1);
93937 + } else
93938 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
93939 + continue;
93940 + }
93941 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
93942 + }
93943 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
93944 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
93945 +
93946 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
93947 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
93948 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
93949 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
93950 +
93951 + return 0;
93952 +}
93953 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
93954 new file mode 100644
93955 index 0000000..b5395ba
93956 --- /dev/null
93957 +++ b/tools/gcc/latent_entropy_plugin.c
93958 @@ -0,0 +1,327 @@
93959 +/*
93960 + * Copyright 2012-2013 by the PaX Team <pageexec@freemail.hu>
93961 + * Licensed under the GPL v2
93962 + *
93963 + * Note: the choice of the license means that the compilation process is
93964 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
93965 + * but for the kernel it doesn't matter since it doesn't link against
93966 + * any of the gcc libraries
93967 + *
93968 + * gcc plugin to help generate a little bit of entropy from program state,
93969 + * used during boot in the kernel
93970 + *
93971 + * TODO:
93972 + * - add ipa pass to identify not explicitly marked candidate functions
93973 + * - mix in more program state (function arguments/return values, loop variables, etc)
93974 + * - more instrumentation control via attribute parameters
93975 + *
93976 + * BUGS:
93977 + * - LTO needs -flto-partition=none for now
93978 + */
93979 +#include "gcc-plugin.h"
93980 +#include "config.h"
93981 +#include "system.h"
93982 +#include "coretypes.h"
93983 +#include "tree.h"
93984 +#include "tree-pass.h"
93985 +#include "flags.h"
93986 +#include "intl.h"
93987 +#include "toplev.h"
93988 +#include "plugin.h"
93989 +//#include "expr.h" where are you...
93990 +#include "diagnostic.h"
93991 +#include "plugin-version.h"
93992 +#include "tm.h"
93993 +#include "function.h"
93994 +#include "basic-block.h"
93995 +#include "gimple.h"
93996 +#include "rtl.h"
93997 +#include "emit-rtl.h"
93998 +#include "tree-flow.h"
93999 +#include "langhooks.h"
94000 +
94001 +#if BUILDING_GCC_VERSION >= 4008
94002 +#define TODO_dump_func 0
94003 +#endif
94004 +
94005 +int plugin_is_GPL_compatible;
94006 +
94007 +static tree latent_entropy_decl;
94008 +
94009 +static struct plugin_info latent_entropy_plugin_info = {
94010 + .version = "201303102320",
94011 + .help = NULL
94012 +};
94013 +
94014 +static unsigned int execute_latent_entropy(void);
94015 +static bool gate_latent_entropy(void);
94016 +
94017 +static struct gimple_opt_pass latent_entropy_pass = {
94018 + .pass = {
94019 + .type = GIMPLE_PASS,
94020 + .name = "latent_entropy",
94021 +#if BUILDING_GCC_VERSION >= 4008
94022 + .optinfo_flags = OPTGROUP_NONE,
94023 +#endif
94024 + .gate = gate_latent_entropy,
94025 + .execute = execute_latent_entropy,
94026 + .sub = NULL,
94027 + .next = NULL,
94028 + .static_pass_number = 0,
94029 + .tv_id = TV_NONE,
94030 + .properties_required = PROP_gimple_leh | PROP_cfg,
94031 + .properties_provided = 0,
94032 + .properties_destroyed = 0,
94033 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
94034 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
94035 + }
94036 +};
94037 +
94038 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
94039 +{
94040 + if (TREE_CODE(*node) != FUNCTION_DECL) {
94041 + *no_add_attrs = true;
94042 + error("%qE attribute only applies to functions", name);
94043 + }
94044 + return NULL_TREE;
94045 +}
94046 +
94047 +static struct attribute_spec latent_entropy_attr = {
94048 + .name = "latent_entropy",
94049 + .min_length = 0,
94050 + .max_length = 0,
94051 + .decl_required = true,
94052 + .type_required = false,
94053 + .function_type_required = false,
94054 + .handler = handle_latent_entropy_attribute,
94055 +#if BUILDING_GCC_VERSION >= 4007
94056 + .affects_type_identity = false
94057 +#endif
94058 +};
94059 +
94060 +static void register_attributes(void *event_data, void *data)
94061 +{
94062 + register_attribute(&latent_entropy_attr);
94063 +}
94064 +
94065 +static bool gate_latent_entropy(void)
94066 +{
94067 + tree latent_entropy_attr;
94068 +
94069 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
94070 + return latent_entropy_attr != NULL_TREE;
94071 +}
94072 +
94073 +static unsigned HOST_WIDE_INT seed;
94074 +static unsigned HOST_WIDE_INT get_random_const(void)
94075 +{
94076 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
94077 + return seed;
94078 +}
94079 +
94080 +static enum tree_code get_op(tree *rhs)
94081 +{
94082 + static enum tree_code op;
94083 + unsigned HOST_WIDE_INT random_const;
94084 +
94085 + random_const = get_random_const();
94086 +
94087 + switch (op) {
94088 + case BIT_XOR_EXPR:
94089 + op = PLUS_EXPR;
94090 + break;
94091 +
94092 + case PLUS_EXPR:
94093 + if (rhs) {
94094 + op = LROTATE_EXPR;
94095 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
94096 + break;
94097 + }
94098 +
94099 + case LROTATE_EXPR:
94100 + default:
94101 + op = BIT_XOR_EXPR;
94102 + break;
94103 + }
94104 + if (rhs)
94105 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
94106 + return op;
94107 +}
94108 +
94109 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
94110 +{
94111 + gimple_stmt_iterator gsi;
94112 + gimple assign;
94113 + tree addxorrol, rhs;
94114 + enum tree_code op;
94115 +
94116 + op = get_op(&rhs);
94117 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
94118 + assign = gimple_build_assign(local_entropy, addxorrol);
94119 +#if BUILDING_GCC_VERSION <= 4007
94120 + find_referenced_vars_in(assign);
94121 +#endif
94122 +//debug_bb(bb);
94123 + gsi = gsi_after_labels(bb);
94124 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
94125 + update_stmt(assign);
94126 +}
94127 +
94128 +static void perturb_latent_entropy(basic_block bb, tree rhs)
94129 +{
94130 + gimple_stmt_iterator gsi;
94131 + gimple assign;
94132 + tree addxorrol, temp;
94133 +
94134 + // 1. create temporary copy of latent_entropy
94135 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
94136 +#if BUILDING_GCC_VERSION <= 4007
94137 + add_referenced_var(temp);
94138 + mark_sym_for_renaming(temp);
94139 +#endif
94140 +
94141 + // 2. read...
94142 + assign = gimple_build_assign(temp, latent_entropy_decl);
94143 +#if BUILDING_GCC_VERSION <= 4007
94144 + find_referenced_vars_in(assign);
94145 +#endif
94146 + gsi = gsi_after_labels(bb);
94147 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
94148 + update_stmt(assign);
94149 +
94150 + // 3. ...modify...
94151 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
94152 + assign = gimple_build_assign(temp, addxorrol);
94153 +#if BUILDING_GCC_VERSION <= 4007
94154 + find_referenced_vars_in(assign);
94155 +#endif
94156 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
94157 + update_stmt(assign);
94158 +
94159 + // 4. ...write latent_entropy
94160 + assign = gimple_build_assign(latent_entropy_decl, temp);
94161 +#if BUILDING_GCC_VERSION <= 4007
94162 + find_referenced_vars_in(assign);
94163 +#endif
94164 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
94165 + update_stmt(assign);
94166 +}
94167 +
94168 +static unsigned int execute_latent_entropy(void)
94169 +{
94170 + basic_block bb;
94171 + gimple assign;
94172 + gimple_stmt_iterator gsi;
94173 + tree local_entropy;
94174 +
94175 + if (!latent_entropy_decl) {
94176 + struct varpool_node *node;
94177 +
94178 +#if BUILDING_GCC_VERSION <= 4007
94179 + for (node = varpool_nodes; node; node = node->next) {
94180 + tree var = node->decl;
94181 +#else
94182 + FOR_EACH_VARIABLE(node) {
94183 + tree var = node->symbol.decl;
94184 +#endif
94185 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
94186 + continue;
94187 + latent_entropy_decl = var;
94188 +// debug_tree(var);
94189 + break;
94190 + }
94191 + if (!latent_entropy_decl) {
94192 +// debug_tree(current_function_decl);
94193 + return 0;
94194 + }
94195 + }
94196 +
94197 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
94198 +
94199 + // 1. create local entropy variable
94200 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
94201 +#if BUILDING_GCC_VERSION <= 4007
94202 + add_referenced_var(local_entropy);
94203 + mark_sym_for_renaming(local_entropy);
94204 +#endif
94205 +
94206 + // 2. initialize local entropy variable
94207 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
94208 + if (dom_info_available_p(CDI_DOMINATORS))
94209 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
94210 + gsi = gsi_start_bb(bb);
94211 +
94212 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
94213 +// gimple_set_location(assign, loc);
94214 +#if BUILDING_GCC_VERSION <= 4007
94215 + find_referenced_vars_in(assign);
94216 +#endif
94217 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
94218 + update_stmt(assign);
94219 + bb = bb->next_bb;
94220 +
94221 + // 3. instrument each BB with an operation on the local entropy variable
94222 + while (bb != EXIT_BLOCK_PTR) {
94223 + perturb_local_entropy(bb, local_entropy);
94224 + bb = bb->next_bb;
94225 + };
94226 +
94227 + // 4. mix local entropy into the global entropy variable
94228 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
94229 + return 0;
94230 +}
94231 +
94232 +static void start_unit_callback(void *gcc_data, void *user_data)
94233 +{
94234 + tree latent_entropy_type;
94235 +
94236 +#if BUILDING_GCC_VERSION >= 4007
94237 + seed = get_random_seed(false);
94238 +#else
94239 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
94240 + seed *= seed;
94241 +#endif
94242 +
94243 + if (in_lto_p)
94244 + return;
94245 +
94246 + // extern volatile u64 latent_entropy
94247 + gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
94248 + latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
94249 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
94250 +
94251 + TREE_STATIC(latent_entropy_decl) = 1;
94252 + TREE_PUBLIC(latent_entropy_decl) = 1;
94253 + TREE_USED(latent_entropy_decl) = 1;
94254 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
94255 + DECL_EXTERNAL(latent_entropy_decl) = 1;
94256 + DECL_ARTIFICIAL(latent_entropy_decl) = 1;
94257 + DECL_INITIAL(latent_entropy_decl) = NULL;
94258 + lang_hooks.decls.pushdecl(latent_entropy_decl);
94259 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
94260 +// varpool_finalize_decl(latent_entropy_decl);
94261 +// varpool_mark_needed_node(latent_entropy_decl);
94262 +}
94263 +
94264 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
94265 +{
94266 + const char * const plugin_name = plugin_info->base_name;
94267 + struct register_pass_info latent_entropy_pass_info = {
94268 + .pass = &latent_entropy_pass.pass,
94269 + .reference_pass_name = "optimized",
94270 + .ref_pass_instance_number = 1,
94271 + .pos_op = PASS_POS_INSERT_BEFORE
94272 + };
94273 +
94274 + if (!plugin_default_version_check(version, &gcc_version)) {
94275 + error(G_("incompatible gcc/plugin versions"));
94276 + return 1;
94277 + }
94278 +
94279 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
94280 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
94281 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
94282 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
94283 +
94284 + return 0;
94285 +}
94286 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
94287 new file mode 100644
94288 index 0000000..ddd5b2e
94289 --- /dev/null
94290 +++ b/tools/gcc/size_overflow_hash.data
94291 @@ -0,0 +1,5876 @@
94292 +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
94293 +batadv_orig_node_del_if_4 batadv_orig_node_del_if 2 4 NULL
94294 +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
94295 +compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
94296 +carl9170_alloc_27 carl9170_alloc 1 27 NULL
94297 +sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
94298 +padzero_55 padzero 1 55 &sel_read_policyvers_55
94299 +cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
94300 +__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
94301 +DepcaSignature_80 DepcaSignature 2 80 NULL nohasharray
94302 +crypto_authenc_setkey_80 crypto_authenc_setkey 3 80 &DepcaSignature_80
94303 +snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
94304 +load_msg_95 load_msg 2 95 NULL
94305 +device_flush_iotlb_115 device_flush_iotlb 2-3 115 NULL
94306 +init_q_132 init_q 4 132 NULL
94307 +memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
94308 +hva_to_gfn_memslot_149 hva_to_gfn_memslot 0-1 149 NULL
94309 +tracing_trace_options_write_153 tracing_trace_options_write 3 153 NULL
94310 +nvme_create_queue_170 nvme_create_queue 3 170 NULL
94311 +xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
94312 +iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
94313 +virtblk_add_req_197 virtblk_add_req 2-3 197 NULL
94314 +proc_scsi_write_proc_267 proc_scsi_write_proc 3 267 NULL
94315 +br_port_info_size_268 br_port_info_size 0 268 NULL
94316 +generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
94317 +read_file_war_stats_292 read_file_war_stats 3 292 NULL
94318 +syslog_print_307 syslog_print 2 307 NULL
94319 +platform_device_add_data_310 platform_device_add_data 3 310 NULL
94320 +dn_setsockopt_314 dn_setsockopt 5 314 NULL
94321 +next_node_allowed_318 next_node_allowed 1 318 NULL
94322 +compat_sys_ioctl_333 compat_sys_ioctl 3 333 NULL
94323 +btmrvl_txdnldready_read_413 btmrvl_txdnldready_read 3 413 NULL
94324 +lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
94325 +snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
94326 +_alloc_get_attr_desc_470 _alloc_get_attr_desc 2 470 NULL
94327 +dccp_manip_pkt_476 dccp_manip_pkt 4 476 NULL
94328 +pidlist_resize_496 pidlist_resize 2 496 NULL
94329 +read_vbt_r0_503 read_vbt_r0 1 503 NULL
94330 +rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
94331 +ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
94332 +zlib_deflate_workspacesize_537 zlib_deflate_workspacesize 0-1-2 537 NULL
94333 +iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
94334 +dle_count_543 dle_count 0 543 NULL
94335 +devres_alloc_551 devres_alloc 2 551 NULL
94336 +snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
94337 +dev_hard_header_565 dev_hard_header 0 565 NULL nohasharray
94338 +start_isoc_chain_565 start_isoc_chain 2 565 &dev_hard_header_565
94339 +compat_sys_preadv_583 compat_sys_preadv 3 583 NULL
94340 +smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
94341 +ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
94342 +compat_sys_shmat_620 compat_sys_shmat 3 620 NULL
94343 +isp1760_register_628 isp1760_register 1-2 628 NULL
94344 +drbd_bm_find_next_643 drbd_bm_find_next 2 643 NULL
94345 +unlink_queued_645 unlink_queued 3-4 645 NULL
94346 +dtim_interval_read_654 dtim_interval_read 3 654 NULL
94347 +ceph_copy_user_to_page_vector_656 ceph_copy_user_to_page_vector 4-3 656 NULL
94348 +div_u64_rem_672 div_u64_rem 0 672 NULL
94349 +mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
94350 +rtl8169_try_rx_copy_705 rtl8169_try_rx_copy 3 705 NULL
94351 +persistent_ram_vmap_709 persistent_ram_vmap 1-2 709 NULL
94352 +ipath_resize_cq_712 ipath_resize_cq 2 712 NULL
94353 +sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
94354 +wm8962_gpio_direction_out_738 wm8962_gpio_direction_out 2 738 NULL
94355 +dvb_video_write_754 dvb_video_write 3 754 NULL
94356 +iwl_read_targ_mem_772 iwl_read_targ_mem 0 772 NULL
94357 +snd_pcm_drain_811 snd_pcm_drain 0 811 NULL
94358 +if_writecmd_815 if_writecmd 2 815 NULL
94359 +aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
94360 +read_fifo_826 read_fifo 3 826 NULL
94361 +read_tree_block_841 read_tree_block 3 841 NULL
94362 +um_idi_read_850 um_idi_read 3 850 NULL
94363 +ieee80211_if_fmt_rc_rateidx_mcs_mask_5ghz_856 ieee80211_if_fmt_rc_rateidx_mcs_mask_5ghz 3 856 NULL
94364 +o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
94365 +iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
94366 +snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
94367 +btmrvl_hsstate_read_920 btmrvl_hsstate_read 3 920 NULL
94368 +readw_931 readw 0 931 NULL
94369 +carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
94370 +ieee80211_ie_build_vht_cap_956 ieee80211_ie_build_vht_cap 0 956 NULL nohasharray
94371 +__nodes_weight_956 __nodes_weight 2-0 956 &ieee80211_ie_build_vht_cap_956
94372 +sys_msgrcv_959 sys_msgrcv 3 959 NULL
94373 +hdlcdev_rx_997 hdlcdev_rx 3 997 NULL
94374 +smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
94375 +lp872x_select_buck_vout_addr_1045 lp872x_select_buck_vout_addr 0 1045 NULL
94376 +gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
94377 +Read_hfc16_1070 Read_hfc16 0 1070 NULL
94378 +mce_request_packet_1073 mce_request_packet 3 1073 NULL
94379 +agp_create_memory_1075 agp_create_memory 1 1075 NULL
94380 +_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
94381 +nfs_pgarray_set_1085 nfs_pgarray_set 2 1085 NULL
94382 +llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
94383 +nfs4_init_nonuniform_client_string_1097 nfs4_init_nonuniform_client_string 3 1097 NULL
94384 +store_risefalltime_1109 store_risefalltime 5 1109 NULL
94385 +cfg80211_report_obss_beacon_1133 cfg80211_report_obss_beacon 3 1133 NULL
94386 +vmalloc_32_1135 vmalloc_32 1 1135 NULL
94387 +i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
94388 +ipc_alloc_1192 ipc_alloc 1 1192 NULL
94389 +ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
94390 +i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
94391 +dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
94392 +i2cdev_read_1206 i2cdev_read 3 1206 NULL
94393 +ipw_packet_received_skb_1230 ipw_packet_received_skb 2 1230 NULL
94394 +thin_status_1239 thin_status 5 1239 NULL
94395 +acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
94396 +ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
94397 +qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
94398 +ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
94399 +batadv_tt_prepare_packet_buff_1280 batadv_tt_prepare_packet_buff 4 1280 NULL
94400 +tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
94401 +compat_put_u64_1319 compat_put_u64 1 1319 NULL
94402 +ffs_1322 ffs 0 1322 NULL
94403 +carl9170_rx_stream_1334 carl9170_rx_stream 3 1334 NULL
94404 +btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
94405 +gen_pool_best_fit_1348 gen_pool_best_fit 2-3-4 1348 NULL
94406 +io_mapping_create_wc_1354 io_mapping_create_wc 1-2 1354 NULL
94407 +snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
94408 +ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
94409 +fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
94410 +ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
94411 +sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
94412 +do_msgsnd_1387 do_msgsnd 4 1387 NULL
94413 +zone_page_state_1393 zone_page_state 0 1393 NULL
94414 +file_read_actor_1401 file_read_actor 4 1401 NULL
94415 +lm3533_als_get_threshold_reg_1404 lm3533_als_get_threshold_reg 0-1 1404 NULL
94416 +stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
94417 +tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
94418 +xprt_alloc_1475 xprt_alloc 2 1475 NULL
94419 +sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
94420 +posix_acl_permission_1495 posix_acl_permission 0 1495 NULL
94421 +tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
94422 +alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
94423 +ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
94424 +fc_frame_alloc_1596 fc_frame_alloc 2 1596 NULL
94425 +packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
94426 +btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
94427 +v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
94428 +btmrvl_hsmode_read_1647 btmrvl_hsmode_read 3 1647 NULL
94429 +ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
94430 +netdev_feature_string_1667 netdev_feature_string 0 1667 NULL
94431 +compat_x25_ioctl_1674 compat_x25_ioctl 3 1674 NULL
94432 +rmap_add_1677 rmap_add 3 1677 NULL
94433 +configfs_read_file_1683 configfs_read_file 3 1683 NULL
94434 +coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
94435 +btrfs_dir_data_len_1714 btrfs_dir_data_len 0 1714 NULL
94436 +dma_memcpy_pg_to_iovec_1725 dma_memcpy_pg_to_iovec 6 1725 NULL
94437 +tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
94438 +compat_cdrom_generic_command_1756 compat_cdrom_generic_command 4 1756 NULL
94439 +ieee80211_new_mesh_header_1761 ieee80211_new_mesh_header 0 1761 NULL
94440 +ebt_size_mwt_1768 ebt_size_mwt 0 1768 NULL
94441 +cosa_write_1774 cosa_write 3 1774 NULL
94442 +update_macheader_1775 update_macheader 7 1775 NULL
94443 +fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
94444 +__nodelist_scnprintf_1815 __nodelist_scnprintf 0-2-4 1815 NULL
94445 +rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
94446 +nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
94447 +tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
94448 +cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
94449 +ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
94450 +read_swap_header_1957 read_swap_header 0 1957 NULL
94451 +ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
94452 +sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
94453 +__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
94454 +atomic_read_unchecked_1995 atomic_read_unchecked 0 1995 NULL
94455 +batadv_tt_commit_changes_2008 batadv_tt_commit_changes 4 2008 NULL
94456 +sep_prepare_input_dma_table_2009 sep_prepare_input_dma_table 2-3 2009 NULL
94457 +rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
94458 +ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
94459 +write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
94460 +BcmCopySection_2035 BcmCopySection 5 2035 NULL
94461 +devm_ioremap_nocache_2036 devm_ioremap_nocache 2-3 2036 NULL
94462 +ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
94463 +ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
94464 +subbuf_read_actor_2071 subbuf_read_actor 3 2071 NULL
94465 +iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
94466 +get_unaligned_le32_2092 get_unaligned_le32 0 2092 NULL
94467 +idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
94468 +audit_expand_2098 audit_expand 2 2098 NULL
94469 +num_pages_spanned_2105 num_pages_spanned 0 2105 NULL
94470 +iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
94471 +ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
94472 +__find_xattr_2117 __find_xattr 6 2117 NULL nohasharray
94473 +enable_read_2117 enable_read 3 2117 &__find_xattr_2117
94474 +pcf50633_write_block_2124 pcf50633_write_block 3-2 2124 NULL
94475 +check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
94476 +mlx4_init_icm_table_2151 mlx4_init_icm_table 5-4 2151 NULL nohasharray
94477 +multipath_status_2151 multipath_status 5 2151 &mlx4_init_icm_table_2151
94478 +iov_iter_count_2152 iov_iter_count 0 2152 NULL
94479 +_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
94480 +ssb_bus_ssbbus_register_2217 ssb_bus_ssbbus_register 2 2217 NULL
94481 +u32_array_read_2219 u32_array_read 3 2219 NULL
94482 +vhci_write_2224 vhci_write 3 2224 NULL
94483 +efx_tsoh_page_count_2225 efx_tsoh_page_count 0 2225 NULL
94484 +lowpan_get_mac_header_length_2231 lowpan_get_mac_header_length 0 2231 NULL
94485 +ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
94486 +netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
94487 +sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
94488 +do_update_counters_2259 do_update_counters 4 2259 NULL
94489 +ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
94490 +debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
94491 +kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
94492 +intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
94493 +picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL
94494 +gart_map_page_2325 gart_map_page 3-4 2325 NULL
94495 +__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
94496 +zr364xx_read_2354 zr364xx_read 3 2354 NULL
94497 +viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
94498 +xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL
94499 +il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
94500 +rtl_port_map_2385 rtl_port_map 1-2 2385 NULL
94501 +rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
94502 +isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
94503 +raid1_size_2419 raid1_size 0-2 2419 NULL
94504 +roccat_common2_send_2422 roccat_common2_send 4 2422 NULL
94505 +hfcpci_empty_fifo_2427 hfcpci_empty_fifo 4 2427 NULL
94506 +ioremap_nocache_2439 ioremap_nocache 1-2 2439 NULL
94507 +tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
94508 +nfs4_alloc_slots_2454 nfs4_alloc_slots 1 2454 NULL nohasharray
94509 +ath6kl_usb_bmi_write_2454 ath6kl_usb_bmi_write 3 2454 &nfs4_alloc_slots_2454
94510 +b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
94511 +update_pmkid_2481 update_pmkid 4 2481 NULL
94512 +wiphy_new_2482 wiphy_new 2 2482 NULL
94513 +bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
94514 +squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
94515 +dm_write_2513 dm_write 3 2513 NULL
94516 +v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
94517 +ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
94518 +gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
94519 +pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
94520 +smk_write_logging_2618 smk_write_logging 3 2618 NULL
94521 +lro_gen_skb_2644 lro_gen_skb 6 2644 NULL
94522 +nfc_llcp_send_ui_frame_2702 nfc_llcp_send_ui_frame 5 2702 NULL
94523 +memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
94524 +__xip_file_write_2733 __xip_file_write 4-3 2733 NULL
94525 +hid_report_raw_event_2762 hid_report_raw_event 4 2762 NULL
94526 +mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL
94527 +__next_cpu_2782 __next_cpu 1 2782 NULL
94528 +sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
94529 +vb2_dc_get_userptr_2829 vb2_dc_get_userptr 2-3 2829 NULL nohasharray
94530 +snd_pcm_reset_2829 snd_pcm_reset 0 2829 &vb2_dc_get_userptr_2829
94531 +wait_for_avail_2847 wait_for_avail 0 2847 NULL
94532 +ufs_free_fragments_2857 ufs_free_fragments 2 2857 NULL
94533 +sfq_alloc_2861 sfq_alloc 1 2861 NULL
94534 +move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
94535 +__swab64p_2875 __swab64p 0 2875 NULL
94536 +nla_padlen_2883 nla_padlen 1 2883 NULL
94537 +cmm_write_2896 cmm_write 3 2896 NULL
94538 +rbd_req_sync_read_2915 rbd_req_sync_read 4-5 2915 NULL
94539 +alloc_page_cgroup_2919 alloc_page_cgroup 1 2919 NULL
94540 +xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
94541 +nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
94542 +tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
94543 +ppp_cp_event_2965 ppp_cp_event 6 2965 NULL
94544 +do_strnlen_user_2976 do_strnlen_user 0-2 2976 NULL
94545 +p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
94546 +do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
94547 +depth_write_3021 depth_write 3 3021 NULL
94548 +snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
94549 +xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
94550 +iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 NULL
94551 +nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
94552 +il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
94553 +__blk_end_bidi_request_3070 __blk_end_bidi_request 3-4 3070 NULL
94554 +dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
94555 +free_coherent_3082 free_coherent 4-2 3082 NULL
94556 +ttusb2_msg_3100 ttusb2_msg 4 3100 NULL
94557 +rb_alloc_3102 rb_alloc 1 3102 NULL
94558 +simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
94559 +print_time_3132 print_time 0 3132 NULL
94560 +fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
94561 +CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
94562 +compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
94563 +uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
94564 +uvc_video_stats_dump_3181 uvc_video_stats_dump 3 3181 NULL
94565 +compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
94566 +mempool_create_node_3191 mempool_create_node 1 3191 NULL
94567 +alloc_context_3194 alloc_context 1 3194 NULL
94568 +shmem_pread_slow_3198 shmem_pread_slow 3 3198 NULL
94569 +kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
94570 +do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
94571 +ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
94572 +key_key_read_3241 key_key_read 3 3241 NULL
94573 +number_3243 number 0 3243 NULL
94574 +check_vendor_extension_3254 check_vendor_extension 1 3254 NULL
94575 +__ilog2_u64_3284 __ilog2_u64 0 3284 NULL
94576 +arvo_sysfs_write_3311 arvo_sysfs_write 6 3311 NULL
94577 +dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
94578 +compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
94579 +de600_read_byte_3332 de600_read_byte 0 3332 NULL
94580 +aac_rkt_ioremap_3333 aac_rkt_ioremap 2 3333 NULL
94581 +read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
94582 +tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
94583 +il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
94584 +gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
94585 +scnprintf_3360 scnprintf 0-2 3360 NULL nohasharray
94586 +tps65090_clr_bits_3360 tps65090_clr_bits 2 3360 &scnprintf_3360
94587 +mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
94588 +send_stream_3397 send_stream 4 3397 NULL
94589 +isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
94590 +msix_map_region_3411 msix_map_region 3 3411 NULL
94591 +mei_io_cb_alloc_resp_buf_3414 mei_io_cb_alloc_resp_buf 2 3414 NULL
94592 +pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
94593 +crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
94594 +pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
94595 +percpu_modalloc_3448 percpu_modalloc 2-3 3448 NULL
94596 +jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 NULL nohasharray
94597 +snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 4-2-5 3464 &jffs2_acl_setxattr_3464
94598 +alloc_skb_fclone_3467 alloc_skb_fclone 1 3467 NULL
94599 +security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
94600 +xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1 3496 NULL
94601 +mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL nohasharray
94602 +ieee80211_wx_set_gen_ie_rsl_3521 ieee80211_wx_set_gen_ie_rsl 3 3521 &mem_tx_free_mem_blks_read_3521
94603 +btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
94604 +b43legacy_read16_3561 b43legacy_read16 0 3561 NULL
94605 +alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
94606 +evtchn_read_3569 evtchn_read 3 3569 NULL
94607 +vc_resize_3585 vc_resize 2-3 3585 NULL
94608 +compat_sys_semtimedop_3606 compat_sys_semtimedop 3 3606 NULL
94609 +sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
94610 +edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
94611 +tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
94612 +aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
94613 +x86_swiotlb_alloc_coherent_3649 x86_swiotlb_alloc_coherent 2 3649 NULL nohasharray
94614 +cm_copy_private_data_3649 cm_copy_private_data 2 3649 &x86_swiotlb_alloc_coherent_3649
94615 +ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
94616 +i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
94617 +ntfs_attr_make_non_resident_3694 ntfs_attr_make_non_resident 0 3694 NULL
94618 +create_irq_3703 create_irq 0 3703 NULL nohasharray
94619 +btmrvl_psmode_write_3703 btmrvl_psmode_write 3 3703 &create_irq_3703 nohasharray
94620 +snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 &btmrvl_psmode_write_3703
94621 +videobuf_pages_to_sg_3708 videobuf_pages_to_sg 2 3708 NULL
94622 +lm3533_als_get_threshold_3725 lm3533_als_get_threshold 2 3725 NULL
94623 +ci_ll_write_3740 ci_ll_write 4 3740 NULL nohasharray
94624 +ath6kl_mgmt_tx_3740 ath6kl_mgmt_tx 7 3740 &ci_ll_write_3740
94625 +sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
94626 +ncp_file_write_3813 ncp_file_write 3 3813 NULL
94627 +read_file_tx_chainmask_3829 read_file_tx_chainmask 3 3829 NULL
94628 +stringify_nodemap_3842 stringify_nodemap 2 3842 NULL
94629 +ubi_eba_read_leb_3847 ubi_eba_read_leb 0 3847 NULL
94630 +create_one_cdev_3852 create_one_cdev 2 3852 NULL
94631 +smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
94632 +get_fd_set_3866 get_fd_set 1 3866 NULL
94633 +garp_attr_create_3883 garp_attr_create 3 3883 NULL
94634 +uea_send_modem_cmd_3888 uea_send_modem_cmd 3 3888 NULL
94635 +efivarfs_file_read_3893 efivarfs_file_read 3 3893 NULL
94636 +nvram_write_3894 nvram_write 3 3894 NULL
94637 +pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
94638 +comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
94639 +vcs_write_3910 vcs_write 3 3910 NULL
94640 +atalk_compat_ioctl_3991 atalk_compat_ioctl 3 3991 NULL
94641 +do_add_counters_3992 do_add_counters 3 3992 NULL
94642 +userspace_status_4004 userspace_status 4 4004 NULL
94643 +mei_write_4005 mei_write 3 4005 NULL nohasharray
94644 +xfs_check_block_4005 xfs_check_block 4 4005 &mei_write_4005
94645 +snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
94646 +blk_end_request_4024 blk_end_request 3 4024 NULL
94647 +ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
94648 +usbnet_write_cmd_async_4035 usbnet_write_cmd_async 7 4035 NULL
94649 +read_file_queues_4078 read_file_queues 3 4078 NULL
94650 +fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
94651 +da9052_free_irq_4090 da9052_free_irq 2 4090 NULL
94652 +tm6000_read_4151 tm6000_read 3 4151 NULL
94653 +mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
94654 +msg_bits_4158 msg_bits 0-3-4 4158 NULL
94655 +get_alua_req_4166 get_alua_req 3 4166 NULL
94656 +blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
94657 +read_file_bool_4180 read_file_bool 3 4180 NULL
94658 +f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
94659 +_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
94660 +__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
94661 +ext4_new_inode_4247 ext4_new_inode 5 4247 NULL
94662 +xt_compat_add_offset_4289 xt_compat_add_offset 0 4289 NULL
94663 +__usbnet_read_cmd_4299 __usbnet_read_cmd 7 4299 NULL
94664 +dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 2-3-5 4303 NULL
94665 +nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
94666 +snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
94667 +__copy_from_user_inatomic_4365 __copy_from_user_inatomic 3 4365 NULL
94668 +sys_setdomainname_4373 sys_setdomainname 2 4373 NULL
94669 +irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
94670 +access_process_vm_4412 access_process_vm 0-2-4 4412 NULL nohasharray
94671 +cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412
94672 +libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
94673 +do_pages_stat_4437 do_pages_stat 2 4437 NULL
94674 +at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
94675 +snd_seq_expand_var_event_4481 snd_seq_expand_var_event 0-5 4481 NULL
94676 +sys_semtimedop_4486 sys_semtimedop 3 4486 NULL
94677 +vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
94678 +set_link_security_4502 set_link_security 4 4502 NULL
94679 +sys_llistxattr_4532 sys_llistxattr 3 4532 NULL
94680 +da9052_group_write_4534 da9052_group_write 2-3 4534 NULL
94681 +tty_register_device_4544 tty_register_device 2 4544 NULL
94682 +videobuf_vmalloc_to_sg_4548 videobuf_vmalloc_to_sg 2 4548 NULL
94683 +btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
94684 +xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
94685 +bch_alloc_4593 bch_alloc 1 4593 NULL
94686 +iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
94687 +skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
94688 +cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
94689 +short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
94690 +kone_receive_4690 kone_receive 4 4690 NULL
94691 +round_pipe_size_4701 round_pipe_size 0 4701 NULL
94692 +cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
94693 +btmrvl_gpiogap_read_4718 btmrvl_gpiogap_read 3 4718 NULL
94694 +ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
94695 +show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
94696 +pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL
94697 +ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
94698 +repair_io_failure_4815 repair_io_failure 4 4815 NULL
94699 +__iio_allocate_sw_ring_buffer_4843 __iio_allocate_sw_ring_buffer 3 4843 NULL
94700 +gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
94701 +key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
94702 +ocfs2_defrag_extent_4873 ocfs2_defrag_extent 3 4873 NULL
94703 +hid_register_field_4874 hid_register_field 2-3 4874 NULL
94704 +vga_arb_read_4886 vga_arb_read 3 4886 NULL
94705 +sys_ipc_4889 sys_ipc 3 4889 NULL
94706 +lp872x_write_byte_4914 lp872x_write_byte 2 4914 NULL
94707 +sys_process_vm_writev_4928 sys_process_vm_writev 3-5 4928 NULL
94708 +ntfs_rl_insert_4931 ntfs_rl_insert 2-4 4931 NULL
94709 +ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
94710 +da9055_reg_write_4942 da9055_reg_write 2 4942 NULL
94711 +devm_kzalloc_4966 devm_kzalloc 2 4966 NULL
94712 +compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
94713 +skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
94714 +ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval_4976 ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval 3 4976 NULL
94715 +vmw_surface_define_size_4993 vmw_surface_define_size 0 4993 NULL
94716 +qla82xx_pci_mem_write_direct_5008 qla82xx_pci_mem_write_direct 2 5008 NULL
94717 +lm3533_als_set_target_5010 lm3533_als_set_target 2-3 5010 NULL
94718 +do_mincore_5018 do_mincore 0-1 5018 NULL
94719 +mtd_device_parse_register_5024 mtd_device_parse_register 5 5024 NULL
94720 +ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 2-3 5066 NULL
94721 +snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
94722 +snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
94723 +get_random_bytes_5091 get_random_bytes 2 5091 NULL nohasharray
94724 +kfifo_copy_from_user_5091 kfifo_copy_from_user 3 5091 &get_random_bytes_5091 nohasharray
94725 +blk_rq_sectors_5091 blk_rq_sectors 0 5091 &kfifo_copy_from_user_5091
94726 +mpol_to_str_5093 mpol_to_str 2 5093 NULL
94727 +sound_write_5102 sound_write 3 5102 NULL
94728 +ufs_add_fragments_5144 ufs_add_fragments 2 5144 NULL
94729 +compat_ptr_5159 compat_ptr 0-1 5159 NULL
94730 +__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
94731 +iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
94732 +acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
94733 +sfi_map_memory_5183 sfi_map_memory 1-2 5183 NULL
94734 +skb_network_header_5203 skb_network_header 0 5203 NULL
94735 +pipe_set_size_5204 pipe_set_size 2 5204 NULL
94736 +ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
94737 +ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
94738 +ssb_ioremap_5228 ssb_ioremap 2 5228 NULL
94739 +isdn_ppp_skb_push_5236 isdn_ppp_skb_push 2 5236 NULL
94740 +do_atmif_sioc_5247 do_atmif_sioc 3 5247 NULL
94741 +pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
94742 +allocate_cnodes_5329 allocate_cnodes 1 5329 NULL
94743 +ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
94744 +cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
94745 +bitmap_fold_5396 bitmap_fold 4 5396 NULL
94746 +nilfs_palloc_entries_per_group_5418 nilfs_palloc_entries_per_group 0 5418 NULL
94747 +sfi_map_table_5462 sfi_map_table 1 5462 NULL
94748 +xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
94749 +xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
94750 +ubi_leb_write_5478 ubi_leb_write 4-5 5478 NULL
94751 +cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
94752 +tty_write_5494 tty_write 3 5494 NULL
94753 +tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
94754 +ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
94755 +__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
94756 +spidev_message_5518 spidev_message 3 5518 NULL
94757 +ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
94758 +brcmu_pkt_buf_get_skb_5556 brcmu_pkt_buf_get_skb 1 5556 NULL
94759 +le_readq_5557 le_readq 0 5557 NULL
94760 +inw_5558 inw 0 5558 NULL
94761 +__first_dma_cap_5560 __first_dma_cap 0 5560 NULL
94762 +fir16_create_5574 fir16_create 3 5574 NULL
94763 +bioset_create_5580 bioset_create 1 5580 NULL
94764 +oz_ep_alloc_5587 oz_ep_alloc 2 5587 NULL
94765 +do_msgrcv_5590 do_msgrcv 4 5590 NULL
94766 +usb_dump_device_descriptor_5599 usb_dump_device_descriptor 0 5599 NULL
94767 +ldm_frag_add_5611 ldm_frag_add 2 5611 NULL
94768 +compat_copy_entries_5617 compat_copy_entries 0 5617 NULL
94769 +ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
94770 +posix_clock_register_5662 posix_clock_register 2 5662 NULL
94771 +mthca_map_reg_5664 mthca_map_reg 2-3 5664 NULL
94772 +__videobuf_alloc_vb_5665 __videobuf_alloc_vb 1 5665 NULL
94773 +get_arg_5694 get_arg 3 5694 NULL
94774 +vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
94775 +rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
94776 +ubi_cdev_compat_ioctl_5746 ubi_cdev_compat_ioctl 3 5746 NULL
94777 +sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
94778 +compat_sys_writev_5784 compat_sys_writev 3 5784 NULL
94779 +__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
94780 +skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
94781 +nv50_disp_pioc_create__5812 nv50_disp_pioc_create_ 5 5812 NULL
94782 +ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
94783 +autofs4_root_compat_ioctl_5838 autofs4_root_compat_ioctl 3 5838 NULL
94784 +ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
94785 +ixgb_check_copybreak_5847 ixgb_check_copybreak 3 5847 NULL
94786 +setup_req_5848 setup_req 3 5848 NULL
94787 +rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
94788 +compat_sys_move_pages_5861 compat_sys_move_pages 2 5861 NULL nohasharray
94789 +uinput_compat_ioctl_5861 uinput_compat_ioctl 3 5861 &compat_sys_move_pages_5861
94790 +port_show_regs_5904 port_show_regs 3 5904 NULL
94791 +rbd_segment_length_5907 rbd_segment_length 0-3-2 5907 NULL
94792 +uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
94793 +qla82xx_pci_mem_read_2M_5912 qla82xx_pci_mem_read_2M 2 5912 NULL
94794 +ttm_bo_kmap_ttm_5922 ttm_bo_kmap_ttm 3 5922 NULL
94795 +lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
94796 +ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
94797 +edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
94798 +tipc_subseq_alloc_5957 tipc_subseq_alloc 1 5957 NULL
94799 +__apu_get_register_5967 __apu_get_register 0 5967 NULL
94800 +ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
94801 +ntfs_rl_append_6037 ntfs_rl_append 2-4 6037 NULL
94802 +da9052_request_irq_6058 da9052_request_irq 2 6058 NULL
94803 +sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
94804 +rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL
94805 +ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
94806 +dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
94807 +matrix_keypad_build_keymap_6129 matrix_keypad_build_keymap 3 6129 NULL
94808 +nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
94809 +ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
94810 +vdma_mem_alloc_6171 vdma_mem_alloc 1 6171 NULL
94811 +wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
94812 +mxt_show_instance_6207 mxt_show_instance 2-0 6207 NULL
94813 +v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
94814 +mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
94815 +f_hidg_read_6238 f_hidg_read 3 6238 NULL
94816 +fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
94817 +pcpu_next_pop_6277 pcpu_next_pop 4 6277 NULL
94818 +tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
94819 +snd_hda_override_conn_list_6282 snd_hda_override_conn_list 0 6282 NULL nohasharray
94820 +xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
94821 +posix_acl_fix_xattr_to_user_6283 posix_acl_fix_xattr_to_user 2 6283 NULL
94822 +nf_nat_ipv6_manip_pkt_6289 nf_nat_ipv6_manip_pkt 2 6289 NULL
94823 +nf_nat_sack_adjust_6297 nf_nat_sack_adjust 2 6297 NULL
94824 +mid_get_vbt_data_r10_6308 mid_get_vbt_data_r10 2 6308 NULL
94825 +_proc_do_string_6376 _proc_do_string 2 6376 NULL
94826 +osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
94827 +posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL
94828 +ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
94829 +__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
94830 +ext4_compat_ioctl_6471 ext4_compat_ioctl 3 6471 NULL
94831 +ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
94832 +cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
94833 +dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
94834 +mei_read_6507 mei_read 3 6507 NULL
94835 +cpumask_next_and_6516 cpumask_next_and 1 6516 NULL
94836 +read_file_disable_ani_6536 read_file_disable_ani 3 6536 NULL
94837 +rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
94838 +wdm_read_6549 wdm_read 3 6549 NULL
94839 +fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
94840 +usb_dump_config_descriptor_6572 usb_dump_config_descriptor 0 6572 NULL
94841 +snd_pcm_hw_refine_old_user_6586 snd_pcm_hw_refine_old_user 0 6586 NULL
94842 +usemap_size_6601 usemap_size 0-1 6601 NULL
94843 +snmp_mib_init_6604 snmp_mib_init 2-3 6604 NULL
94844 +ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
94845 +virtscsi_alloc_tgt_6643 virtscsi_alloc_tgt 2 6643 NULL
94846 +aac_srcv_ioremap_6659 aac_srcv_ioremap 2 6659 NULL
94847 +process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
94848 +ql_process_mac_rx_skb_6689 ql_process_mac_rx_skb 4 6689 NULL
94849 +ieee80211_build_preq_ies_6691 ieee80211_build_preq_ies 0-4 6691 NULL
94850 +btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2 6696 NULL
94851 +ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
94852 +bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
94853 +mpeg_read_6708 mpeg_read 3 6708 NULL
94854 +video_proc_write_6724 video_proc_write 3 6724 NULL
94855 +posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
94856 +rds_rdma_pages_6735 rds_rdma_pages 0 6735 NULL
94857 +sfi_check_table_6772 sfi_check_table 1 6772 NULL
94858 +iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
94859 +ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
94860 +hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
94861 +tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
94862 +make_8259A_irq_6828 make_8259A_irq 1 6828 NULL
94863 +calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
94864 +mon_bin_read_6841 mon_bin_read 3 6841 NULL
94865 +snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
94866 +ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 NULL nohasharray
94867 +raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 &ieee80211_if_fmt_path_refresh_time_6888
94868 +dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
94869 +spi_show_regs_6911 spi_show_regs 3 6911 NULL nohasharray
94870 +proc_sessionid_read_6911 proc_sessionid_read 3 6911 &spi_show_regs_6911 nohasharray
94871 +acm_alloc_minor_6911 acm_alloc_minor 0 6911 &proc_sessionid_read_6911
94872 +__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
94873 +ieee80211_rx_mgmt_probe_resp_6918 ieee80211_rx_mgmt_probe_resp 3 6918 NULL
94874 +do_msgrcv_6921 do_msgrcv 3 6921 NULL
94875 +cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
94876 +qsfp_cks_6945 qsfp_cks 0-2 6945 NULL
94877 +pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
94878 +videobuf_dma_init_kernel_6963 videobuf_dma_init_kernel 3 6963 NULL
94879 +rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL
94880 +crypto_authenc_esn_setkey_6985 crypto_authenc_esn_setkey 3 6985 NULL
94881 +request_key_async_6990 request_key_async 4 6990 NULL
94882 +r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
94883 +cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
94884 +tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
94885 +wimax_msg_7030 wimax_msg 4 7030 NULL
94886 +ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
94887 +snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
94888 +event_enable_read_7074 event_enable_read 3 7074 NULL
94889 +beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
94890 +lp_compat_ioctl_7098 lp_compat_ioctl 3 7098 NULL
94891 +pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
94892 +check_header_7108 check_header 0 7108 NULL
94893 +utf16_strsize_7203 utf16_strsize 0 7203 NULL nohasharray
94894 +__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 &utf16_strsize_7203
94895 +sys32_ipc_7238 sys32_ipc 3-5-6-4 7238 NULL
94896 +get_param_h_7247 get_param_h 0 7247 NULL
94897 +vm_mmap_pgoff_7259 vm_mmap_pgoff 0 7259 NULL
94898 +dma_ops_alloc_addresses_7272 dma_ops_alloc_addresses 3-4-5 7272 NULL
94899 +rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
94900 +mgmt_control_7349 mgmt_control 3 7349 NULL
94901 +ext3_free_blocks_7362 ext3_free_blocks 3-4 7362 NULL
94902 +ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
94903 +hweight_long_7388 hweight_long 0-1 7388 NULL
94904 +vhost_scsi_compat_ioctl_7393 vhost_scsi_compat_ioctl 3 7393 NULL
94905 +sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
94906 +readb_7401 readb 0 7401 NULL
94907 +drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
94908 +ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
94909 +ms_rw_multi_sector_7459 ms_rw_multi_sector 3-4 7459 NULL
94910 +__mutex_lock_common_7469 __mutex_lock_common 0 7469 NULL nohasharray
94911 +wm8996_gpio_direction_out_7469 wm8996_gpio_direction_out 2 7469 &__mutex_lock_common_7469
94912 +garp_request_join_7471 garp_request_join 4 7471 NULL
94913 +compat_sys_msgrcv_7482 compat_sys_msgrcv 2 7482 NULL
94914 +snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
94915 +sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 NULL nohasharray
94916 +ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 &sdhci_alloc_host_7509
94917 +array_zalloc_7519 array_zalloc 1-2 7519 NULL
94918 +setup_usemap_7524 setup_usemap 3 7524 NULL
94919 +goal_in_my_reservation_7553 goal_in_my_reservation 3 7553 NULL
94920 +smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
94921 +ext3_try_to_allocate_7590 ext3_try_to_allocate 5-3 7590 NULL
94922 +groups_alloc_7614 groups_alloc 1 7614 NULL
94923 +sg_virt_7616 sg_virt 0 7616 NULL
94924 +cpumask_first_7648 cpumask_first 0 7648 NULL
94925 +skb_copy_expand_7685 skb_copy_expand 2-3 7685 NULL nohasharray
94926 +acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 &skb_copy_expand_7685
94927 +acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
94928 +dev_write_7708 dev_write 3 7708 NULL
94929 +tps65090_set_bits_7709 tps65090_set_bits 2 7709 NULL
94930 +brcmf_sdcard_send_buf_7713 brcmf_sdcard_send_buf 6 7713 NULL nohasharray
94931 +dbg_check_cats_7713 dbg_check_cats 0 7713 &brcmf_sdcard_send_buf_7713
94932 +set_bypass_pwup_pfs_7742 set_bypass_pwup_pfs 3 7742 NULL
94933 +vxge_device_register_7752 vxge_device_register 4 7752 NULL
94934 +osdv2_attr_list_elem_size_7763 osdv2_attr_list_elem_size 0-1 7763 NULL
94935 +ubi_io_read_vid_hdr_7766 ubi_io_read_vid_hdr 0 7766 NULL
94936 +alloc_candev_7776 alloc_candev 1-2 7776 NULL
94937 +dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
94938 +bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
94939 +diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL
94940 +ubifs_leb_read_7828 ubifs_leb_read 0 7828 NULL
94941 +da9052_reg_update_7858 da9052_reg_update 2 7858 NULL
94942 +tps6586x_clr_bits_7889 tps6586x_clr_bits 2 7889 NULL
94943 +dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
94944 +xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
94945 +gfs2_tune_get_i_7903 gfs2_tune_get_i 0 7903 NULL
94946 +ext3_group_extend_7911 ext3_group_extend 3 7911 NULL
94947 +libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
94948 +f_hidg_write_7932 f_hidg_write 3 7932 NULL
94949 +io_apic_setup_irq_pin_once_7934 io_apic_setup_irq_pin_once 1 7934 NULL
94950 +smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
94951 +sys_mbind_7990 sys_mbind 5 7990 NULL
94952 +tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
94953 +vcs_read_8017 vcs_read 3 8017 NULL
94954 +normalize_up_8037 normalize_up 0-1-2 8037 NULL
94955 +vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
94956 +ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
94957 +dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
94958 +leb_read_lock_8070 leb_read_lock 0 8070 NULL
94959 +alloc_targets_8074 alloc_targets 2 8074 NULL nohasharray
94960 +qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 &alloc_targets_8074
94961 +venus_lookup_8121 venus_lookup 4 8121 NULL
94962 +lm3533_als_set_threshold_8125 lm3533_als_set_threshold 2 8125 NULL
94963 +ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
94964 +ext_sd_execute_write_data_8175 ext_sd_execute_write_data 9 8175 NULL
94965 +dma_map_area_8178 dma_map_area 5-2-3 8178 NULL
94966 +__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
94967 +ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
94968 +recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
94969 +rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
94970 +ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
94971 +play_iframe_8219 play_iframe 3 8219 NULL
94972 +create_log_8225 create_log 2 8225 NULL nohasharray
94973 +kvm_mmu_page_set_gfn_8225 kvm_mmu_page_set_gfn 2 8225 &create_log_8225
94974 +sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
94975 +check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
94976 +add_rx_skb_8257 add_rx_skb 3 8257 NULL
94977 +t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
94978 +init_cdev_8274 init_cdev 1 8274 NULL
94979 +rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
94980 +qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
94981 +snd_pcm_update_state_8320 snd_pcm_update_state 0 8320 NULL
94982 +lm3533_led_get_pattern_8321 lm3533_led_get_pattern 0 8321 NULL nohasharray
94983 +construct_key_and_link_8321 construct_key_and_link 4 8321 &lm3533_led_get_pattern_8321
94984 +ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
94985 +tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
94986 +ieee80211_if_fmt_ht_opmode_8347 ieee80211_if_fmt_ht_opmode 3 8347 NULL
94987 +ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
94988 +uvc_v4l2_compat_ioctl32_8375 uvc_v4l2_compat_ioctl32 3 8375 NULL
94989 +xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
94990 +zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
94991 +uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 NULL
94992 +snd_usb_ctl_msg_8436 snd_usb_ctl_msg 8 8436 NULL
94993 +irq_create_mapping_8437 irq_create_mapping 2 8437 NULL
94994 +afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
94995 +_irq_to_enable_addr_8485 _irq_to_enable_addr 0-1 8485 NULL
94996 +batadv_tt_len_8502 batadv_tt_len 0-1 8502 NULL
94997 +dev_config_8506 dev_config 3 8506 NULL
94998 +ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
94999 +utf16_strnlen_8513 utf16_strnlen 0 8513 NULL
95000 +opticon_process_data_packet_8524 opticon_process_data_packet 3 8524 NULL
95001 +pnp_resource_len_8532 pnp_resource_len 0 8532 NULL
95002 +alloc_pg_vec_8533 alloc_pg_vec 2 8533 NULL
95003 +ocfs2_read_virt_blocks_8538 ocfs2_read_virt_blocks 2-3 8538 NULL
95004 +profile_remove_8556 profile_remove 3 8556 NULL
95005 +cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
95006 +mga_ioremap_8571 mga_ioremap 1-2 8571 NULL
95007 +isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
95008 +tower_write_8580 tower_write 3 8580 NULL
95009 +rtllib_MFIE_rate_len_8606 rtllib_MFIE_rate_len 0 8606 NULL
95010 +shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
95011 +it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
95012 +scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
95013 +fuse_send_write_pages_8636 fuse_send_write_pages 0 8636 NULL
95014 +generic_acl_set_8658 generic_acl_set 4 8658 NULL
95015 +dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
95016 +lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
95017 +tc3589x_gpio_irq_unmap_8680 tc3589x_gpio_irq_unmap 2 8680 NULL
95018 +rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
95019 +skb_frag_size_8695 skb_frag_size 0 8695 NULL
95020 +arcfb_write_8702 arcfb_write 3 8702 NULL
95021 +i_size_read_8703 i_size_read 0 8703 NULL nohasharray
95022 +init_header_8703 init_header 0 8703 &i_size_read_8703
95023 +ctrl_out_8712 ctrl_out 3-5 8712 NULL
95024 +jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL nohasharray
95025 +snapshot_status_8729 snapshot_status 5 8729 &jffs2_acl_count_8729
95026 +f_dupfd_8730 f_dupfd 1 8730 NULL
95027 +tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
95028 +joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
95029 +sys_prctl_8766 sys_prctl 4 8766 NULL
95030 +x32_arch_ptrace_8767 x32_arch_ptrace 3-4 8767 NULL
95031 +paging32_prefetch_gpte_8783 paging32_prefetch_gpte 4 8783 NULL
95032 +ext4_try_to_write_inline_data_8785 ext4_try_to_write_inline_data 3-4 8785 NULL
95033 +__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL
95034 +cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
95035 +metronomefb_write_8823 metronomefb_write 3 8823 NULL
95036 +icmpv6_manip_pkt_8833 icmpv6_manip_pkt 4 8833 NULL nohasharray
95037 +get_queue_depth_8833 get_queue_depth 0 8833 &icmpv6_manip_pkt_8833
95038 +dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
95039 +usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
95040 +debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
95041 +wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
95042 +send_pages_8872 send_pages 3 8872 NULL
95043 +compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
95044 +tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 NULL
95045 +sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
95046 +layout_commit_8926 layout_commit 3 8926 NULL
95047 +adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
95048 +driver_stats_read_8944 driver_stats_read 3 8944 NULL
95049 +read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
95050 +seq_bitmap_list_8963 seq_bitmap_list 3 8963 NULL
95051 +usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
95052 +qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
95053 +venus_mkdir_8967 venus_mkdir 4 8967 NULL
95054 +vol_cdev_read_8968 vol_cdev_read 3 8968 NULL nohasharray
95055 +seq_open_net_8968 seq_open_net 4 8968 &vol_cdev_read_8968
95056 +bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
95057 +btrfs_alloc_free_block_8986 btrfs_alloc_free_block 3 8986 NULL
95058 +palmas_ldo_write_9012 palmas_ldo_write 2 9012 NULL
95059 +snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
95060 +__pskb_copy_9038 __pskb_copy 2 9038 NULL
95061 +nla_put_9042 nla_put 3 9042 NULL
95062 +snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
95063 +snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
95064 +fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
95065 +string_9080 string 0 9080 NULL
95066 +create_queues_9088 create_queues 2-3 9088 NULL
95067 +ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
95068 +caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL nohasharray
95069 +gfn_to_rmap_9110 gfn_to_rmap 2-3 9110 &caif_stream_sendmsg_9110
95070 +pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
95071 +ext4_list_backups_9138 ext4_list_backups 0 9138 NULL
95072 +dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
95073 +isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
95074 +count_leading_zeros_9183 count_leading_zeros 0 9183 NULL
95075 +alloc_group_attrs_9194 alloc_group_attrs 2 9194 NULL nohasharray
95076 +altera_swap_ir_9194 altera_swap_ir 2 9194 &alloc_group_attrs_9194
95077 +gx1_gx_base_9198 gx1_gx_base 0 9198 NULL
95078 +snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
95079 +tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
95080 +sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
95081 +ocfs2_clear_ext_refcount_9256 ocfs2_clear_ext_refcount 4 9256 NULL
95082 +tcf_csum_ipv4_icmp_9258 tcf_csum_ipv4_icmp 3 9258 NULL
95083 +sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
95084 +hdpvr_read_9273 hdpvr_read 3 9273 NULL
95085 +flakey_status_9274 flakey_status 5 9274 NULL
95086 +qla82xx_pci_set_window_9303 qla82xx_pci_set_window 0-2 9303 NULL
95087 +iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
95088 +ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
95089 +ieee80211_if_fmt_txpower_9334 ieee80211_if_fmt_txpower 3 9334 NULL
95090 +ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
95091 +read_9397 read 3 9397 NULL
95092 +nf_nat_sip_expect_9418 nf_nat_sip_expect 8 9418 NULL
95093 +cfg80211_report_obss_beacon_9422 cfg80211_report_obss_beacon 3 9422 NULL
95094 +bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
95095 +ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
95096 +kmalloc_array_9444 kmalloc_array 1-2 9444 NULL
95097 +ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
95098 +mcs_unwrap_mir_9455 mcs_unwrap_mir 3 9455 NULL
95099 +ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
95100 +agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL
95101 +rbd_coll_end_req_9472 rbd_coll_end_req 3 9472 NULL
95102 +__alloc_preds_9492 __alloc_preds 2 9492 NULL nohasharray
95103 +crypt_status_9492 crypt_status 5 9492 &__alloc_preds_9492
95104 +lp_write_9511 lp_write 3 9511 NULL
95105 +scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
95106 +lm3533_update_9529 lm3533_update 2 9529 NULL
95107 +read_file_dma_9530 read_file_dma 3 9530 NULL
95108 +ext3_alloc_branch_9534 ext3_alloc_branch 5 9534 NULL
95109 +tps65910_gpio_output_9539 tps65910_gpio_output 2 9539 NULL
95110 +audit_log_n_untrustedstring_9548 audit_log_n_untrustedstring 3 9548 NULL
95111 +readl_9557 readl 0 9557 NULL
95112 +fw_node_create_9559 fw_node_create 2 9559 NULL
95113 +kobj_map_9566 kobj_map 2-3 9566 NULL
95114 +f2fs_read_data_pages_9574 f2fs_read_data_pages 4 9574 NULL
95115 +biovec_create_pools_9575 biovec_create_pools 2 9575 NULL
95116 +ieee80211_tdls_mgmt_9581 ieee80211_tdls_mgmt 8 9581 NULL
95117 +do_sync_9604 do_sync 1 9604 NULL
95118 +snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
95119 +saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
95120 +compat_sys_keyctl_9639 compat_sys_keyctl 4-2-3 9639 NULL
95121 +ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
95122 +uvc_alloc_buffers_9656 uvc_alloc_buffers 2-3 9656 NULL
95123 +queue_received_packet_9657 queue_received_packet 5 9657 NULL
95124 +snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
95125 +ks8842_read16_9676 ks8842_read16 0 9676 NULL nohasharray
95126 +dns_query_9676 dns_query 3 9676 &ks8842_read16_9676
95127 +qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
95128 +__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
95129 +x25_asy_compat_ioctl_9694 x25_asy_compat_ioctl 4 9694 NULL nohasharray
95130 +is_hole_9694 is_hole 2 9694 &x25_asy_compat_ioctl_9694
95131 +fnb_9703 fnb 2-3 9703 NULL
95132 +ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 NULL
95133 +ieee80211_if_fmt_num_mcast_sta_9738 ieee80211_if_fmt_num_mcast_sta 3 9738 NULL
95134 +ddb_input_read_9743 ddb_input_read 3 9743 NULL
95135 +sta_last_ack_signal_read_9751 sta_last_ack_signal_read 3 9751 NULL
95136 +btrfs_super_root_9763 btrfs_super_root 0 9763 NULL
95137 +__alloc_percpu_9764 __alloc_percpu 1-2 9764 NULL
95138 +do_sigpending_9766 do_sigpending 2 9766 NULL
95139 +__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
95140 +snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
95141 +_regmap_write_9803 _regmap_write 2 9803 NULL
95142 +ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
95143 +ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 NULL
95144 +pnp_mem_start_9817 pnp_mem_start 0 9817 NULL
95145 +kernel_physical_mapping_init_9818 kernel_physical_mapping_init 0-2-1 9818 NULL
95146 +dvb_dvr_set_buffer_size_9840 dvb_dvr_set_buffer_size 2 9840 NULL
95147 +cfg80211_send_deauth_9862 cfg80211_send_deauth 3 9862 NULL
95148 +pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
95149 +btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
95150 +f1x_translate_sysaddr_to_cs_9868 f1x_translate_sysaddr_to_cs 2 9868 NULL
95151 +mlx4_bitmap_alloc_range_9876 mlx4_bitmap_alloc_range 2-3 9876 NULL
95152 +wil_read_file_ioblob_9878 wil_read_file_ioblob 3 9878 NULL
95153 +bm_register_write_9893 bm_register_write 3 9893 NULL nohasharray
95154 +snd_midi_event_new_9893 snd_midi_event_new 1 9893 &bm_register_write_9893
95155 +snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 3-5 9895 NULL
95156 +receive_DataRequest_9904 receive_DataRequest 3 9904 NULL
95157 +nonpaging_page_fault_9908 nonpaging_page_fault 2 9908 NULL
95158 +pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
95159 +read_file_misc_9948 read_file_misc 3 9948 NULL
95160 +set_rxd_buffer_pointer_9950 set_rxd_buffer_pointer 8 9950 NULL
95161 +ext2_new_blocks_9954 ext2_new_blocks 2 9954 NULL
95162 +csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
95163 +get_free_serial_index_9969 get_free_serial_index 0 9969 NULL
95164 +btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
95165 +ath6kl_usb_submit_ctrl_out_9978 ath6kl_usb_submit_ctrl_out 6 9978 NULL
95166 +twl6040_clear_bits_9985 twl6040_clear_bits 2 9985 NULL
95167 +aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
95168 +handle_request_10024 handle_request 9 10024 NULL
95169 +batadv_orig_hash_add_if_10033 batadv_orig_hash_add_if 2 10033 NULL
95170 +ieee80211_probereq_get_10040 ieee80211_probereq_get 4-5 10040 NULL
95171 +rbd_coll_end_req_index_10041 rbd_coll_end_req_index 5 10041 NULL
95172 +ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
95173 +wm831x_gpio_direction_in_10099 wm831x_gpio_direction_in 2 10099 NULL
95174 +ufs_bitmap_search_10105 ufs_bitmap_search 0-3 10105 NULL
95175 +get_elem_size_10110 get_elem_size 0-2 10110 NULL nohasharray
95176 +dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 &get_elem_size_10110
95177 +gfs2_meta_read_10112 gfs2_meta_read 0 10112 NULL
95178 +offset_to_bit_10134 offset_to_bit 0 10134 NULL
95179 +aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
95180 +rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
95181 +hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL nohasharray
95182 +ol_chunk_entries_10159 ol_chunk_entries 0 10159 &hidg_alloc_ep_req_10159
95183 +stmpe_irq_unmap_10164 stmpe_irq_unmap 2 10164 NULL
95184 +asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
95185 +proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
95186 +jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
95187 +do_ioctl_trans_10194 do_ioctl_trans 3 10194 NULL
95188 +cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
95189 +snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
95190 +ubi_leb_change_10289 ubi_leb_change 4 10289 NULL
95191 +lm3533_led_delay_set_10291 lm3533_led_delay_set 2 10291 NULL
95192 +read_emulate_10310 read_emulate 2-4 10310 NULL
95193 +ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
95194 +ubi_leb_read_10328 ubi_leb_read 0 10328 NULL
95195 +tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
95196 +ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
95197 +dbAllocAny_10354 dbAllocAny 0 10354 NULL
95198 +ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
95199 +ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
95200 +sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
95201 +ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
95202 +do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
95203 +fwtty_rx_10434 fwtty_rx 3 10434 NULL
95204 +event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
95205 +ca91cx42_alloc_resource_10502 ca91cx42_alloc_resource 2 10502 NULL
95206 +qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
95207 +sel_write_disable_10511 sel_write_disable 3 10511 NULL
95208 +osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
95209 +rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
95210 +snd_pcm_hw_params_user_10520 snd_pcm_hw_params_user 0 10520 NULL
95211 +ocfs2_add_refcounted_extent_10526 ocfs2_add_refcounted_extent 6 10526 NULL
95212 +get_vm_area_caller_10527 get_vm_area_caller 1 10527 NULL
95213 +snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 NULL
95214 +ext4_write_begin_10576 ext4_write_begin 3-4 10576 NULL
95215 +scrub_remap_extent_10588 scrub_remap_extent 2 10588 NULL
95216 +otp_read_10594 otp_read 2-4-5 10594 NULL
95217 +supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
95218 +mc13783_set_fmt_10616 mc13783_set_fmt 3 10616 NULL
95219 +ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
95220 +alloc_coherent_10632 alloc_coherent 2 10632 NULL
95221 +nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL
95222 +parport_write_10669 parport_write 0 10669 NULL
95223 +inl_10708 inl 0 10708 NULL nohasharray
95224 +selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 &inl_10708
95225 +pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 NULL nohasharray
95226 +shash_async_setkey_10720 shash_async_setkey 3 10720 &pvr2_ioread_read_10720
95227 +spi_sync_10731 spi_sync 0 10731 NULL
95228 +sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL nohasharray
95229 +apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737
95230 +compat_sys_msgsnd_10738 compat_sys_msgsnd 2 10738 NULL
95231 +sys_syslog_10746 sys_syslog 3 10746 NULL
95232 +alloc_one_pg_vec_page_10747 alloc_one_pg_vec_page 1 10747 NULL
95233 +vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
95234 +kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
95235 +sys_bind_10799 sys_bind 3 10799 NULL
95236 +compat_put_int_10828 compat_put_int 1 10828 NULL
95237 +lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
95238 +ida_get_new_above_10853 ida_get_new_above 2 10853 NULL
95239 +fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
95240 +ol_chunk_blocks_10864 ol_chunk_blocks 0 10864 NULL
95241 +batadv_check_unicast_packet_10866 batadv_check_unicast_packet 2 10866 NULL
95242 +snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
95243 +mid_get_vbt_data_r0_10876 mid_get_vbt_data_r0 2 10876 NULL
95244 +bl_mark_for_commit_10879 bl_mark_for_commit 2-3 10879 NULL
95245 +get_scq_10897 get_scq 2 10897 NULL
95246 +cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
95247 +tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
95248 +__copy_from_user_10918 __copy_from_user 3 10918 NULL
95249 +da9052_map_irq_10952 da9052_map_irq 2 10952 NULL
95250 +bm_entry_read_10976 bm_entry_read 3 10976 NULL
95251 +i915_min_freq_write_10981 i915_min_freq_write 3 10981 NULL
95252 +sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
95253 +__hci_num_ctrl_10985 __hci_num_ctrl 0 10985 NULL
95254 +xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
95255 +rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
95256 +SetLineNumber_11023 SetLineNumber 0 11023 NULL
95257 +mb_find_next_bit_11037 mb_find_next_bit 2-3-0 11037 NULL
95258 +nouveau_gpio_create__11048 nouveau_gpio_create_ 4 11048 NULL
95259 +tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
95260 +carl9170_handle_mpdu_11056 carl9170_handle_mpdu 3 11056 NULL
95261 +tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
95262 +snd_pcm_delay_11081 snd_pcm_delay 0 11081 NULL
95263 +count_argc_11083 count_argc 0 11083 NULL
95264 +kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
95265 +stmpe_gpio_to_irq_11110 stmpe_gpio_to_irq 2 11110 NULL
95266 +tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
95267 +page_offset_11120 page_offset 0 11120 NULL
95268 +tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
95269 +alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
95270 +acpi_os_map_memory_11161 acpi_os_map_memory 1-2 11161 NULL
95271 +ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL nohasharray
95272 +snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 3-4 11172 &ioat2_alloc_ring_11172
95273 +__swab16p_11220 __swab16p 0 11220 NULL
95274 +il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
95275 +of_irq_count_11253 of_irq_count 0 11253 NULL
95276 +ubifs_write_node_11258 ubifs_write_node 5-3 11258 NULL
95277 +hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
95278 +cru_detect_11272 cru_detect 1 11272 NULL
95279 +ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
95280 +tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 NULL
95281 +construct_key_11329 construct_key 3 11329 NULL nohasharray
95282 +__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
95283 +next_segment_11330 next_segment 0-2-1 11330 NULL
95284 +persistent_ram_buffer_map_11332 persistent_ram_buffer_map 1-2 11332 NULL
95285 +ext4_get_inline_size_11349 ext4_get_inline_size 0 11349 NULL
95286 +i915_max_freq_write_11350 i915_max_freq_write 3 11350 NULL
95287 +sel_write_create_11353 sel_write_create 3 11353 NULL
95288 +handle_unit_11355 handle_unit 0-1 11355 NULL
95289 +batadv_skb_head_push_11360 batadv_skb_head_push 2 11360 NULL
95290 +drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
95291 +qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
95292 +dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
95293 +___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
95294 +str_to_user_11411 str_to_user 2 11411 NULL
95295 +mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
95296 +ath6kl_wmi_test_rx_11414 ath6kl_wmi_test_rx 3 11414 NULL
95297 +adis16480_show_firmware_revision_11417 adis16480_show_firmware_revision 3 11417 NULL
95298 +trace_options_read_11419 trace_options_read 3 11419 NULL
95299 +xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
95300 +prepare_image_11424 prepare_image 0 11424 NULL
95301 +vring_size_11426 vring_size 0-1-2 11426 NULL
95302 +bttv_read_11432 bttv_read 3 11432 NULL
95303 +create_zero_mask_11453 create_zero_mask 0-1 11453 NULL
95304 +swp_offset_11475 swp_offset 0 11475 NULL
95305 +sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 NULL
95306 +xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
95307 +sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
95308 +kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
95309 +twl_direction_in_11527 twl_direction_in 2 11527 NULL
95310 +setup_IO_APIC_irq_extra_11537 setup_IO_APIC_irq_extra 1 11537 NULL
95311 +skb_cow_data_11565 skb_cow_data 0-2 11565 NULL
95312 +mlx4_init_cmpt_table_11569 mlx4_init_cmpt_table 3 11569 NULL
95313 +oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
95314 +snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
95315 +fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
95316 +sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
95317 +nla_total_size_11658 nla_total_size 0-1 11658 NULL
95318 +ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
95319 +btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
95320 +da9055_gpio_direction_output_11680 da9055_gpio_direction_output 2 11680 NULL
95321 +dsp_buffer_alloc_11684 dsp_buffer_alloc 2 11684 NULL
95322 +sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
95323 +split_11691 split 2 11691 NULL
95324 +snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
95325 +blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
95326 +tcf_csum_ipv6_icmp_11738 tcf_csum_ipv6_icmp 4 11738 NULL
95327 +nfsd4_get_drc_mem_11748 nfsd4_get_drc_mem 0-1-2 11748 NULL
95328 +dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
95329 +iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
95330 +ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
95331 +ebt_buf_add_11779 ebt_buf_add 0 11779 NULL
95332 +btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
95333 +pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
95334 +zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
95335 +sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
95336 +rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
95337 +unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
95338 +nf_nat_sdp_media_11863 nf_nat_sdp_media 9 11863 NULL
95339 +ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
95340 +kmalloc_slab_11917 kmalloc_slab 1 11917 NULL
95341 +fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
95342 +bitmap_remap_11929 bitmap_remap 5 11929 NULL
95343 +atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
95344 +dccp_feat_clone_sp_val_11942 dccp_feat_clone_sp_val 3 11942 NULL
95345 +kvm_set_msr_common_11953 kvm_set_msr_common 3 11953 NULL
95346 +f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
95347 +atmel_read16_11981 atmel_read16 0 11981 NULL
95348 +read_and_add_raw_conns_11987 read_and_add_raw_conns 0 11987 NULL
95349 +mwifiex_cfg80211_mgmt_tx_12022 mwifiex_cfg80211_mgmt_tx 9 12022 NULL
95350 +ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
95351 +ieee80211_if_read_user_power_level_12050 ieee80211_if_read_user_power_level 3 12050 NULL
95352 +il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
95353 +ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
95354 +ubifs_recover_log_leb_12079 ubifs_recover_log_leb 3 12079 NULL
95355 +da9052_gpio_direction_output_12120 da9052_gpio_direction_output 2 12120 NULL
95356 +alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
95357 +set_powered_12129 set_powered 4 12129 NULL
95358 +nfs_writedata_alloc_12133 nfs_writedata_alloc 2 12133 NULL
95359 +ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
95360 +xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
95361 +batadv_add_packet_12136 batadv_add_packet 3 12136 NULL
95362 +rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
95363 +vmw_fifo_reserve_12141 vmw_fifo_reserve 2 12141 NULL
95364 +get_idx_gc_leb_12148 get_idx_gc_leb 0 12148 NULL
95365 +btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
95366 +vmbus_open_12154 vmbus_open 2-3 12154 NULL
95367 +wil_rxdesc_phy_length_12165 wil_rxdesc_phy_length 0 12165 NULL
95368 +dma_memcpy_to_iovec_12173 dma_memcpy_to_iovec 5 12173 NULL
95369 +ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
95370 +compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
95371 +ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
95372 +bl_is_sector_init_12199 bl_is_sector_init 2 12199 NULL
95373 +receive_copy_12216 receive_copy 3 12216 NULL
95374 +snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
95375 +aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 NULL
95376 +ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
95377 +ath_descdma_setup_12257 ath_descdma_setup 5 12257 NULL
95378 +shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
95379 +add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
95380 +note_last_dentry_12285 note_last_dentry 3 12285 NULL
95381 +roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL nohasharray
95382 +il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 &roundup_to_multiple_of_64_12288
95383 +vxge_get_num_vfs_12302 vxge_get_num_vfs 0 12302 NULL
95384 +split_bvec_12312 split_bvec 6 12312 NULL
95385 +tipc_msg_build_12326 tipc_msg_build 4 12326 NULL
95386 +pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
95387 +mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
95388 +__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
95389 +receive_packet_12367 receive_packet 2 12367 NULL
95390 +xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
95391 +btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL
95392 +ntfs_get_size_for_mapping_pairs_12413 ntfs_get_size_for_mapping_pairs 0 12413 NULL
95393 +ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
95394 +skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
95395 +qla4_82xx_pci_mem_write_direct_12479 qla4_82xx_pci_mem_write_direct 2 12479 NULL
95396 +x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
95397 +rtllib_auth_challenge_12493 rtllib_auth_challenge 3 12493 NULL
95398 +nfs_readdir_make_qstr_12509 nfs_readdir_make_qstr 3 12509 NULL
95399 +qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
95400 +arizona_gpio_direction_out_12564 arizona_gpio_direction_out 2 12564 NULL
95401 +hvc_alloc_12579 hvc_alloc 4 12579 NULL
95402 +snd_pcm_plugin_alloc_12580 snd_pcm_plugin_alloc 2 12580 NULL
95403 +macvtap_compat_ioctl_12587 macvtap_compat_ioctl 3 12587 NULL
95404 +pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
95405 +ipv6_get_l4proto_12600 ipv6_get_l4proto 2 12600 NULL
95406 +vhci_put_user_12604 vhci_put_user 4 12604 NULL
95407 +fc_fcp_frame_alloc_12624 fc_fcp_frame_alloc 2 12624 NULL
95408 +pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
95409 +ctrl_cdev_compat_ioctl_12634 ctrl_cdev_compat_ioctl 3 12634 NULL
95410 +pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
95411 +dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
95412 +ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
95413 +sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
95414 +sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
95415 +ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
95416 +ivtv_write_12721 ivtv_write 3 12721 NULL
95417 +key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
95418 +__videobuf_alloc_cached_12740 __videobuf_alloc_cached 1 12740 NULL
95419 +ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
95420 +listxattr_12769 listxattr 3 12769 NULL
95421 +sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
95422 +platform_create_bundle_12785 platform_create_bundle 4-6 12785 NULL
95423 +scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
95424 +xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
95425 +readq_12825 readq 0 12825 NULL
95426 +TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
95427 +ath6kl_wmi_add_wow_pattern_cmd_12842 ath6kl_wmi_add_wow_pattern_cmd 4 12842 NULL nohasharray
95428 +spidev_sync_12842 spidev_sync 0 12842 &ath6kl_wmi_add_wow_pattern_cmd_12842
95429 +spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
95430 +get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
95431 +ocfs2_hamming_encode_block_12904 ocfs2_hamming_encode_block 2 12904 NULL
95432 +get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
95433 +rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
95434 +ci_ll_init_12930 ci_ll_init 3 12930 NULL
95435 +tps65910_reg_update_bits_12934 tps65910_reg_update_bits 2 12934 NULL
95436 +do_inode_permission_12946 do_inode_permission 0 12946 NULL
95437 +bm_status_write_12964 bm_status_write 3 12964 NULL
95438 +_drbd_md_first_sector_12984 _drbd_md_first_sector 0 12984 NULL
95439 +acpi_tb_install_table_12988 acpi_tb_install_table 1 12988 NULL
95440 +TransmitTcb_12989 TransmitTcb 4 12989 NULL
95441 +sk_peek_offset_12991 sk_peek_offset 0 12991 NULL
95442 +subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
95443 +generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
95444 +ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
95445 +__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL
95446 +irq_set_chip_and_handler_13088 irq_set_chip_and_handler 1 13088 NULL
95447 +tps6586x_set_bits_13089 tps6586x_set_bits 2 13089 NULL
95448 +xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
95449 +blk_rq_map_sg_13092 blk_rq_map_sg 0 13092 NULL
95450 +mb_find_next_zero_bit_13100 mb_find_next_zero_bit 2-3 13100 NULL
95451 +ubifs_compat_ioctl_13108 ubifs_compat_ioctl 3 13108 NULL
95452 +snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
95453 +xen_allocate_irq_dynamic_13116 xen_allocate_irq_dynamic 0 13116 NULL
95454 +bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
95455 +blk_update_request_13146 blk_update_request 3 13146 NULL
95456 +caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
95457 +pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
95458 +dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
95459 +create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
95460 +compat_put_ulong_13186 compat_put_ulong 1 13186 NULL
95461 +comedi_read_13199 comedi_read 3 13199 NULL
95462 +mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
95463 +__nodes_fold_13215 __nodes_fold 4 13215 NULL
95464 +get_unaligned_le64_13219 get_unaligned_le64 0 13219 NULL
95465 +svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
95466 +asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
95467 +fw_download_code_13249 fw_download_code 3 13249 NULL
95468 +init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
95469 +hostap_80211_get_hdrlen_13255 hostap_80211_get_hdrlen 0 13255 NULL
95470 +bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
95471 +carl9170_rx_13272 carl9170_rx 3 13272 NULL
95472 +pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
95473 +il4965_stats_flag_13281 il4965_stats_flag 0-3 13281 NULL
95474 +lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
95475 +platform_device_add_resources_13289 platform_device_add_resources 3 13289 NULL
95476 +us122l_ctl_msg_13330 us122l_ctl_msg 8 13330 NULL
95477 +kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
95478 +mthca_alloc_mtt_range_13371 mthca_alloc_mtt_range 2 13371 NULL
95479 +iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
95480 +wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
95481 +carl9170_rx_untie_data_13405 carl9170_rx_untie_data 3 13405 NULL
95482 +sky2_receive_13407 sky2_receive 2 13407 NULL
95483 +netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
95484 +keyring_read_13438 keyring_read 3 13438 NULL
95485 +sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL nohasharray
95486 +set_tap_pwup_pfs_13440 set_tap_pwup_pfs 3 13440 &sctp_setsockopt_peer_primary_addr_13440
95487 +ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 7-8-9 13443 NULL
95488 +mthca_buddy_alloc_13454 mthca_buddy_alloc 2 13454 NULL
95489 +ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 2 13512 NULL
95490 +core_status_13515 core_status 4 13515 NULL
95491 +smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
95492 +bm_init_13529 bm_init 2 13529 NULL
95493 +llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
95494 +ieee80211_if_read_ap_power_level_13558 ieee80211_if_read_ap_power_level 3 13558 NULL
95495 +alloc_tio_13564 alloc_tio 3 13564 NULL
95496 +ubifs_get_idx_gc_leb_13566 ubifs_get_idx_gc_leb 0 13566 NULL
95497 +read_file_antenna_13574 read_file_antenna 3 13574 NULL
95498 +pm860x_set_bits_13582 pm860x_set_bits 2 13582 NULL
95499 +cache_write_13589 cache_write 3 13589 NULL
95500 +mpt_lan_receive_post_turbo_13592 mpt_lan_receive_post_turbo 2 13592 NULL
95501 +aac_sa_ioremap_13596 aac_sa_ioremap 2 13596 NULL nohasharray
95502 +irias_new_octseq_value_13596 irias_new_octseq_value 2 13596 &aac_sa_ioremap_13596
95503 +usb_dump_interface_descriptor_13603 usb_dump_interface_descriptor 0 13603 NULL
95504 +swap_cgroup_swapon_13614 swap_cgroup_swapon 2 13614 NULL
95505 +wm8994_bulk_write_13615 wm8994_bulk_write 3-2 13615 NULL
95506 +pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
95507 +iio_device_add_event_sysfs_13627 iio_device_add_event_sysfs 0 13627 NULL
95508 +packet_snd_13634 packet_snd 3 13634 NULL
95509 +blk_msg_write_13655 blk_msg_write 3 13655 NULL
95510 +cache_downcall_13666 cache_downcall 3 13666 NULL
95511 +fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
95512 +audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
95513 +ufs_dtog_13750 ufs_dtog 0-2 13750 NULL
95514 +ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
95515 +fb_sys_read_13778 fb_sys_read 3 13778 NULL
95516 +ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
95517 +random_read_13815 random_read 3 13815 NULL
95518 +hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
95519 +evdev_ioctl_compat_13851 evdev_ioctl_compat 2-3 13851 NULL
95520 +compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL nohasharray
95521 +alloc_trace_uprobe_13870 alloc_trace_uprobe 3 13870 &compat_ip_setsockopt_13870
95522 +snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
95523 +ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
95524 +ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL nohasharray
95525 +ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 &ieee80211_if_read_dot11MeshForwarding_13940
95526 +iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
95527 +compat_chaninfo_13945 compat_chaninfo 2 13945 NULL
95528 +ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
95529 +lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
95530 +snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
95531 +com90xx_found_13974 com90xx_found 3 13974 NULL
95532 +qcam_read_13977 qcam_read 3 13977 NULL
95533 +dsp_read_13980 dsp_read 2 13980 NULL
95534 +bm_block_bits_13981 bm_block_bits 0 13981 NULL nohasharray
95535 +dvb_demux_read_13981 dvb_demux_read 3 13981 &bm_block_bits_13981
95536 +ieee80211_bss_info_update_13991 ieee80211_bss_info_update 4 13991 NULL
95537 +btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
95538 +_rtl92s_firmware_downloadcode_14021 _rtl92s_firmware_downloadcode 3 14021 NULL
95539 +dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
95540 +read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
95541 +ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
95542 +sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
95543 +do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
95544 +compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
95545 +nlmsg_len_14115 nlmsg_len 0 14115 NULL
95546 +vfio_fops_compat_ioctl_14130 vfio_fops_compat_ioctl 3 14130 NULL
95547 +ntfs_rl_replace_14136 ntfs_rl_replace 2-4 14136 NULL
95548 +em_canid_change_14150 em_canid_change 3 14150 NULL
95549 +gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
95550 +print_input_mask_14168 print_input_mask 3-0 14168 NULL
95551 +ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
95552 +alloc_async_14208 alloc_async 1 14208 NULL
95553 +sys_kexec_load_14222 sys_kexec_load 2 14222 NULL
95554 +pool_status_14230 pool_status 5 14230 NULL
95555 +ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
95556 +inode_to_path_14235 inode_to_path 2 14235 NULL
95557 +dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4-2 14244 NULL
95558 +snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 4-2 14245 NULL
95559 +reiserfs_compat_ioctl_14265 reiserfs_compat_ioctl 3 14265 NULL
95560 +ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
95561 +add_numbered_child_14273 add_numbered_child 5 14273 NULL
95562 +OS_mem_token_alloc_14276 OS_mem_token_alloc 1 14276 NULL
95563 +snd_seq_oss_readq_new_14283 snd_seq_oss_readq_new 2 14283 NULL
95564 +rr_status_14293 rr_status 5 14293 NULL
95565 +lp8788_write_byte_14299 lp8788_write_byte 2 14299 NULL
95566 +read_default_ldt_14302 read_default_ldt 2 14302 NULL
95567 +oo_objects_14319 oo_objects 0 14319 NULL
95568 +p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
95569 +snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
95570 +acpi_get_override_irq_14381 acpi_get_override_irq 1 14381 NULL
95571 +ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
95572 +smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
95573 +mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
95574 +get_kcore_size_14425 get_kcore_size 0 14425 NULL
95575 +check_lpt_crc_14442 check_lpt_crc 0 14442 NULL
95576 +block_size_14443 block_size 0 14443 NULL
95577 +ci13xxx_add_device_14456 ci13xxx_add_device 3 14456 NULL
95578 +snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
95579 +udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
95580 +ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
95581 +stripe_status_14506 stripe_status 5 14506 NULL
95582 +ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
95583 +dataflash_read_user_otp_14536 dataflash_read_user_otp 2-3 14536 NULL nohasharray
95584 +ep0_write_14536 ep0_write 3 14536 &dataflash_read_user_otp_14536
95585 +picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
95586 +drm_vmalloc_dma_14550 drm_vmalloc_dma 1 14550 NULL
95587 +usb_dump_desc_14553 usb_dump_desc 0 14553 NULL
95588 +idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
95589 +ocfs2_trim_group_14641 ocfs2_trim_group 4-3 14641 NULL
95590 +dbJoin_14644 dbJoin 0 14644 NULL
95591 +profile_replace_14652 profile_replace 3 14652 NULL
95592 +pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
95593 +ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
95594 +tsi148_master_set_14685 tsi148_master_set 4 14685 NULL
95595 +u_audio_playback_14709 u_audio_playback 3 14709 NULL
95596 +vfd_write_14717 vfd_write 3 14717 NULL
95597 +__blk_end_request_14729 __blk_end_request 3 14729 NULL
95598 +raid1_resize_14740 raid1_resize 2 14740 NULL
95599 +btrfs_inode_extref_name_len_14752 btrfs_inode_extref_name_len 0 14752 NULL
95600 +rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
95601 +qla82xx_pci_mem_write_2M_14765 qla82xx_pci_mem_write_2M 2 14765 NULL
95602 +regmap_range_read_file_14775 regmap_range_read_file 3 14775 NULL
95603 +lm3533_als_get_hysteresis_14776 lm3533_als_get_hysteresis 2 14776 NULL
95604 +sta_dev_read_14782 sta_dev_read 3 14782 NULL
95605 +ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
95606 +hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
95607 +snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
95608 +bcma_scan_read32_14802 bcma_scan_read32 0 14802 NULL
95609 +do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
95610 +__mutex_fastpath_lock_retval_14844 __mutex_fastpath_lock_retval 0 14844 NULL
95611 +lcd_write_14857 lcd_write 3 14857 NULL nohasharray
95612 +__krealloc_14857 __krealloc 2 14857 &lcd_write_14857
95613 +get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
95614 +sriov_enable_migration_14889 sriov_enable_migration 2 14889 NULL
95615 +acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
95616 +unifi_read_14899 unifi_read 3 14899 NULL
95617 +krealloc_14908 krealloc 2 14908 NULL
95618 +regmap_irq_get_virq_14910 regmap_irq_get_virq 2 14910 NULL
95619 +__arch_hweight64_14923 __arch_hweight64 0 14923 NULL
95620 +ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
95621 +queue_cnt_14951 queue_cnt 0 14951 NULL
95622 +videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
95623 +mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
95624 +setkey_14987 setkey 3 14987 NULL nohasharray
95625 +gpio_twl4030_write_14987 gpio_twl4030_write 1 14987 &setkey_14987
95626 +vmap_15025 vmap 2 15025 NULL
95627 +blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
95628 +irq_get_next_irq_15053 irq_get_next_irq 1 15053 NULL
95629 +cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
95630 +ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
95631 +nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
95632 +ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5 15072 NULL
95633 +__alloc_extent_buffer_15093 __alloc_extent_buffer 3 15093 NULL
95634 +hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
95635 +start_port_15124 start_port 0 15124 NULL
95636 +memchr_15126 memchr 0 15126 NULL
95637 +ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
95638 +self_check_not_bad_15175 self_check_not_bad 0 15175 NULL
95639 +iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
95640 +reserve_resources_15194 reserve_resources 3 15194 NULL
95641 +bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
95642 +variax_alloc_sysex_buffer_15237 variax_alloc_sysex_buffer 3 15237 NULL
95643 +il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
95644 +compat_raw_ioctl_15290 compat_raw_ioctl 3 15290 NULL
95645 +sys_connect_15291 sys_connect 3 15291 NULL nohasharray
95646 +xlate_dev_mem_ptr_15291 xlate_dev_mem_ptr 1 15291 &sys_connect_15291
95647 +arch_enable_uv_irq_15294 arch_enable_uv_irq 2 15294 NULL
95648 +acpi_ev_create_gpe_block_15297 acpi_ev_create_gpe_block 5 15297 NULL
95649 +tpm_tis_init_15304 tpm_tis_init 2-3 15304 NULL
95650 +fcoe_ctlr_send_keep_alive_15308 fcoe_ctlr_send_keep_alive 3 15308 NULL
95651 +__ocfs2_remove_xattr_range_15330 __ocfs2_remove_xattr_range 4-5-3 15330 NULL
95652 +kovaplus_sysfs_read_15337 kovaplus_sysfs_read 6 15337 NULL
95653 +ioread16_15342 ioread16 0 15342 NULL
95654 +alloc_ring_15345 alloc_ring 2-4 15345 NULL
95655 +acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
95656 +compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
95657 +fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
95658 +domain_flush_pages_15379 domain_flush_pages 2-3 15379 NULL
95659 +alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
95660 +btrfs_level_size_15392 btrfs_level_size 0 15392 NULL
95661 +pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
95662 +get_modalias_15406 get_modalias 2 15406 NULL
95663 +__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4 15423 NULL
95664 +tcp_mtu_to_mss_15438 tcp_mtu_to_mss 0-2 15438 NULL
95665 +hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
95666 +memweight_15450 memweight 2 15450 NULL
95667 +vmalloc_15464 vmalloc 1 15464 NULL
95668 +insert_old_idx_znode_15500 insert_old_idx_znode 0 15500 NULL
95669 +zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
95670 +da9052_bat_irq_15533 da9052_bat_irq 1 15533 NULL
95671 +p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
95672 +ql_process_mac_rx_page_15543 ql_process_mac_rx_page 4 15543 NULL
95673 +ieee80211_amsdu_to_8023s_15561 ieee80211_amsdu_to_8023s 5 15561 NULL
95674 +snd_pcm_channel_info_15572 snd_pcm_channel_info 0 15572 NULL
95675 +persistent_status_15574 persistent_status 4 15574 NULL
95676 +bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
95677 +vme_user_write_15587 vme_user_write 3 15587 NULL
95678 +ocfs2_truncate_rec_15595 ocfs2_truncate_rec 7 15595 NULL
95679 +sx150x_install_irq_chip_15609 sx150x_install_irq_chip 3 15609 NULL
95680 +iommu_device_max_index_15620 iommu_device_max_index 0-3-2-1 15620 NULL nohasharray
95681 +compat_fillonedir_15620 compat_fillonedir 3 15620 &iommu_device_max_index_15620
95682 +set_dis_tap_pfs_15621 set_dis_tap_pfs 3 15621 NULL
95683 +proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
95684 +tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
95685 +sk_memory_allocated_add_15642 sk_memory_allocated_add 2 15642 &tomoyo_scan_bprm_15642 nohasharray
95686 +pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &sk_memory_allocated_add_15642
95687 +fs_path_add_15648 fs_path_add 3 15648 NULL
95688 +xsd_read_15653 xsd_read 3 15653 NULL
95689 +compat_sys_fcntl_15654 compat_sys_fcntl 3 15654 NULL
95690 +unix_bind_15668 unix_bind 3 15668 NULL
95691 +dm_read_15674 dm_read 3 15674 NULL
95692 +pstore_mkfile_15675 pstore_mkfile 6 15675 NULL
95693 +uf_sme_queue_message_15697 uf_sme_queue_message 3 15697 NULL
95694 +ocfs2_split_tree_15716 ocfs2_split_tree 5 15716 NULL
95695 +HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
95696 +bitmap_search_next_usable_block_15762 bitmap_search_next_usable_block 3-1 15762 NULL
95697 +do_test_15766 do_test 1 15766 NULL
95698 +set_std_nic_pfs_15792 set_std_nic_pfs 3 15792 NULL
95699 +smk_read_direct_15803 smk_read_direct 3 15803 NULL
95700 +snd_pcm_ioctl_compat_15804 snd_pcm_ioctl_compat 3 15804 NULL
95701 +gx1_read_conf_reg_15817 gx1_read_conf_reg 0 15817 NULL nohasharray
95702 +nameseq_list_15817 nameseq_list 3 15817 &gx1_read_conf_reg_15817 nohasharray
95703 +gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
95704 +afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
95705 +brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
95706 +table_size_15851 table_size 0-1-2 15851 NULL
95707 +ubi_io_write_15870 ubi_io_write 5-4 15870 NULL nohasharray
95708 +media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
95709 +__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
95710 +ERR_PTR_15881 ERR_PTR 0 15881 NULL
95711 +nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
95712 +native_read_msr_15905 native_read_msr 0 15905 NULL
95713 +wm5100_gpio_direction_in_15934 wm5100_gpio_direction_in 2 15934 NULL
95714 +parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
95715 +power_read_15939 power_read 3 15939 NULL
95716 +lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
95717 +snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 4-2-5 15952 NULL
95718 +remap_pci_mem_15966 remap_pci_mem 1-2 15966 NULL
95719 +set_spte_15977 set_spte 7-6 15977 NULL
95720 +frame_alloc_15981 frame_alloc 4 15981 NULL
95721 +alloc_vm_area_15989 alloc_vm_area 1 15989 NULL
95722 +hdpvr_register_videodev_16010 hdpvr_register_videodev 3 16010 NULL
95723 +viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
95724 +got_frame_16028 got_frame 2 16028 NULL
95725 +isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
95726 +dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 NULL nohasharray
95727 +isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 &dma_tx_requested_read_16110
95728 +irq_set_chip_and_handler_name_16111 irq_set_chip_and_handler_name 1 16111 NULL
95729 +snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
95730 +compat_sys_select_16131 compat_sys_select 1 16131 NULL
95731 +fsm_init_16134 fsm_init 2 16134 NULL
95732 +hysdn_rx_netpkt_16136 hysdn_rx_netpkt 3 16136 NULL
95733 +ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
95734 +bnx2i_get_cid_num_16166 bnx2i_get_cid_num 0 16166 NULL
95735 +mapping_level_16188 mapping_level 2 16188 NULL
95736 +cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
95737 +create_table_16213 create_table 2 16213 NULL
95738 +atomic_read_file_16227 atomic_read_file 3 16227 NULL
95739 +BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
95740 +btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 NULL
95741 +mark_written_sectors_16262 mark_written_sectors 2 16262 NULL
95742 +reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
95743 +set_disc_pfs_16270 set_disc_pfs 3 16270 NULL
95744 +ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
95745 +drbd_setsockopt_16280 drbd_setsockopt 5 16280 NULL nohasharray
95746 +nand_bch_init_16280 nand_bch_init 3-2 16280 &drbd_setsockopt_16280
95747 +account_16283 account 0-2-4 16283 NULL nohasharray
95748 +mirror_status_16283 mirror_status 5 16283 &account_16283
95749 +stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
95750 +rbd_segment_offset_16293 rbd_segment_offset 0-2 16293 NULL
95751 +rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
95752 +vmw_cursor_update_image_16332 vmw_cursor_update_image 3-4 16332 NULL
95753 +tps80031_update_16360 tps80031_update 3 16360 NULL
95754 +total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
95755 +rbd_add_16366 rbd_add 3 16366 NULL
95756 +iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
95757 +nl80211_send_unprot_deauth_16378 nl80211_send_unprot_deauth 4 16378 NULL
95758 +diva_os_malloc_16406 diva_os_malloc 2 16406 NULL
95759 +ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
95760 +rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
95761 +netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
95762 +tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
95763 +snd_interval_max_16529 snd_interval_max 0 16529 NULL
95764 +raid10_resize_16537 raid10_resize 2 16537 NULL
95765 +tcp_manip_pkt_16563 tcp_manip_pkt 4 16563 NULL
95766 +lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
95767 +agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
95768 +btrfs_get_token_32_16651 btrfs_get_token_32 0 16651 NULL
95769 +mfd_add_devices_16668 mfd_add_devices 4 16668 NULL
95770 +da9052_reg_write_16685 da9052_reg_write 2 16685 NULL
95771 +em28xx_v4l2_read_16701 em28xx_v4l2_read 3 16701 NULL
95772 +arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
95773 +compat_blkdev_driver_ioctl_16769 compat_blkdev_driver_ioctl 4 16769 NULL
95774 +blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
95775 +i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
95776 +get_server_iovec_16804 get_server_iovec 2 16804 NULL
95777 +tipc_send2name_16809 tipc_send2name 6 16809 NULL
95778 +dm_vcalloc_16814 dm_vcalloc 1-2 16814 NULL
95779 +drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
95780 +scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
95781 +hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
95782 +alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
95783 +carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
95784 +st_write_16874 st_write 3 16874 NULL
95785 +__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
95786 +wm8350_set_bits_16911 wm8350_set_bits 2 16911 NULL
95787 +psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 NULL nohasharray
95788 +snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 &psb_unlocked_ioctl_16926
95789 +random32_16937 random32 0 16937 NULL
95790 +_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
95791 +squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
95792 +cfg80211_send_unprot_disassoc_16951 cfg80211_send_unprot_disassoc 3 16951 NULL
95793 +keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
95794 +ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
95795 +copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
95796 +jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
95797 +__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
95798 +dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
95799 +simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
95800 +__kmalloc_reserve_17080 __kmalloc_reserve 1 17080 NULL
95801 +carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
95802 +mac_address_string_17091 mac_address_string 0 17091 NULL
95803 +entry_length_17093 entry_length 0 17093 NULL
95804 +sys_preadv_17100 sys_preadv 3 17100 NULL
95805 +pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
95806 +mwifiex_get_common_rates_17131 mwifiex_get_common_rates 3 17131 NULL
95807 +nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
95808 +sep_read_17161 sep_read 3 17161 NULL
95809 +befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
95810 +tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
95811 +UniStrnlen_17169 UniStrnlen 0 17169 NULL
95812 +access_remote_vm_17189 access_remote_vm 0-2-4 17189 NULL
95813 +driver_state_read_17194 driver_state_read 3 17194 NULL nohasharray
95814 +iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 &driver_state_read_17194
95815 +dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
95816 +ms_rw_17220 ms_rw 3-4 17220 NULL
95817 +__be16_to_cpup_17261 __be16_to_cpup 0 17261 NULL
95818 +error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
95819 +alloc_ep_17269 alloc_ep 1 17269 NULL
95820 +pg_read_17276 pg_read 3 17276 NULL
95821 +raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
95822 +hmac_sha256_17278 hmac_sha256 2 17278 NULL
95823 +neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
95824 +minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
95825 +ieee80211_if_fmt_dot11MeshForwarding_17301 ieee80211_if_fmt_dot11MeshForwarding 3 17301 NULL
95826 +skb_pad_17302 skb_pad 2 17302 NULL
95827 +mb_cache_create_17307 mb_cache_create 2 17307 NULL
95828 +gnttab_map_frames_v2_17314 gnttab_map_frames_v2 2 17314 NULL
95829 +ata_host_alloc_pinfo_17325 ata_host_alloc_pinfo 3 17325 NULL
95830 +ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
95831 +ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
95832 +lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
95833 +sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
95834 +nla_get_u32_17455 nla_get_u32 0 17455 NULL
95835 +__ref_totlen_17461 __ref_totlen 0 17461 NULL
95836 +compat_cmd_17465 compat_cmd 2 17465 NULL
95837 +probe_bios_17467 probe_bios 1 17467 NULL
95838 +probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
95839 +__alloc_session_17485 __alloc_session 2-1 17485 NULL
95840 +TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
95841 +bitmap_pos_to_ord_17503 bitmap_pos_to_ord 3 17503 NULL
95842 +arizona_apply_fll_17538 arizona_apply_fll 2 17538 NULL
95843 +xlog_do_log_recovery_17550 xlog_do_log_recovery 3 17550 NULL
95844 +__copy_to_user_17551 __copy_to_user 0-3 17551 NULL
95845 +copy_from_user_17559 copy_from_user 3 17559 NULL
95846 +snd_pcm_action_lock_irq_17569 snd_pcm_action_lock_irq 0 17569 NULL
95847 +acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
95848 +neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
95849 +rts51x_write_mem_17598 rts51x_write_mem 4 17598 NULL
95850 +iwl_dump_nic_event_log_17601 iwl_dump_nic_event_log 0 17601 NULL
95851 +wm8994_gpio_to_irq_17604 wm8994_gpio_to_irq 2 17604 NULL
95852 +osst_execute_17607 osst_execute 7-6 17607 NULL
95853 +ocfs2_mark_extent_written_17615 ocfs2_mark_extent_written 6 17615 NULL
95854 +ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
95855 +twl4030_set_gpio_direction_17645 twl4030_set_gpio_direction 1 17645 NULL
95856 +packet_setsockopt_17662 packet_setsockopt 5 17662 NULL nohasharray
95857 +ubi_io_read_data_17662 ubi_io_read_data 0 17662 &packet_setsockopt_17662
95858 +pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
95859 +__einj_error_trigger_17707 __einj_error_trigger 1 17707 NULL nohasharray
95860 +venus_rename_17707 venus_rename 5-4 17707 &__einj_error_trigger_17707
95861 +exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
95862 +sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
95863 +mark_unsafe_pages_17759 mark_unsafe_pages 0 17759 NULL
95864 +brcmf_usb_attach_17766 brcmf_usb_attach 2-3 17766 NULL
95865 +ubifs_leb_change_17789 ubifs_leb_change 4 17789 NULL
95866 +get_unaligned_be64_17794 get_unaligned_be64 0 17794 NULL
95867 +_snd_pcm_lib_alloc_vmalloc_buffer_17820 _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 NULL
95868 +gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
95869 +cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
95870 +count_leafs_17842 count_leafs 0 17842 NULL
95871 +sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
95872 +alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
95873 +ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
95874 +orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL
95875 +init_per_cpu_17880 init_per_cpu 1 17880 NULL
95876 +ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
95877 +compat_sys_pwritev_17886 compat_sys_pwritev 3 17886 NULL
95878 +ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
95879 +ocfs2_clusters_to_blocks_17896 ocfs2_clusters_to_blocks 0-2 17896 NULL
95880 +wm8400_set_bits_17898 wm8400_set_bits 2 17898 NULL
95881 +recover_head_17904 recover_head 3 17904 NULL
95882 +dccp_feat_register_sp_17914 dccp_feat_register_sp 5 17914 NULL
95883 +xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
95884 +srp_iu_pool_alloc_17920 srp_iu_pool_alloc 2 17920 NULL
95885 +scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
95886 +ufs_free_blocks_17963 ufs_free_blocks 2-3 17963 NULL
95887 +calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
95888 +smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
95889 +gnttab_max_grant_frames_17993 gnttab_max_grant_frames 0 17993 NULL
95890 +ext4_num_overhead_clusters_18001 ext4_num_overhead_clusters 2 18001 NULL
95891 +pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
95892 +prandom32_18007 prandom32 0 18007 NULL
95893 +alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
95894 +fill_read_18019 fill_read 0 18019 NULL
95895 +o2hb_highest_node_18034 o2hb_highest_node 2 18034 NULL
95896 +cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
95897 +find_next_inuse_18051 find_next_inuse 2-3 18051 NULL
95898 +ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
95899 +lua_sysfs_read_18062 lua_sysfs_read 6 18062 NULL
95900 +hex_byte_pack_18064 hex_byte_pack 0 18064 NULL
95901 +packet_came_18072 packet_came 3 18072 NULL
95902 +kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
95903 +get_vm_area_18080 get_vm_area 1 18080 NULL
95904 +mpi_alloc_18094 mpi_alloc 1 18094 NULL
95905 +tps65910_gpio_input_18110 tps65910_gpio_input 2 18110 NULL
95906 +dfs_file_read_18116 dfs_file_read 3 18116 NULL
95907 +svc_getnl_18120 svc_getnl 0 18120 NULL
95908 +paging32_gpte_to_gfn_lvl_18131 paging32_gpte_to_gfn_lvl 0-1-2 18131 NULL
95909 +vmw_surface_dma_size_18132 vmw_surface_dma_size 0 18132 NULL
95910 +tps65910_gpio_set_18135 tps65910_gpio_set 2 18135 NULL
95911 +selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
95912 +is_idx_node_in_use_18165 is_idx_node_in_use 0 18165 NULL
95913 +_has_tag_18169 _has_tag 2 18169 NULL
95914 +pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
95915 +snd_pcm_hw_refine_user_18204 snd_pcm_hw_refine_user 0 18204 NULL
95916 +orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
95917 +gsm_control_message_18209 gsm_control_message 4 18209 NULL
95918 +do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
95919 +gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
95920 +snd_ctl_ioctl_compat_18250 snd_ctl_ioctl_compat 3 18250 NULL
95921 +qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
95922 +gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
95923 +alloc_ring_18278 alloc_ring 2-4 18278 NULL
95924 +find_dirty_idx_leb_18280 find_dirty_idx_leb 0 18280 NULL
95925 +nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 NULL nohasharray
95926 +bio_phys_segments_18281 bio_phys_segments 0 18281 &nouveau_subdev_create__18281
95927 +ext4_readpages_18283 ext4_readpages 4 18283 NULL
95928 +mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
95929 +um_idi_write_18293 um_idi_write 3 18293 NULL
95930 +ip6ip6_err_18308 ip6ip6_err 5 18308 NULL
95931 +vga_r_18310 vga_r 0 18310 NULL
95932 +ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
95933 +bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
95934 +wm8994_gpio_direction_out_18337 wm8994_gpio_direction_out 2 18337 NULL
95935 +pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
95936 +xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
95937 +ep_io_18367 ep_io 0 18367 NULL
95938 +lp872x_update_bits_18368 lp872x_update_bits 2 18368 NULL
95939 +irq_find_mapping_18388 irq_find_mapping 0-2 18388 NULL
95940 +__video_register_device_18399 __video_register_device 3 18399 NULL
95941 +reada_tree_block_flagged_18402 reada_tree_block_flagged 3 18402 NULL nohasharray
95942 +adis16136_show_serial_18402 adis16136_show_serial 3 18402 &reada_tree_block_flagged_18402
95943 +crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
95944 +usbnet_write_cmd_nopm_18426 usbnet_write_cmd_nopm 7 18426 NULL
95945 +batadv_orig_node_add_if_18433 batadv_orig_node_add_if 2 18433 NULL
95946 +snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
95947 +fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
95948 +regset_tls_set_18459 regset_tls_set 4 18459 NULL
95949 +dma_alloc_from_contiguous_18466 dma_alloc_from_contiguous 3-2 18466 NULL
95950 +pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL
95951 +udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
95952 +snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
95953 +nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 5-6-9 18530 NULL
95954 +seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
95955 +acpi_register_gsi_ioapic_18550 acpi_register_gsi_ioapic 2 18550 NULL
95956 +sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
95957 +smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
95958 +debug_output_18575 debug_output 3 18575 NULL
95959 +check_lpt_type_18577 check_lpt_type 0 18577 NULL
95960 +__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
95961 +filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
95962 +slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
95963 +iowarrior_write_18604 iowarrior_write 3 18604 NULL
95964 +batadv_arp_get_type_18609 batadv_arp_get_type 3 18609 NULL
95965 +from_buffer_18625 from_buffer 3 18625 NULL
95966 +f1x_map_sysaddr_to_csrow_18628 f1x_map_sysaddr_to_csrow 2 18628 NULL
95967 +snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
95968 +ieee80211_if_fmt_rssi_threshold_18664 ieee80211_if_fmt_rssi_threshold 3 18664 NULL
95969 +unmap_page_18665 unmap_page 2-3 18665 NULL
95970 +edge_tty_recv_18667 edge_tty_recv 4 18667 NULL nohasharray
95971 +xfs_iext_insert_18667 xfs_iext_insert 3 18667 &edge_tty_recv_18667
95972 +replay_log_leb_18704 replay_log_leb 3 18704 NULL
95973 +iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
95974 +ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
95975 +ocfs2_trim_extent_18711 ocfs2_trim_extent 4-3 18711 NULL
95976 +blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
95977 +snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
95978 +o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
95979 +__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
95980 +wep_packets_read_18751 wep_packets_read 3 18751 NULL
95981 +md_compat_ioctl_18764 md_compat_ioctl 4 18764 NULL
95982 +read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
95983 +ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
95984 +alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
95985 +fat_compat_dir_ioctl_18800 fat_compat_dir_ioctl 3 18800 NULL
95986 +ieee80211_auth_challenge_18810 ieee80211_auth_challenge 3 18810 NULL
95987 +setup_ioapic_irq_18813 setup_ioapic_irq 1 18813 NULL
95988 +sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
95989 +mtf_test_write_18844 mtf_test_write 3 18844 NULL
95990 +drm_ht_create_18853 drm_ht_create 2 18853 NULL
95991 +sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
95992 +ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
95993 +xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
95994 +width_to_agaw_18883 width_to_agaw 0-1 18883 NULL
95995 +ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
95996 +mangle_packet_18920 mangle_packet 7-9 18920 NULL
95997 +snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
95998 +__nla_reserve_18974 __nla_reserve 3 18974 NULL
95999 +alc_auto_create_extra_outs_18975 alc_auto_create_extra_outs 2 18975 NULL
96000 +find_dirtiest_idx_leb_19001 find_dirtiest_idx_leb 0 19001 NULL
96001 +layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
96002 +huge_page_size_19008 huge_page_size 0 19008 NULL
96003 +usbdev_compat_ioctl_19026 usbdev_compat_ioctl 3 19026 NULL
96004 +prepare_highmem_image_19028 prepare_highmem_image 0 19028 NULL
96005 +revalidate_19043 revalidate 2 19043 NULL
96006 +drm_fb_helper_init_19044 drm_fb_helper_init 3-4 19044 NULL
96007 +create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
96008 +ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
96009 +copy_and_check_19089 copy_and_check 3 19089 NULL
96010 +sys_process_vm_readv_19090 sys_process_vm_readv 3-5 19090 NULL nohasharray
96011 +brcmf_usbdev_qinit_19090 brcmf_usbdev_qinit 2 19090 &sys_process_vm_readv_19090
96012 +sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
96013 +cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
96014 +skb_gro_offset_19123 skb_gro_offset 0 19123 NULL
96015 +ext4_inode_table_19125 ext4_inode_table 0 19125 NULL
96016 +snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
96017 +alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
96018 +sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
96019 +smk_write_access2_19170 smk_write_access2 3 19170 NULL
96020 +iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
96021 +vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
96022 +__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3 19214 NULL
96023 +dev_counters_read_19216 dev_counters_read 3 19216 NULL
96024 +wbcir_tx_19219 wbcir_tx 3 19219 NULL
96025 +gsi_to_irq_19220 gsi_to_irq 0-1 19220 NULL
96026 +snd_mask_max_19224 snd_mask_max 0 19224 NULL
96027 +snd_pcm_capture_rewind_19229 snd_pcm_capture_rewind 0-2 19229 NULL
96028 +bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
96029 +sys_fcntl_19267 sys_fcntl 3 19267 NULL
96030 +il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
96031 +io_mapping_map_wc_19284 io_mapping_map_wc 2 19284 NULL
96032 +qc_capture_19298 qc_capture 3 19298 NULL
96033 +ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 3-4 19303 NULL
96034 +event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
96035 +debug_read_19322 debug_read 3 19322 NULL
96036 +cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL
96037 +read_zero_19366 read_zero 3 19366 NULL
96038 +interpret_user_input_19393 interpret_user_input 2 19393 NULL
96039 +get_unaligned_be16_19400 get_unaligned_be16 0 19400 NULL
96040 +get_n_events_by_type_19401 get_n_events_by_type 0 19401 NULL
96041 +dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
96042 +__phys_addr_19434 __phys_addr 0 19434 NULL
96043 +xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
96044 +hpet_compat_ioctl_19455 hpet_compat_ioctl 3 19455 NULL
96045 +gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
96046 +sky2_read16_19475 sky2_read16 0 19475 NULL
96047 +efivar_create_sysfs_entry_19485 efivar_create_sysfs_entry 2 19485 NULL
96048 +ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
96049 +skb_realloc_headroom_19516 skb_realloc_headroom 2 19516 NULL
96050 +dev_alloc_skb_19517 dev_alloc_skb 1 19517 NULL
96051 +nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
96052 +gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
96053 +ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
96054 +ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL nohasharray
96055 +wlcore_hw_get_rx_packet_len_19565 wlcore_hw_get_rx_packet_len 0 19565 &ieee80211_if_read_tkip_mic_test_19565
96056 +nfsd_read_19568 nfsd_read 5 19568 NULL
96057 +cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
96058 +bm_status_read_19583 bm_status_read 3 19583 NULL
96059 +batadv_tt_update_orig_19586 batadv_tt_update_orig 4 19586 NULL
96060 +load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
96061 +usbvision_rvmalloc_19655 usbvision_rvmalloc 1 19655 NULL
96062 +LoadBitmap_19658 LoadBitmap 2 19658 NULL
96063 +usbnet_write_cmd_19679 usbnet_write_cmd 7 19679 NULL
96064 +read_reg_19723 read_reg 0 19723 NULL
96065 +wm8350_block_write_19727 wm8350_block_write 3-2 19727 NULL
96066 +memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
96067 +snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
96068 +p9_client_read_19750 p9_client_read 5 19750 NULL
96069 +pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
96070 +ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
96071 +jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
96072 +__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
96073 +saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
96074 +irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
96075 +pcpu_next_unpop_19831 pcpu_next_unpop 4 19831 NULL
96076 +vip_read_19832 vip_read 3 19832 NULL nohasharray
96077 +vfs_getxattr_19832 vfs_getxattr 0 19832 &vip_read_19832
96078 +security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
96079 +crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
96080 +cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
96081 +__nla_put_19857 __nla_put 3 19857 NULL
96082 +ip6gre_err_19869 ip6gre_err 5 19869 NULL
96083 +aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
96084 +ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
96085 +cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
96086 +iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
96087 +attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
96088 +diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
96089 +split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
96090 +__be32_to_cpup_20056 __be32_to_cpup 0 20056 NULL
96091 +alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
96092 +rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
96093 +fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
96094 +team_options_register_20091 team_options_register 3 20091 NULL
96095 +qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
96096 +hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
96097 +tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
96098 +read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
96099 +wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
96100 +create_trace_probe_20175 create_trace_probe 1 20175 NULL
96101 +udf_bitmap_new_block_20214 udf_bitmap_new_block 4 20214 NULL
96102 +pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
96103 +rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
96104 +tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
96105 +pcpu_alloc_20255 pcpu_alloc 1-2 20255 NULL
96106 +resource_size_20256 resource_size 0 20256 NULL
96107 +_rtl92s_get_h2c_cmdlen_20312 _rtl92s_get_h2c_cmdlen 0 20312 NULL
96108 +tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
96109 +snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
96110 +gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
96111 +qla82xx_pci_mem_read_direct_20368 qla82xx_pci_mem_read_direct 2 20368 NULL
96112 +snd_pcm_stop_20376 snd_pcm_stop 0 20376 NULL
96113 +smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
96114 +u64_to_uptr_20384 u64_to_uptr 1 20384 NULL
96115 +snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL
96116 +__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL
96117 +pm860x_write_reg_cache_20448 pm860x_write_reg_cache 2 20448 NULL
96118 +nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
96119 +ip_vs_icmp_xmit_v6_20464 ip_vs_icmp_xmit_v6 4 20464 NULL
96120 +compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
96121 +read_buf_20469 read_buf 2 20469 NULL
96122 +btrfs_get_32_20476 btrfs_get_32 0 20476 NULL
96123 +fast_user_write_20494 fast_user_write 5 20494 NULL
96124 +ocfs2_db_frozen_trigger_20503 ocfs2_db_frozen_trigger 4 20503 NULL nohasharray
96125 +hidraw_report_event_20503 hidraw_report_event 3 20503 &ocfs2_db_frozen_trigger_20503
96126 +pcpu_alloc_area_20511 pcpu_alloc_area 0-3 20511 NULL
96127 +pcpu_depopulate_chunk_20517 pcpu_depopulate_chunk 2-3 20517 NULL
96128 +xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
96129 +drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
96130 +amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
96131 +scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
96132 +venus_create_20555 venus_create 4 20555 NULL
96133 +btrfs_super_log_root_20565 btrfs_super_log_root 0 20565 NULL
96134 +crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
96135 +rc5t583_set_bits_20573 rc5t583_set_bits 2 20573 NULL
96136 +i915_max_freq_read_20581 i915_max_freq_read 3 20581 NULL
96137 +batadv_tt_append_diff_20588 batadv_tt_append_diff 4 20588 NULL
96138 +lirc_write_20604 lirc_write 3 20604 NULL
96139 +qib_qsfp_write_20614 qib_qsfp_write 0-4-2 20614 NULL
96140 +snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
96141 +kfifo_copy_to_user_20646 kfifo_copy_to_user 3 20646 NULL
96142 +cpulist_scnprintf_20648 cpulist_scnprintf 0-2 20648 NULL
96143 +oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
96144 +ceph_osdc_new_request_20654 ceph_osdc_new_request 15-4 20654 NULL
96145 +oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
96146 +sec_reg_write_20667 sec_reg_write 2 20667 NULL
96147 +tps65910_reg_clear_bits_20672 tps65910_reg_clear_bits 2 20672 NULL
96148 +snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
96149 +dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 NULL
96150 +cpumask_size_20683 cpumask_size 0 20683 NULL
96151 +btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
96152 +read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
96153 +__maestro_read_20700 __maestro_read 0 20700 NULL
96154 +cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
96155 +pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
96156 +ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
96157 +security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
96158 +vring_add_indirect_20737 vring_add_indirect 3-4 20737 NULL
96159 +io_apic_set_pci_routing_20740 io_apic_set_pci_routing 2 20740 NULL
96160 +vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
96161 +ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
96162 +ubi_io_read_20767 ubi_io_read 0 20767 NULL
96163 +fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
96164 +iommu_range_alloc_20794 iommu_range_alloc 3 20794 NULL
96165 +iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
96166 +sys_sendto_20809 sys_sendto 6 20809 NULL
96167 +strndup_user_20819 strndup_user 2 20819 NULL
96168 +wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
96169 +uvc_alloc_entity_20836 uvc_alloc_entity 4-3 20836 NULL
96170 +snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
96171 +ocfs2_bmap_20874 ocfs2_bmap 2 20874 NULL
96172 +sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
96173 +key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
96174 +vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
96175 +compat_sys_readv_20911 compat_sys_readv 3 20911 NULL
96176 +htable_bits_20933 htable_bits 0 20933 NULL
96177 +altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
96178 +rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
96179 +snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
96180 +brcmf_tx_frame_20978 brcmf_tx_frame 3 20978 NULL
96181 +alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
96182 +ocfs2_free_clusters_21001 ocfs2_free_clusters 4 21001 NULL
96183 +btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
96184 +rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
96185 +lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
96186 +proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
96187 +event_calibration_read_21083 event_calibration_read 3 21083 NULL
96188 +compat_sock_ioctl_trans_21092 compat_sock_ioctl_trans 4 21092 NULL
96189 +multipath_status_21094 multipath_status 5 21094 NULL
96190 +__cfg80211_send_disassoc_21096 __cfg80211_send_disassoc 3 21096 NULL
96191 +ext2_valid_block_bitmap_21101 ext2_valid_block_bitmap 3 21101 NULL
96192 +ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
96193 +i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
96194 +tps6586x_irq_init_21144 tps6586x_irq_init 3 21144 NULL
96195 +ocfs2_block_check_validate_21149 ocfs2_block_check_validate 2 21149 NULL
96196 +ath6kl_mgmt_tx_21153 ath6kl_mgmt_tx 9 21153 NULL
96197 +setup_msi_irq_21169 setup_msi_irq 3 21169 NULL
96198 +cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
96199 +ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
96200 +_ocfs2_free_clusters_21220 _ocfs2_free_clusters 4 21220 NULL
96201 +get_numpages_21227 get_numpages 0-1-2 21227 NULL
96202 +input_ff_create_21240 input_ff_create 2 21240 NULL
96203 +cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
96204 +ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
96205 +ip_vs_icmp_xmit_21269 ip_vs_icmp_xmit 4 21269 NULL
96206 +rc5t583_clear_bits_21300 rc5t583_clear_bits 2 21300 NULL
96207 +vmw_gmr2_bind_21305 vmw_gmr2_bind 3 21305 NULL
96208 +do_msg_fill_21307 do_msg_fill 3 21307 NULL
96209 +add_res_range_21310 add_res_range 4 21310 NULL
96210 +get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
96211 +ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
96212 +gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
96213 +max77693_irq_domain_map_21357 max77693_irq_domain_map 2 21357 NULL
96214 +alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
96215 +video_ioctl2_21380 video_ioctl2 2 21380 NULL
96216 +diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
96217 +snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
96218 +snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
96219 +tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
96220 +tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
96221 +aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
96222 +btrfs_iref_to_path_21445 btrfs_iref_to_path 7 21445 NULL
96223 +__ertm_hdr_size_21450 __ertm_hdr_size 0 21450 NULL
96224 +concat_writev_21451 concat_writev 3 21451 NULL
96225 +extend_netdev_table_21453 extend_netdev_table 2 21453 NULL
96226 +read_file_xmit_21487 read_file_xmit 3 21487 NULL
96227 +mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
96228 +btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
96229 +il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
96230 +cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
96231 +rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
96232 +rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
96233 +xfs_buf_read_uncached_21585 xfs_buf_read_uncached 3 21585 NULL
96234 +ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
96235 +xlog_do_recovery_pass_21618 xlog_do_recovery_pass 3 21618 NULL
96236 +__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
96237 +rbd_req_sync_op_21632 rbd_req_sync_op 7-8 21632 NULL
96238 +validate_nnode_21638 validate_nnode 0 21638 NULL
96239 +__irq_alloc_descs_21639 __irq_alloc_descs 2-1-3 21639 NULL
96240 +carl9170_rx_copy_data_21656 carl9170_rx_copy_data 2 21656 NULL
96241 +hpet_setup_msi_irq_21662 hpet_setup_msi_irq 1 21662 NULL
96242 +atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
96243 +ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
96244 +regmap_register_patch_21681 regmap_register_patch 3 21681 NULL
96245 +rtllib_alloc_txb_21687 rtllib_alloc_txb 1-2 21687 NULL
96246 +evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
96247 +reiserfs_allocate_list_bitmaps_21732 reiserfs_allocate_list_bitmaps 3 21732 NULL
96248 +__nf_nat_mangle_tcp_packet_21744 __nf_nat_mangle_tcp_packet 8-6 21744 NULL
96249 +mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
96250 +gen_pool_add_21776 gen_pool_add 3 21776 NULL
96251 +xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
96252 +__ioremap_caller_21800 __ioremap_caller 1-2 21800 NULL
96253 +dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
96254 +wm8994_request_irq_21822 wm8994_request_irq 2 21822 NULL
96255 +oom_adj_read_21847 oom_adj_read 3 21847 NULL
96256 +acpi_tb_check_xsdt_21862 acpi_tb_check_xsdt 1 21862 NULL
96257 +lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
96258 +brcms_debugfs_hardware_read_21867 brcms_debugfs_hardware_read 3 21867 NULL
96259 +sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL nohasharray
96260 +tcp_cookie_size_check_21873 tcp_cookie_size_check 0-1 21873 &sisusbcon_bmove_21873
96261 +__alloc_reserved_percpu_21895 __alloc_reserved_percpu 1-2 21895 NULL
96262 +rio_destid_first_21900 rio_destid_first 0 21900 NULL
96263 +dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
96264 +qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
96265 +rbd_req_read_21952 rbd_req_read 4-5 21952 NULL
96266 +security_mmap_addr_21970 security_mmap_addr 0 21970 NULL
96267 +alloc_ldt_21972 alloc_ldt 2 21972 NULL
96268 +rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL nohasharray
96269 +compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 0-3 22001 &rxpipe_descr_host_int_trig_rx_data_read_22001
96270 +btrfs_get_16_22023 btrfs_get_16 0 22023 NULL
96271 +ti_recv_22027 ti_recv 4 22027 NULL
96272 +pcf50633_irq_unmask_22034 pcf50633_irq_unmask 2 22034 NULL
96273 +zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
96274 +ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
96275 +btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2 22077 NULL
96276 +mem_rw_22085 mem_rw 3 22085 NULL
96277 +snd_pcm_xrun_22088 snd_pcm_xrun 0 22088 NULL
96278 +lowpan_fragment_xmit_22095 lowpan_fragment_xmit 3-4 22095 NULL
96279 +sys_remap_file_pages_22124 sys_remap_file_pages 1 22124 NULL
96280 +__bitmap_size_22138 __bitmap_size 0 22138 NULL
96281 +compat_insn_22142 compat_insn 2 22142 NULL
96282 +pn533_dep_link_up_22154 pn533_dep_link_up 5 22154 NULL
96283 +do_tcp_sendpages_22155 do_tcp_sendpages 4 22155 NULL
96284 +__kfifo_alloc_22173 __kfifo_alloc 2-3 22173 NULL
96285 +fls_22210 fls 0 22210 NULL
96286 +bio_chain_clone_22227 bio_chain_clone 4 22227 NULL
96287 +mem_write_22232 mem_write 3 22232 NULL
96288 +p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
96289 +atomic64_xchg_22246 atomic64_xchg 0 22246 NULL
96290 +compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
96291 +__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
96292 +queue_max_sectors_22280 queue_max_sectors 0 22280 NULL
96293 +pci_vpd_srdt_size_22300 pci_vpd_srdt_size 0 22300 NULL nohasharray
96294 +__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 &pci_vpd_srdt_size_22300
96295 +mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
96296 +get_segment_base_22324 get_segment_base 0 22324 NULL
96297 +radix_tree_find_next_bit_22334 radix_tree_find_next_bit 2-3 22334 NULL
96298 +atomic_read_22342 atomic_read 0 22342 NULL
96299 +mlx4_db_alloc_22358 mlx4_db_alloc 3 22358 NULL
96300 +irq_reserve_irq_22360 irq_reserve_irq 1 22360 NULL
96301 +snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
96302 +wlcore_alloc_hw_22365 wlcore_alloc_hw 1 22365 NULL nohasharray
96303 +tps6586x_gpio_to_irq_22365 tps6586x_gpio_to_irq 2 22365 &wlcore_alloc_hw_22365
96304 +evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
96305 +alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
96306 +btmrvl_psmode_read_22395 btmrvl_psmode_read 3 22395 NULL
96307 +zoran_write_22404 zoran_write 3 22404 NULL
96308 +queue_reply_22416 queue_reply 3 22416 NULL
96309 +__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
96310 +queue_max_segments_22441 queue_max_segments 0 22441 NULL
96311 +handle_received_packet_22457 handle_received_packet 3 22457 NULL
96312 +mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
96313 +cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
96314 +mp_find_ioapic_pin_22499 mp_find_ioapic_pin 0-2 22499 NULL
96315 +mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
96316 +ip4_addr_string_22511 ip4_addr_string 0 22511 NULL
96317 +pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
96318 +ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
96319 +atomic_long_read_unchecked_22551 atomic_long_read_unchecked 0 22551 NULL
96320 +agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
96321 +dbFindCtl_22587 dbFindCtl 0 22587 NULL
96322 +snapshot_read_22601 snapshot_read 3 22601 NULL
96323 +sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
96324 +ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
96325 +wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
96326 +pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
96327 +iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
96328 +ubifs_leb_write_22679 ubifs_leb_write 4-5 22679 NULL
96329 +fill_gap_22681 fill_gap 0 22681 NULL
96330 +ocfs2_get_block_22687 ocfs2_get_block 2 22687 NULL
96331 +compat_fd_ioctl_22694 compat_fd_ioctl 4 22694 NULL
96332 +alloc_libipw_22708 alloc_libipw 1 22708 NULL
96333 +brcmf_sdbrcm_read_control_22721 brcmf_sdbrcm_read_control 3 22721 NULL
96334 +cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4 22735 NULL
96335 +ceph_decode_32_22738 ceph_decode_32 0 22738 NULL
96336 +iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
96337 +print_frame_22769 print_frame 0 22769 NULL
96338 +ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
96339 +retu_write_22800 retu_write 2 22800 NULL
96340 +compat_blkdev_ioctl_22841 compat_blkdev_ioctl 3 22841 NULL
96341 +can_nocow_odirect_22854 can_nocow_odirect 3 22854 NULL nohasharray
96342 +read_file_rcstat_22854 read_file_rcstat 3 22854 &can_nocow_odirect_22854
96343 +do_atm_iobuf_22857 do_atm_iobuf 3 22857 NULL
96344 +nfs4_realloc_slot_table_22859 nfs4_realloc_slot_table 2 22859 NULL
96345 +create_attr_set_22861 create_attr_set 1 22861 NULL
96346 +vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
96347 +usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
96348 +mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
96349 +pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
96350 +alloc_sglist_22960 alloc_sglist 1-2-3 22960 NULL
96351 +caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
96352 +vme_get_size_22964 vme_get_size 0 22964 NULL
96353 +tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
96354 +usb_get_langid_22983 usb_get_langid 0 22983 NULL
96355 +remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
96356 +viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
96357 +cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
96358 +st_status_23032 st_status 5 23032 NULL
96359 +nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
96360 +reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL
96361 +vm_map_ram_23078 vm_map_ram 2 23078 NULL nohasharray
96362 +raw_sendmsg_23078 raw_sendmsg 4 23078 &vm_map_ram_23078
96363 +qla4_82xx_pci_mem_read_2M_23081 qla4_82xx_pci_mem_read_2M 2 23081 NULL
96364 +isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
96365 +lnw_gpio_irq_map_23087 lnw_gpio_irq_map 2 23087 NULL
96366 +rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
96367 +fls_long_23096 fls_long 0 23096 NULL
96368 +ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
96369 +pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
96370 +mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
96371 +nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
96372 +__clear_user_23118 __clear_user 0 23118 NULL
96373 +dm_write_async_23120 dm_write_async 3 23120 NULL
96374 +drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
96375 +ca91cx42_master_set_23146 ca91cx42_master_set 4 23146 NULL
96376 +wm831x_set_bits_23158 wm831x_set_bits 2 23158 NULL
96377 +read_file_ani_23161 read_file_ani 3 23161 NULL
96378 +ioremap_23172 ioremap 1-2 23172 NULL
96379 +usblp_write_23178 usblp_write 3 23178 NULL
96380 +msnd_fifo_alloc_23179 msnd_fifo_alloc 2 23179 NULL
96381 +gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
96382 +ieee80211_get_mesh_hdrlen_23183 ieee80211_get_mesh_hdrlen 0 23183 NULL
96383 +fix_unclean_leb_23188 fix_unclean_leb 3 23188 NULL
96384 +mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
96385 +__next_dma_cap_23195 __next_dma_cap 1 23195 NULL
96386 +convert_ip_to_linear_23198 convert_ip_to_linear 0 23198 NULL
96387 +pm80x_free_irq_23210 pm80x_free_irq 2 23210 NULL nohasharray
96388 +compat_rawv6_ioctl_23210 compat_rawv6_ioctl 3 23210 &pm80x_free_irq_23210
96389 +tty_buffer_request_room_23228 tty_buffer_request_room 2 23228 NULL
96390 +xlog_get_bp_23229 xlog_get_bp 2 23229 NULL
96391 +rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
96392 +__gfn_to_rmap_23240 __gfn_to_rmap 1-2 23240 NULL
96393 +uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
96394 +ipv6_skip_exthdr_23283 ipv6_skip_exthdr 0-2 23283 NULL
96395 +doc_probe_23285 doc_probe 1 23285 NULL
96396 +diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
96397 +i2cdev_write_23310 i2cdev_write 3 23310 NULL
96398 +mc13xxx_get_num_regulators_dt_23344 mc13xxx_get_num_regulators_dt 0 23344 NULL
96399 +page_readlink_23346 page_readlink 3 23346 NULL
96400 +get_dst_timing_23358 get_dst_timing 0 23358 NULL
96401 +iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
96402 +vga_mm_r_23419 vga_mm_r 0 23419 NULL
96403 +vzalloc_node_23424 vzalloc_node 1 23424 NULL
96404 +__cxio_init_resource_fifo_23447 __cxio_init_resource_fifo 3 23447 NULL nohasharray
96405 +ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 &__cxio_init_resource_fifo_23447
96406 +hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
96407 +linear_conf_23485 linear_conf 2 23485 NULL nohasharray
96408 +divasa_remap_pci_bar_23485 divasa_remap_pci_bar 3-4 23485 &linear_conf_23485
96409 +event_filter_read_23494 event_filter_read 3 23494 NULL
96410 +ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
96411 +xen_allocate_irq_gsi_23546 xen_allocate_irq_gsi 1-0 23546 NULL
96412 +tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
96413 +dbg_leb_change_23555 dbg_leb_change 4 23555 NULL
96414 +venus_symlink_23570 venus_symlink 4-6 23570 NULL
96415 +iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
96416 +snd_interval_min_23590 snd_interval_min 0 23590 NULL
96417 +_alloc_cdb_cont_23609 _alloc_cdb_cont 2 23609 NULL
96418 +islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
96419 +__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
96420 +ext3_compat_ioctl_23659 ext3_compat_ioctl 3 23659 NULL
96421 +sInW_23663 sInW 0 23663 NULL
96422 +proc_ioctl_compat_23682 proc_ioctl_compat 2 23682 NULL
96423 +nftl_partscan_23688 nftl_partscan 0 23688 NULL
96424 +cx18_read_23699 cx18_read 3 23699 NULL
96425 +mp_config_acpi_gsi_23728 mp_config_acpi_gsi 2 23728 NULL
96426 +pack_sg_list_p_23739 pack_sg_list_p 0-2 23739 NULL
96427 +rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
96428 +__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
96429 +security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
96430 +diva_alloc_dma_map_23798 diva_alloc_dma_map 2 23798 NULL
96431 +rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
96432 +__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
96433 +xfs_dir2_leaf_getdents_23841 xfs_dir2_leaf_getdents 3 23841 NULL
96434 +iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
96435 +p54_init_common_23850 p54_init_common 1 23850 NULL
96436 +gart_alloc_coherent_23852 gart_alloc_coherent 2 23852 NULL
96437 +bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL
96438 +ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
96439 +ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
96440 +ieee80211_if_read_channel_type_23884 ieee80211_if_read_channel_type 3 23884 NULL
96441 +nes_alloc_resource_23891 nes_alloc_resource 3 23891 NULL
96442 +tipc_snprintf_23893 tipc_snprintf 2 23893 NULL
96443 +add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray
96444 +ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911
96445 +f2fs_getxattr_23917 f2fs_getxattr 0 23917 NULL
96446 +ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL nohasharray
96447 +mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 &ipath_reg_phys_mr_23918
96448 +kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL
96449 +__alloc_skb_23940 __alloc_skb 1 23940 NULL
96450 +uvc_endpoint_max_bpi_23944 uvc_endpoint_max_bpi 0 23944 NULL
96451 +cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
96452 +zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
96453 +cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
96454 +dgrp_send_24028 dgrp_send 0-2 24028 NULL
96455 +ocfs2_mark_extent_refcounted_24035 ocfs2_mark_extent_refcounted 6 24035 NULL
96456 +afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
96457 +blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
96458 +vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
96459 +pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
96460 +request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
96461 +ieee80211_send_auth_24121 ieee80211_send_auth 5 24121 NULL
96462 +mpu401_read_24126 mpu401_read 3 24126 NULL
96463 +irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
96464 +trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
96465 +adu_read_24177 adu_read 3 24177 NULL
96466 +safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
96467 +ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
96468 +efx_vf_size_24213 efx_vf_size 0 24213 NULL
96469 +tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
96470 +pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 1-2-3 24224 NULL nohasharray
96471 +mei_amthif_read_24224 mei_amthif_read 4 24224 &pcpu_embed_first_chunk_24224
96472 +pci_num_vf_24235 pci_num_vf 0 24235 NULL
96473 +sel_read_bool_24236 sel_read_bool 3 24236 NULL
96474 +thin_status_24278 thin_status 5 24278 NULL
96475 +compat_sys_preadv64_24283 compat_sys_preadv64 3 24283 NULL
96476 +msg_size_24288 msg_size 0 24288 NULL
96477 +ext2_free_blocks_24292 ext2_free_blocks 2-3 24292 NULL
96478 +map_page_24298 map_page 3-4 24298 NULL
96479 +btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
96480 +ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
96481 +ocfs2_direct_IO_get_blocks_24333 ocfs2_direct_IO_get_blocks 2 24333 NULL
96482 +kzalloc_node_24352 kzalloc_node 1 24352 NULL
96483 +qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
96484 +cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
96485 +btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
96486 +igetword_24373 igetword 0 24373 NULL
96487 +pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 NULL nohasharray
96488 +getxattr_24398 getxattr 4 24398 &pvr2_v4l2_ioctl_24398
96489 +blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
96490 +b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
96491 +ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
96492 +ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL
96493 +smk_user_access_24440 smk_user_access 3 24440 NULL
96494 +page_address_24444 page_address 0 24444 NULL
96495 +evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
96496 +ocfs2_write_cluster_by_desc_24466 ocfs2_write_cluster_by_desc 5-6 24466 NULL
96497 +pd_video_read_24510 pd_video_read 3 24510 NULL
96498 +request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
96499 +xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
96500 +named_prepare_buf_24532 named_prepare_buf 2 24532 NULL
96501 +do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
96502 +write_cache_pages_24562 write_cache_pages 0 24562 NULL
96503 +tsi148_alloc_resource_24563 tsi148_alloc_resource 2 24563 NULL
96504 +udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
96505 +palmas_smps_write_24600 palmas_smps_write 2 24600 NULL nohasharray
96506 +count_preds_24600 count_preds 0 24600 &palmas_smps_write_24600
96507 +sensor_hub_get_physical_device_count_24605 sensor_hub_get_physical_device_count 0 24605 NULL
96508 +context_alloc_24645 context_alloc 3 24645 NULL
96509 +blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
96510 +unifi_net_data_malloc_24716 unifi_net_data_malloc 3 24716 NULL
96511 +simple_attr_read_24738 simple_attr_read 3 24738 NULL
96512 +qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
96513 +ath_rxbuf_alloc_24745 ath_rxbuf_alloc 2 24745 NULL
96514 +get_dma_residue_24749 get_dma_residue 0 24749 NULL
96515 +kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
96516 +nfsd4_sanitize_slot_size_24756 nfsd4_sanitize_slot_size 0-1 24756 NULL
96517 +i915_cache_sharing_read_24775 i915_cache_sharing_read 3 24775 NULL
96518 +ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
96519 +skb_make_writable_24783 skb_make_writable 2 24783 NULL
96520 +datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
96521 +cache_read_24790 cache_read 3 24790 NULL
96522 +unpack_str_24798 unpack_str 0 24798 NULL
96523 +__next_cpu_nr_24805 __next_cpu_nr 1 24805 NULL
96524 +comedi_buf_alloc_24822 comedi_buf_alloc 3 24822 NULL
96525 +snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
96526 +snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
96527 +pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
96528 +l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
96529 +bnx2fc_cmd_mgr_alloc_24873 bnx2fc_cmd_mgr_alloc 3-2 24873 NULL
96530 +queues_read_24877 queues_read 3 24877 NULL
96531 +codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
96532 +v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL
96533 +next_token_24929 next_token 0 24929 NULL
96534 +uf_create_device_nodes_24948 uf_create_device_nodes 2 24948 NULL
96535 +ocfs2_fiemap_24949 ocfs2_fiemap 3-4 24949 NULL
96536 +packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
96537 +sys_rt_sigpending_24961 sys_rt_sigpending 2 24961 NULL
96538 +ensure_wear_leveling_24971 ensure_wear_leveling 0 24971 NULL
96539 +twl_i2c_write_u8_24976 twl_i2c_write_u8 3 24976 NULL
96540 +nf_nat_sdp_port_24977 nf_nat_sdp_port 7 24977 NULL
96541 +llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
96542 +key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
96543 +il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
96544 +ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
96545 +nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
96546 +gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
96547 +gfs2_iter_unstuffed_25099 gfs2_iter_unstuffed 0 25099 NULL
96548 +cxio_hal_init_rhdl_resource_25104 cxio_hal_init_rhdl_resource 1 25104 NULL
96549 +snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
96550 +fs32_to_cpu_25143 fs32_to_cpu 0 25143 NULL
96551 +sys_fgetxattr_25166 sys_fgetxattr 4 25166 NULL
96552 +ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL
96553 +mmu_set_spte_25177 mmu_set_spte 8-9 25177 NULL
96554 +sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
96555 +ks8851_rdreg32_25187 ks8851_rdreg32 0 25187 NULL
96556 +ocfs2_block_check_compute_25223 ocfs2_block_check_compute 2 25223 NULL
96557 +mon_stat_read_25238 mon_stat_read 3 25238 NULL
96558 +tcf_csum_ipv6_udp_25241 tcf_csum_ipv6_udp 4 25241 NULL
96559 +nilfs_palloc_find_available_slot_25245 nilfs_palloc_find_available_slot 3-5 25245 NULL
96560 +stripe_status_25259 stripe_status 5 25259 NULL
96561 +snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
96562 +crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
96563 +vfs_writev_25278 vfs_writev 3 25278 NULL
96564 +l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
96565 +snd_seq_ioctl_compat_25307 snd_seq_ioctl_compat 3 25307 NULL
96566 +help_25316 help 5 25316 NULL nohasharray
96567 +ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 &help_25316
96568 +rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
96569 +i915_gem_execbuffer_relocate_slow_25355 i915_gem_execbuffer_relocate_slow 7 25355 NULL
96570 +rio_destid_next_25368 rio_destid_next 2 25368 NULL nohasharray
96571 +unix_mkname_25368 unix_mkname 0-2 25368 &rio_destid_next_25368
96572 +sel_read_mls_25369 sel_read_mls 3 25369 NULL
96573 +tc3589x_gpio_to_irq_25371 tc3589x_gpio_to_irq 2 25371 NULL
96574 +ebt_buf_add_pad_25413 ebt_buf_add_pad 0 25413 NULL
96575 +dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
96576 +ath6kl_wmi_beginscan_cmd_25462 ath6kl_wmi_beginscan_cmd 8 25462 NULL
96577 +generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
96578 +crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
96579 +ocfs2_hamming_encode_25501 ocfs2_hamming_encode 3 25501 NULL
96580 +ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4 25502 NULL
96581 +snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
96582 +sb_permission_25523 sb_permission 0 25523 NULL
96583 +ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
96584 +ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
96585 +wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
96586 +ht_print_chan_25556 ht_print_chan 0 25556 NULL
96587 +skb_tailroom_25567 skb_tailroom 0 25567 NULL
96588 +__devres_alloc_25598 __devres_alloc 2 25598 NULL
96589 +copy_user_generic_25611 copy_user_generic 0 25611 NULL
96590 +proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
96591 +__get_user_pages_25628 __get_user_pages 0 25628 NULL nohasharray
96592 +befs_utf2nls_25628 befs_utf2nls 3 25628 &__get_user_pages_25628
96593 +__direct_map_25647 __direct_map 5-6 25647 NULL
96594 +ext2_try_to_allocate_25667 ext2_try_to_allocate 4-2 25667 NULL
96595 +aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
96596 +sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
96597 +ebitmap_start_positive_25703 ebitmap_start_positive 0 25703 NULL
96598 +rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL
96599 +ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
96600 +sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
96601 +__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 &sel_write_context_25726
96602 +mcs_unwrap_fir_25733 mcs_unwrap_fir 3 25733 NULL
96603 +ext2_find_near_25734 ext2_find_near 0 25734 NULL
96604 +cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
96605 +event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
96606 +sg_read_25799 sg_read 3 25799 NULL
96607 +sys32_rt_sigpending_25814 sys32_rt_sigpending 2 25814 NULL
96608 +system_enable_read_25815 system_enable_read 3 25815 NULL
96609 +realloc_buffer_25816 realloc_buffer 2 25816 NULL
96610 +pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
96611 +parport_read_25855 parport_read 0 25855 NULL
96612 +xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
96613 +uf_ap_process_data_pdu_25860 uf_ap_process_data_pdu 7 25860 NULL
96614 +ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
96615 +run_delalloc_nocow_25896 run_delalloc_nocow 3 25896 NULL
96616 +sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
96617 +lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
96618 +do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
96619 +utf16_strlen_25913 utf16_strlen 0 25913 NULL
96620 +rcname_read_25919 rcname_read 3 25919 NULL
96621 +snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
96622 +key_flags_read_25931 key_flags_read 3 25931 NULL
96623 +copy_play_buf_25932 copy_play_buf 3 25932 NULL
96624 +tps80031_clr_bits_25942 tps80031_clr_bits 3 25942 NULL
96625 +flush_25957 flush 2 25957 NULL
96626 +video_register_device_25971 video_register_device 3 25971 NULL
96627 +udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
96628 +ebt_compat_entry_padsize_26001 ebt_compat_entry_padsize 0 26001 NULL
96629 +lpfc_sli_probe_sriov_nr_virtfn_26004 lpfc_sli_probe_sriov_nr_virtfn 2 26004 NULL
96630 +mirror_status_26010 mirror_status 5 26010 NULL
96631 +irq_create_strict_mappings_26025 irq_create_strict_mappings 2-4 26025 NULL
96632 +xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
96633 +skb_mac_header_26034 skb_mac_header 0 26034 NULL
96634 +mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
96635 +selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
96636 +tun_do_read_26047 tun_do_read 5 26047 NULL
96637 +keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
96638 +rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
96639 +cpumask_next_26096 cpumask_next 1 26096 NULL
96640 +skb_cow_26138 skb_cow 2 26138 NULL
96641 +usb_dump_device_strings_26146 usb_dump_device_strings 0 26146 NULL
96642 +__fswab64_26155 __fswab64 0 26155 NULL
96643 +copy_oldmem_page_26164 copy_oldmem_page 3-1 26164 NULL
96644 +gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray
96645 +ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166
96646 +mid_get_vbt_data_r1_26170 mid_get_vbt_data_r1 2 26170 NULL
96647 +disk_devt_26180 disk_devt 0 26180 NULL
96648 +get_registers_26187 get_registers 3 26187 NULL
96649 +cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
96650 +ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
96651 +xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
96652 +mce_write_26201 mce_write 3 26201 NULL
96653 +_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
96654 +cxio_num_stags_26233 cxio_num_stags 0 26233 NULL
96655 +bio_split_26235 bio_split 2 26235 NULL
96656 +crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
96657 +wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
96658 +ext2_find_goal_26306 ext2_find_goal 0 26306 NULL
96659 +snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL nohasharray
96660 +pax_get_random_long_26309 pax_get_random_long 0 26309 &snd_pcm_plug_client_channels_buf_26309
96661 +pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
96662 +ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 5 26357 NULL
96663 +cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
96664 +dup_to_netobj_26363 dup_to_netobj 3 26363 NULL
96665 +invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
96666 +virtio_has_feature_26467 virtio_has_feature 0 26467 NULL
96667 +ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
96668 +ulong_write_file_26485 ulong_write_file 3 26485 NULL
96669 +dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
96670 +read_vmcore_26501 read_vmcore 3 26501 NULL
96671 +vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 3-4 26507 NULL
96672 +iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
96673 +rds_message_inc_copy_to_user_26540 rds_message_inc_copy_to_user 3 26540 NULL
96674 +__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
96675 +ip6_addr_string_26568 ip6_addr_string 0 26568 NULL
96676 +rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL nohasharray
96677 +batadv_receive_server_sync_packet_26577 batadv_receive_server_sync_packet 3 26577 &rts51x_read_mem_26577
96678 +cirrusfb_get_memsize_26597 cirrusfb_get_memsize 0 26597 NULL
96679 +__unmap_single_26604 __unmap_single 2-3 26604 NULL
96680 +iommu_alloc_26621 iommu_alloc 4 26621 NULL
96681 +pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
96682 +mmap_region_26649 mmap_region 0-2 26649 NULL
96683 +irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
96684 +inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
96685 +cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
96686 +__alloc_pred_stack_26687 __alloc_pred_stack 2 26687 NULL
96687 +rtllib_authentication_req_26713 rtllib_authentication_req 3 26713 NULL
96688 +aty_ld_le32_26720 aty_ld_le32 0 26720 NULL
96689 +nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
96690 +pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
96691 +srp_ring_alloc_26760 srp_ring_alloc 2 26760 NULL
96692 +snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
96693 +ntfs_are_rl_mergeable_26777 ntfs_are_rl_mergeable 0 26777 NULL
96694 +qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
96695 +cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
96696 +smk_write_load_26829 smk_write_load 3 26829 NULL
96697 +slgt_compat_ioctl_26834 slgt_compat_ioctl 3 26834 NULL
96698 +__nodes_onto_26838 __nodes_onto 4 26838 NULL
96699 +scnprint_id_26842 scnprint_id 3 26842 NULL
96700 +ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
96701 +netxen_nic_hw_read_wx_128M_26858 netxen_nic_hw_read_wx_128M 2 26858 NULL
96702 +svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
96703 +ext2_compat_ioctl_26883 ext2_compat_ioctl 3 26883 NULL
96704 +slhc_uncompress_26905 slhc_uncompress 0-3 26905 NULL
96705 +wm8994_reg_write_26919 wm8994_reg_write 2 26919 NULL
96706 +x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
96707 +compat_mtw_from_user_26932 compat_mtw_from_user 0 26932 NULL
96708 +scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
96709 +pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 NULL nohasharray
96710 +sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 &pwr_ps_enter_read_26935
96711 +carl9170_handle_mpdu_26940 carl9170_handle_mpdu 3 26940 NULL nohasharray
96712 +create_bm_block_list_26940 create_bm_block_list 0 26940 &carl9170_handle_mpdu_26940
96713 +hecubafb_write_26942 hecubafb_write 3 26942 NULL
96714 +extract_entropy_user_26952 extract_entropy_user 3 26952 NULL nohasharray
96715 +do_trimming_26952 do_trimming 3 26952 &extract_entropy_user_26952
96716 +pcf857x_irq_domain_map_26998 pcf857x_irq_domain_map 2 26998 NULL
96717 +rbd_do_op_27025 rbd_do_op 7-8 27025 NULL
96718 +ufs_alloc_fragments_27059 ufs_alloc_fragments 3-0-2 27059 NULL
96719 +__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
96720 +snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
96721 +paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
96722 +alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
96723 +find_first_bit_27088 find_first_bit 0-2 27088 NULL
96724 +btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
96725 +__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
96726 +get_kernel_page_27133 get_kernel_page 0 27133 NULL
96727 +drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
96728 +pms_capture_27142 pms_capture 4 27142 NULL
96729 +btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
96730 +snd_compr_calc_avail_27165 snd_compr_calc_avail 0 27165 NULL
96731 +mc13xxx_irq_handle_27166 mc13xxx_irq_handle 3 27166 NULL
96732 +i2400m_net_rx_27170 i2400m_net_rx 5 27170 NULL
96733 +ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
96734 +get_unaligned_be32_27184 get_unaligned_be32 0 27184 NULL
96735 +mmc_blk_compat_ioctl_27194 mmc_blk_compat_ioctl 4 27194 NULL
96736 +dbAllocAG_27228 dbAllocAG 0 27228 NULL
96737 +rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
96738 +cfpkt_add_trail_27260 cfpkt_add_trail 3 27260 NULL
96739 +__dma_map_cont_27289 __dma_map_cont 5 27289 NULL
96740 +hpi_read_reg_27302 hpi_read_reg 0 27302 NULL
96741 +copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
96742 +ath6kl_wmi_test_cmd_27312 ath6kl_wmi_test_cmd 3 27312 NULL
96743 +rbd_req_sync_exec_27320 rbd_req_sync_exec 8 27320 NULL
96744 +ocfs2_blocks_to_clusters_27327 ocfs2_blocks_to_clusters 0-2 27327 NULL
96745 +snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
96746 +afs_cell_create_27346 afs_cell_create 2 27346 NULL
96747 +pcbit_stat_27364 pcbit_stat 2 27364 NULL
96748 +init_memory_mapping_27395 init_memory_mapping 0 27395 NULL
96749 +phys_pte_init_27411 phys_pte_init 0-3-2 27411 NULL
96750 +ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL
96751 +acpi_os_get_root_pointer_27416 acpi_os_get_root_pointer 0 27416 NULL nohasharray
96752 +ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 &acpi_os_get_root_pointer_27416
96753 +pack_sg_list_27425 pack_sg_list 0-2 27425 NULL
96754 +tps65910_reg_set_bits_27468 tps65910_reg_set_bits 2 27468 NULL
96755 +rtsx_write_cfg_seq_27485 rtsx_write_cfg_seq 5-3 27485 NULL
96756 +v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
96757 +set_tpl_pfs_27490 set_tpl_pfs 3 27490 NULL
96758 +hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
96759 +qib_create_cq_27497 qib_create_cq 2 27497 NULL
96760 +ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
96761 +btrfs_get_64_27499 btrfs_get_64 0 27499 NULL
96762 +__usbnet_write_cmd_27500 __usbnet_write_cmd 7 27500 NULL
96763 +garmin_read_process_27509 garmin_read_process 3 27509 NULL
96764 +ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
96765 +snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
96766 +libipw_alloc_txb_27579 libipw_alloc_txb 1-2-3 27579 NULL
96767 +read_flush_procfs_27642 read_flush_procfs 3 27642 NULL nohasharray
96768 +nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &read_flush_procfs_27642 nohasharray
96769 +ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 &nl80211_send_connect_result_27642
96770 +add_new_gdb_27643 add_new_gdb 3 27643 NULL
96771 +qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
96772 +cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
96773 +set_bypass_pwoff_pfs_27669 set_bypass_pwoff_pfs 3 27669 NULL
96774 +qword_get_27670 qword_get 0 27670 NULL
96775 +ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
96776 +__tty_buffer_request_room_27700 __tty_buffer_request_room 2 27700 NULL
96777 +fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL
96778 +evm_write_key_27715 evm_write_key 3 27715 NULL
96779 +ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
96780 +reg_w_buf_27724 reg_w_buf 3 27724 NULL
96781 +xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
96782 +a4t_cs_init_27734 a4t_cs_init 3 27734 NULL
96783 +kcalloc_27770 kcalloc 1-2 27770 NULL
96784 +twl4030_set_gpio_dataout_27792 twl4030_set_gpio_dataout 1 27792 NULL
96785 +DivaSTraceGetMemotyRequirement_27797 DivaSTraceGetMemotyRequirement 0-1 27797 NULL
96786 +ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL
96787 +mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
96788 +fwtty_buffer_rx_27821 fwtty_buffer_rx 3 27821 NULL
96789 +init_header_complete_27833 init_header_complete 0 27833 NULL nohasharray
96790 +sys_listxattr_27833 sys_listxattr 3 27833 &init_header_complete_27833
96791 +read_profile_27859 read_profile 3 27859 NULL
96792 +sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
96793 +ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
96794 +unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
96795 +gluebi_write_27905 gluebi_write 3 27905 NULL
96796 +mc13783_set_sysclk_27914 mc13783_set_sysclk 5 27914 NULL
96797 +bm_find_next_27929 bm_find_next 2 27929 NULL
96798 +tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
96799 +tipc_media_addr_printf_27971 tipc_media_addr_printf 2 27971 NULL
96800 +mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 NULL
96801 +f2fs_bio_alloc_27983 f2fs_bio_alloc 2 27983 NULL
96802 +edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
96803 +snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
96804 +serial8250_port_size_28019 serial8250_port_size 0 28019 NULL
96805 +sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
96806 +rts51x_xd_rw_28046 rts51x_xd_rw 3-4 28046 NULL
96807 +cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2 28053 NULL
96808 +pool_status_28055 pool_status 5 28055 NULL
96809 +lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
96810 +tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
96811 +ext4_read_block_bitmap_nowait_28078 ext4_read_block_bitmap_nowait 2 28078 NULL
96812 +GetRecvByte_28082 GetRecvByte 0 28082 NULL
96813 +platform_get_irq_28088 platform_get_irq 0 28088 NULL
96814 +gdth_init_isa_28091 gdth_init_isa 1 28091 NULL
96815 +mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
96816 +rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
96817 +vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
96818 +video_read_28148 video_read 3 28148 NULL
96819 +snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
96820 +stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
96821 +vread_28173 vread 0 28173 NULL
96822 +macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
96823 +d_path_28198 d_path 0 28198 NULL
96824 +nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
96825 +line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL nohasharray
96826 +set_dis_disc_pfs_28225 set_dis_disc_pfs 3 28225 &line6_alloc_sysex_buffer_28225
96827 +amd_nb_num_28228 amd_nb_num 0 28228 NULL
96828 +ext4_validate_block_bitmap_28243 ext4_validate_block_bitmap 3 28243 NULL
96829 +usemap_size_28281 usemap_size 0 28281 NULL
96830 +dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
96831 +acpi_register_gsi_xen_28305 acpi_register_gsi_xen 2 28305 NULL nohasharray
96832 +nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 &acpi_register_gsi_xen_28305
96833 +snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
96834 +bm_entry_write_28338 bm_entry_write 3 28338 NULL
96835 +snapshot_write_28351 snapshot_write 3 28351 NULL
96836 +__next_wq_cpu_28352 __next_wq_cpu 1 28352 NULL
96837 +sys_writev_28384 sys_writev 3 28384 NULL
96838 +dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
96839 +tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
96840 +set_bypass_pfs_28395 set_bypass_pfs 3 28395 NULL
96841 +subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
96842 +mpage_readpages_28436 mpage_readpages 3 28436 NULL
96843 +snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
96844 +key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL
96845 +alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
96846 +vmw_du_crtc_cursor_set_28479 vmw_du_crtc_cursor_set 4-5 28479 NULL
96847 +ocfs2_backup_super_blkno_28484 ocfs2_backup_super_blkno 0-2 28484 NULL
96848 +max_response_pages_28492 max_response_pages 0 28492 NULL
96849 +ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
96850 +__next_node_28521 __next_node 1 28521 NULL
96851 +i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
96852 +early_init_dt_alloc_memory_arch_28528 early_init_dt_alloc_memory_arch 1 28528 NULL
96853 +sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
96854 +run_delalloc_range_28545 run_delalloc_range 3 28545 NULL nohasharray
96855 +mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 &run_delalloc_range_28545
96856 +b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
96857 +i2o_msg_post_wait_mem_28558 i2o_msg_post_wait_mem 0 28558 NULL
96858 +phys_pud_init_28574 phys_pud_init 0-3-2 28574 NULL
96859 +cfg80211_send_rx_auth_28580 cfg80211_send_rx_auth 3 28580 NULL
96860 +oxygen_read32_28582 oxygen_read32 0 28582 NULL
96861 +ocfs2_read_dir_block_28587 ocfs2_read_dir_block 2 28587 NULL
96862 +wm8350_clear_bits_28596 wm8350_clear_bits 2 28596 NULL
96863 +extract_entropy_28604 extract_entropy 3-5 28604 NULL
96864 +kfifo_unused_28612 kfifo_unused 0 28612 NULL
96865 +mp_override_legacy_irq_28618 mp_override_legacy_irq 4 28618 NULL
96866 +snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
96867 +_set_range_28627 _set_range 3 28627 NULL
96868 +v4l2_compat_ioctl32_28630 v4l2_compat_ioctl32 3 28630 NULL
96869 +setup_usemap_28636 setup_usemap 3-4 28636 NULL
96870 +blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
96871 +__dev_alloc_skb_28681 __dev_alloc_skb 1 28681 NULL
96872 +nl80211_send_new_peer_candidate_28692 nl80211_send_new_peer_candidate 5 28692 NULL nohasharray
96873 +kvm_mmu_get_page_28692 kvm_mmu_get_page 2 28692 &nl80211_send_new_peer_candidate_28692
96874 +drm_plane_init_28731 drm_plane_init 6 28731 NULL
96875 +spi_execute_28736 spi_execute 5 28736 NULL
96876 +snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL nohasharray
96877 +phantom_compat_ioctl_28738 phantom_compat_ioctl 3 28738 &snd_pcm_aio_write_28738
96878 +read_file_btcoex_28743 read_file_btcoex 3 28743 NULL
96879 +cxio_init_resource_fifo_28764 cxio_init_resource_fifo 3 28764 NULL
96880 +ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
96881 +dvb_net_sec_callback_28786 dvb_net_sec_callback 2 28786 NULL
96882 +sel_write_member_28800 sel_write_member 3 28800 NULL
96883 +cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
96884 +btrfs_ref_to_path_28809 btrfs_ref_to_path 0-8-3 28809 NULL
96885 +memory_bm_create_28814 memory_bm_create 0 28814 NULL
96886 +iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
96887 +vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
96888 +ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
96889 +packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
96890 +tps6586x_update_28898 tps6586x_update 2 28898 NULL
96891 +da9055_group_write_28904 da9055_group_write 2-3 28904 NULL
96892 +ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
96893 +ocfs2_frozen_trigger_28929 ocfs2_frozen_trigger 4 28929 NULL
96894 +push_rx_28939 push_rx 3 28939 NULL
96895 +btrfs_trim_block_group_28963 btrfs_trim_block_group 3 28963 NULL
96896 +alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
96897 +ext4_mb_add_groupinfo_28988 ext4_mb_add_groupinfo 2 28988 NULL
96898 +bin_uuid_28999 bin_uuid 3 28999 NULL
96899 +xz_dec_init_29029 xz_dec_init 2 29029 NULL
96900 +sys_fcntl64_29031 sys_fcntl64 3 29031 NULL
96901 +ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
96902 +rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL
96903 +iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
96904 +lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
96905 +ieee80211_probereq_get_29069 ieee80211_probereq_get 4-6 29069 NULL
96906 +sctp_getsockopt_assoc_stats_29074 sctp_getsockopt_assoc_stats 2 29074 NULL
96907 +mark_extents_written_29082 mark_extents_written 2 29082 NULL
96908 +isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
96909 +snprintf_29125 snprintf 0 29125 NULL
96910 +iov_shorten_29130 iov_shorten 0 29130 NULL
96911 +pm860x_reg_write_29141 pm860x_reg_write 2 29141 NULL
96912 +proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
96913 +reshape_ring_29147 reshape_ring 2 29147 NULL
96914 +drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
96915 +wusb_prf_256_29203 wusb_prf_256 7 29203 NULL nohasharray
96916 +alloc_group_attrs_29203 alloc_group_attrs 3 29203 &wusb_prf_256_29203
96917 +comedi_alloc_subdevices_29207 comedi_alloc_subdevices 2 29207 NULL
96918 +do_shrinker_shrink_29208 do_shrinker_shrink 0 29208 NULL
96919 +rds_iw_inc_copy_to_user_29214 rds_iw_inc_copy_to_user 3 29214 NULL
96920 +iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
96921 +devm_ioremap_29235 devm_ioremap 2-3 29235 NULL
96922 +irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
96923 +recover_peb_29238 recover_peb 6-7 29238 NULL
96924 +security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
96925 +prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
96926 +bitmap_ord_to_pos_29279 bitmap_ord_to_pos 3 29279 NULL
96927 +sn9c102_read_29305 sn9c102_read 3 29305 NULL
96928 +fd_do_writev_29329 fd_do_writev 3 29329 NULL
96929 +lo_compat_ioctl_29336 lo_compat_ioctl 4 29336 NULL
96930 +tun_put_user_29337 tun_put_user 5 29337 NULL
96931 +__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
96932 +alloc_and_copy_ftrace_hash_29368 alloc_and_copy_ftrace_hash 1 29368 NULL
96933 +mwifiex_cfg80211_mgmt_tx_29387 mwifiex_cfg80211_mgmt_tx 7 29387 NULL
96934 +pca953x_irq_setup_29407 pca953x_irq_setup 3 29407 NULL
96935 +mempool_create_29437 mempool_create 1 29437 NULL
96936 +crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
96937 +apei_exec_ctx_get_output_29457 apei_exec_ctx_get_output 0 29457 NULL
96938 +validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
96939 +do_register_entry_29478 do_register_entry 4 29478 NULL
96940 +simple_strtoul_29480 simple_strtoul 0 29480 NULL
96941 +btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
96942 +btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
96943 +atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
96944 +ftrace_write_29551 ftrace_write 3 29551 NULL
96945 +idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
96946 +leaf_dealloc_29566 leaf_dealloc 3 29566 NULL nohasharray
96947 +alloc_empty_pages_29566 alloc_empty_pages 2 29566 &leaf_dealloc_29566
96948 +lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
96949 +pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4 29589 NULL
96950 +wm8903_gpio_set_29597 wm8903_gpio_set 2 29597 NULL
96951 +slots_per_page_29601 slots_per_page 0 29601 NULL
96952 +qla4_82xx_pci_set_window_29605 qla4_82xx_pci_set_window 0-2 29605 NULL
96953 +nla_get_u16_29624 nla_get_u16 0 29624 NULL
96954 +tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
96955 +lowmem_page_address_29649 lowmem_page_address 0 29649 NULL
96956 +sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
96957 +br_send_bpdu_29669 br_send_bpdu 3 29669 NULL
96958 +sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
96959 +sd_alloc_ctl_entry_29708 sd_alloc_ctl_entry 1 29708 NULL nohasharray
96960 +posix_acl_from_xattr_29708 posix_acl_from_xattr 3 29708 &sd_alloc_ctl_entry_29708
96961 +probes_write_29711 probes_write 3 29711 NULL
96962 +emi62_writememory_29731 emi62_writememory 4 29731 NULL
96963 +read_cis_cache_29735 read_cis_cache 4 29735 NULL
96964 +da9055_gpio_direction_input_29742 da9055_gpio_direction_input 2 29742 NULL
96965 +cxio_hal_init_resource_29771 cxio_hal_init_resource 2-6-7 29771 NULL nohasharray
96966 +ip_vs_conn_fill_param_sync_29771 ip_vs_conn_fill_param_sync 6 29771 &cxio_hal_init_resource_29771
96967 +dbAlloc_29794 dbAlloc 0 29794 NULL
96968 +ext4_trim_all_free_29806 ext4_trim_all_free 4-3-2 29806 NULL
96969 +arizona_gpio_set_29823 arizona_gpio_set 2 29823 NULL
96970 +tcp_sendpage_29829 tcp_sendpage 4 29829 NULL
96971 +scan_bitmap_block_29840 scan_bitmap_block 4 29840 NULL
96972 +__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
96973 +kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
96974 +ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
96975 +scsi_end_request_29876 scsi_end_request 3 29876 NULL
96976 +crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
96977 +nfc_targets_found_29886 nfc_targets_found 3 29886 NULL
96978 +tps6586x_write_29894 tps6586x_write 2 29894 NULL
96979 +check586_29914 check586 2 29914 NULL
96980 +ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
96981 +__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL
96982 +irias_add_octseq_attrib_29983 irias_add_octseq_attrib 4 29983 NULL nohasharray
96983 +diva_os_get_context_size_29983 diva_os_get_context_size 0 29983 &irias_add_octseq_attrib_29983
96984 +arch_setup_dmar_msi_29992 arch_setup_dmar_msi 1 29992 NULL
96985 +utf32_to_utf8_30028 utf32_to_utf8 0 30028 NULL
96986 +alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
96987 +scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
96988 +drp_wmove_30043 drp_wmove 4 30043 NULL
96989 +cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
96990 +snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
96991 +tg3_run_loopback_30093 tg3_run_loopback 2 30093 NULL
96992 +rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
96993 +skb_pagelen_30113 skb_pagelen 0 30113 NULL
96994 +spi_async_locked_30117 spi_async_locked 0 30117 NULL
96995 +calgary_unmap_page_30130 calgary_unmap_page 2-3 30130 NULL
96996 +_osd_req_sizeof_alist_header_30134 _osd_req_sizeof_alist_header 0 30134 NULL
96997 +u_memcpya_30139 u_memcpya 2-3 30139 NULL
96998 +cx25821_video_ioctl_30188 cx25821_video_ioctl 2 30188 NULL
96999 +mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
97000 +drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
97001 +snd_pcm_playback_forward_30201 snd_pcm_playback_forward 0-2 30201 NULL
97002 +usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
97003 +nfs_idmap_request_key_30208 nfs_idmap_request_key 3 30208 NULL
97004 +read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
97005 +snd_ac97_pcm_assign_30218 snd_ac97_pcm_assign 2 30218 NULL
97006 +isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
97007 +compat_readv_30273 compat_readv 3 30273 NULL
97008 +lapic_register_intr_30279 lapic_register_intr 1 30279 NULL
97009 +__be64_to_cpup_30283 __be64_to_cpup 0 30283 NULL
97010 +skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
97011 +pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
97012 +tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
97013 +ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
97014 +generic_ptrace_pokedata_30338 generic_ptrace_pokedata 2 30338 NULL
97015 +resource_from_user_30341 resource_from_user 3 30341 NULL
97016 +__vmalloc_node_flags_30352 __vmalloc_node_flags 1 30352 NULL
97017 +sys_get_mempolicy_30379 sys_get_mempolicy 3 30379 NULL
97018 +mangle_sdp_packet_30381 mangle_sdp_packet 10 30381 NULL
97019 +c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
97020 +get_kernel_pages_30397 get_kernel_pages 0 30397 NULL
97021 +_drbd_bm_find_next_zero_30415 _drbd_bm_find_next_zero 2 30415 NULL
97022 +vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
97023 +tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
97024 +enable_write_30456 enable_write 3 30456 NULL
97025 +tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
97026 +urandom_read_30462 urandom_read 3 30462 NULL
97027 +zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
97028 +write_head_30481 write_head 4 30481 NULL
97029 +adu_write_30487 adu_write 3 30487 NULL
97030 +dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
97031 +debug_debug2_read_30526 debug_debug2_read 3 30526 NULL
97032 +batadv_dat_snoop_incoming_arp_request_30548 batadv_dat_snoop_incoming_arp_request 3 30548 NULL
97033 +disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
97034 +set_le_30581 set_le 4 30581 NULL
97035 +blk_init_tags_30592 blk_init_tags 1 30592 NULL
97036 +i2c_hid_get_report_length_30598 i2c_hid_get_report_length 0 30598 NULL
97037 +sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
97038 +macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
97039 +compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
97040 +nfsd_nrpools_30651 nfsd_nrpools 0 30651 NULL
97041 +agp_remap_30665 agp_remap 2 30665 NULL
97042 +jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
97043 +dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
97044 +lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
97045 +snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
97046 +snapshot_status_30744 snapshot_status 5 30744 NULL
97047 +max77693_update_reg_30747 max77693_update_reg 2 30747 NULL
97048 +tcf_csum_ipv4_udp_30777 tcf_csum_ipv4_udp 4 30777 NULL
97049 +wm8350_read_auxadc_30780 wm8350_read_auxadc 2 30780 NULL
97050 +smk_read_doi_30813 smk_read_doi 3 30813 NULL
97051 +get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
97052 +create_subvol_30836 create_subvol 4 30836 NULL
97053 +sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
97054 +cfg80211_rx_mgmt_30844 cfg80211_rx_mgmt 5 30844 NULL nohasharray
97055 +twl6040_set_bits_30844 twl6040_set_bits 2 30844 &cfg80211_rx_mgmt_30844
97056 +hda_hwdep_ioctl_compat_30847 hda_hwdep_ioctl_compat 4 30847 NULL
97057 +ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
97058 +f1x_match_to_this_node_30888 f1x_match_to_this_node 3 30888 NULL
97059 +regmap_update_bits_check_30894 regmap_update_bits_check 2 30894 NULL
97060 +iommu_map_mmio_space_30919 iommu_map_mmio_space 1 30919 NULL
97061 +sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
97062 +tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
97063 +huge_page_mask_30981 huge_page_mask 0 30981 NULL
97064 +i2400mu_rx_size_grow_30989 i2400mu_rx_size_grow 0 30989 NULL
97065 +lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
97066 +phys_pmd_init_31024 phys_pmd_init 0-3-2 31024 NULL
97067 +compat_sys_mq_timedsend_31060 compat_sys_mq_timedsend 3 31060 NULL
97068 +lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
97069 +find_next_bit_le_31064 find_next_bit_le 0-2-3 31064 NULL
97070 +sys_mincore_31079 sys_mincore 1 31079 NULL
97071 +ttm_bo_ioremap_31082 ttm_bo_ioremap 2-3 31082 NULL
97072 +sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
97073 +compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
97074 +depth_read_31112 depth_read 3 31112 NULL
97075 +ssb_read16_31139 ssb_read16 0 31139 NULL
97076 +kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
97077 +size_inside_page_31141 size_inside_page 0 31141 NULL
97078 +w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
97079 +ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
97080 +acpi_ex_system_memory_space_handler_31192 acpi_ex_system_memory_space_handler 2 31192 NULL
97081 +r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
97082 +mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
97083 +__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
97084 +cpumask_weight_31215 cpumask_weight 0 31215 NULL
97085 +__read_reg_31216 __read_reg 0 31216 NULL
97086 +atm_get_addr_31221 atm_get_addr 3 31221 NULL
97087 +cyy_readb_31240 cyy_readb 0 31240 NULL
97088 +_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
97089 +ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
97090 +ceph_copy_page_vector_to_user_31270 ceph_copy_page_vector_to_user 3-4 31270 NULL
97091 +sctp_tsnmap_find_gap_ack_31272 sctp_tsnmap_find_gap_ack 3-2 31272 NULL
97092 +uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
97093 +sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
97094 +futex_uaddr_31316 futex_uaddr 2 31316 NULL
97095 +command_file_write_31318 command_file_write 3 31318 NULL
97096 +__cpu_to_node_31345 __cpu_to_node 0 31345 NULL
97097 +xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
97098 +vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3 31374 NULL
97099 +trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
97100 +inb_31388 inb 0 31388 NULL
97101 +key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
97102 +mcs7830_set_reg_31413 mcs7830_set_reg 3 31413 NULL
97103 +TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
97104 +snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
97105 +acpi_sci_ioapic_setup_31445 acpi_sci_ioapic_setup 4 31445 NULL
97106 +opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
97107 +_regmap_update_bits_31456 _regmap_update_bits 2 31456 NULL
97108 +input_get_new_minor_31464 input_get_new_minor 1 31464 NULL
97109 +do_fcntl_31468 do_fcntl 3 31468 NULL
97110 +xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
97111 +alg_setkey_31485 alg_setkey 3 31485 NULL
97112 +rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
97113 +qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
97114 +__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
97115 +hidraw_write_31536 hidraw_write 3 31536 NULL
97116 +mtd_div_by_eb_31543 mtd_div_by_eb 0-1 31543 NULL
97117 +usbvision_read_31555 usbvision_read 3 31555 NULL
97118 +normalize_31566 normalize 0-1-2 31566 NULL
97119 +tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
97120 +get_max_inline_xattr_value_size_31578 get_max_inline_xattr_value_size 0 31578 NULL
97121 +osst_write_31581 osst_write 3 31581 NULL
97122 +snd_compr_get_avail_31584 snd_compr_get_avail 0 31584 NULL
97123 +iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
97124 +mtd_get_user_prot_info_31616 mtd_get_user_prot_info 0 31616 NULL
97125 +arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
97126 +videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
97127 +pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
97128 +xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
97129 +__lgread_31668 __lgread 4 31668 NULL
97130 +symbol_string_31670 symbol_string 0 31670 NULL
97131 +_usb_writeN_sync_31682 _usb_writeN_sync 4 31682 NULL
97132 +forced_ps_read_31685 forced_ps_read 3 31685 NULL
97133 +reiserfs_in_journal_31689 reiserfs_in_journal 3 31689 NULL
97134 +audit_log_n_string_31705 audit_log_n_string 3 31705 NULL
97135 +ath6kl_wmi_send_probe_response_cmd_31728 ath6kl_wmi_send_probe_response_cmd 6 31728 NULL
97136 +utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL
97137 +shmem_pwrite_slow_31741 shmem_pwrite_slow 3 31741 NULL
97138 +NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 NULL nohasharray
97139 +input_abs_get_max_31742 input_abs_get_max 0 31742 &NCR_700_change_queue_depth_31742
97140 +bcm_char_read_31750 bcm_char_read 3 31750 NULL
97141 +lm3533_led_get_pattern_reg_31752 lm3533_led_get_pattern_reg 0-2 31752 NULL
97142 +snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
97143 +usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
97144 +get_count_order_31800 get_count_order 0 31800 NULL
97145 +ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
97146 +isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
97147 +strnlen_user_31815 strnlen_user 0-2 31815 NULL
97148 +sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
97149 +drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
97150 +ddb_output_write_31902 ddb_output_write 3 31902 NULL
97151 +xattr_permission_31907 xattr_permission 0 31907 NULL
97152 +new_dir_31919 new_dir 3 31919 NULL
97153 +kmem_alloc_31920 kmem_alloc 1 31920 NULL
97154 +guestwidth_to_adjustwidth_31937 guestwidth_to_adjustwidth 0-1 31937 NULL
97155 +iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4 31942 NULL
97156 +vb2_write_31948 vb2_write 3 31948 NULL
97157 +pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
97158 +regcache_rbtree_sync_31964 regcache_rbtree_sync 2 31964 NULL
97159 +copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
97160 +mtd_add_partition_31971 mtd_add_partition 3 31971 NULL
97161 +find_next_zero_bit_31990 find_next_zero_bit 0-2-3 31990 NULL
97162 +tps6586x_irq_map_32002 tps6586x_irq_map 2 32002 NULL
97163 +calc_hmac_32010 calc_hmac 3 32010 NULL
97164 +aead_len_32021 aead_len 0 32021 NULL
97165 +ocfs2_remove_extent_32032 ocfs2_remove_extent 4-3 32032 NULL
97166 +posix_acl_set_32037 posix_acl_set 4 32037 NULL
97167 +vmw_cursor_update_dmabuf_32045 vmw_cursor_update_dmabuf 3-4 32045 NULL
97168 +sys_sched_setaffinity_32046 sys_sched_setaffinity 2 32046 NULL
97169 +proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
97170 +cfg80211_send_unprot_deauth_32080 cfg80211_send_unprot_deauth 3 32080 NULL
97171 +bio_alloc_32095 bio_alloc 2 32095 NULL
97172 +alloc_pwms_32100 alloc_pwms 1-2 32100 NULL
97173 +ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
97174 +disk_status_32120 disk_status 4 32120 NULL
97175 +rc5t583_write_32124 rc5t583_write 2 32124 NULL
97176 +venus_link_32165 venus_link 5 32165 NULL
97177 +drbd_new_dev_size_32171 drbd_new_dev_size 0 32171 NULL
97178 +do_writepages_32173 do_writepages 0 32173 NULL nohasharray
97179 +ntfs_rl_realloc_nofail_32173 ntfs_rl_realloc_nofail 3 32173 &do_writepages_32173
97180 +load_header_32183 load_header 0 32183 NULL
97181 +ubi_wl_scrub_peb_32196 ubi_wl_scrub_peb 0 32196 NULL
97182 +wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
97183 +riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
97184 +lm3533_write_32236 lm3533_write 2 32236 NULL
97185 +lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
97186 +ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
97187 +fb_compat_ioctl_32265 fb_compat_ioctl 3 32265 NULL
97188 +vmalloc_user_32308 vmalloc_user 1 32308 NULL
97189 +hex_string_32310 hex_string 0 32310 NULL
97190 +flakey_status_32315 flakey_status 5 32315 NULL
97191 +nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
97192 +nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
97193 +t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
97194 +dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL
97195 +f1x_translate_sysaddr_to_cs_32359 f1x_translate_sysaddr_to_cs 2 32359 NULL
97196 +sel_read_initcon_32362 sel_read_initcon 3 32362 NULL
97197 +_drbd_bm_find_next_32372 _drbd_bm_find_next 2 32372 NULL
97198 +variax_set_raw2_32374 variax_set_raw2 4 32374 NULL
97199 +usbtmc_read_32377 usbtmc_read 3 32377 NULL
97200 +qla4_82xx_pci_mem_write_2M_32398 qla4_82xx_pci_mem_write_2M 2 32398 NULL
97201 +xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
97202 +log_text_32428 log_text 0 32428 NULL
97203 +regmap_irq_map_32429 regmap_irq_map 2 32429 NULL
97204 +hid_input_report_32458 hid_input_report 4 32458 NULL
97205 +snd_pcm_sync_ptr_32461 snd_pcm_sync_ptr 0 32461 NULL
97206 +ieee80211_fill_mesh_addresses_32465 ieee80211_fill_mesh_addresses 0 32465 NULL
97207 +ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
97208 +ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
97209 +disconnect_32521 disconnect 4 32521 NULL
97210 +qsfp_read_32522 qsfp_read 0-4-2 32522 NULL
97211 +ilo_read_32531 ilo_read 3 32531 NULL
97212 +ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL nohasharray
97213 +crypt_status_32533 crypt_status 5 32533 &ieee80211_if_read_estab_plinks_32533
97214 +format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
97215 +__first_node_32558 __first_node 0 32558 NULL
97216 +aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
97217 +pnp_mem_len_32584 pnp_mem_len 0 32584 NULL
97218 +mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
97219 +pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
97220 +read_file_beacon_32595 read_file_beacon 3 32595 NULL
97221 +ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
97222 +sys_set_mempolicy_32608 sys_set_mempolicy 3 32608 NULL
97223 +cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
97224 +ieee80211_hdrlen_32637 ieee80211_hdrlen 0 32637 NULL
97225 +ite_decode_bytes_32642 ite_decode_bytes 3 32642 NULL
97226 +kvmalloc_32646 kvmalloc 1 32646 NULL
97227 +ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
97228 +generic_readlink_32654 generic_readlink 3 32654 NULL
97229 +move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
97230 +tps80031_set_bits_32686 tps80031_set_bits 3 32686 NULL
97231 +jfs_readpages_32702 jfs_readpages 4 32702 NULL
97232 +snd_hwdep_ioctl_compat_32736 snd_hwdep_ioctl_compat 3 32736 NULL
97233 +megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
97234 +stats_read_ul_32751 stats_read_ul 3 32751 NULL
97235 +tty_compat_ioctl_32761 tty_compat_ioctl 3 32761 NULL
97236 +sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
97237 +firmwareUpload_32794 firmwareUpload 3 32794 NULL
97238 +rproc_name_read_32805 rproc_name_read 3 32805 NULL
97239 +vga_rseq_32848 vga_rseq 0 32848 NULL
97240 +new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
97241 +io_apic_setup_irq_pin_32868 io_apic_setup_irq_pin 1 32868 NULL
97242 +ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 NULL nohasharray
97243 +cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 &ath6kl_usb_submit_ctrl_in_32880
97244 +ath6kl_usb_post_recv_transfers_32892 ath6kl_usb_post_recv_transfers 2 32892 NULL
97245 +il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
97246 +zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
97247 +rmap_recycle_32938 rmap_recycle 3 32938 NULL
97248 +irq_reserve_irqs_32946 irq_reserve_irqs 1-2 32946 NULL
97249 +ext4_valid_block_bitmap_32958 ext4_valid_block_bitmap 3 32958 NULL
97250 +arch_ptrace_32981 arch_ptrace 3 32981 NULL
97251 +compat_filldir_32999 compat_filldir 3 32999 NULL
97252 +ext3_alloc_blocks_33007 ext3_alloc_blocks 3 33007 NULL
97253 +snd_pcm_prepare_33036 snd_pcm_prepare 0 33036 NULL
97254 +pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
97255 +ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
97256 +bitmap_resize_33054 bitmap_resize 2 33054 NULL
97257 +stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
97258 +sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
97259 +acl_permission_check_33083 acl_permission_check 0 33083 NULL
97260 +ieee80211_fragment_33112 ieee80211_fragment 4 33112 NULL
97261 +write_node_33121 write_node 4 33121 NULL
97262 +calc_patch_size_33124 calc_patch_size 0 33124 NULL
97263 +fb_sys_write_33130 fb_sys_write 3 33130 NULL
97264 +debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
97265 +dataflash_read_fact_otp_33204 dataflash_read_fact_otp 2-3 33204 NULL
97266 +pp_read_33210 pp_read 3 33210 NULL
97267 +xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
97268 +snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
97269 +sched_find_first_bit_33270 sched_find_first_bit 0 33270 NULL
97270 +cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
97271 +mei_compat_ioctl_33275 mei_compat_ioctl 3 33275 NULL
97272 +pcf50633_irq_mask_33280 pcf50633_irq_mask 2 33280 NULL
97273 +mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
97274 +ath6kl_usb_ctrl_msg_exchange_33327 ath6kl_usb_ctrl_msg_exchange 4 33327 NULL
97275 +gsm_mux_rx_netchar_33336 gsm_mux_rx_netchar 3 33336 NULL
97276 +joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
97277 +create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
97278 +irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
97279 +read_file_regidx_33370 read_file_regidx 3 33370 NULL
97280 +ceph_osdc_writepages_33375 ceph_osdc_writepages 5 33375 NULL
97281 +ocfs2_quota_read_33382 ocfs2_quota_read 5 33382 NULL
97282 +ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
97283 +scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
97284 +ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 2-3 33394 NULL
97285 +ext4_meta_bg_first_block_no_33408 ext4_meta_bg_first_block_no 2 33408 NULL nohasharray
97286 +snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 &ext4_meta_bg_first_block_no_33408
97287 +ufs_getfrag_block_33409 ufs_getfrag_block 2 33409 NULL
97288 +ubh_scanc_33436 ubh_scanc 0-4-3 33436 NULL
97289 +ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
97290 +create_entry_33479 create_entry 2 33479 NULL
97291 +ip_setsockopt_33487 ip_setsockopt 5 33487 NULL nohasharray
97292 +elf_map_33487 elf_map 0-2 33487 &ip_setsockopt_33487
97293 +netxen_nic_hw_write_wx_128M_33488 netxen_nic_hw_write_wx_128M 2 33488 NULL
97294 +ol_dqblk_chunk_off_33489 ol_dqblk_chunk_off 2 33489 NULL
97295 +res_counter_read_33499 res_counter_read 4 33499 NULL
97296 +fb_read_33506 fb_read 3 33506 NULL
97297 +musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
97298 +ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
97299 +nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
97300 +aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
97301 +acpi_gsi_to_irq_33533 acpi_gsi_to_irq 1 33533 NULL
97302 +tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL
97303 +dup_array_33551 dup_array 3 33551 NULL
97304 +solo_enc_read_33553 solo_enc_read 3 33553 NULL
97305 +count_subheaders_33591 count_subheaders 0 33591 NULL
97306 +scsi_execute_33596 scsi_execute 5 33596 NULL
97307 +comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
97308 +xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL nohasharray
97309 +ip6_find_1stfragopt_33608 ip6_find_1stfragopt 0 33608 &xt_compat_target_offset_33608
97310 +il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 NULL
97311 +irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
97312 +inw_p_33668 inw_p 0 33668 NULL
97313 +arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
97314 +i2c_hid_alloc_buffers_33673 i2c_hid_alloc_buffers 2 33673 NULL
97315 +ath6kl_wmi_startscan_cmd_33674 ath6kl_wmi_startscan_cmd 8 33674 NULL
97316 +rbd_alloc_coll_33678 rbd_alloc_coll 1 33678 NULL
97317 +nv50_disp_dmac_create__33696 nv50_disp_dmac_create_ 6 33696 NULL
97318 +compat_insnlist_33706 compat_insnlist 2 33706 NULL
97319 +sys_keyctl_33708 sys_keyctl 4 33708 NULL nohasharray
97320 +netlink_sendmsg_33708 netlink_sendmsg 4 33708 &sys_keyctl_33708
97321 +tipc_link_stats_33716 tipc_link_stats 3 33716 NULL
97322 +pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
97323 +ocfs2_extent_map_get_blocks_33720 ocfs2_extent_map_get_blocks 2 33720 NULL
97324 +__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
97325 +Read_hfc_33755 Read_hfc 0 33755 NULL
97326 +vifs_state_read_33762 vifs_state_read 3 33762 NULL
97327 +hashtab_create_33769 hashtab_create 3 33769 NULL
97328 +midibuf_message_length_33770 midibuf_message_length 0 33770 NULL
97329 +if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
97330 +find_next_offset_33804 find_next_offset 3 33804 NULL
97331 +sky2_rx_pad_33819 sky2_rx_pad 0 33819 NULL
97332 +sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
97333 +scrub_setup_recheck_block_33831 scrub_setup_recheck_block 5-4 33831 NULL
97334 +udplite_manip_pkt_33832 udplite_manip_pkt 4 33832 NULL
97335 +snd_pcm_action_nonatomic_33844 snd_pcm_action_nonatomic 0 33844 NULL
97336 +usb_dump_endpoint_descriptor_33849 usb_dump_endpoint_descriptor 0 33849 NULL
97337 +calgary_alloc_coherent_33851 calgary_alloc_coherent 2 33851 NULL
97338 +oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
97339 +cap_mmap_addr_33853 cap_mmap_addr 0 33853 NULL
97340 +config_proc_write_33878 config_proc_write 3 33878 NULL
97341 +get_user_pages_33908 get_user_pages 0 33908 NULL
97342 +queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
97343 +sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
97344 +lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
97345 +read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
97346 +vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
97347 +__ntfs_malloc_34022 __ntfs_malloc 1 34022 NULL
97348 +ppp_write_34034 ppp_write 3 34034 NULL
97349 +tty_insert_flip_string_34042 tty_insert_flip_string 3 34042 NULL
97350 +__domain_flush_pages_34045 __domain_flush_pages 2-3 34045 NULL
97351 +acpi_dev_get_irqresource_34064 acpi_dev_get_irqresource 2 34064 NULL
97352 +memcg_update_all_caches_34068 memcg_update_all_caches 1 34068 NULL
97353 +read_file_ant_diversity_34071 read_file_ant_diversity 3 34071 NULL
97354 +show_risefalltime_34084 show_risefalltime 4 34084 NULL
97355 +compat_hdio_ioctl_34088 compat_hdio_ioctl 4 34088 NULL
97356 +pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
97357 +islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
97358 +ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2 34135 NULL
97359 +cdc_mbim_process_dgram_34136 cdc_mbim_process_dgram 3 34136 NULL
97360 +ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
97361 +shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
97362 +skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
97363 +ext4_da_write_begin_34215 ext4_da_write_begin 3-4 34215 NULL
97364 +bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
97365 +pcf857x_to_irq_34273 pcf857x_to_irq 2 34273 NULL
97366 +zone_spanned_pages_in_node_34299 zone_spanned_pages_in_node 0 34299 NULL
97367 +iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 NULL nohasharray
97368 +pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 &iov_iter_single_seg_count_34326
97369 +crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL
97370 +rngapi_reset_34366 rngapi_reset 3 34366 NULL nohasharray
97371 +p54_alloc_skb_34366 p54_alloc_skb 3 34366 &rngapi_reset_34366
97372 +reiserfs_resize_34377 reiserfs_resize 2 34377 NULL
97373 +ea_read_34378 ea_read 0 34378 NULL
97374 +av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
97375 +usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
97376 +read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
97377 +ivtv_read_pos_34400 ivtv_read_pos 3 34400 NULL nohasharray
97378 +iwl_calib_set_34400 iwl_calib_set 3 34400 &ivtv_read_pos_34400
97379 +nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
97380 +usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
97381 +mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
97382 +line6_dumpreq_init_34473 line6_dumpreq_init 3 34473 NULL
97383 +skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
97384 +i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
97385 +security_inode_permission_34488 security_inode_permission 0 34488 NULL
97386 +alloc_buf_34532 alloc_buf 1 34532 NULL
97387 +tracing_stats_read_34537 tracing_stats_read 3 34537 NULL
97388 +hugetlbfs_read_actor_34547 hugetlbfs_read_actor 0-2-5-4 34547 NULL
97389 +dbBackSplit_34561 dbBackSplit 0 34561 NULL
97390 +alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
97391 +velocity_rx_copy_34583 velocity_rx_copy 2 34583 NULL
97392 +init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
97393 +inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
97394 +ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL
97395 +__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
97396 +__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
97397 +cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
97398 +nf_nat_mangle_udp_packet_34661 nf_nat_mangle_udp_packet 8-6 34661 NULL
97399 +isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
97400 +port_print_34704 port_print 3 34704 NULL
97401 +alloc_irq_and_cfg_at_34706 alloc_irq_and_cfg_at 1 34706 NULL
97402 +ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
97403 +platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
97404 +reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
97405 +qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
97406 +ssd1307fb_write_array_34779 ssd1307fb_write_array 4 34779 NULL
97407 +__copy_in_user_34790 __copy_in_user 3 34790 NULL
97408 +drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
97409 +b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
97410 +nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
97411 +acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
97412 +usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
97413 +ieee80211_if_read_txpower_34871 ieee80211_if_read_txpower 3 34871 NULL
97414 +lm3533_ctrlbank_get_reg_34886 lm3533_ctrlbank_get_reg 0-2 34886 NULL
97415 +msg_print_text_34889 msg_print_text 0 34889 NULL
97416 +ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
97417 +compat_put_uint_34905 compat_put_uint 1 34905 NULL
97418 +__inode_permission_34925 __inode_permission 0 34925 NULL nohasharray
97419 +btrfs_super_chunk_root_34925 btrfs_super_chunk_root 0 34925 &__inode_permission_34925
97420 +skb_gro_header_slow_34958 skb_gro_header_slow 2 34958 NULL
97421 +Realloc_34961 Realloc 2 34961 NULL
97422 +rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
97423 +l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
97424 +sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
97425 +coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
97426 +btmrvl_gpiogap_write_35053 btmrvl_gpiogap_write 3 35053 NULL
97427 +pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
97428 +store_ifalias_35088 store_ifalias 4 35088 NULL
97429 +__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
97430 +capi_write_35104 capi_write 3 35104 NULL nohasharray
97431 +tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
97432 +ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
97433 +pointer_35138 pointer 0 35138 NULL
97434 +gntdev_alloc_map_35145 gntdev_alloc_map 2 35145 NULL
97435 +iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
97436 +ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
97437 +unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
97438 +_osd_req_alist_elem_size_35216 _osd_req_alist_elem_size 0-2 35216 NULL
97439 +striped_read_35218 striped_read 0-2-8-3 35218 NULL nohasharray
97440 +security_key_getsecurity_35218 security_key_getsecurity 0 35218 &striped_read_35218
97441 +video_register_device_no_warn_35226 video_register_device_no_warn 3 35226 NULL nohasharray
97442 +rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 &video_register_device_no_warn_35226
97443 +set_fd_set_35249 set_fd_set 1 35249 NULL
97444 +ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
97445 +dma_show_regs_35266 dma_show_regs 3 35266 NULL
97446 +irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
97447 +i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
97448 +isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
97449 +brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 NULL nohasharray
97450 +__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 &brcmf_sdio_forensic_read_35311
97451 +tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
97452 +sys_setsockopt_35320 sys_setsockopt 5 35320 NULL
97453 +new_bind_ctl_35324 new_bind_ctl 2 35324 NULL
97454 +irq_domain_disassociate_many_35325 irq_domain_disassociate_many 2-3 35325 NULL
97455 +fallback_on_nodma_alloc_35332 fallback_on_nodma_alloc 2 35332 NULL
97456 +pskb_network_may_pull_35336 pskb_network_may_pull 2 35336 NULL
97457 +ieee80211_if_fmt_ap_power_level_35347 ieee80211_if_fmt_ap_power_level 3 35347 NULL
97458 +nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
97459 +hpi_alloc_control_cache_35351 hpi_alloc_control_cache 1 35351 NULL
97460 +compat_filldir64_35354 compat_filldir64 3 35354 NULL
97461 +rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
97462 +__set_test_and_free_35436 __set_test_and_free 2 35436 NULL
97463 +buffer_to_user_35439 buffer_to_user 3 35439 NULL
97464 +rdev_get_id_35454 rdev_get_id 0 35454 NULL
97465 +i915_wedged_read_35474 i915_wedged_read 3 35474 NULL
97466 +do_atm_ioctl_35519 do_atm_ioctl 3 35519 NULL
97467 +async_setkey_35521 async_setkey 3 35521 NULL
97468 +__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
97469 +iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
97470 +rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
97471 +ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
97472 +ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
97473 +ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
97474 +ReadZReg_35604 ReadZReg 0 35604 NULL
97475 +kernel_readv_35617 kernel_readv 3 35617 NULL
97476 +reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
97477 +scrub_stripe_35637 scrub_stripe 4-3 35637 NULL
97478 +spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
97479 +store_debug_level_35652 store_debug_level 3 35652 NULL
97480 +regmap_update_bits_35668 regmap_update_bits 2 35668 NULL
97481 +rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
97482 +compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
97483 +dm_table_create_35687 dm_table_create 3 35687 NULL
97484 +rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
97485 +pci_enable_sriov_35745 pci_enable_sriov 2 35745 NULL
97486 +iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
97487 +udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
97488 +read_file_stations_35795 read_file_stations 3 35795 NULL
97489 +pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
97490 +tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
97491 +mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
97492 +fls64_35862 fls64 0 35862 NULL
97493 +kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
97494 +ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
97495 +uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
97496 +kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
97497 +vol_cdev_compat_ioctl_35923 vol_cdev_compat_ioctl 3 35923 NULL
97498 +sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
97499 +rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
97500 +put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
97501 +wm8350_reg_write_35967 wm8350_reg_write 2 35967 NULL
97502 +ext_rts51x_sd_execute_write_data_35971 ext_rts51x_sd_execute_write_data 9 35971 NULL
97503 +ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL
97504 +acl_alloc_35979 acl_alloc 1 35979 NULL
97505 +generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
97506 +koneplus_sysfs_write_35993 koneplus_sysfs_write 6 35993 NULL
97507 +il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
97508 +ubi_eba_write_leb_36029 ubi_eba_write_leb 5-6 36029 NULL
97509 +sys_init_module_36047 sys_init_module 2 36047 NULL
97510 +account_shadowed_36048 account_shadowed 2 36048 NULL
97511 +gpio_power_read_36059 gpio_power_read 3 36059 NULL
97512 +snd_pcm_playback_hw_avail_36061 snd_pcm_playback_hw_avail 0 36061 NULL
97513 +write_emulate_36065 write_emulate 2-4 36065 NULL
97514 +radeon_vm_num_pdes_36070 radeon_vm_num_pdes 0 36070 NULL
97515 +ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
97516 +ext3_new_blocks_36073 ext3_new_blocks 3 36073 NULL
97517 +ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
97518 +snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
97519 +vga_arb_write_36112 vga_arb_write 3 36112 NULL
97520 +simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
97521 +compat_ptrace_request_36131 compat_ptrace_request 3-4 36131 NULL
97522 +vmalloc_exec_36132 vmalloc_exec 1 36132 NULL
97523 +ext3_readpages_36144 ext3_readpages 4 36144 NULL
97524 +iwl_trans_txq_alloc_36147 iwl_trans_txq_alloc 3 36147 NULL
97525 +alloc_vm_area_36149 alloc_vm_area 1 36149 NULL
97526 +twl_set_36154 twl_set 2 36154 NULL
97527 +b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
97528 +btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
97529 +snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
97530 +ubifs_read_nnode_36221 ubifs_read_nnode 0 36221 NULL
97531 +nfqnl_mangle_36226 nfqnl_mangle 4-2 36226 NULL
97532 +atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
97533 +viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
97534 +rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
97535 +scrub_stripe_36248 scrub_stripe 5-4 36248 NULL
97536 +compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
97537 +usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL
97538 +codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
97539 +crypto_shash_digestsize_36284 crypto_shash_digestsize 0 36284 NULL
97540 +readahead_tree_block_36285 readahead_tree_block 3 36285 NULL
97541 +nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
97542 +lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
97543 +ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
97544 +fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
97545 +lc_create_36332 lc_create 4 36332 NULL
97546 +jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
97547 +v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL nohasharray
97548 +xz_dec_lzma2_create_36353 xz_dec_lzma2_create 2 36353 &v9fs_file_readn_36353
97549 +to_sector_36361 to_sector 0-1 36361 NULL
97550 +tunables_read_36385 tunables_read 3 36385 NULL
97551 +afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
97552 +sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
97553 +alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
97554 +b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
97555 +tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 NULL
97556 +__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
97557 +mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
97558 +get_param_l_36518 get_param_l 0 36518 NULL
97559 +ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
97560 +lguest_setup_irq_36531 lguest_setup_irq 1 36531 NULL
97561 +crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
97562 +ssd1307fb_write_data_array_36538 ssd1307fb_write_data_array 3 36538 NULL
97563 +cpu_type_read_36540 cpu_type_read 3 36540 NULL
97564 +get_entry_len_36549 get_entry_len 0 36549 NULL
97565 +__kfifo_to_user_36555 __kfifo_to_user 3 36555 NULL nohasharray
97566 +macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
97567 +btrfs_get_token_64_36572 btrfs_get_token_64 0 36572 NULL
97568 +ssb_bus_scan_36578 ssb_bus_scan 2 36578 NULL
97569 +__erst_read_36579 __erst_read 0 36579 NULL
97570 +put_cmsg_36589 put_cmsg 4 36589 NULL
97571 +pcnet32_realloc_rx_ring_36598 pcnet32_realloc_rx_ring 3 36598 NULL
97572 +fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
97573 +vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
97574 +format_decode_36638 format_decode 0 36638 NULL
97575 +ced_ioctl_36647 ced_ioctl 2 36647 NULL
97576 +lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
97577 +osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
97578 +iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
97579 +ptr_to_compat_36680 ptr_to_compat 0 36680 NULL
97580 +ext4_mb_discard_group_preallocations_36685 ext4_mb_discard_group_preallocations 2 36685 NULL
97581 +snd_soc_update_bits_36714 snd_soc_update_bits 2 36714 NULL
97582 +extract_icmp6_fields_36732 extract_icmp6_fields 2 36732 NULL
97583 +snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4 36740 NULL
97584 +cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
97585 +ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
97586 +snd_soc_update_bits_locked_36766 snd_soc_update_bits_locked 2 36766 NULL
97587 +ip4ip6_err_36772 ip4ip6_err 5 36772 NULL
97588 +ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
97589 +proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
97590 +hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
97591 +int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
97592 +pcf50633_reg_write_36841 pcf50633_reg_write 2 36841 NULL nohasharray
97593 +fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 &pcf50633_reg_write_36841
97594 +keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
97595 +cm_write_36858 cm_write 3 36858 NULL
97596 +tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
97597 +svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
97598 +ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
97599 +selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
97600 +lm3533_als_get_target_36905 lm3533_als_get_target 2-3 36905 NULL
97601 +OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
97602 +crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
97603 +div_u64_36951 div_u64 0 36951 NULL
97604 +write_leb_36957 write_leb 5 36957 NULL
97605 +ntfs_external_attr_find_36963 ntfs_external_attr_find 0 36963 NULL
97606 +sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
97607 +mc13xxx_reg_rmw_36997 mc13xxx_reg_rmw 2 36997 NULL
97608 +drbd_new_dev_size_36998 drbd_new_dev_size 0-3 36998 NULL
97609 +auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
97610 +setxattr_37006 setxattr 4 37006 NULL
97611 +ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL
97612 +parse_command_37079 parse_command 2 37079 NULL
97613 +wm8994_gpio_set_37082 wm8994_gpio_set 2 37082 NULL
97614 +pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
97615 +tun_get_user_37094 tun_get_user 5 37094 NULL
97616 +has_wrprotected_page_37123 has_wrprotected_page 2-3 37123 NULL
97617 +msg_word_37164 msg_word 0 37164 NULL
97618 +can_set_xattr_37182 can_set_xattr 4 37182 NULL
97619 +crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL
97620 +regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
97621 +__do_replace_37227 __do_replace 5 37227 NULL
97622 +rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
97623 +prot_queue_del_37258 prot_queue_del 0 37258 NULL
97624 +ath6kl_wmi_set_ie_cmd_37260 ath6kl_wmi_set_ie_cmd 6 37260 NULL
97625 +exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
97626 +c101_run_37279 c101_run 2 37279 NULL
97627 +srp_target_alloc_37288 srp_target_alloc 3 37288 NULL
97628 +jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
97629 +send_msg_37323 send_msg 4 37323 NULL
97630 +brcmf_sdbrcm_membytes_37324 brcmf_sdbrcm_membytes 3-5 37324 NULL
97631 +l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL
97632 +scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
97633 +rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
97634 +security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
97635 +iommu_num_pages_37391 iommu_num_pages 0-1-3-2 37391 NULL
97636 +sys_getxattr_37418 sys_getxattr 4 37418 NULL
97637 +hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
97638 +acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL nohasharray
97639 +find_next_bit_37422 find_next_bit 0-2-3 37422 &acpi_os_allocate_zeroed_37422
97640 +tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4 37428 NULL
97641 +iwl_print_last_event_logs_37433 iwl_print_last_event_logs 0-7-9 37433 NULL
97642 +tcp_established_options_37450 tcp_established_options 0 37450 NULL
97643 +brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
97644 +ufs_data_ptr_to_cpu_37475 ufs_data_ptr_to_cpu 0 37475 NULL
97645 +get_est_timing_37484 get_est_timing 0 37484 NULL
97646 +xfs_trans_read_buf_map_37487 xfs_trans_read_buf_map 5 37487 NULL
97647 +kmem_realloc_37489 kmem_realloc 2 37489 NULL
97648 +kvm_vcpu_compat_ioctl_37500 kvm_vcpu_compat_ioctl 3 37500 NULL
97649 +vmalloc_32_user_37519 vmalloc_32_user 1 37519 NULL
97650 +fault_inject_read_37534 fault_inject_read 3 37534 NULL
97651 +hdr_size_37536 hdr_size 0 37536 NULL
97652 +sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL nohasharray
97653 +nf_nat_mangle_tcp_packet_37551 nf_nat_mangle_tcp_packet 6-8 37551 &sep_create_dcb_dmatables_context_37551
97654 +xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
97655 +mlx4_get_mgm_entry_size_37607 mlx4_get_mgm_entry_size 0 37607 NULL
97656 +kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
97657 +alloc_fd_37637 alloc_fd 1 37637 NULL
97658 +bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
97659 +rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
97660 +vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
97661 +lnw_gpio_to_irq_37665 lnw_gpio_to_irq 2 37665 NULL
97662 +ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
97663 +regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
97664 +nametbl_header_37698 nametbl_header 2 37698 NULL
97665 +__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
97666 +soc_widget_update_bits_locked_37715 soc_widget_update_bits_locked 2 37715 NULL
97667 +read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
97668 +ocfs2_duplicate_clusters_by_jbd_37749 ocfs2_duplicate_clusters_by_jbd 6-4-5 37749 NULL
97669 +ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
97670 +ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
97671 +dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
97672 +il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
97673 +smk_read_logging_37804 smk_read_logging 3 37804 NULL
97674 +deny_write_access_37813 deny_write_access 0 37813 NULL
97675 +rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
97676 +bitmap_find_next_zero_area_37827 bitmap_find_next_zero_area 2-3-5-4 37827 NULL
97677 +o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
97678 +xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
97679 +sys_setxattr_37880 sys_setxattr 4 37880 NULL
97680 +lm3533_als_get_target_reg_37881 lm3533_als_get_target_reg 0-1-2 37881 NULL
97681 +dvb_net_sec_37884 dvb_net_sec 3 37884 NULL
97682 +max77686_irq_domain_map_37897 max77686_irq_domain_map 2 37897 NULL
97683 +tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
97684 +pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL
97685 +read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
97686 +fifo_alloc_37961 fifo_alloc 1 37961 NULL
97687 +ext3_free_blocks_sb_37967 ext3_free_blocks_sb 3-4 37967 NULL
97688 +rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
97689 +persistent_ram_old_size_37997 persistent_ram_old_size 0 37997 NULL
97690 +vfs_readv_38011 vfs_readv 3 38011 NULL
97691 +aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
97692 +klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 NULL nohasharray
97693 +il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 &klsi_105_prepare_write_buffer_38044
97694 +_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL
97695 +nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
97696 +alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
97697 +xfs_buf_readahead_map_38081 xfs_buf_readahead_map 3 38081 NULL
97698 +uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
97699 +request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
97700 +proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
97701 +ep0_read_38095 ep0_read 3 38095 NULL
97702 +sk_wmem_schedule_38096 sk_wmem_schedule 2 38096 NULL
97703 +snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
97704 +vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
97705 +__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 3-4 38153 NULL
97706 +kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
97707 +cdev_add_38176 cdev_add 2-3 38176 NULL
97708 +brcmf_sdcard_recv_buf_38179 brcmf_sdcard_recv_buf 6 38179 NULL
97709 +rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
97710 +get_ucode_user_38202 get_ucode_user 3 38202 NULL
97711 +ext3_new_block_38208 ext3_new_block 3 38208 NULL
97712 +stmpe_gpio_irq_map_38222 stmpe_gpio_irq_map 3 38222 NULL
97713 +osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
97714 +vhost_net_compat_ioctl_38237 vhost_net_compat_ioctl 3 38237 NULL
97715 +_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
97716 +snd_pcm_playback_rewind_38249 snd_pcm_playback_rewind 0-2 38249 NULL
97717 +ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 NULL nohasharray
97718 +mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &ieee80211_if_read_auto_open_plinks_38268
97719 +verity_status_38273 verity_status 5 38273 NULL
97720 +xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
97721 +xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
97722 +zd_mac_rx_38296 zd_mac_rx 3 38296 NULL
97723 +ieee80211_send_probe_req_38307 ieee80211_send_probe_req 6-4 38307 NULL
97724 +isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
97725 +ida_simple_get_38326 ida_simple_get 2 38326 NULL
97726 +__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
97727 +pyra_sysfs_write_38370 pyra_sysfs_write 6 38370 NULL
97728 +dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
97729 +get_valid_node_allowed_38412 get_valid_node_allowed 1-0 38412 NULL
97730 +ocfs2_which_cluster_group_38413 ocfs2_which_cluster_group 2 38413 NULL
97731 +ht_destroy_irq_38418 ht_destroy_irq 1 38418 NULL
97732 +ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
97733 +asix_write_cmd_async_38420 asix_write_cmd_async 5 38420 NULL
97734 +pcnet32_realloc_tx_ring_38428 pcnet32_realloc_tx_ring 3 38428 NULL
97735 +pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
97736 +kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
97737 +i915_min_freq_read_38470 i915_min_freq_read 3 38470 NULL
97738 +blk_end_bidi_request_38482 blk_end_bidi_request 3-4 38482 NULL
97739 +cpu_to_mem_38501 cpu_to_mem 0 38501 NULL
97740 +dev_names_read_38509 dev_names_read 3 38509 NULL
97741 +iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
97742 +event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
97743 +set_queue_count_38519 set_queue_count 0 38519 NULL
97744 +ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
97745 +btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
97746 +cpu_to_node_38561 cpu_to_node 0 38561 NULL
97747 +irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
97748 +il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
97749 +_ipw_read32_38565 _ipw_read32 0 38565 NULL
97750 +snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
97751 +copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
97752 +cosa_net_setup_rx_38594 cosa_net_setup_rx 2 38594 NULL
97753 +compat_sys_ptrace_38595 compat_sys_ptrace 3-4 38595 NULL
97754 +delay_status_38606 delay_status 5 38606 NULL
97755 +icn_writecmd_38629 icn_writecmd 2 38629 NULL
97756 +ext2_readpages_38640 ext2_readpages 4 38640 NULL
97757 +cma_create_area_38642 cma_create_area 2 38642 NULL
97758 +audit_init_entry_38644 audit_init_entry 1 38644 NULL
97759 +mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
97760 +nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
97761 +cfg80211_send_disassoc_38678 cfg80211_send_disassoc 3 38678 NULL
97762 +iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
97763 +ext4_wait_block_bitmap_38695 ext4_wait_block_bitmap 2 38695 NULL
97764 +find_next_usable_block_38716 find_next_usable_block 1-3 38716 NULL
97765 +alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
97766 +udf_readpages_38761 udf_readpages 4 38761 NULL
97767 +iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
97768 +snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
97769 +err_decode_38804 err_decode 2 38804 NULL
97770 +ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
97771 +sys_select_38827 sys_select 1 38827 NULL
97772 +b43_txhdr_size_38832 b43_txhdr_size 0 38832 NULL
97773 +direct_entry_38836 direct_entry 3 38836 NULL
97774 +compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
97775 +interfaces_38859 interfaces 2 38859 NULL
97776 +pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
97777 +sizeof_gpio_leds_priv_38882 sizeof_gpio_leds_priv 0-1 38882 NULL
97778 +dbgfs_state_38894 dbgfs_state 3 38894 NULL
97779 +f2fs_xattr_set_acl_38895 f2fs_xattr_set_acl 4 38895 NULL
97780 +__fswab16_38898 __fswab16 0 38898 NULL
97781 +process_bulk_data_command_38906 process_bulk_data_command 4 38906 NULL
97782 +ext3_trim_all_free_38929 ext3_trim_all_free 3-4-2 38929 NULL
97783 +sbp_count_se_tpg_luns_38943 sbp_count_se_tpg_luns 0 38943 NULL
97784 +__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
97785 +usb_maxpacket_38977 usb_maxpacket 0 38977 NULL
97786 +OSDSetBlock_38986 OSDSetBlock 4-2 38986 NULL
97787 +udf_new_block_38999 udf_new_block 4 38999 NULL
97788 +get_nodes_39012 get_nodes 3 39012 NULL
97789 +twl6030_interrupt_unmask_39013 twl6030_interrupt_unmask 2 39013 NULL
97790 +acpi_install_gpe_block_39031 acpi_install_gpe_block 4 39031 NULL
97791 +_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
97792 +line6_midibuf_read_39067 line6_midibuf_read 0-3 39067 NULL
97793 +ext4_init_block_bitmap_39071 ext4_init_block_bitmap 3 39071 NULL
97794 +tun_get_user_39099 tun_get_user 4 39099 NULL
97795 +tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
97796 +__kfifo_to_user_r_39123 __kfifo_to_user_r 3 39123 NULL
97797 +ea_foreach_39133 ea_foreach 0 39133 NULL
97798 +generic_permission_39150 generic_permission 0 39150 NULL
97799 +alloc_ring_39151 alloc_ring 2-4 39151 NULL
97800 +proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
97801 +create_bounce_buffer_39155 create_bounce_buffer 3 39155 NULL
97802 +ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
97803 +init_list_set_39188 init_list_set 2-3 39188 NULL
97804 +ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
97805 +qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
97806 +qla4_82xx_pci_mem_read_direct_39208 qla4_82xx_pci_mem_read_direct 2 39208 NULL
97807 +vfio_group_fops_compat_ioctl_39219 vfio_group_fops_compat_ioctl 3 39219 NULL
97808 +ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
97809 +batadv_tt_response_fill_table_39236 batadv_tt_response_fill_table 1 39236 NULL
97810 +posix_acl_to_xattr_39237 posix_acl_to_xattr 0 39237 NULL
97811 +drm_order_39244 drm_order 0 39244 NULL
97812 +snd_pcm_capture_forward_39248 snd_pcm_capture_forward 0-2 39248 NULL
97813 +r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
97814 +pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
97815 +__skb_cow_39254 __skb_cow 2 39254 NULL
97816 +ath6kl_wmi_set_appie_cmd_39266 ath6kl_wmi_set_appie_cmd 5 39266 NULL
97817 +rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
97818 +__vmalloc_node_39308 __vmalloc_node 1 39308 NULL
97819 +__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
97820 +wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
97821 +__cfg80211_send_deauth_39344 __cfg80211_send_deauth 3 39344 NULL
97822 +__copy_from_user_nocache_39351 __copy_from_user_nocache 3 39351 NULL
97823 +ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
97824 +do_write_log_from_user_39362 do_write_log_from_user 3 39362 NULL
97825 +vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
97826 +regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
97827 +ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
97828 +__send_to_port_39386 __send_to_port 3 39386 NULL
97829 +user_power_read_39414 user_power_read 3 39414 NULL
97830 +alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
97831 +sys_semop_39457 sys_semop 3 39457 NULL
97832 +setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
97833 +ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
97834 +atomic64_read_unchecked_39505 atomic64_read_unchecked 0 39505 NULL
97835 +int_proc_write_39542 int_proc_write 3 39542 NULL
97836 +pp_write_39554 pp_write 3 39554 NULL
97837 +ol_dqblk_block_39558 ol_dqblk_block 0-3-2 39558 NULL
97838 +datablob_format_39571 datablob_format 2 39571 NULL nohasharray
97839 +ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
97840 +handle_response_icmp_39574 handle_response_icmp 7 39574 NULL
97841 +adau1373_set_pll_39593 adau1373_set_pll 2 39593 NULL
97842 +mtdchar_compat_ioctl_39602 mtdchar_compat_ioctl 3 39602 NULL
97843 +n_tty_compat_ioctl_helper_39605 n_tty_compat_ioctl_helper 4 39605 NULL
97844 +ext_depth_39607 ext_depth 0 39607 NULL
97845 +nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
97846 +sdio_readb_39618 sdio_readb 0 39618 NULL
97847 +set_dev_class_39645 set_dev_class 4 39645 NULL nohasharray
97848 +dm_exception_table_init_39645 dm_exception_table_init 2 39645 &set_dev_class_39645
97849 +snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
97850 +tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL nohasharray
97851 +prism2_info_hostscanresults_39657 prism2_info_hostscanresults 3 39657 &tcp_try_rmem_schedule_39657
97852 +kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
97853 +v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
97854 +hsc_msg_len_get_39673 hsc_msg_len_get 0 39673 NULL
97855 +do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
97856 +ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
97857 +tcf_csum_ipv4_tcp_39713 tcf_csum_ipv4_tcp 4 39713 NULL
97858 +ocfs2_pages_per_cluster_39790 ocfs2_pages_per_cluster 0 39790 NULL
97859 +security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
97860 +snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
97861 +sys_migrate_pages_39825 sys_migrate_pages 2 39825 NULL
97862 +get_priv_size_39828 get_priv_size 0-1 39828 NULL
97863 +pkt_add_39897 pkt_add 3 39897 NULL
97864 +read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
97865 +gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
97866 +dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
97867 +aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
97868 +exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
97869 +oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
97870 +__spi_async_39932 __spi_async 0 39932 NULL
97871 +__get_order_39935 __get_order 0 39935 NULL
97872 +error_error_frame_read_39947 error_error_frame_read 3 39947 NULL nohasharray
97873 +fwnet_pd_new_39947 fwnet_pd_new 4 39947 &error_error_frame_read_39947
97874 +tty_prepare_flip_string_39955 tty_prepare_flip_string 3 39955 NULL
97875 +dma_push_rx_39973 dma_push_rx 2 39973 NULL
97876 +broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
97877 +mthca_array_init_39987 mthca_array_init 2 39987 NULL
97878 +xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
97879 +nf_nat_icmpv6_reply_translation_40023 nf_nat_icmpv6_reply_translation 5 40023 NULL nohasharray
97880 +ivtvfb_write_40023 ivtvfb_write 3 40023 &nf_nat_icmpv6_reply_translation_40023
97881 +ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
97882 +datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
97883 +regmap_add_irq_chip_40042 regmap_add_irq_chip 4 40042 NULL
97884 +add_tty_40055 add_tty 1 40055 NULL nohasharray
97885 +l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 &add_tty_40055
97886 +atomic_xchg_40070 atomic_xchg 0 40070 NULL
97887 +snd_pcm_sw_params_user_40095 snd_pcm_sw_params_user 0 40095 NULL
97888 +gen_pool_first_fit_40110 gen_pool_first_fit 2-3-4 40110 NULL
97889 +rbd_do_op_40128 rbd_do_op 4-5 40128 NULL
97890 +sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
97891 +rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
97892 +iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
97893 +pt_write_40159 pt_write 3 40159 NULL
97894 +scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
97895 +ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL nohasharray
97896 +devnode_find_40199 devnode_find 3-2 40199 &ipr_alloc_ucode_buffer_40199
97897 +allocate_probes_40204 allocate_probes 1 40204 NULL
97898 +acpi_system_write_alarm_40205 acpi_system_write_alarm 3 40205 NULL
97899 +compat_put_long_40214 compat_put_long 1 40214 NULL
97900 +au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
97901 +xfs_buf_read_map_40226 xfs_buf_read_map 3 40226 NULL
97902 +osst_read_40237 osst_read 3 40237 NULL
97903 +lpage_info_slot_40243 lpage_info_slot 1-3 40243 NULL
97904 +ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
97905 +of_get_child_count_40254 of_get_child_count 0 40254 NULL
97906 +rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
97907 +usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
97908 +rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
97909 +ubi_io_write_data_40305 ubi_io_write_data 4-5 40305 NULL
97910 +batadv_tt_changes_fill_buff_40323 batadv_tt_changes_fill_buff 4 40323 NULL
97911 +ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
97912 +mmio_read_40348 mmio_read 4 40348 NULL
97913 +usb_dump_interface_40353 usb_dump_interface 0 40353 NULL
97914 +ocfs2_release_clusters_40355 ocfs2_release_clusters 4 40355 NULL
97915 +event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
97916 +ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 2-3 40365 NULL
97917 +fwnet_incoming_packet_40380 fwnet_incoming_packet 3 40380 NULL
97918 +brcmf_sdbrcm_get_image_40397 brcmf_sdbrcm_get_image 0-2 40397 NULL
97919 +atmel_rmem16_40450 atmel_rmem16 0 40450 NULL
97920 +tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
97921 +zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
97922 +batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
97923 +devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
97924 +tty_write_room_40495 tty_write_room 0 40495 NULL
97925 +persistent_ram_new_40501 persistent_ram_new 1-2 40501 NULL
97926 +sg_phys_40507 sg_phys 0 40507 NULL
97927 +TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
97928 +ixgbe_dbg_reg_ops_read_40540 ixgbe_dbg_reg_ops_read 3 40540 NULL
97929 +ima_write_policy_40548 ima_write_policy 3 40548 NULL
97930 +esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
97931 +ufs_inode_getfrag_40560 ufs_inode_getfrag 2-4 40560 NULL
97932 +arch_setup_hpet_msi_40584 arch_setup_hpet_msi 1 40584 NULL
97933 +skge_rx_get_40598 skge_rx_get 3 40598 NULL
97934 +get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
97935 +bl_mark_sectors_init_40613 bl_mark_sectors_init 2-3 40613 NULL
97936 +cpuset_sprintf_cpulist_40627 cpuset_sprintf_cpulist 0 40627 NULL
97937 +twl4030_kpwrite_u8_40665 twl4030_kpwrite_u8 3 40665 NULL
97938 +__cfg80211_roamed_40668 __cfg80211_roamed 4-6 40668 NULL
97939 +pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
97940 +fops_read_40672 fops_read 3 40672 NULL
97941 +tps80031_write_40678 tps80031_write 3 40678 NULL
97942 +nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
97943 +__seq_open_private_40715 __seq_open_private 3 40715 NULL
97944 +xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL nohasharray
97945 +find_next_zero_bit_le_40744 find_next_zero_bit_le 0-2-3 40744 &xfs_iext_remove_direct_40744
97946 +tps65910_irq_map_40748 tps65910_irq_map 2 40748 NULL
97947 +security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
97948 +fat_generic_compat_ioctl_40755 fat_generic_compat_ioctl 3 40755 NULL
97949 +card_send_command_40757 card_send_command 3 40757 NULL
97950 +ad1889_readl_40765 ad1889_readl 0 40765 NULL
97951 +pg_write_40766 pg_write 3 40766 NULL
97952 +show_list_40775 show_list 3 40775 NULL
97953 +kfifo_out_copy_r_40784 kfifo_out_copy_r 0-3 40784 NULL
97954 +bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
97955 +pyra_sysfs_read_40795 pyra_sysfs_read 6 40795 NULL
97956 +netdev_alloc_skb_ip_align_40811 netdev_alloc_skb_ip_align 2 40811 NULL
97957 +nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
97958 +__mlx4_qp_reserve_range_40847 __mlx4_qp_reserve_range 2-3 40847 NULL
97959 +ocfs2_zero_partial_clusters_40856 ocfs2_zero_partial_clusters 2-3 40856 NULL
97960 +v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
97961 +read_file_queue_40895 read_file_queue 3 40895 NULL
97962 +waiters_read_40902 waiters_read 3 40902 NULL
97963 +isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
97964 +gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
97965 +vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
97966 +snd_vx_create_40948 snd_vx_create 4 40948 NULL
97967 +skb_end_offset_40949 skb_end_offset 0 40949 NULL
97968 +wm8994_free_irq_40951 wm8994_free_irq 2 40951 NULL
97969 +rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
97970 +insert_old_idx_40987 insert_old_idx 0 40987 NULL
97971 +il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
97972 +mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
97973 +mtd_block_isbad_41015 mtd_block_isbad 0 41015 NULL
97974 +_req_append_segment_41031 _req_append_segment 2 41031 NULL
97975 +mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
97976 +ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
97977 +vfs_listxattr_41062 vfs_listxattr 0 41062 NULL
97978 +cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
97979 +roccat_read_41093 roccat_read 3 41093 NULL nohasharray
97980 +nvme_map_user_pages_41093 nvme_map_user_pages 3-4 41093 &roccat_read_41093
97981 +dma_attach_41094 dma_attach 5-6 41094 NULL
97982 +provide_user_output_41105 provide_user_output 3 41105 NULL
97983 +f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
97984 +v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
97985 +tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
97986 +dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
97987 +ol_quota_chunk_block_41177 ol_quota_chunk_block 0-2 41177 NULL
97988 +netif_get_num_default_rss_queues_41187 netif_get_num_default_rss_queues 0 41187 NULL
97989 +compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
97990 +dfs_file_write_41196 dfs_file_write 3 41196 NULL
97991 +xfs_readdir_41200 xfs_readdir 3 41200 NULL
97992 +ocfs2_read_quota_block_41207 ocfs2_read_quota_block 2 41207 NULL
97993 +nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
97994 +hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2-3 41255 NULL
97995 +erst_read_41260 erst_read 0 41260 NULL
97996 +__fprog_create_41263 __fprog_create 2 41263 NULL
97997 +alloc_context_41283 alloc_context 1 41283 NULL
97998 +arch_gnttab_map_shared_41306 arch_gnttab_map_shared 3 41306 NULL
97999 +twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
98000 +cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
98001 +jbd2_alloc_41359 jbd2_alloc 1 41359 NULL
98002 +kmp_init_41373 kmp_init 2 41373 NULL
98003 +isr_commands_read_41398 isr_commands_read 3 41398 NULL
98004 +sys_flistxattr_41407 sys_flistxattr 3 41407 NULL
98005 +rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
98006 +xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
98007 +isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
98008 +lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
98009 +ext4_trim_extent_41436 ext4_trim_extent 4 41436 NULL
98010 +iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
98011 +ntfs_file_buffered_write_41442 ntfs_file_buffered_write 4-6 41442 NULL
98012 +pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
98013 +layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
98014 +snd_pcm_status_41472 snd_pcm_status 0 41472 NULL
98015 +rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
98016 +wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
98017 +hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
98018 +xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
98019 +ldisc_receive_41516 ldisc_receive 4 41516 NULL
98020 +tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
98021 +ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
98022 +gserial_setup_41558 gserial_setup 2 41558 NULL
98023 +nr_status_frames_41559 nr_status_frames 0-1 41559 NULL
98024 +batadv_receive_client_update_packet_41578 batadv_receive_client_update_packet 3 41578 NULL
98025 +rng_dev_read_41581 rng_dev_read 3 41581 NULL
98026 +read_file_rx_chainmask_41605 read_file_rx_chainmask 3 41605 NULL
98027 +vga_io_r_41609 vga_io_r 0 41609 NULL
98028 +usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 NULL
98029 +a2mp_send_41615 a2mp_send 4 41615 NULL
98030 +mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
98031 +rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
98032 +get_std_timing_41654 get_std_timing 0 41654 NULL
98033 +squashfs_cache_init_41656 squashfs_cache_init 2 41656 NULL
98034 +ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
98035 +params_period_bytes_41683 params_period_bytes 0 41683 NULL
98036 +aac_src_ioremap_41688 aac_src_ioremap 2 41688 NULL
98037 +bdx_tx_db_init_41719 bdx_tx_db_init 2 41719 NULL
98038 +sys_pwritev_41722 sys_pwritev 3 41722 NULL
98039 +get_bios_ebda_41730 get_bios_ebda 0 41730 NULL
98040 +fillonedir_41746 fillonedir 3 41746 NULL
98041 +ocfs2_dx_dir_rebalance_41793 ocfs2_dx_dir_rebalance 7 41793 NULL
98042 +hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
98043 +da9052_enable_irq_41814 da9052_enable_irq 2 41814 NULL
98044 +sco_send_frame_41815 sco_send_frame 3 41815 NULL
98045 +ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
98046 +do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
98047 +irq_data_to_status_reg_41854 irq_data_to_status_reg 0 41854 NULL
98048 +keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
98049 +ieee80211_rx_radiotap_space_41870 ieee80211_rx_radiotap_space 0 41870 NULL
98050 +get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
98051 +ceph_get_direct_page_vector_41917 ceph_get_direct_page_vector 2 41917 NULL
98052 +find_ge_pid_41918 find_ge_pid 1 41918 NULL
98053 +build_inv_iotlb_pages_41922 build_inv_iotlb_pages 4-5 41922 NULL
98054 +nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
98055 +ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 3-4 41935 NULL
98056 +ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
98057 +portnames_read_41958 portnames_read 3 41958 NULL
98058 +ubi_self_check_all_ff_41959 ubi_self_check_all_ff 4 41959 NULL
98059 +dst_mtu_41969 dst_mtu 0 41969 NULL
98060 +cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
98061 +ubi_io_is_bad_41983 ubi_io_is_bad 0 41983 NULL
98062 +lguest_map_42008 lguest_map 1-2 42008 NULL
98063 +pool_allocate_42012 pool_allocate 3 42012 NULL
98064 +spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
98065 +acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
98066 +__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
98067 +irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
98068 +dma_generic_alloc_coherent_42048 dma_generic_alloc_coherent 2 42048 NULL nohasharray
98069 +jffs2_do_link_42048 jffs2_do_link 6 42048 &dma_generic_alloc_coherent_42048
98070 +ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
98071 +InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
98072 +scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
98073 +sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
98074 +submit_inquiry_42108 submit_inquiry 3 42108 NULL
98075 +sysfs_read_file_42113 sysfs_read_file 3 42113 NULL
98076 +Read_hfc16_stable_42131 Read_hfc16_stable 0 42131 NULL
98077 +v9fs_alloc_rdir_buf_42150 v9fs_alloc_rdir_buf 2 42150 NULL
98078 +mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
98079 +read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
98080 +oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
98081 +get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
98082 +btmrvl_hsmode_write_42252 btmrvl_hsmode_write 3 42252 NULL
98083 +rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
98084 +netxen_nic_map_indirect_address_128M_42257 netxen_nic_map_indirect_address_128M 2 42257 NULL
98085 +savu_sysfs_write_42273 savu_sysfs_write 6 42273 NULL
98086 +snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
98087 +sel_read_perm_42302 sel_read_perm 3 42302 NULL
98088 +sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
98089 +ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
98090 +xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
98091 +hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
98092 +tcp_sync_mss_42330 tcp_sync_mss 2-0 42330 NULL
98093 +snd_pcm_plug_alloc_42339 snd_pcm_plug_alloc 2 42339 NULL
98094 +ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
98095 +il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
98096 +msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
98097 +krng_get_random_42420 krng_get_random 3 42420 NULL
98098 +gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
98099 +key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
98100 +snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
98101 +tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
98102 +tc3589x_gpio_irq_get_virq_42457 tc3589x_gpio_irq_get_virq 2 42457 NULL
98103 +ext3_valid_block_bitmap_42459 ext3_valid_block_bitmap 3 42459 NULL
98104 +__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
98105 +follow_hugetlb_page_42486 follow_hugetlb_page 0-7 42486 NULL
98106 +omfs_readpages_42490 omfs_readpages 4 42490 NULL
98107 +brcmf_sdbrcm_bus_txctl_42492 brcmf_sdbrcm_bus_txctl 3 42492 NULL
98108 +kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
98109 +smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
98110 +snd_emux_create_port_42533 snd_emux_create_port 3 42533 NULL
98111 +dbAllocNear_42546 dbAllocNear 0 42546 NULL
98112 +i915_ring_stop_read_42549 i915_ring_stop_read 3 42549 NULL nohasharray
98113 +ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 &i915_ring_stop_read_42549
98114 +iwl_print_event_log_42566 iwl_print_event_log 0-5-7 42566 NULL
98115 +xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
98116 +oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
98117 +map_state_42602 map_state 1 42602 NULL nohasharray
98118 +__pskb_pull_42602 __pskb_pull 2 42602 &map_state_42602
98119 +sys_move_pages_42626 sys_move_pages 2 42626 NULL
98120 +ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
98121 +scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
98122 +br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
98123 +_regmap_raw_write_42652 _regmap_raw_write 4-2 42652 NULL
98124 +l2tp_xmit_skb_42672 l2tp_xmit_skb 3 42672 NULL
98125 +request_key_and_link_42693 request_key_and_link 4 42693 NULL
98126 +vb2_read_42703 vb2_read 3 42703 NULL
98127 +sierra_net_send_cmd_42708 sierra_net_send_cmd 3 42708 NULL
98128 +__ocfs2_decrease_refcount_42717 __ocfs2_decrease_refcount 4 42717 NULL
98129 +dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
98130 +set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
98131 +ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
98132 +xen_bind_pirq_gsi_to_irq_42750 xen_bind_pirq_gsi_to_irq 1 42750 NULL
98133 +snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
98134 +cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL
98135 +koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
98136 +ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0-2 42796 NULL
98137 +fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2-3 42804 NULL
98138 +drm_ioctl_42813 drm_ioctl 2 42813 NULL
98139 +iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
98140 +set_arg_42824 set_arg 3 42824 NULL
98141 +ocfs2_desc_bitmap_to_cluster_off_42831 ocfs2_desc_bitmap_to_cluster_off 2 42831 NULL
98142 +xfs_buf_read_uncached_42844 xfs_buf_read_uncached 3 42844 NULL
98143 +prandom_u32_42853 prandom_u32 0 42853 NULL
98144 +of_property_count_strings_42863 of_property_count_strings 0 42863 NULL
98145 +ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
98146 +pskb_expand_head_42881 pskb_expand_head 2-3 42881 NULL
98147 +vt_compat_ioctl_42887 vt_compat_ioctl 3 42887 NULL
98148 +tipc_port_recv_sections_42890 tipc_port_recv_sections 4 42890 NULL
98149 +xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
98150 +SendTxCommandPacket_42901 SendTxCommandPacket 3 42901 NULL
98151 +hd_end_request_42904 hd_end_request 2 42904 NULL
98152 +sta_last_rx_rate_read_42909 sta_last_rx_rate_read 3 42909 NULL
98153 +sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
98154 +get_unmapped_area_42944 get_unmapped_area 0 42944 NULL
98155 +sys_sethostname_42962 sys_sethostname 2 42962 NULL
98156 +read_file_node_stat_42964 read_file_node_stat 3 42964 NULL
98157 +compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
98158 +snd_timer_user_ioctl_compat_42985 snd_timer_user_ioctl_compat 3 42985 NULL
98159 +nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL
98160 +mlx4_qp_reserve_range_43000 mlx4_qp_reserve_range 2-3 43000 NULL
98161 +isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
98162 +wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
98163 +nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
98164 +cpuset_sprintf_memlist_43088 cpuset_sprintf_memlist 0 43088 NULL
98165 +ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
98166 +read_file_dfs_43145 read_file_dfs 3 43145 NULL
98167 +uuid_string_43154 uuid_string 0 43154 NULL
98168 +usb_string_sub_43164 usb_string_sub 0 43164 NULL
98169 +il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
98170 +ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
98171 +process_measurement_43190 process_measurement 0 43190 NULL
98172 +ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
98173 +uio_write_43202 uio_write 3 43202 NULL
98174 +iso_callback_43208 iso_callback 3 43208 NULL
98175 +f2fs_acl_from_disk_43210 f2fs_acl_from_disk 2 43210 NULL
98176 +atomic_long_add_return_43217 atomic_long_add_return 1 43217 NULL
98177 +comedi_compat_ioctl_43218 comedi_compat_ioctl 3 43218 NULL
98178 +vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
98179 +fixup_leb_43256 fixup_leb 3 43256 NULL
98180 +ide_end_rq_43269 ide_end_rq 4 43269 NULL
98181 +evtchn_write_43278 evtchn_write 3 43278 NULL
98182 +filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
98183 +mpage_alloc_43299 mpage_alloc 3 43299 NULL
98184 +get_nr_irqs_gsi_43315 get_nr_irqs_gsi 0 43315 NULL
98185 +__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
98186 +gart_free_coherent_43362 gart_free_coherent 4-2 43362 NULL
98187 +xenfb_write_43412 xenfb_write 3 43412 NULL
98188 +gdm_wimax_netif_rx_43423 gdm_wimax_netif_rx 3 43423 NULL nohasharray
98189 +__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 &gdm_wimax_netif_rx_43423
98190 +usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
98191 +ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
98192 +usemap_size_43443 usemap_size 0-2-1 43443 NULL nohasharray
98193 +usb_string_43443 usb_string 0 43443 &usemap_size_43443
98194 +alloc_new_reservation_43480 alloc_new_reservation 4 43480 NULL
98195 +tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
98196 +ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
98197 +do_readlink_43518 do_readlink 2 43518 NULL
98198 +dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
98199 +cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
98200 +tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL nohasharray
98201 +ufs_alloccg_block_43540 ufs_alloccg_block 3-0 43540 &tx_frag_failed_read_43540
98202 +ath_rx_init_43564 ath_rx_init 2 43564 NULL
98203 +_fc_frame_alloc_43568 _fc_frame_alloc 1 43568 NULL
98204 +rpc_malloc_43573 rpc_malloc 2 43573 NULL
98205 +lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
98206 +proc_read_43614 proc_read 3 43614 NULL
98207 +bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
98208 +tps65217_set_bits_43659 tps65217_set_bits 2 43659 NULL nohasharray
98209 +ext4_acl_count_43659 ext4_acl_count 0-1 43659 &tps65217_set_bits_43659
98210 +dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4-2 43679 NULL
98211 +calgary_map_page_43686 calgary_map_page 3-4 43686 NULL
98212 +max77693_bulk_write_43698 max77693_bulk_write 2-3 43698 NULL
98213 +drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
98214 +snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
98215 +ocfs2_replace_clusters_43733 ocfs2_replace_clusters 5 43733 NULL
98216 +osdv1_attr_list_elem_size_43747 osdv1_attr_list_elem_size 0-1 43747 NULL
98217 +__bm_find_next_43748 __bm_find_next 2 43748 NULL
98218 +gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
98219 +sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
98220 +ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
98221 +byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
98222 +btrfs_copy_from_user_43806 btrfs_copy_from_user 3-1 43806 NULL
98223 +ext4_read_block_bitmap_43814 ext4_read_block_bitmap 2 43814 NULL
98224 +ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
98225 +ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
98226 +p54_download_eeprom_43842 p54_download_eeprom 4 43842 NULL
98227 +read_flush_43851 read_flush 3 43851 NULL
98228 +ocfs2_block_group_find_clear_bits_43874 ocfs2_block_group_find_clear_bits 4 43874 NULL
98229 +pm860x_bulk_write_43875 pm860x_bulk_write 3-2 43875 NULL
98230 +ec_dbgfs_cmd_write_43895 ec_dbgfs_cmd_write 3 43895 NULL
98231 +prism2_sta_send_mgmt_43916 prism2_sta_send_mgmt 5 43916 NULL
98232 +SendString_43928 SendString 3 43928 NULL
98233 +xen_register_gsi_43946 xen_register_gsi 1-2 43946 NULL
98234 +stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
98235 +__get_required_blob_size_43980 __get_required_blob_size 0-2-3 43980 NULL
98236 +nla_reserve_43984 nla_reserve 3 43984 NULL
98237 +scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
98238 +bcm_recvmsg_43992 bcm_recvmsg 4 43992 &scsi_command_size_43992
98239 +emit_flags_44006 emit_flags 4-3 44006 NULL
98240 +write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
98241 +xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
98242 +tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
98243 +vmw_gmr_bind_44130 vmw_gmr_bind 3 44130 NULL
98244 +scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
98245 +ubifs_find_dirty_idx_leb_44169 ubifs_find_dirty_idx_leb 0 44169 NULL
98246 +ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
98247 +handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
98248 +IO_APIC_get_PCI_irq_vector_44198 IO_APIC_get_PCI_irq_vector 0 44198 NULL
98249 +__set_free_44211 __set_free 2 44211 NULL
98250 +claim_ptd_buffers_44213 claim_ptd_buffers 3 44213 NULL
98251 +srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
98252 +ioapic_register_intr_44238 ioapic_register_intr 1 44238 NULL
98253 +scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
98254 +tc3589x_gpio_irq_map_44245 tc3589x_gpio_irq_map 2 44245 NULL
98255 +enlarge_skb_44248 enlarge_skb 2 44248 NULL nohasharray
98256 +xfs_buf_readahead_map_44248 xfs_buf_readahead_map 3 44248 &enlarge_skb_44248
98257 +ufs_clusteracct_44293 ufs_clusteracct 3 44293 NULL
98258 +ocfs2_zero_range_for_truncate_44294 ocfs2_zero_range_for_truncate 3 44294 NULL
98259 +ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
98260 +bitmap_scnprintf_44318 bitmap_scnprintf 0-2 44318 NULL
98261 +dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
98262 +ubi_eba_write_leb_st_44343 ubi_eba_write_leb_st 5 44343 NULL
98263 +nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 NULL nohasharray
98264 +blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 &nfs_fscache_get_super_cookie_44355
98265 +rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
98266 +aoedev_flush_44398 aoedev_flush 2 44398 NULL
98267 +drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
98268 +osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
98269 +ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
98270 +prandom_u32_state_44445 prandom_u32_state 0 44445 NULL
98271 +btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
98272 +sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
98273 +ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
98274 +security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
98275 +iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
98276 +spidev_write_44510 spidev_write 3 44510 NULL
98277 +sys_msgsnd_44537 sys_msgsnd 3 44537 NULL nohasharray
98278 +comm_write_44537 comm_write 3 44537 &sys_msgsnd_44537
98279 +snd_pcm_drop_44542 snd_pcm_drop 0 44542 NULL
98280 +dbg_chk_pnode_44555 dbg_chk_pnode 0 44555 NULL
98281 +snd_pcm_alloc_vmalloc_buffer_44595 snd_pcm_alloc_vmalloc_buffer 2 44595 NULL
98282 +slip_compat_ioctl_44599 slip_compat_ioctl 4 44599 NULL
98283 +wm5100_gpio_set_44602 wm5100_gpio_set 2 44602 NULL
98284 +brcmf_sdbrcm_glom_len_44618 brcmf_sdbrcm_glom_len 0 44618 NULL
98285 +cfpkt_add_body_44630 cfpkt_add_body 3 44630 NULL
98286 +ext2_new_block_44645 ext2_new_block 2 44645 NULL
98287 +alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
98288 +mpi_resize_44674 mpi_resize 2 44674 NULL
98289 +ts_read_44687 ts_read 3 44687 NULL
98290 +_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
98291 +clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
98292 +fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
98293 +key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
98294 +WIL_GET_BITS_44747 WIL_GET_BITS 0-1-2-3 44747 NULL
98295 +tnode_new_44757 tnode_new 3 44757 NULL nohasharray
98296 +pty_write_44757 pty_write 3 44757 &tnode_new_44757
98297 +__videobuf_copy_stream_44769 __videobuf_copy_stream 4 44769 NULL
98298 +handsfree_ramp_44777 handsfree_ramp 2 44777 NULL
98299 +sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
98300 +rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
98301 +qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
98302 +rmap_write_protect_44833 rmap_write_protect 2 44833 NULL
98303 +sisusb_write_44834 sisusb_write 3 44834 NULL
98304 +nl80211_send_unprot_disassoc_44846 nl80211_send_unprot_disassoc 4 44846 NULL
98305 +kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
98306 +skb_availroom_44883 skb_availroom 0 44883 NULL
98307 +nf_bridge_encap_header_len_44890 nf_bridge_encap_header_len 0 44890 NULL
98308 +do_tty_write_44896 do_tty_write 5 44896 NULL
98309 +tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
98310 +nf_nat_seq_adjust_44989 nf_nat_seq_adjust 4 44989 NULL
98311 +max77693_write_reg_45004 max77693_write_reg 2 45004 NULL
98312 +ftdi_process_packet_45005 ftdi_process_packet 5 45005 NULL
98313 +bytepos_delta_45017 bytepos_delta 0 45017 NULL
98314 +read_block_bitmap_45021 read_block_bitmap 2 45021 NULL nohasharray
98315 +ptrace_writedata_45021 ptrace_writedata 4-3 45021 &read_block_bitmap_45021
98316 +vhci_get_user_45039 vhci_get_user 3 45039 NULL
98317 +sel_write_user_45060 sel_write_user 3 45060 NULL
98318 +snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL nohasharray
98319 +do_video_ioctl_45069 do_video_ioctl 3 45069 &snd_mixart_BA0_read_45069
98320 +kvm_mmu_page_get_gfn_45110 kvm_mmu_page_get_gfn 0-2 45110 NULL
98321 +pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
98322 +usbdev_read_45114 usbdev_read 3 45114 NULL
98323 +send_to_tty_45141 send_to_tty 3 45141 NULL
98324 +stmpe_irq_map_45146 stmpe_irq_map 2 45146 NULL
98325 +crypto_aead_blocksize_45148 crypto_aead_blocksize 0 45148 NULL
98326 +gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
98327 +ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 NULL nohasharray
98328 +device_write_45156 device_write 3 45156 &ocfs2_remove_inode_range_45156
98329 +ocfs2_dq_frozen_trigger_45159 ocfs2_dq_frozen_trigger 4 45159 NULL
98330 +tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
98331 +sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
98332 +snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL
98333 +num_clusters_in_group_45194 num_clusters_in_group 2 45194 NULL
98334 +add_child_45201 add_child 4 45201 NULL
98335 +iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
98336 +spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
98337 +ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
98338 +input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
98339 +vcc_compat_ioctl_45291 vcc_compat_ioctl 3 45291 NULL
98340 +snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
98341 +copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
98342 +lane2_associate_req_45398 lane2_associate_req 4 45398 NULL
98343 +keymap_store_45406 keymap_store 4 45406 NULL
98344 +ieee80211_if_fmt_dot11MeshHWMProotInterval_45421 ieee80211_if_fmt_dot11MeshHWMProotInterval 3 45421 NULL
98345 +tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
98346 +do_mmap_pgoff_45441 do_mmap_pgoff 0 45441 NULL
98347 +intel_render_ring_init_dri_45446 intel_render_ring_init_dri 2-3 45446 NULL
98348 +__node_remap_45458 __node_remap 4 45458 NULL
98349 +rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
98350 +udp_manip_pkt_45467 udp_manip_pkt 4 45467 NULL
98351 +tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
98352 +snd_pcm_hwsync_45479 snd_pcm_hwsync 0 45479 NULL
98353 +arizona_init_fll_45503 arizona_init_fll 4-5 45503 NULL
98354 +rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
98355 +clone_bio_45516 clone_bio 6 45516 NULL
98356 +sys_lgetxattr_45531 sys_lgetxattr 4 45531 NULL
98357 +cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
98358 +copy_macs_45534 copy_macs 4 45534 NULL
98359 +nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
98360 +v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
98361 +cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
98362 +atomic_long_sub_return_45551 atomic_long_sub_return 1 45551 NULL
98363 +ext3_group_first_block_no_45555 ext3_group_first_block_no 0-2 45555 NULL
98364 +stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
98365 +posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
98366 +venus_rmdir_45564 venus_rmdir 4 45564 NULL
98367 +ipath_create_cq_45586 ipath_create_cq 2 45586 NULL
98368 +rdma_set_ib_paths_45592 rdma_set_ib_paths 3 45592 NULL
98369 +hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
98370 +audit_log_n_hex_45617 audit_log_n_hex 3 45617 NULL
98371 +da9052_gpio_set_45643 da9052_gpio_set 2 45643 NULL
98372 +ebitmap_next_positive_45651 ebitmap_next_positive 3 45651 NULL
98373 +dma_map_cont_45668 dma_map_cont 5 45668 NULL
98374 +compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
98375 +dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
98376 +smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
98377 +dm_compat_ctl_ioctl_45692 dm_compat_ctl_ioctl 3 45692 NULL
98378 +unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
98379 +bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699
98380 +dvb_ca_en50221_init_45718 dvb_ca_en50221_init 4 45718 NULL
98381 +snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL
98382 +rw_copy_check_uvector_45748 rw_copy_check_uvector 3 45748 NULL nohasharray
98383 +v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
98384 +lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
98385 +nilfs_compat_ioctl_45769 nilfs_compat_ioctl 3 45769 NULL
98386 +alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
98387 +raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
98388 +lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
98389 +pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
98390 +fm_v4l2_init_video_device_45821 fm_v4l2_init_video_device 2 45821 NULL
98391 +memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL
98392 +amthi_read_45831 amthi_read 4 45831 NULL
98393 +x509_process_extension_45854 x509_process_extension 5 45854 NULL
98394 +isdn_write_45863 isdn_write 3 45863 NULL
98395 +rbd_get_num_segments_45864 rbd_get_num_segments 0-2-3 45864 NULL
98396 +unpack_orig_pfns_45867 unpack_orig_pfns 0 45867 NULL
98397 +get_rdac_req_45882 get_rdac_req 3 45882 NULL
98398 +ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
98399 +dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
98400 +nf_nat_ftp_fmt_cmd_45926 nf_nat_ftp_fmt_cmd 0 45926 NULL
98401 +alloc_mr_45935 alloc_mr 1 45935 NULL
98402 +rb_simple_read_45972 rb_simple_read 3 45972 NULL
98403 +ezusb_writememory_45976 ezusb_writememory 4 45976 NULL
98404 +ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
98405 +sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
98406 +get_free_entries_46030 get_free_entries 1 46030 NULL
98407 +__access_remote_vm_46031 __access_remote_vm 0-5-3 46031 NULL
98408 +snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
98409 +acpi_register_gsi_xen_hvm_46052 acpi_register_gsi_xen_hvm 2 46052 NULL
98410 +line6_midibuf_bytes_used_46059 line6_midibuf_bytes_used 0 46059 NULL
98411 +__ocfs2_move_extent_46060 __ocfs2_move_extent 5-6 46060 NULL nohasharray
98412 +dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
98413 +slhc_toss_46066 slhc_toss 0 46066 NULL
98414 +sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
98415 +vfio_config_do_rw_46091 vfio_config_do_rw 3 46091 NULL
98416 +ata_host_alloc_46094 ata_host_alloc 2 46094 NULL
98417 +arizona_set_irq_wake_46101 arizona_set_irq_wake 2 46101 NULL
98418 +pkt_ctl_compat_ioctl_46110 pkt_ctl_compat_ioctl 3 46110 NULL
98419 +memcg_update_array_size_46111 memcg_update_array_size 1 46111 NULL nohasharray
98420 +il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 &memcg_update_array_size_46111
98421 +mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
98422 +__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
98423 +qlcnic_alloc_msix_entries_46160 qlcnic_alloc_msix_entries 2 46160 NULL
98424 +twl_direction_out_46182 twl_direction_out 2 46182 NULL
98425 +vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
98426 +i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
98427 +tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
98428 +dsp_write_46218 dsp_write 2 46218 NULL
98429 +mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
98430 +__le64_to_cpup_46257 __le64_to_cpup 0 46257 NULL
98431 +nf_nat_ftp_46265 nf_nat_ftp 6 46265 NULL
98432 +ReadReg_46277 ReadReg 0 46277 NULL
98433 +batadv_iv_ogm_queue_add_46319 batadv_iv_ogm_queue_add 3 46319 NULL
98434 +__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL
98435 +twl6040_write_46351 twl6040_write 2 46351 NULL
98436 +iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
98437 +smk_write_direct_46363 smk_write_direct 3 46363 NULL
98438 +__iommu_calculate_agaw_46366 __iommu_calculate_agaw 2 46366 NULL
98439 +ubi_dump_flash_46381 ubi_dump_flash 4 46381 NULL
98440 +fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
98441 +crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
98442 +cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
98443 +filldir64_46469 filldir64 3 46469 NULL
98444 +fill_in_write_vector_46498 fill_in_write_vector 0 46498 NULL
98445 +pin_code_reply_46510 pin_code_reply 4 46510 NULL
98446 +mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
98447 +kmsg_read_46514 kmsg_read 3 46514 NULL
98448 +bdx_rxdb_create_46525 bdx_rxdb_create 1 46525 NULL
98449 +nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
98450 +pm860x_irq_domain_map_46553 pm860x_irq_domain_map 2 46553 NULL
98451 +mv_get_hc_count_46554 mv_get_hc_count 0 46554 NULL
98452 +link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
98453 +irq_domain_associate_46564 irq_domain_associate 2 46564 NULL
98454 +dn_current_mss_46574 dn_current_mss 0 46574 NULL
98455 +serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
98456 +snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
98457 +il3945_stats_flag_46606 il3945_stats_flag 0-3 46606 NULL
98458 +vscnprintf_46617 vscnprintf 0-2 46617 NULL
98459 +__kfifo_out_r_46623 __kfifo_out_r 0-3 46623 NULL
98460 +request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
98461 +aircable_process_packet_46639 aircable_process_packet 5 46639 NULL
98462 +av7110_ipack_init_46655 av7110_ipack_init 2 46655 NULL
98463 +alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
98464 +__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
98465 +erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
98466 +wl1271_rx_filter_alloc_field_46721 wl1271_rx_filter_alloc_field 5 46721 NULL
98467 +prepare_copy_46725 prepare_copy 2 46725 NULL
98468 +irq_domain_add_simple_46734 irq_domain_add_simple 2-3 46734 NULL
98469 +ext4_count_free_46754 ext4_count_free 2 46754 NULL
98470 +hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
98471 +int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
98472 +regcache_lzo_sync_46777 regcache_lzo_sync 2 46777 NULL
98473 +scrub_chunk_46789 scrub_chunk 4 46789 NULL
98474 +_sys_packet_req_46793 _sys_packet_req 4 46793 NULL
98475 +_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
98476 +xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
98477 +shmem_pwrite_fast_46842 shmem_pwrite_fast 3 46842 NULL
98478 +ieee80211_rx_radiotap_len_46846 ieee80211_rx_radiotap_len 0 46846 NULL
98479 +spi_async_46857 spi_async 0 46857 NULL
98480 +ieee80211_mgmt_tx_46860 ieee80211_mgmt_tx 9 46860 NULL
98481 +vsnprintf_46863 vsnprintf 0 46863 NULL
98482 +nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
98483 +sip_sprintf_addr_46872 sip_sprintf_addr 0 46872 NULL
98484 +rvmalloc_46873 rvmalloc 1 46873 NULL
98485 +hpi_read_word_nolock_46881 hpi_read_word_nolock 0 46881 NULL
98486 +stmpe_gpio_irq_unmap_46884 stmpe_gpio_irq_unmap 2 46884 NULL
98487 +em28xx_alloc_isoc_46892 em28xx_alloc_isoc 4 46892 NULL
98488 +ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
98489 +sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
98490 +ol_dqblk_off_46904 ol_dqblk_off 3-2 46904 NULL
98491 +fb_write_46924 fb_write 3 46924 NULL
98492 +raid_status_46930 raid_status 5 46930 NULL
98493 +btmrvl_curpsmode_read_46939 btmrvl_curpsmode_read 3 46939 NULL
98494 +__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
98495 +qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
98496 +crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
98497 +mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
98498 +gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 NULL nohasharray
98499 +sel_write_bool_46996 sel_write_bool 3 46996 &gfs2_xattr_system_set_46996
98500 +ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
98501 +blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
98502 +__map_single_47020 __map_single 3-4-7 47020 NULL
98503 +cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2 47024 NULL
98504 +set_dis_bypass_pfs_47038 set_dis_bypass_pfs 3 47038 NULL
98505 +wm8994_set_bits_47052 wm8994_set_bits 2 47052 NULL
98506 +fs_path_len_47060 fs_path_len 0 47060 NULL
98507 +ufs_new_fragments_47070 ufs_new_fragments 3-5-4 47070 NULL
98508 +pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
98509 +scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
98510 +mousedev_read_47123 mousedev_read 3 47123 NULL
98511 +ses_recv_diag_47143 ses_recv_diag 4 47143 NULL nohasharray
98512 +acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 &ses_recv_diag_47143
98513 +cxio_init_resource_fifo_random_47151 cxio_init_resource_fifo_random 3 47151 NULL
98514 +persistent_ram_iomap_47156 persistent_ram_iomap 1-2 47156 NULL
98515 +mxms_headerlen_47161 mxms_headerlen 0 47161 NULL
98516 +rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
98517 +rts51x_ms_rw_47171 rts51x_ms_rw 3-4 47171 NULL
98518 +svc_pool_map_alloc_arrays_47181 svc_pool_map_alloc_arrays 2 47181 NULL
98519 +can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
98520 +ioremap_cache_47189 ioremap_cache 1-2 47189 NULL
98521 +wm8903_gpio_direction_in_47213 wm8903_gpio_direction_in 2 47213 NULL
98522 +l2headersize_47238 l2headersize 0 47238 NULL
98523 +options_write_47243 options_write 3 47243 NULL
98524 +portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
98525 +da9052_disable_irq_nosync_47260 da9052_disable_irq_nosync 2 47260 NULL
98526 +ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
98527 +tty_audit_log_47280 tty_audit_log 8 47280 NULL
98528 +gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
98529 +vsnprintf_47291 vsnprintf 0 47291 NULL
98530 +tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
98531 +ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
98532 +avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
98533 +find_first_zero_bit_le_47369 find_first_zero_bit_le 2 47369 NULL
98534 +__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
98535 +trace_options_core_read_47390 trace_options_core_read 3 47390 NULL
98536 +nametbl_list_47391 nametbl_list 2 47391 NULL
98537 +dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
98538 +pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
98539 +ocfs2_resv_end_47408 ocfs2_resv_end 0 47408 NULL
98540 +crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
98541 +vzalloc_47421 vzalloc 1 47421 NULL
98542 +posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
98543 +newpart_47485 newpart 6 47485 NULL
98544 +core_sys_select_47494 core_sys_select 1 47494 NULL
98545 +alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
98546 +unlink_simple_47506 unlink_simple 3 47506 NULL
98547 +ufs_inode_getblock_47512 ufs_inode_getblock 4 47512 NULL
98548 +snd_pcm_resume_47530 snd_pcm_resume 0 47530 NULL
98549 +vscnprintf_47533 vscnprintf 0-2 47533 NULL nohasharray
98550 +process_vm_rw_47533 process_vm_rw 3-5 47533 &vscnprintf_47533
98551 +oz_events_read_47535 oz_events_read 3 47535 NULL
98552 +ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
98553 +cycx_setup_47562 cycx_setup 4 47562 NULL
98554 +read_ldt_47570 read_ldt 2 47570 NULL
98555 +pci_iomap_47575 pci_iomap 3 47575 NULL
98556 +rpipe_get_idx_47579 rpipe_get_idx 2 47579 NULL
98557 +ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
98558 +wm831x_gpio_direction_out_47607 wm831x_gpio_direction_out 2 47607 NULL
98559 +sctp_ssnmap_new_47608 sctp_ssnmap_new 1-2 47608 NULL
98560 +uea_request_47613 uea_request 4 47613 NULL
98561 +cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
98562 +tps65217_clear_bits_47619 tps65217_clear_bits 2 47619 NULL
98563 +twl4030_clear_set_47624 twl4030_clear_set 4 47624 NULL
98564 +irq_set_chip_47638 irq_set_chip 1 47638 NULL
98565 +__build_packet_message_47643 __build_packet_message 3-9 47643 NULL
98566 +irq_linear_revmap_47682 irq_linear_revmap 0 47682 NULL
98567 +snd_pcm_info_47699 snd_pcm_info 0 47699 NULL
98568 +bits_to_user_47733 bits_to_user 2-3 47733 NULL
98569 +carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
98570 +ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
98571 +mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
98572 +ext3_find_near_47752 ext3_find_near 0 47752 NULL
98573 +alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
98574 +irq_domain_legacy_revmap_47765 irq_domain_legacy_revmap 0-2 47765 NULL
98575 +i915_wedged_write_47771 i915_wedged_write 3 47771 NULL
98576 +uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
98577 +error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
98578 +posix_acl_fix_xattr_from_user_47793 posix_acl_fix_xattr_from_user 2 47793 NULL
98579 +stmmac_set_bfsize_47834 stmmac_set_bfsize 0 47834 NULL
98580 +__pcf50633_irq_mask_set_47847 __pcf50633_irq_mask_set 2 47847 NULL
98581 +ubifs_unpack_nnode_47866 ubifs_unpack_nnode 0 47866 NULL
98582 +vhci_read_47878 vhci_read 3 47878 NULL
98583 +keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
98584 +osd_req_read_sg_47905 osd_req_read_sg 5 47905 NULL
98585 +timeout_read_47915 timeout_read 3 47915 NULL
98586 +comedi_write_47926 comedi_write 3 47926 NULL
98587 +lp8788_irq_map_47964 lp8788_irq_map 2 47964 NULL
98588 +iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 NULL nohasharray
98589 +mempool_resize_47983 mempool_resize 2 47983 &iwl_dbgfs_ucode_tracing_read_47983
98590 +pnpacpi_parse_allocated_irqresource_47986 pnpacpi_parse_allocated_irqresource 2 47986 NULL
98591 +dbg_port_buf_47990 dbg_port_buf 2 47990 NULL
98592 +ib_umad_write_47993 ib_umad_write 3 47993 NULL
98593 +ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
98594 +bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
98595 +pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
98596 +wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
98597 +posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
98598 +mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
98599 +skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
98600 +radio_isa_common_probe_48107 radio_isa_common_probe 3 48107 NULL
98601 +vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
98602 +rtsx_read_cfg_seq_48139 rtsx_read_cfg_seq 5-3 48139 NULL
98603 +set_discoverable_48141 set_discoverable 4 48141 NULL
98604 +dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
98605 +bitmap_onto_48152 bitmap_onto 4 48152 NULL
98606 +isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
98607 +c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
98608 +ocfs2_find_next_zero_bit_unaligned_48170 ocfs2_find_next_zero_bit_unaligned 2-3 48170 NULL
98609 +alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
98610 +init_ipath_48187 init_ipath 1 48187 NULL nohasharray
98611 +ieee80211_send_auth_48187 ieee80211_send_auth 6 48187 &init_ipath_48187
98612 +snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
98613 +is_block_in_journal_48223 is_block_in_journal 3 48223 NULL
98614 +uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
98615 +nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
98616 +read_file_recv_48232 read_file_recv 3 48232 NULL
98617 +unaccount_shadowed_48233 unaccount_shadowed 2 48233 NULL nohasharray
98618 +blk_rq_pos_48233 blk_rq_pos 0 48233 &unaccount_shadowed_48233
98619 +nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
98620 +cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
98621 +send_set_info_48288 send_set_info 7 48288 NULL
98622 +set_disc_pwup_pfs_48300 set_disc_pwup_pfs 3 48300 NULL
98623 +lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
98624 +timblogiw_read_48305 timblogiw_read 3 48305 NULL
98625 +hash_setkey_48310 hash_setkey 3 48310 NULL
98626 +__alloc_fd_48356 __alloc_fd 2 48356 NULL
98627 +skb_add_data_48363 skb_add_data 3 48363 NULL
98628 +tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
98629 +lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
98630 +snd_power_wait_48422 snd_power_wait 0 48422 NULL
98631 +pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
98632 +nfs4_alloc_pages_48426 nfs4_alloc_pages 1 48426 NULL
98633 +tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
98634 +r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
98635 +send_control_msg_48498 send_control_msg 6 48498 NULL
98636 +mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
98637 +count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
98638 +diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
98639 +brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
98640 +phantom_get_free_48514 phantom_get_free 0 48514 NULL
98641 +wiimote_hid_send_48528 wiimote_hid_send 3 48528 NULL
98642 +ext3_splice_branch_48531 ext3_splice_branch 6 48531 NULL
98643 +named_distribute_48544 named_distribute 4 48544 NULL
98644 +raid10_size_48571 raid10_size 0-2-3 48571 NULL
98645 +ext_sd_execute_read_data_48589 ext_sd_execute_read_data 9 48589 NULL
98646 +ufs_dtogd_48616 ufs_dtogd 0-2 48616 NULL
98647 +do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
98648 +mtd_read_48655 mtd_read 0 48655 NULL
98649 +lc_create_48662 lc_create 3 48662 NULL
98650 +aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
98651 +sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL nohasharray
98652 +sys_setgroups_48668 sys_setgroups 1 48668 &sm501_create_subdev_48668
98653 +altera_drscan_48698 altera_drscan 2 48698 NULL
98654 +kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
98655 +ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
98656 +ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
98657 +l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL
98658 +lua_sysfs_write_48797 lua_sysfs_write 6 48797 NULL
98659 +il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
98660 +twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
98661 +atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
98662 +azx_get_position_48841 azx_get_position 0 48841 NULL
98663 +vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
98664 +viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL
98665 +__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
98666 +sys_setgroups16_48882 sys_setgroups16 1 48882 NULL
98667 +get_num_ops_48886 get_num_ops 0 48886 NULL
98668 +ext2_alloc_branch_48889 ext2_alloc_branch 4 48889 NULL
98669 +crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
98670 +xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
98671 +msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
98672 +gdth_isa_probe_one_48925 gdth_isa_probe_one 1 48925 NULL nohasharray
98673 +snd_pcm_update_hw_ptr_48925 snd_pcm_update_hw_ptr 0 48925 &gdth_isa_probe_one_48925
98674 +sep_crypto_dma_48937 sep_crypto_dma 0 48937 NULL
98675 +event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
98676 +nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
98677 +batadv_orig_hash_del_if_48972 batadv_orig_hash_del_if 2 48972 NULL
98678 +_alloc_set_attr_list_48991 _alloc_set_attr_list 4 48991 NULL
98679 +rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
98680 +sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
98681 +xd_rw_49020 xd_rw 3-4 49020 NULL
98682 +transient_status_49027 transient_status 4 49027 NULL
98683 +ubi_read_49061 ubi_read 0 49061 NULL
98684 +tps65910_reg_write_49066 tps65910_reg_write 2 49066 NULL
98685 +calc_layout_49074 calc_layout 4 49074 NULL
98686 +vmx_set_msr_49090 vmx_set_msr 3 49090 NULL
98687 +scsi_register_49094 scsi_register 2 49094 NULL
98688 +compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
98689 +xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL
98690 +pt_read_49136 pt_read 3 49136 NULL
98691 +tipc_multicast_49144 tipc_multicast 5 49144 NULL
98692 +atyfb_setup_generic_49151 atyfb_setup_generic 3 49151 NULL
98693 +ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
98694 +f2fs_acl_count_49155 f2fs_acl_count 0-1 49155 NULL
98695 +ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
98696 +ext4_free_clusters_after_init_49174 ext4_free_clusters_after_init 2 49174 NULL
98697 +dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
98698 +iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
98699 +il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
98700 +do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
98701 +pcf50633_gpio_invert_set_49256 pcf50633_gpio_invert_set 2 49256 NULL
98702 +hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
98703 +ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
98704 +osd_req_add_get_attr_list_49278 osd_req_add_get_attr_list 3 49278 NULL
98705 +rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
98706 +uio_read_49300 uio_read 3 49300 NULL
98707 +ocfs2_resmap_find_free_bits_49301 ocfs2_resmap_find_free_bits 3 49301 NULL
98708 +fwtty_port_handler_49327 fwtty_port_handler 9 49327 NULL
98709 +srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-3-4 49330 NULL
98710 +cfpkt_setlen_49343 cfpkt_setlen 2 49343 NULL
98711 +joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
98712 +ocfs2_remove_btree_range_49370 ocfs2_remove_btree_range 4-5-3 49370 NULL
98713 +px_raw_event_49371 px_raw_event 4 49371 NULL
98714 +iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
98715 +applesmc_create_nodes_49392 applesmc_create_nodes 2 49392 NULL
98716 +rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
98717 +tnode_alloc_49407 tnode_alloc 1 49407 NULL
98718 +samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
98719 +md_domain_init_49432 md_domain_init 2 49432 NULL
98720 +compat_do_msg_fill_49440 compat_do_msg_fill 3 49440 NULL
98721 +agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
98722 +xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
98723 +savu_sysfs_read_49473 savu_sysfs_read 6 49473 NULL
98724 +ieee80211_ie_split_49474 ieee80211_ie_split 0-5 49474 NULL
98725 +isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
98726 +emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL
98727 +acpi_os_ioremap_49523 acpi_os_ioremap 1-2 49523 NULL
98728 +wm831x_dcdc_set_mode_int_49546 wm831x_dcdc_set_mode_int 2 49546 NULL
98729 +smk_write_access_49561 smk_write_access 3 49561 NULL
98730 +ntfs_malloc_nofs_49572 ntfs_malloc_nofs 1 49572 NULL
98731 +alloc_chunk_49575 alloc_chunk 1 49575 NULL
98732 +sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
98733 +isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
98734 +heap_init_49617 heap_init 2 49617 NULL
98735 +smk_write_doi_49621 smk_write_doi 3 49621 NULL
98736 +btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
98737 +svm_set_msr_49643 svm_set_msr 3 49643 NULL
98738 +aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
98739 +sys_gethostname_49698 sys_gethostname 2 49698 NULL
98740 +cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
98741 +sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
98742 +sys_fsetxattr_49736 sys_fsetxattr 4 49736 NULL
98743 +check_frame_49741 check_frame 0 49741 NULL
98744 +zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
98745 +btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
98746 +key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
98747 +fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
98748 +isku_sysfs_write_49767 isku_sysfs_write 6 49767 NULL
98749 +ceph_osdc_readpages_49789 ceph_osdc_readpages 10-4 49789 NULL
98750 +nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
98751 +arch_gnttab_map_status_49812 arch_gnttab_map_status 3 49812 NULL
98752 +ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
98753 +add_uuid_49831 add_uuid 4 49831 NULL
98754 +ath6kl_fwlog_block_read_49836 ath6kl_fwlog_block_read 3 49836 NULL
98755 +__btrfs_map_block_49839 __btrfs_map_block 3 49839 NULL
98756 +twl4030_write_49846 twl4030_write 2 49846 NULL
98757 +scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
98758 +timeradd_entry_49850 timeradd_entry 3 49850 NULL
98759 +sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
98760 +ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
98761 +__cow_file_range_49901 __cow_file_range 5 49901 NULL
98762 +__copy_from_user_inatomic_nocache_49921 __copy_from_user_inatomic_nocache 3 49921 NULL
98763 +batadv_tt_realloc_packet_buff_49960 batadv_tt_realloc_packet_buff 4 49960 NULL
98764 +b43legacy_pio_read_49978 b43legacy_pio_read 0 49978 NULL
98765 +ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
98766 +sta2x11_swiotlb_alloc_coherent_49994 sta2x11_swiotlb_alloc_coherent 2 49994 NULL
98767 +l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
98768 +__module_alloc_50004 __module_alloc 1 50004 NULL
98769 +dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
98770 +ptrace_readdata_50020 ptrace_readdata 2-4 50020 NULL
98771 +isdn_read_50021 isdn_read 3 50021 NULL
98772 +rbd_req_write_50041 rbd_req_write 4-5 50041 NULL
98773 +alloc_ebda_hpc_50046 alloc_ebda_hpc 1-2 50046 NULL
98774 +vmw_surface_destroy_size_50072 vmw_surface_destroy_size 0 50072 NULL
98775 +arch_setup_ht_irq_50073 arch_setup_ht_irq 1 50073 NULL
98776 +dev_set_alias_50084 dev_set_alias 3 50084 NULL
98777 +pcpu_get_vm_areas_50085 pcpu_get_vm_areas 3 50085 NULL
98778 +sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
98779 +altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
98780 +read_file_slot_50111 read_file_slot 3 50111 NULL
98781 +copy_items_50140 copy_items 6 50140 NULL
98782 +tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
98783 +set_cmd_header_50155 set_cmd_header 0 50155 NULL
98784 +reiserfs_bmap_count_50160 reiserfs_bmap_count 0 50160 NULL
98785 +aac_nark_ioremap_50163 aac_nark_ioremap 2 50163 NULL nohasharray
98786 +kmalloc_node_50163 kmalloc_node 1 50163 &aac_nark_ioremap_50163
98787 +rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
98788 +odev_update_50169 odev_update 2 50169 NULL
98789 +ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 NULL nohasharray
98790 +ubi_resize_volume_50172 ubi_resize_volume 2 50172 &ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172
98791 +ib_send_cm_drep_50186 ib_send_cm_drep 3 50186 NULL
98792 +cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL
98793 +rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
98794 +ieee80211_skb_resize_50211 ieee80211_skb_resize 3 50211 NULL
98795 +mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
98796 +sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
98797 +afs_extract_data_50261 afs_extract_data 5 50261 NULL
98798 +rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
98799 +soc_codec_reg_show_50302 soc_codec_reg_show 0 50302 NULL
98800 +soc_camera_read_50319 soc_camera_read 3 50319 NULL
98801 +do_launder_page_50329 do_launder_page 0 50329 NULL
98802 +nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
98803 +lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
98804 +ocfs2_block_to_cluster_group_50337 ocfs2_block_to_cluster_group 2 50337 NULL nohasharray
98805 +snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 &ocfs2_block_to_cluster_group_50337
98806 +roccat_common2_send_with_status_50343 roccat_common2_send_with_status 4 50343 NULL
98807 +tpm_read_50344 tpm_read 3 50344 NULL
98808 +kvm_arch_create_memslot_50354 kvm_arch_create_memslot 2 50354 NULL
98809 +isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
98810 +unpack_u16_chunk_50357 unpack_u16_chunk 0 50357 NULL
98811 +xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
98812 +roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
98813 +sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
98814 +l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
98815 +iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
98816 +btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL
98817 +pgctrl_write_50453 pgctrl_write 3 50453 NULL
98818 +tps65217_update_bits_50472 tps65217_update_bits 2 50472 NULL
98819 +cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
98820 +mei_io_cb_alloc_req_buf_50493 mei_io_cb_alloc_req_buf 2 50493 NULL
98821 +pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
98822 +fwnet_receive_packet_50537 fwnet_receive_packet 9 50537 NULL
98823 +ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
98824 +pcf50633_reg_set_bit_mask_50544 pcf50633_reg_set_bit_mask 2 50544 NULL
98825 +hme_read_desc32_50574 hme_read_desc32 0 50574 NULL
98826 +fat_readpages_50582 fat_readpages 4 50582 NULL
98827 +iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
98828 +build_inv_iommu_pages_50589 build_inv_iommu_pages 2-3 50589 NULL
98829 +rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
98830 +__ffs_50625 __ffs 0 50625 NULL
98831 +simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
98832 +ath6kl_tm_rx_event_50664 ath6kl_tm_rx_event 3 50664 NULL nohasharray
98833 +sys_readv_50664 sys_readv 3 50664 &ath6kl_tm_rx_event_50664
98834 +bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
98835 +ext2_try_to_allocate_with_rsv_50669 ext2_try_to_allocate_with_rsv 4-2 50669 NULL
98836 +btmrvl_psstate_read_50683 btmrvl_psstate_read 3 50683 NULL
98837 +xfs_growfs_get_hdr_buf_50697 xfs_growfs_get_hdr_buf 3 50697 NULL
98838 +blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
98839 +__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
98840 +skb_padto_50759 skb_padto 2 50759 NULL
98841 +ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
98842 +tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL
98843 +bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
98844 +tpm_write_50798 tpm_write 3 50798 NULL
98845 +tun_do_read_50800 tun_do_read 4 50800 NULL
98846 +write_flush_50803 write_flush 3 50803 NULL
98847 +dvb_play_50814 dvb_play 3 50814 NULL
98848 +dpcm_show_state_50827 dpcm_show_state 0 50827 NULL
98849 +acpi_ev_install_gpe_block_50829 acpi_ev_install_gpe_block 2 50829 NULL
98850 +pstore_mkfile_50830 pstore_mkfile 5 50830 NULL
98851 +dma_attach_50831 dma_attach 6-7 50831 NULL
98852 +SetArea_50835 SetArea 4 50835 NULL nohasharray
98853 +create_mem_extents_50835 create_mem_extents 0 50835 &SetArea_50835
98854 +self_check_write_50856 self_check_write 5 50856 NULL
98855 +carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
98856 +netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
98857 +osd_req_write_sg_50908 osd_req_write_sg 5 50908 NULL
98858 +xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
98859 +blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
98860 +hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
98861 +chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
98862 +sock_bindtodevice_50942 sock_bindtodevice 3 50942 NULL
98863 +ocfs2_add_refcount_flag_50952 ocfs2_add_refcount_flag 6 50952 NULL
98864 +iwl_statistics_flag_50981 iwl_statistics_flag 0-3 50981 NULL
98865 +timeout_write_50991 timeout_write 3 50991 NULL
98866 +wm831x_irq_map_50995 wm831x_irq_map 2 50995 NULL nohasharray
98867 +wm8903_gpio_direction_out_50995 wm8903_gpio_direction_out 2 50995 &wm831x_irq_map_50995
98868 +proc_write_51003 proc_write 3 51003 NULL
98869 +lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
98870 +ntfs_attr_find_51028 ntfs_attr_find 0 51028 NULL nohasharray
98871 +fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 &ntfs_attr_find_51028
98872 +BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
98873 +dump_midi_51040 dump_midi 3 51040 NULL
98874 +srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
98875 +do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
98876 +wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
98877 +jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
98878 +__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
98879 +dgrp_net_read_51113 dgrp_net_read 3 51113 NULL
98880 +lm3533_als_get_current_51120 lm3533_als_get_current 2 51120 NULL
98881 +nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
98882 +alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
98883 +simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
98884 +xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
98885 +compat_sys_pwritev64_51151 compat_sys_pwritev64 3 51151 NULL
98886 +snd_pcm_unlink_51210 snd_pcm_unlink 0 51210 NULL
98887 +blk_bio_map_sg_51213 blk_bio_map_sg 0 51213 NULL
98888 +nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
98889 +snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
98890 +tipc_send_51238 tipc_send 4 51238 NULL
98891 +drm_property_create_51239 drm_property_create 4 51239 NULL
98892 +st_read_51251 st_read 3 51251 NULL
98893 +compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
98894 +dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
98895 +ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
98896 +zone_reclaimable_pages_51283 zone_reclaimable_pages 0 51283 NULL
98897 +pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
98898 +fd_do_readv_51297 fd_do_readv 3 51297 NULL
98899 +bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
98900 +alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
98901 +ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
98902 +alloc_smp_req_51337 alloc_smp_req 1 51337 NULL nohasharray
98903 +compat_arch_ptrace_51337 compat_arch_ptrace 3-4 51337 &alloc_smp_req_51337
98904 +ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
98905 +ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
98906 +radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
98907 +ieee80211_wx_set_gen_ie_51399 ieee80211_wx_set_gen_ie 3 51399 NULL
98908 +ceph_sync_read_51410 ceph_sync_read 3 51410 NULL
98909 +blk_register_region_51424 blk_register_region 1-2 51424 NULL
98910 +mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
98911 +ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
98912 +print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
98913 +____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
98914 +xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
98915 +ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
98916 +__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
98917 +icmp_manip_pkt_51560 icmp_manip_pkt 4 51560 NULL
98918 +ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
98919 +aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
98920 +raw_ioctl_51607 raw_ioctl 3 51607 NULL
98921 +table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
98922 +dns_resolve_server_name_to_ip_51632 dns_resolve_server_name_to_ip 0 51632 NULL
98923 +sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
98924 +iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
98925 +get_new_cssid_51665 get_new_cssid 2 51665 NULL
98926 +ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
98927 +sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
98928 +sfi_sysfs_install_table_51688 sfi_sysfs_install_table 1 51688 NULL
98929 +sel_write_access_51704 sel_write_access 3 51704 NULL
98930 +tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
98931 +drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
98932 +sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
98933 +msg_set_51725 msg_set 3 51725 NULL
98934 +dbg_check_lpt_nodes_51727 dbg_check_lpt_nodes 0 51727 NULL
98935 +hid_parse_report_51737 hid_parse_report 3 51737 NULL
98936 +get_user_pages_fast_51751 get_user_pages_fast 0 51751 NULL
98937 +ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
98938 +if_write_51756 if_write 3 51756 NULL
98939 +ioremap_prot_51764 ioremap_prot 1-2 51764 NULL
98940 +iio_buffer_add_channel_sysfs_51766 iio_buffer_add_channel_sysfs 0 51766 NULL
98941 +__fswab32_51781 __fswab32 0 51781 NULL
98942 +qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
98943 +buffer_from_user_51826 buffer_from_user 3 51826 NULL
98944 +wm2000_write_51834 wm2000_write 2 51834 NULL
98945 +ioread32_51847 ioread32 0 51847 NULL nohasharray
98946 +read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
98947 +do_readv_writev_51849 do_readv_writev 4 51849 NULL
98948 +pointer_size_read_51863 pointer_size_read 3 51863 NULL
98949 +mlx4_alloc_db_from_pgdir_51865 mlx4_alloc_db_from_pgdir 3 51865 NULL
98950 +get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
98951 +user_read_51881 user_read 3 51881 NULL
98952 +dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
98953 +virt_to_phys_51896 virt_to_phys 0 51896 NULL
98954 +iio_read_first_n_sw_rb_51911 iio_read_first_n_sw_rb 2 51911 NULL
98955 +wmi_set_ie_51919 wmi_set_ie 3 51919 NULL
98956 +dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
98957 +__tcp_mtu_to_mss_51938 __tcp_mtu_to_mss 0-2 51938 NULL
98958 +xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
98959 +irq_dispose_mapping_51941 irq_dispose_mapping 1 51941 NULL
98960 +scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
98961 +arizona_free_irq_51969 arizona_free_irq 2 51969 NULL nohasharray
98962 +snd_mask_min_51969 snd_mask_min 0 51969 &arizona_free_irq_51969
98963 +ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
98964 +dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
98965 +skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
98966 +rdmalt_52022 rdmalt 0 52022 NULL
98967 +vxge_rx_alloc_52024 vxge_rx_alloc 3 52024 NULL
98968 +override_release_52032 override_release 2 52032 NULL
98969 +end_port_52042 end_port 0 52042 NULL
98970 +dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
98971 +msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
98972 +dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
98973 +isofs_readpages_52067 isofs_readpages 4 52067 NULL
98974 +nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
98975 +o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
98976 +retry_count_read_52129 retry_count_read 3 52129 NULL
98977 +snd_pcm_channel_info_user_52135 snd_pcm_channel_info_user 0 52135 NULL
98978 +hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL nohasharray
98979 +ext2_alloc_blocks_52145 ext2_alloc_blocks 2 52145 &hysdn_conf_write_52145
98980 +htable_size_52148 htable_size 0-1 52148 NULL
98981 +__le16_to_cpup_52155 __le16_to_cpup 0 52155 NULL nohasharray
98982 +smk_write_load2_52155 smk_write_load2 3 52155 &__le16_to_cpup_52155
98983 +ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
98984 +mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
98985 +print_prefix_52176 print_prefix 0 52176 NULL
98986 +proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
98987 +do_dmabuf_dirty_ldu_52241 do_dmabuf_dirty_ldu 6 52241 NULL
98988 +pm80x_request_irq_52250 pm80x_request_irq 2 52250 NULL
98989 +mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
98990 +shrink_slab_52261 shrink_slab 2-3 52261 NULL
98991 +sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
98992 +mpol_to_str_52293 mpol_to_str 2 52293 NULL
98993 +ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
98994 +read_file_reset_52310 read_file_reset 3 52310 NULL
98995 +ssd1307fb_write_52315 ssd1307fb_write 3 52315 NULL
98996 +request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL
98997 +hwflags_read_52318 hwflags_read 3 52318 NULL
98998 +snd_pcm_hw_free_52327 snd_pcm_hw_free 0 52327 NULL
98999 +ntfs_rl_split_52328 ntfs_rl_split 2-4 52328 NULL
99000 +test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
99001 +bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
99002 +copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
99003 +iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL
99004 +isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
99005 +jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
99006 +aer_inject_write_52399 aer_inject_write 3 52399 NULL
99007 +pcf50633_reg_clear_bits_52407 pcf50633_reg_clear_bits 2 52407 NULL
99008 +aac_rx_ioremap_52410 aac_rx_ioremap 2 52410 NULL
99009 +cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
99010 +line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
99011 +delay_status_52431 delay_status 5 52431 NULL
99012 +ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
99013 +nl80211_send_mgmt_tx_status_52445 nl80211_send_mgmt_tx_status 5 52445 NULL
99014 +ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1-2 52477 NULL
99015 +ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
99016 +fd_do_rw_52495 fd_do_rw 3 52495 NULL nohasharray
99017 +skb_cow_head_52495 skb_cow_head 2 52495 &fd_do_rw_52495
99018 +int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
99019 +pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
99020 +dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
99021 +ocfs2_make_right_split_rec_52562 ocfs2_make_right_split_rec 3 52562 NULL
99022 +emit_code_52583 emit_code 0-3 52583 NULL
99023 +snd_pcm_sw_params_52594 snd_pcm_sw_params 0 52594 NULL
99024 +brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
99025 +ntfs_get_nr_significant_bytes_52688 ntfs_get_nr_significant_bytes 0 52688 NULL
99026 +nvd0_disp_pioc_create__52693 nvd0_disp_pioc_create_ 5 52693 NULL
99027 +nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
99028 +cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
99029 +blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
99030 +relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
99031 +carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
99032 +alloc_extent_buffer_52824 alloc_extent_buffer 3 52824 NULL
99033 +pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
99034 +ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
99035 +mon_bin_get_event_52863 mon_bin_get_event 4 52863 NULL
99036 +twlreg_write_52880 twlreg_write 3 52880 NULL
99037 +pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
99038 +cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
99039 +kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
99040 +arizona_request_irq_52908 arizona_request_irq 2 52908 NULL
99041 +__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
99042 +iblock_get_bio_52936 iblock_get_bio 3 52936 NULL nohasharray
99043 +__iio_device_attr_init_52936 __iio_device_attr_init 0 52936 &iblock_get_bio_52936
99044 +__nodes_remap_52951 __nodes_remap 5 52951 NULL
99045 +send_packet_52960 send_packet 4 52960 NULL
99046 +ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
99047 +compat_sock_ioctl_52964 compat_sock_ioctl 3 52964 NULL
99048 +tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
99049 +num_node_state_52989 num_node_state 0 52989 NULL
99050 +batadv_check_management_packet_52993 batadv_check_management_packet 3 52993 NULL
99051 +efivarfs_file_write_53000 efivarfs_file_write 3 53000 NULL
99052 +btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
99053 +tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
99054 +ext4_meta_bg_first_group_53031 ext4_meta_bg_first_group 0-2 53031 NULL
99055 +bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
99056 +regcache_lzo_block_count_53056 regcache_lzo_block_count 0 53056 NULL
99057 +cfi_read_query_53066 cfi_read_query 0 53066 NULL
99058 +mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
99059 +qib_resize_cq_53090 qib_resize_cq 2 53090 NULL
99060 +verity_status_53120 verity_status 5 53120 NULL
99061 +line6_dumpreq_initbuf_53123 line6_dumpreq_initbuf 3 53123 NULL
99062 +brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
99063 +ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
99064 +clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
99065 +mtdoops_erase_block_53206 mtdoops_erase_block 2 53206 NULL
99066 +tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
99067 +xfs_trans_read_buf_map_53258 xfs_trans_read_buf_map 5 53258 NULL
99068 +wil_write_file_ssid_53266 wil_write_file_ssid 3 53266 NULL
99069 +btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
99070 +lirc_buffer_init_53282 lirc_buffer_init 3-2 53282 NULL
99071 +batadv_interface_rx_53325 batadv_interface_rx 4 53325 NULL
99072 +gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
99073 +vm_mmap_53339 vm_mmap 0 53339 NULL
99074 +sock_setbindtodevice_53369 sock_setbindtodevice 3 53369 NULL
99075 +get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
99076 +iwl_pcie_txq_alloc_53413 iwl_pcie_txq_alloc 3 53413 NULL
99077 +wm8996_gpio_set_53421 wm8996_gpio_set 2 53421 NULL
99078 +isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
99079 +mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
99080 +apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
99081 +acpi_tb_parse_root_table_53455 acpi_tb_parse_root_table 1 53455 NULL
99082 +n2_run_53459 n2_run 3 53459 NULL
99083 +paging64_prefetch_gpte_53468 paging64_prefetch_gpte 4 53468 NULL
99084 +wm831x_write_53469 wm831x_write 2 53469 NULL
99085 +rds_tcp_data_recv_53476 rds_tcp_data_recv 3 53476 NULL
99086 +iowarrior_read_53483 iowarrior_read 3 53483 NULL
99087 +osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
99088 +do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
99089 +snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
99090 +dbAllocNext_53506 dbAllocNext 0 53506 NULL
99091 +ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
99092 +check_acl_53512 check_acl 0 53512 NULL
99093 +set_registers_53582 set_registers 3 53582 NULL
99094 +cifs_utf16_bytes_53593 cifs_utf16_bytes 0 53593 NULL
99095 +__readw_53594 __readw 0 53594 NULL
99096 +___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
99097 +xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
99098 +ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
99099 +nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
99100 +_preload_range_53676 _preload_range 2-3 53676 NULL
99101 +fuse_fill_write_pages_53682 fuse_fill_write_pages 4 53682 NULL
99102 +v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
99103 +igb_alloc_q_vector_53690 igb_alloc_q_vector 4-6 53690 NULL nohasharray
99104 +bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 &igb_alloc_q_vector_53690
99105 +find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
99106 +bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
99107 +__ocfs2_resv_find_window_53721 __ocfs2_resv_find_window 3 53721 NULL
99108 +wdm_write_53735 wdm_write 3 53735 NULL
99109 +ext3_try_to_allocate_with_rsv_53737 ext3_try_to_allocate_with_rsv 5-3 53737 NULL
99110 +da9052_disable_irq_53745 da9052_disable_irq 2 53745 NULL
99111 +lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 NULL nohasharray
99112 +amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 &lpfc_idiag_queacc_read_qe_53755
99113 +ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
99114 +__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
99115 +__tty_alloc_driver_53799 __tty_alloc_driver 1 53799 NULL
99116 +regmap_raw_write_53803 regmap_raw_write 4-2 53803 NULL
99117 +lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
99118 +nls_nullsize_53815 nls_nullsize 0 53815 NULL
99119 +cpumask_next_zero_53835 cpumask_next_zero 1 53835 NULL
99120 +pms_read_53873 pms_read 3 53873 NULL
99121 +ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
99122 +ocfs2_rm_xattr_cluster_53900 ocfs2_rm_xattr_cluster 5-4-3 53900 NULL
99123 +proc_file_read_53905 proc_file_read 3 53905 NULL
99124 +azx_via_get_position_53916 azx_via_get_position 0 53916 NULL
99125 +ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 4 53938 NULL
99126 +mthca_setup_cmd_doorbells_53954 mthca_setup_cmd_doorbells 2 53954 NULL
99127 +mlx4_num_eq_uar_53965 mlx4_num_eq_uar 0 53965 NULL
99128 +idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
99129 +__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
99130 +ieee80211_if_fmt_dot11MeshHWMPperrMinInterval_53998 ieee80211_if_fmt_dot11MeshHWMPperrMinInterval 3 53998 NULL
99131 +snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 4-2-5 54018 NULL
99132 +cmpk_message_handle_tx_54024 cmpk_message_handle_tx 4 54024 NULL
99133 +ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
99134 +pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
99135 +nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
99136 +rproc_state_read_54057 rproc_state_read 3 54057 NULL
99137 +_malloc_54077 _malloc 1 54077 NULL
99138 +bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
99139 +altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL
99140 +create_xattr_54106 create_xattr 5 54106 NULL
99141 +strn_len_54122 strn_len 0 54122 NULL
99142 +isku_receive_54130 isku_receive 4 54130 NULL
99143 +isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
99144 +i2400m_zrealloc_2x_54166 i2400m_zrealloc_2x 3 54166 NULL nohasharray
99145 +memcpy_toiovec_54166 memcpy_toiovec 3 54166 &i2400m_zrealloc_2x_54166
99146 +nouveau_falcon_create__54169 nouveau_falcon_create_ 8 54169 NULL
99147 +acpi_os_read_memory_54186 acpi_os_read_memory 1-3 54186 NULL
99148 +__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
99149 +_format_mac_addr_54229 _format_mac_addr 2-0 54229 NULL
99150 +pi_read_regr_54231 pi_read_regr 0 54231 NULL
99151 +reada_add_block_54247 reada_add_block 2 54247 NULL
99152 +xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
99153 +ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
99154 +wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
99155 +audio_write_54261 audio_write 4 54261 &wusb_prf_54261
99156 +mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
99157 +ubi_calc_data_len_54279 ubi_calc_data_len 0-3 54279 NULL
99158 +altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
99159 +dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
99160 +sprintf_54306 sprintf 0 54306 NULL
99161 +irq_domain_associate_many_54307 irq_domain_associate_many 2 54307 NULL
99162 +br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
99163 +__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
99164 +__get_free_pages_54352 __get_free_pages 0 54352 NULL nohasharray
99165 +_osd_realloc_seg_54352 _osd_realloc_seg 3 54352 &__get_free_pages_54352
99166 +tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
99167 +read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
99168 +vfs_readlink_54368 vfs_readlink 3 54368 NULL
99169 +do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
99170 +intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
99171 +ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
99172 +gart_unmap_page_54379 gart_unmap_page 2-3 54379 NULL
99173 +snd_pcm_oss_read2_54387 snd_pcm_oss_read2 0-3 54387 NULL
99174 +i386_mmap_check_54388 i386_mmap_check 0 54388 NULL
99175 +__do_krealloc_54389 __do_krealloc 2 54389 NULL
99176 +iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
99177 +simple_strtoull_54493 simple_strtoull 0 54493 NULL
99178 +btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
99179 +cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
99180 +rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
99181 +viacam_read_54526 viacam_read 3 54526 NULL
99182 +unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
99183 +setsockopt_54539 setsockopt 5 54539 NULL
99184 +mwifiex_usb_submit_rx_urb_54558 mwifiex_usb_submit_rx_urb 2 54558 NULL
99185 +nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
99186 +fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
99187 +nvme_npages_54601 nvme_npages 0-1 54601 NULL
99188 +fwSendNullPacket_54618 fwSendNullPacket 2 54618 NULL
99189 +irq_of_parse_and_map_54646 irq_of_parse_and_map 0 54646 NULL
99190 +irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
99191 +dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
99192 +twl6030_interrupt_mask_54659 twl6030_interrupt_mask 2 54659 NULL
99193 +bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
99194 +vring_new_virtqueue_54673 vring_new_virtqueue 2 54673 NULL
99195 +evm_read_key_54674 evm_read_key 3 54674 NULL
99196 +resource_string_54699 resource_string 0 54699 NULL
99197 +platform_get_irq_byname_54700 platform_get_irq_byname 0 54700 NULL
99198 +rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
99199 +_add_sg_continuation_descriptor_54721 _add_sg_continuation_descriptor 3 54721 NULL
99200 +ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
99201 +kzalloc_54740 kzalloc 1 54740 NULL
99202 +wep_iv_read_54744 wep_iv_read 3 54744 NULL
99203 +iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
99204 +batadv_iv_ogm_aggregate_new_54761 batadv_iv_ogm_aggregate_new 2 54761 NULL
99205 +adis16480_show_firmware_date_54762 adis16480_show_firmware_date 3 54762 NULL
99206 +flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
99207 +domain_init_54797 domain_init 2 54797 NULL
99208 +ext3_find_goal_54801 ext3_find_goal 0 54801 NULL
99209 +nfsd_write_54809 nfsd_write 6 54809 NULL
99210 +aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 NULL nohasharray
99211 +crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 &aes_decrypt_fail_read_54815
99212 +generic_perform_write_54832 generic_perform_write 3 54832 NULL
99213 +write_rio_54837 write_rio 3 54837 NULL
99214 +nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 NULL nohasharray
99215 +ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 &nouveau_engctx_create__54839
99216 +ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
99217 +printer_read_54851 printer_read 3 54851 NULL
99218 +alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
99219 +broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
99220 +prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
99221 +tcf_csum_ipv6_tcp_54877 tcf_csum_ipv6_tcp 4 54877 NULL
99222 +lm3533_led_get_lv_reg_54900 lm3533_led_get_lv_reg 0-2 54900 NULL
99223 +iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL
99224 +btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
99225 +mxms_structlen_54939 mxms_structlen 0 54939 NULL
99226 +add_port_54941 add_port 2 54941 NULL
99227 +virtblk_add_buf_wait_54943 virtblk_add_buf_wait 3-4 54943 NULL
99228 +wl12xx_cmd_build_probe_req_54946 wl12xx_cmd_build_probe_req 6-8 54946 NULL
99229 +ath9k_dump_btcoex_54949 ath9k_dump_btcoex 0 54949 NULL
99230 +c4_add_card_54968 c4_add_card 3 54968 NULL
99231 +iwl_pcie_dump_fh_54975 iwl_pcie_dump_fh 0 54975 NULL
99232 +__proc_file_read_54978 __proc_file_read 3 54978 NULL
99233 +ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
99234 +rds_ib_inc_copy_to_user_55007 rds_ib_inc_copy_to_user 3 55007 NULL
99235 +cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
99236 +error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
99237 +__netdev_alloc_skb_ip_align_55067 __netdev_alloc_skb_ip_align 2 55067 NULL
99238 +apei_exec_run_55075 apei_exec_run 0 55075 NULL
99239 +bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
99240 +snd_pcm_capture_hw_avail_55086 snd_pcm_capture_hw_avail 0 55086 NULL
99241 +rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
99242 +corrupt_data_55120 corrupt_data 0 55120 NULL
99243 +crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
99244 +ocfs2_prepare_refcount_change_for_del_55137 ocfs2_prepare_refcount_change_for_del 3 55137 NULL nohasharray
99245 +filldir_55137 filldir 3 55137 &ocfs2_prepare_refcount_change_for_del_55137
99246 +ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
99247 +ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
99248 +mtd_get_fact_prot_info_55186 mtd_get_fact_prot_info 0 55186 NULL
99249 +sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
99250 +sched_feat_write_55202 sched_feat_write 3 55202 NULL
99251 +ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL nohasharray
99252 +pcf50633_gpio_set_55209 pcf50633_gpio_set 2 55209 &ht40allow_map_read_55209
99253 +ssd1307fb_write_cmd_array_55211 ssd1307fb_write_cmd_array 3 55211 NULL nohasharray
99254 +__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 &ssd1307fb_write_cmd_array_55211
99255 +do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
99256 +dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
99257 +tipc_port_reject_sections_55229 tipc_port_reject_sections 5 55229 NULL
99258 +register_unifi_sdio_55239 register_unifi_sdio 2 55239 NULL
99259 +memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
99260 +ptrace_request_55288 ptrace_request 3 55288 NULL
99261 +rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
99262 +rbd_create_rw_ops_55297 rbd_create_rw_ops 1 55297 NULL
99263 +gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
99264 +__get_vm_area_node_55305 __get_vm_area_node 1 55305 NULL
99265 +vme_user_read_55338 vme_user_read 3 55338 NULL
99266 +sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 NULL nohasharray
99267 +__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 &sctp_datamsg_from_user_55342
99268 +acpi_system_read_event_55362 acpi_system_read_event 3 55362 NULL
99269 +__send_to_port_55383 __send_to_port 3 55383 NULL
99270 +nf_nat_ipv4_manip_pkt_55387 nf_nat_ipv4_manip_pkt 2 55387 NULL
99271 +iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
99272 +alloc_skb_55439 alloc_skb 1 55439 NULL
99273 +__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
99274 +isdnhdlc_decode_55466 isdnhdlc_decode 0 55466 NULL
99275 +cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
99276 +batadv_unicast_push_and_fill_skb_55474 batadv_unicast_push_and_fill_skb 2 55474 NULL
99277 +snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
99278 +i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
99279 +wm5100_gpio_direction_out_55497 wm5100_gpio_direction_out 2 55497 NULL
99280 +ocfs2_rec_clusters_55501 ocfs2_rec_clusters 0 55501 NULL
99281 +ext4_flex_bg_size_55502 ext4_flex_bg_size 0 55502 NULL
99282 +cfpkt_pad_trail_55511 cfpkt_pad_trail 2 55511 NULL nohasharray
99283 +tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 &cfpkt_pad_trail_55511
99284 +ea_get_55522 ea_get 0 55522 NULL
99285 +buffer_size_55534 buffer_size 0 55534 NULL
99286 +set_msr_interception_55538 set_msr_interception 2 55538 NULL
99287 +tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
99288 +add_partition_55588 add_partition 2 55588 NULL
99289 +macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
99290 +selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
99291 +reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 NULL nohasharray
99292 +pktgen_if_write_55628 pktgen_if_write 3 55628 &reiserfs_xattr_get_55628
99293 +dvb_dmxdev_set_buffer_size_55643 dvb_dmxdev_set_buffer_size 2 55643 NULL
99294 +mlx4_buddy_alloc_55647 mlx4_buddy_alloc 2 55647 NULL
99295 +xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
99296 +ib_umad_compat_ioctl_55650 ib_umad_compat_ioctl 3 55650 NULL
99297 +cfg80211_send_rx_assoc_55651 cfg80211_send_rx_assoc 4 55651 NULL
99298 +read_oldmem_55658 read_oldmem 3 55658 NULL
99299 +lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
99300 +il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
99301 +get_info_55681 get_info 3 55681 NULL
99302 +arizona_gpio_direction_in_55690 arizona_gpio_direction_in 2 55690 NULL
99303 +wil_vring_alloc_skb_55703 wil_vring_alloc_skb 4 55703 NULL
99304 +__videobuf_alloc_uncached_55711 __videobuf_alloc_uncached 1 55711 NULL
99305 +pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
99306 +mtdswap_init_55719 mtdswap_init 2 55719 NULL
99307 +__iio_allocate_kfifo_55738 __iio_allocate_kfifo 3-2 55738 NULL
99308 +set_local_name_55757 set_local_name 4 55757 NULL
99309 +btrfs_init_new_buffer_55761 btrfs_init_new_buffer 4 55761 NULL
99310 +strlen_55778 strlen 0 55778 NULL nohasharray
99311 +is_idx_node_in_tnc_55778 is_idx_node_in_tnc 0 55778 &strlen_55778
99312 +req_bio_endio_55786 req_bio_endio 3 55786 NULL nohasharray
99313 +conf_read_55786 conf_read 3 55786 &req_bio_endio_55786
99314 +uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
99315 +btrfs_find_create_tree_block_55812 btrfs_find_create_tree_block 3 55812 NULL
99316 +sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
99317 +ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
99318 +hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
99319 +shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
99320 +hsc_write_55875 hsc_write 3 55875 NULL
99321 +pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
99322 +snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
99323 +sel_read_policy_55947 sel_read_policy 3 55947 NULL
99324 +handle_response_55951 handle_response 5 55951 NULL
99325 +simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
99326 +tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
99327 +ssb_bus_pcmciabus_register_56020 ssb_bus_pcmciabus_register 3 56020 NULL
99328 +nvme_alloc_iod_56027 nvme_alloc_iod 1-2 56027 NULL
99329 +dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
99330 +pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
99331 +usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
99332 +kmem_zalloc_large_56128 kmem_zalloc_large 1 56128 NULL
99333 +sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
99334 +map_addr_56144 map_addr 7 56144 NULL
99335 +rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
99336 +create_irq_nr_56180 create_irq_nr 1 56180 NULL
99337 +ath9k_dump_legacy_btcoex_56194 ath9k_dump_legacy_btcoex 0 56194 NULL
99338 +skb_headroom_56200 skb_headroom 0 56200 NULL
99339 +usb_dump_iad_descriptor_56204 usb_dump_iad_descriptor 0 56204 NULL
99340 +ncp_read_bounce_size_56221 ncp_read_bounce_size 0-1 56221 NULL
99341 +ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
99342 +cp210x_get_config_56229 cp210x_get_config 4 56229 NULL
99343 +do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
99344 +scrub_setup_recheck_block_56245 scrub_setup_recheck_block 4-3 56245 NULL
99345 +fd_copyin_56247 fd_copyin 3 56247 NULL
99346 +sk_rmem_schedule_56255 sk_rmem_schedule 3 56255 NULL
99347 +il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
99348 +ieee80211_if_fmt_user_power_level_56283 ieee80211_if_fmt_user_power_level 3 56283 NULL
99349 +RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
99350 +dvb_aplay_56296 dvb_aplay 3 56296 NULL
99351 +btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
99352 +compat_cdrom_read_audio_56304 compat_cdrom_read_audio 4 56304 NULL
99353 +pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
99354 +journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
99355 +snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
99356 +sixpack_compat_ioctl_56346 sixpack_compat_ioctl 4 56346 NULL
99357 +vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
99358 +tps80031_ext_power_req_config_56353 tps80031_ext_power_req_config 3-4-5 56353 NULL
99359 +iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4 56368 NULL
99360 +dev_read_56369 dev_read 3 56369 NULL
99361 +alloc_dummy_extent_buffer_56374 alloc_dummy_extent_buffer 2 56374 NULL
99362 +snd_pcm_common_ioctl1_56382 snd_pcm_common_ioctl1 0 56382 NULL
99363 +ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
99364 +__get_vm_area_caller_56416 __get_vm_area_caller 1 56416 NULL nohasharray
99365 +acpi_os_write_memory_56416 acpi_os_write_memory 1-3 56416 &__get_vm_area_caller_56416
99366 +store_msg_56417 store_msg 3 56417 NULL
99367 +pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
99368 +ec_dbgfs_cmd_read_56431 ec_dbgfs_cmd_read 3 56431 NULL
99369 +fl_create_56435 fl_create 5 56435 NULL
99370 +gnttab_map_56439 gnttab_map 2 56439 NULL
99371 +cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2 56453 NULL
99372 +set_connectable_56458 set_connectable 4 56458 NULL
99373 +osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
99374 +calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
99375 +crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
99376 +cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
99377 +ip_options_get_56538 ip_options_get 4 56538 NULL
99378 +ocfs2_change_extent_flag_56549 ocfs2_change_extent_flag 5 56549 NULL
99379 +alloc_apertures_56561 alloc_apertures 1 56561 NULL
99380 +rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
99381 +portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
99382 +event_filter_write_56609 event_filter_write 3 56609 NULL
99383 +gather_array_56641 gather_array 3 56641 NULL
99384 +uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
99385 +snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL nohasharray
99386 +da9055_gpio_to_irq_56686 da9055_gpio_to_irq 2 56686 &snd_gus_dram_read_56686
99387 +dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3 56702 NULL
99388 +sta_flags_read_56710 sta_flags_read 3 56710 NULL
99389 +ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
99390 +__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
99391 +pcpu_populate_chunk_56741 pcpu_populate_chunk 2-3 56741 NULL
99392 +drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
99393 +btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
99394 +alloc_iommu_56778 alloc_iommu 2-3 56778 NULL
99395 +__carl9170_rx_56784 __carl9170_rx 3 56784 NULL
99396 +do_syslog_56807 do_syslog 3 56807 NULL
99397 +mtdchar_write_56831 mtdchar_write 3 56831 NULL nohasharray
99398 +ntfs_rl_realloc_56831 ntfs_rl_realloc 3 56831 &mtdchar_write_56831
99399 +snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4 56847 NULL
99400 +ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
99401 +pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
99402 +debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
99403 +batadv_tt_update_changes_56895 batadv_tt_update_changes 3 56895 NULL
99404 +strcspn_56913 strcspn 0 56913 NULL
99405 +__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
99406 +check_header_56930 check_header 2 56930 NULL
99407 +journal_init_revoke_56933 journal_init_revoke 2 56933 NULL
99408 +diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
99409 +nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
99410 +vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
99411 +btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
99412 +skb_network_offset_57043 skb_network_offset 0 57043 NULL nohasharray
99413 +ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 &skb_network_offset_57043
99414 +bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
99415 +xfs_buf_read_map_57053 xfs_buf_read_map 3 57053 NULL
99416 +autofs_dev_ioctl_compat_57059 autofs_dev_ioctl_compat 3 57059 NULL
99417 +cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
99418 +sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
99419 +pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
99420 +sis190_try_rx_copy_57069 sis190_try_rx_copy 3 57069 NULL
99421 +tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
99422 +snd_pcm_hw_params_old_user_57108 snd_pcm_hw_params_old_user 0 57108 NULL
99423 +crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
99424 +sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
99425 +cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
99426 +nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 NULL nohasharray
99427 +rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 &nl80211_send_deauth_57136 nohasharray
99428 +ima_show_htable_value_57136 ima_show_htable_value 2 57136 &rds_ib_sub_signaled_57136
99429 +snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
99430 +udl_prime_create_57159 udl_prime_create 2 57159 NULL
99431 +stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
99432 +rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
99433 +tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
99434 +dma_fifo_alloc_57236 dma_fifo_alloc 5-3-2 57236 NULL
99435 +ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
99436 +oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
99437 +alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
99438 +security_mmap_file_57268 security_mmap_file 0 57268 NULL
99439 +pstore_file_read_57288 pstore_file_read 3 57288 NULL
99440 +snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
99441 +ath6kl_buf_alloc_57304 ath6kl_buf_alloc 1 57304 NULL
99442 +fw_file_size_57307 fw_file_size 0 57307 NULL
99443 +ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
99444 +__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
99445 +ocfs2_xattr_shrink_size_57328 ocfs2_xattr_shrink_size 3 57328 NULL
99446 +check_mirror_57342 check_mirror 1-2 57342 NULL nohasharray
99447 +usblp_read_57342 usblp_read 3 57342 &check_mirror_57342
99448 +print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
99449 +tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL
99450 +max8997_irq_domain_map_57375 max8997_irq_domain_map 2 57375 NULL
99451 +tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
99452 +read_file_blob_57406 read_file_blob 3 57406 NULL
99453 +enclosure_register_57412 enclosure_register 3 57412 NULL
99454 +gre_manip_pkt_57416 gre_manip_pkt 4 57416 NULL
99455 +wm831x_gpio_set_debounce_57428 wm831x_gpio_set_debounce 2 57428 NULL
99456 +compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL nohasharray
99457 +alloc_ftrace_hash_57431 alloc_ftrace_hash 1 57431 &compat_keyctl_instantiate_key_iov_57431
99458 +copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
99459 +sys_pselect6_57449 sys_pselect6 1 57449 NULL
99460 +ReadReg_57453 ReadReg 0 57453 NULL
99461 +__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL
99462 +crypto_tfm_alg_blocksize_57463 crypto_tfm_alg_blocksize 0 57463 NULL nohasharray
99463 +send_midi_async_57463 send_midi_async 3 57463 &crypto_tfm_alg_blocksize_57463
99464 +sisusb_clear_vram_57466 sisusb_clear_vram 3-2 57466 NULL
99465 +ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL nohasharray
99466 +sep_lock_user_pages_57470 sep_lock_user_pages 2-3 57470 &ieee80211_if_read_flags_57470
99467 +ocfs2_write_cluster_57483 ocfs2_write_cluster 8-2-9 57483 NULL
99468 +bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
99469 +skb_headlen_57501 skb_headlen 0 57501 NULL
99470 +copy_in_user_57502 copy_in_user 3 57502 NULL
99471 +ks8842_read32_57505 ks8842_read32 0 57505 NULL nohasharray
99472 +ckhdid_printf_57505 ckhdid_printf 2 57505 &ks8842_read32_57505
99473 +init_tag_map_57515 init_tag_map 3 57515 NULL
99474 +wil_read_file_ssid_57517 wil_read_file_ssid 3 57517 NULL nohasharray
99475 +il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 &wil_read_file_ssid_57517
99476 +inode_permission_57531 inode_permission 0 57531 NULL
99477 +acpi_dev_get_resources_57534 acpi_dev_get_resources 0 57534 NULL nohasharray
99478 +DoC_Probe_57534 DoC_Probe 1 57534 &acpi_dev_get_resources_57534
99479 +ext4_group_first_block_no_57559 ext4_group_first_block_no 0-2 57559 NULL
99480 +snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
99481 +lp8788_update_bits_57600 lp8788_update_bits 2 57600 NULL
99482 +wm831x_gpio_to_irq_57614 wm831x_gpio_to_irq 2 57614 NULL
99483 +sk_stream_alloc_skb_57622 sk_stream_alloc_skb 2 57622 NULL
99484 +tps65217_reg_write_57623 tps65217_reg_write 2 57623 NULL nohasharray
99485 +tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 &tps65217_reg_write_57623
99486 +osdmap_set_max_osd_57630 osdmap_set_max_osd 2 57630 NULL nohasharray
99487 +sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 &osdmap_set_max_osd_57630
99488 +mem_read_57631 mem_read 3 57631 NULL
99489 +tc3589x_irq_map_57639 tc3589x_irq_map 2 57639 NULL
99490 +sys_mq_timedsend_57661 sys_mq_timedsend 3 57661 NULL
99491 +r3964_write_57662 r3964_write 4 57662 NULL
99492 +proc_ns_readlink_57664 proc_ns_readlink 3 57664 NULL
99493 +__lgwrite_57669 __lgwrite 4 57669 NULL
99494 +ieee80211_MFIE_rate_len_57692 ieee80211_MFIE_rate_len 0 57692 NULL
99495 +f1x_match_to_this_node_57695 f1x_match_to_this_node 3 57695 NULL
99496 +check_prefree_segments_57702 check_prefree_segments 2 57702 NULL
99497 +i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
99498 +ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
99499 +compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 NULL
99500 +ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
99501 +ld2_57794 ld2 0 57794 NULL
99502 +ivtv_read_57796 ivtv_read 3 57796 NULL
99503 +generic_ptrace_peekdata_57806 generic_ptrace_peekdata 2 57806 NULL
99504 +usb_dump_config_57817 usb_dump_config 0 57817 NULL
99505 +bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
99506 +copy_to_user_57835 copy_to_user 3 57835 NULL
99507 +flash_read_57843 flash_read 3 57843 NULL
99508 +xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
99509 +ad5380_info_to_reg_57905 ad5380_info_to_reg 0 57905 NULL
99510 +emi26_writememory_57908 emi26_writememory 4 57908 NULL
99511 +iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL
99512 +memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL
99513 +twl_i2c_write_57923 twl_i2c_write 4-3 57923 NULL
99514 +__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
99515 +sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
99516 +key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
99517 +ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
99518 +ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
99519 +i915_cache_sharing_write_57961 i915_cache_sharing_write 3 57961 NULL
99520 +hfc_empty_fifo_57972 hfc_empty_fifo 2 57972 NULL
99521 +rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
99522 +regcache_rbtree_insert_to_block_58009 regcache_rbtree_insert_to_block 5 58009 NULL
99523 +iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
99524 +io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
99525 +mc13783_write_58033 mc13783_write 2 58033 NULL
99526 +mce_async_out_58056 mce_async_out 3 58056 NULL
99527 +ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
99528 +dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
99529 +cm4040_write_58079 cm4040_write 3 58079 NULL
99530 +udi_log_event_58105 udi_log_event 3 58105 NULL
99531 +savemem_58129 savemem 3 58129 NULL
99532 +ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
99533 +slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
99534 +garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
99535 +asix_write_cmd_58192 asix_write_cmd 5 58192 NULL
99536 +ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
99537 +btrfs_mksubvol_58240 btrfs_mksubvol 3 58240 NULL
99538 +btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
99539 +read_file_debug_58256 read_file_debug 3 58256 NULL
99540 +cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
99541 +profile_load_58267 profile_load 3 58267 NULL
99542 +acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
99543 +r100_mm_rreg_58276 r100_mm_rreg 0 58276 NULL
99544 +iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
99545 +ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
99546 +tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
99547 +pcim_iomap_58334 pcim_iomap 3 58334 NULL
99548 +diva_init_dma_map_58336 diva_init_dma_map 3 58336 NULL
99549 +ieee80211_send_probe_req_58337 ieee80211_send_probe_req 4-6 58337 NULL
99550 +next_pidmap_58347 next_pidmap 2 58347 NULL
99551 +vmalloc_to_sg_58354 vmalloc_to_sg 2 58354 NULL
99552 +brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
99553 +il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
99554 +kvm_mmu_write_protect_pt_masked_58406 kvm_mmu_write_protect_pt_masked 3 58406 NULL
99555 +i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
99556 +__mlx4_alloc_mtt_range_58418 __mlx4_alloc_mtt_range 2 58418 NULL
99557 +__iio_add_chan_devattr_58451 __iio_add_chan_devattr 0 58451 NULL
99558 +capabilities_read_58457 capabilities_read 3 58457 NULL
99559 +batadv_iv_ogm_aggr_packet_58462 batadv_iv_ogm_aggr_packet 3 58462 NULL
99560 +lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
99561 +compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
99562 +snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
99563 +snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
99564 +batadv_bla_is_backbone_gw_58488 batadv_bla_is_backbone_gw 3 58488 NULL
99565 +wm831x_reg_write_58489 wm831x_reg_write 2 58489 NULL
99566 +rndis_add_response_58544 rndis_add_response 2 58544 NULL
99567 +wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
99568 +sip_sprintf_addr_port_58574 sip_sprintf_addr_port 0 58574 NULL
99569 +scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
99570 +ea_read_inline_58589 ea_read_inline 0 58589 NULL
99571 +xip_file_read_58592 xip_file_read 3 58592 NULL
99572 +gdth_search_isa_58595 gdth_search_isa 1 58595 NULL
99573 +ebt_buf_count_58607 ebt_buf_count 0 58607 NULL
99574 +skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
99575 +module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL nohasharray
99576 +efi_ioremap_58634 efi_ioremap 1-2 58634 &module_alloc_update_bounds_rx_58634
99577 +tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
99578 +ocfs2_block_to_cluster_start_58653 ocfs2_block_to_cluster_start 2 58653 NULL
99579 +find_zero_58685 find_zero 0-1 58685 NULL nohasharray
99580 +mcs7830_set_reg_async_58685 mcs7830_set_reg_async 3 58685 &find_zero_58685
99581 +uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
99582 +tps6586x_writes_58689 tps6586x_writes 3-2 58689 NULL
99583 +vmalloc_node_58700 vmalloc_node 1 58700 NULL
99584 +acpi_map_58725 acpi_map 1-2 58725 NULL
99585 +da9052_gpio_to_irq_58729 da9052_gpio_to_irq 2 58729 NULL
99586 +csum_exist_in_range_58730 csum_exist_in_range 2 58730 NULL
99587 +frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
99588 +ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
99589 +agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
99590 +__do_config_autodelink_58763 __do_config_autodelink 3 58763 NULL
99591 +regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
99592 +raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
99593 +isku_sysfs_read_58806 isku_sysfs_read 6 58806 NULL
99594 +ep_read_58813 ep_read 3 58813 NULL
99595 +command_write_58841 command_write 3 58841 NULL
99596 +ocfs2_truncate_log_append_58850 ocfs2_truncate_log_append 3 58850 NULL
99597 +ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
99598 +hw_write_58881 hw_write 2 58881 NULL
99599 +gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
99600 +cs553x_init_one_58886 cs553x_init_one 3 58886 NULL
99601 +raw_ctl_compat_ioctl_58905 raw_ctl_compat_ioctl 3 58905 NULL
99602 +print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
99603 +tun_chr_compat_ioctl_58921 tun_chr_compat_ioctl 3 58921 NULL
99604 +pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
99605 +st5481_isoc_flatten_58952 st5481_isoc_flatten 0 58952 NULL
99606 +ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout_58965 ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout 3 58965 NULL
99607 +crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
99608 +handle_rx_packet_58993 handle_rx_packet 3 58993 NULL
99609 +edac_align_ptr_59003 edac_align_ptr 0 59003 NULL
99610 +ep_write_59008 ep_write 3 59008 NULL
99611 +i915_ring_stop_write_59010 i915_ring_stop_write 3 59010 NULL
99612 +init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
99613 +selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
99614 +crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
99615 +regmap_bulk_write_59049 regmap_bulk_write 4-2 59049 NULL
99616 +da9052_gpio_direction_input_59062 da9052_gpio_direction_input 2 59062 NULL
99617 +vfio_device_fops_compat_ioctl_59111 vfio_device_fops_compat_ioctl 3 59111 NULL
99618 +mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
99619 +scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
99620 +nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
99621 +__iio_add_event_config_attrs_59136 __iio_add_event_config_attrs 0 59136 NULL
99622 +print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
99623 +framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
99624 +radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
99625 +pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
99626 +setup_window_59178 setup_window 4-2-5-7 59178 NULL
99627 +ocfs2_move_extent_59187 ocfs2_move_extent 3 59187 NULL
99628 +xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
99629 +fast_rx_path_59214 fast_rx_path 3 59214 NULL
99630 +inftl_partscan_59216 inftl_partscan 0 59216 NULL
99631 +skb_transport_header_59223 skb_transport_header 0 59223 NULL
99632 +dt3155_read_59226 dt3155_read 3 59226 NULL
99633 +paging64_gpte_to_gfn_lvl_59229 paging64_gpte_to_gfn_lvl 0-1-2 59229 NULL
99634 +rbd_do_request_59239 rbd_do_request 6-7 59239 NULL
99635 +tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
99636 +solo_v4l2_read_59247 solo_v4l2_read 3 59247 NULL
99637 +nla_len_59258 nla_len 0 59258 NULL
99638 +da9055_reg_update_59280 da9055_reg_update 2 59280 NULL
99639 +btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
99640 +fd_copyout_59323 fd_copyout 3 59323 NULL
99641 +read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
99642 +rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
99643 +xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
99644 +xfs_dir2_sf_entsize_59366 xfs_dir2_sf_entsize 0-2 59366 NULL
99645 +pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
99646 +fc_frame_alloc_fill_59394 fc_frame_alloc_fill 2 59394 NULL
99647 +vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
99648 +snd_pcm_tstamp_59431 snd_pcm_tstamp 0 59431 NULL
99649 +squashfs_read_data_59440 squashfs_read_data 6 59440 NULL
99650 +fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL
99651 +descriptor_loc_59446 descriptor_loc 3 59446 NULL
99652 +do_compat_semctl_59449 do_compat_semctl 4 59449 NULL
99653 +virtqueue_add_buf_59470 virtqueue_add_buf 3-4 59470 NULL
99654 +ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
99655 +nfsd_nrpools_59503 nfsd_nrpools 0 59503 NULL
99656 +rds_pin_pages_59507 rds_pin_pages 0 59507 NULL
99657 +mpi_get_nbits_59551 mpi_get_nbits 0 59551 NULL
99658 +tunables_write_59563 tunables_write 3 59563 NULL
99659 +memdup_user_59590 memdup_user 2 59590 NULL
99660 +tps6586x_irq_get_virq_59601 tps6586x_irq_get_virq 2 59601 NULL
99661 +mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 NULL
99662 +mtrr_write_59622 mtrr_write 3 59622 NULL
99663 +ip_vs_icmp_xmit_59624 ip_vs_icmp_xmit 4 59624 NULL
99664 +find_first_zero_bit_59636 find_first_zero_bit 0-2 59636 NULL
99665 +ubifs_setxattr_59650 ubifs_setxattr 4 59650 NULL nohasharray
99666 +hidraw_read_59650 hidraw_read 3 59650 &ubifs_setxattr_59650
99667 +v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
99668 +__devcgroup_check_permission_59665 __devcgroup_check_permission 0 59665 NULL
99669 +alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
99670 +wm8400_write_59675 wm8400_write 2 59675 NULL
99671 +sriov_enable_59689 sriov_enable 2 59689 NULL
99672 +mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
99673 +snd_pcm_info_user_59711 snd_pcm_info_user 0 59711 NULL
99674 +prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
99675 +wm8996_gpio_direction_in_59739 wm8996_gpio_direction_in 2 59739 NULL
99676 +ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL
99677 +qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
99678 +strnlen_59746 strnlen 0 59746 NULL
99679 +sctp_manip_pkt_59749 sctp_manip_pkt 4 59749 NULL
99680 +ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
99681 +long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
99682 +sec_reg_update_59771 sec_reg_update 2 59771 NULL
99683 +venus_remove_59781 venus_remove 4 59781 NULL
99684 +xlog_do_recover_59789 xlog_do_recover 3 59789 NULL
99685 +ipw_write_59807 ipw_write 3 59807 NULL
99686 +rtllib_wx_set_gen_ie_59808 rtllib_wx_set_gen_ie 3 59808 NULL
99687 +scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
99688 +ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
99689 +gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
99690 +fs64_to_cpu_59845 fs64_to_cpu 0 59845 NULL
99691 +tun_put_user_59849 tun_put_user 4 59849 NULL
99692 +pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
99693 +shmem_zero_setup_59885 shmem_zero_setup 0 59885 NULL
99694 +ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
99695 +il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 NULL nohasharray
99696 +dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 &il_dbgfs_rxon_flags_read_59950
99697 +compat_ipmi_ioctl_59956 compat_ipmi_ioctl 3 59956 NULL nohasharray
99698 +il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 &compat_ipmi_ioctl_59956
99699 +fb_getput_cmap_59971 fb_getput_cmap 3 59971 NULL
99700 +__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
99701 +osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
99702 +ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
99703 +rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
99704 +mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
99705 +register_device_60015 register_device 2-3 60015 NULL
99706 +osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
99707 +xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
99708 +sys_sched_getaffinity_60033 sys_sched_getaffinity 2 60033 NULL
99709 +ceph_calc_raw_layout_60035 ceph_calc_raw_layout 4 60035 NULL
99710 +bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
99711 +do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
99712 +pin_2_irq_60050 pin_2_irq 0-3 60050 NULL nohasharray
99713 +vcs_size_60050 vcs_size 0 60050 &pin_2_irq_60050
99714 +load_module_60056 load_module 2 60056 NULL nohasharray
99715 +gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 &load_module_60056
99716 +compat_writev_60063 compat_writev 3 60063 NULL
99717 +ieee80211_build_probe_req_60064 ieee80211_build_probe_req 8-6 60064 NULL
99718 +c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
99719 +mp_register_gsi_60079 mp_register_gsi 2 60079 NULL
99720 +rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
99721 +ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
99722 +ttm_bo_kmap_60118 ttm_bo_kmap 3-2 60118 NULL
99723 +jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
99724 +init_state_60165 init_state 2 60165 NULL
99725 +sg_build_sgat_60179 sg_build_sgat 3 60179 NULL nohasharray
99726 +jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 &sg_build_sgat_60179
99727 +rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
99728 +svc_compat_ioctl_60194 svc_compat_ioctl 3 60194 NULL
99729 +ib_send_cm_mra_60202 ib_send_cm_mra 4 60202 NULL nohasharray
99730 +qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 &ib_send_cm_mra_60202
99731 +set_tap_pfs_60203 set_tap_pfs 3 60203 NULL
99732 +ieee80211_mgmt_tx_60209 ieee80211_mgmt_tx 7 60209 NULL
99733 +btrfs_get_token_16_60220 btrfs_get_token_16 0 60220 NULL
99734 +arizona_map_irq_60230 arizona_map_irq 2 60230 NULL
99735 +lp8788_select_buck_vout_addr_60241 lp8788_select_buck_vout_addr 0 60241 NULL
99736 +wm831x_irq_60254 wm831x_irq 2 60254 NULL
99737 +compat_sys_fcntl64_60256 compat_sys_fcntl64 3 60256 NULL
99738 +printer_write_60276 printer_write 3 60276 NULL
99739 +__pskb_pull_tail_60287 __pskb_pull_tail 2 60287 NULL
99740 +do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
99741 +getDataLength_60301 getDataLength 0 60301 NULL
99742 +ceph_parse_server_name_60318 ceph_parse_server_name 2 60318 NULL
99743 +__kfifo_from_user_r_60345 __kfifo_from_user_r 3-5 60345 NULL
99744 +dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
99745 +ubi_eba_atomic_leb_change_60379 ubi_eba_atomic_leb_change 5 60379 NULL
99746 +mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
99747 +ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
99748 +driver_names_read_60399 driver_names_read 3 60399 NULL
99749 +simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
99750 +excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
99751 +tstats_write_60432 tstats_write 3 60432 NULL nohasharray
99752 +kmalloc_60432 kmalloc 1 60432 &tstats_write_60432
99753 +tipc_buf_acquire_60437 tipc_buf_acquire 1 60437 NULL
99754 +rx_data_60442 rx_data 4 60442 NULL
99755 +tcf_csum_ipv4_igmp_60446 tcf_csum_ipv4_igmp 3 60446 NULL
99756 +crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
99757 +ath_tx_init_60515 ath_tx_init 2 60515 NULL
99758 +hysdn_sched_rx_60533 hysdn_sched_rx 3 60533 NULL
99759 +v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
99760 +nonpaging_map_60551 nonpaging_map 4 60551 NULL
99761 +skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
99762 +wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
99763 +usb_control_msg_60624 usb_control_msg 0 60624 NULL
99764 +acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
99765 +ubifs_recover_leb_60639 ubifs_recover_leb 3 60639 NULL
99766 +fb_get_fscreeninfo_60640 fb_get_fscreeninfo 3 60640 NULL
99767 +if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
99768 +ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
99769 +read_vbt_r10_60679 read_vbt_r10 1 60679 NULL
99770 +init_data_container_60709 init_data_container 1 60709 NULL
99771 +snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
99772 +raid_status_60755 raid_status 5 60755 NULL
99773 +sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
99774 +opticon_write_60775 opticon_write 4 60775 NULL
99775 +acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
99776 +snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
99777 +pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
99778 +alloc_buf_60864 alloc_buf 3-2 60864 NULL
99779 +alloc_irq_from_60868 alloc_irq_from 1 60868 NULL
99780 +generic_writepages_60871 generic_writepages 0 60871 NULL
99781 +ubifs_read_one_lp_60882 ubifs_read_one_lp 0 60882 NULL
99782 +ext4_update_inline_data_60888 ext4_update_inline_data 3 60888 NULL
99783 +wm8962_gpio_set_60894 wm8962_gpio_set 2 60894 NULL
99784 +iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
99785 +mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
99786 +scrub_chunk_60926 scrub_chunk 5 60926 NULL
99787 +pti_char_write_60960 pti_char_write 3 60960 NULL
99788 +mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
99789 +__a2mp_build_60987 __a2mp_build 3 60987 NULL
99790 +hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
99791 +ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
99792 +symtab_init_61050 symtab_init 2 61050 NULL
99793 +fuse_send_write_61053 fuse_send_write 0 61053 NULL
99794 +snd_pcm_pause_61054 snd_pcm_pause 0 61054 NULL
99795 +bitmap_scnlistprintf_61062 bitmap_scnlistprintf 0-4-2 61062 NULL
99796 +ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
99797 +snd_pcm_update_hw_ptr0_61084 snd_pcm_update_hw_ptr0 0 61084 NULL
99798 +get_derived_key_61100 get_derived_key 4 61100 NULL
99799 +alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
99800 +p80211_headerlen_61119 p80211_headerlen 0 61119 NULL nohasharray
99801 +__probe_kernel_read_61119 __probe_kernel_read 3 61119 &p80211_headerlen_61119
99802 +vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
99803 +afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
99804 +__vmalloc_61168 __vmalloc 1 61168 NULL
99805 +event_oom_late_read_61175 event_oom_late_read 3 61175 NULL nohasharray
99806 +pair_device_61175 pair_device 4 61175 &event_oom_late_read_61175
99807 +sys_lsetxattr_61177 sys_lsetxattr 4 61177 NULL
99808 +arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
99809 +smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
99810 +btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL
99811 +vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL
99812 +sys_add_key_61288 sys_add_key 4 61288 NULL
99813 +ext4_issue_discard_61305 ext4_issue_discard 2 61305 NULL
99814 +xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
99815 +st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
99816 +rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL
99817 +f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
99818 +debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
99819 +sys_ptrace_61369 sys_ptrace 3 61369 NULL
99820 +change_xattr_61390 change_xattr 5 61390 NULL
99821 +size_entry_mwt_61400 size_entry_mwt 0 61400 NULL
99822 +irq_create_of_mapping_61428 irq_create_of_mapping 0 61428 NULL
99823 +dma_ops_area_alloc_61440 dma_ops_area_alloc 3-4-5 61440 NULL
99824 +tc3589x_irq_unmap_61447 tc3589x_irq_unmap 2 61447 NULL
99825 +unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
99826 +snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 4-2-5 61483 NULL
99827 +btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
99828 +mc13xxx_reg_write_61490 mc13xxx_reg_write 2 61490 NULL
99829 +erst_errno_61526 erst_errno 0 61526 NULL
99830 +ntfs_attr_lookup_61539 ntfs_attr_lookup 0 61539 NULL
99831 +o2hb_pop_count_61553 o2hb_pop_count 2 61553 NULL
99832 +dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
99833 +ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
99834 +seq_open_private_61589 seq_open_private 3 61589 NULL
99835 +__get_vm_area_61599 __get_vm_area 1 61599 NULL
99836 +nfs4_init_uniform_client_string_61601 nfs4_init_uniform_client_string 3 61601 NULL
99837 +ncp_compat_ioctl_61608 ncp_compat_ioctl 3 61608 NULL
99838 +configfs_write_file_61621 configfs_write_file 3 61621 NULL
99839 +ieee80211_if_fmt_hw_queues_61629 ieee80211_if_fmt_hw_queues 3 61629 NULL
99840 +ieee80211_rx_bss_info_61630 ieee80211_rx_bss_info 3 61630 NULL
99841 +i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
99842 +snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
99843 +resize_stripes_61650 resize_stripes 2 61650 NULL
99844 +ttm_page_pool_free_61661 ttm_page_pool_free 2 61661 NULL
99845 +insert_one_name_61668 insert_one_name 7 61668 NULL
99846 +snd_pcm_playback_avail_61671 snd_pcm_playback_avail 0 61671 NULL
99847 +lock_loop_61681 lock_loop 1 61681 NULL
99848 +__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
99849 +filter_read_61692 filter_read 3 61692 NULL
99850 +iov_length_61716 iov_length 0 61716 NULL
99851 +fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
99852 +read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
99853 +read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
99854 +gfs2_meta_wait_61773 gfs2_meta_wait 0 61773 NULL
99855 +batadv_dat_snoop_incoming_arp_reply_61801 batadv_dat_snoop_incoming_arp_reply 3 61801 NULL
99856 +tps80031_irq_init_61830 tps80031_irq_init 3 61830 NULL
99857 +bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
99858 +fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2 61854 NULL
99859 +evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
99860 +get_fw_name_61874 get_fw_name 3 61874 NULL
99861 +twl4030_sih_setup_61878 twl4030_sih_setup 3 61878 NULL
99862 +ieee80211_rtl_auth_challenge_61897 ieee80211_rtl_auth_challenge 3 61897 NULL
99863 +ax25_addr_size_61899 ax25_addr_size 0 61899 NULL nohasharray
99864 +cxgb4_pktgl_to_skb_61899 cxgb4_pktgl_to_skb 2 61899 &ax25_addr_size_61899
99865 +clear_refs_write_61904 clear_refs_write 3 61904 NULL
99866 +rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
99867 +au0828_init_isoc_61917 au0828_init_isoc 3-2 61917 NULL
99868 +sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
99869 +send_bulk_static_data_61932 send_bulk_static_data 3 61932 NULL
99870 +il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
99871 +squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
99872 +mlx4_alloc_mtt_range_61966 mlx4_alloc_mtt_range 2 61966 NULL
99873 +ocfs2_quota_write_61972 ocfs2_quota_write 5-4 61972 NULL
99874 +cow_file_range_61979 cow_file_range 3 61979 NULL
99875 +virtnet_send_command_61993 virtnet_send_command 5-6 61993 NULL
99876 +xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
99877 +jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
99878 +pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
99879 +ppp_tx_cp_62044 ppp_tx_cp 5 62044 NULL
99880 +sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
99881 +do_pselect_62061 do_pselect 1 62061 NULL
99882 +pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
99883 +__next_gcwq_cpu_62078 __next_gcwq_cpu 1 62078 NULL
99884 +get_domain_for_dev_62099 get_domain_for_dev 2 62099 NULL
99885 +jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
99886 +llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
99887 +qib_diag_write_62133 qib_diag_write 3 62133 NULL
99888 +ql_status_62135 ql_status 5 62135 NULL
99889 +video_usercopy_62151 video_usercopy 2 62151 NULL
99890 +prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
99891 +_irq_to_addr_62183 _irq_to_addr 0-1 62183 NULL
99892 +alloc_upcall_62186 alloc_upcall 2 62186 NULL
99893 +btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
99894 +sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
99895 +nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
99896 +allocate_partition_62245 allocate_partition 4 62245 NULL
99897 +il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
99898 +sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
99899 +subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
99900 +udf_sb_alloc_partition_maps_62313 udf_sb_alloc_partition_maps 2 62313 NULL
99901 +Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
99902 +subseq_list_62332 subseq_list 3-0 62332 NULL
99903 +flash_write_62354 flash_write 3 62354 NULL
99904 +set_wd_exp_mode_pfs_62372 set_wd_exp_mode_pfs 3 62372 NULL
99905 +rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
99906 +altera_irscan_62396 altera_irscan 2 62396 NULL
99907 +set_ssp_62411 set_ssp 4 62411 NULL
99908 +tscadc_writel_62426 tscadc_writel 2 62426 NULL
99909 +netdev_alloc_skb_62437 netdev_alloc_skb 2 62437 NULL
99910 +e1000_check_copybreak_62448 e1000_check_copybreak 3 62448 NULL
99911 +ip_vs_icmp_xmit_v6_62477 ip_vs_icmp_xmit_v6 4 62477 NULL
99912 +ceph_dns_resolve_name_62488 ceph_dns_resolve_name 2 62488 NULL
99913 +mlx4_en_create_rx_ring_62498 mlx4_en_create_rx_ring 3 62498 NULL
99914 +ext_rts51x_sd_execute_read_data_62501 ext_rts51x_sd_execute_read_data 9 62501 NULL
99915 +pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
99916 +test_iso_queue_62534 test_iso_queue 5 62534 NULL
99917 +debugfs_read_62535 debugfs_read 3 62535 NULL
99918 +sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
99919 +qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
99920 +xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
99921 +get_subdir_62581 get_subdir 3 62581 NULL
99922 +prism2_send_mgmt_62605 prism2_send_mgmt 4 62605 NULL nohasharray
99923 +nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 &prism2_send_mgmt_62605
99924 +get_desc_base_62617 get_desc_base 0 62617 NULL
99925 +iommu_area_alloc_62619 iommu_area_alloc 2-3-4-7 62619 NULL
99926 +ems_pcmcia_add_card_62627 ems_pcmcia_add_card 2 62627 NULL
99927 +compat_rangeinfo_62630 compat_rangeinfo 2 62630 NULL
99928 +lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
99929 +wm8994_gpio_direction_in_62649 wm8994_gpio_direction_in 2 62649 NULL
99930 +ima_file_mmap_62663 ima_file_mmap 0 62663 NULL
99931 +write_62671 write 3 62671 NULL
99932 +printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
99933 +qla4_83xx_rd_reg_62693 qla4_83xx_rd_reg 0 62693 NULL
99934 +ioremap_wc_62695 ioremap_wc 1-2 62695 NULL
99935 +bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
99936 +rdm_62719 rdm 0 62719 NULL
99937 +key_replays_read_62746 key_replays_read 3 62746 NULL
99938 +init_chip_wc_pat_62768 init_chip_wc_pat 2 62768 NULL
99939 +ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
99940 +page_key_alloc_62771 page_key_alloc 0 62771 NULL
99941 +tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
99942 +__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL
99943 +bio_get_nr_vecs_62838 bio_get_nr_vecs 0 62838 NULL
99944 +xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
99945 +rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
99946 +hpi_read_word_62862 hpi_read_word 0 62862 NULL
99947 +aoechr_write_62883 aoechr_write 3 62883 NULL nohasharray
99948 +em28xx_init_isoc_62883 em28xx_init_isoc 4 62883 &aoechr_write_62883
99949 +resize_info_buffer_62889 resize_info_buffer 2 62889 NULL
99950 +if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
99951 +mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
99952 +getdqbuf_62908 getdqbuf 1 62908 NULL
99953 +agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
99954 +PTR_ERR_63033 PTR_ERR 0 63033 NULL nohasharray
99955 +__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 &PTR_ERR_63033
99956 +pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
99957 +scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
99958 +unlink1_63059 unlink1 3 63059 NULL
99959 +xen_set_nslabs_63066 xen_set_nslabs 0 63066 NULL
99960 +ocfs2_decrease_refcount_63078 ocfs2_decrease_refcount 3 63078 NULL
99961 +sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 NULL
99962 +iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
99963 +ib_send_cm_rtu_63138 ib_send_cm_rtu 3 63138 NULL
99964 +snd_pcm_status_user_63140 snd_pcm_status_user 0 63140 NULL
99965 +ubifs_change_one_lp_63157 ubifs_change_one_lp 0 63157 NULL
99966 +smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
99967 +vme_master_read_63221 vme_master_read 0 63221 NULL
99968 +module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
99969 +ptp_read_63251 ptp_read 4 63251 NULL
99970 +ntfs_attr_can_be_non_resident_63267 ntfs_attr_can_be_non_resident 0 63267 NULL
99971 +raid5_resize_63306 raid5_resize 2 63306 NULL
99972 +proc_info_read_63344 proc_info_read 3 63344 NULL
99973 +ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
99974 +idmouse_read_63374 idmouse_read 3 63374 NULL
99975 +edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL nohasharray
99976 +usbnet_read_cmd_nopm_63388 usbnet_read_cmd_nopm 7 63388 &edac_pci_alloc_ctl_info_63388
99977 +rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
99978 +l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
99979 +sep_prepare_input_output_dma_table_63429 sep_prepare_input_output_dma_table 2-4-3 63429 NULL
99980 +kone_send_63435 kone_send 4 63435 NULL
99981 +nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
99982 +snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
99983 +reada_find_extent_63486 reada_find_extent 2 63486 NULL
99984 +read_kcore_63488 read_kcore 3 63488 NULL
99985 +snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
99986 +ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
99987 +if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
99988 +append_to_buffer_63550 append_to_buffer 3 63550 NULL
99989 +dbg_leb_write_63555 dbg_leb_write 4-5 63555 NULL nohasharray
99990 +kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 &dbg_leb_write_63555
99991 +ubifs_lpt_scan_nolock_63572 ubifs_lpt_scan_nolock 0 63572 NULL
99992 +ocfs2_calc_trunc_pos_63576 ocfs2_calc_trunc_pos 4 63576 NULL
99993 +rproc_alloc_63577 rproc_alloc 5 63577 NULL
99994 +ext3_clear_blocks_63597 ext3_clear_blocks 4-5 63597 NULL
99995 +module_alloc_63630 module_alloc 1 63630 NULL
99996 +ntfs_malloc_nofs_nofail_63631 ntfs_malloc_nofs_nofail 1 63631 NULL
99997 +symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
99998 +_ubh_find_next_zero_bit__63640 _ubh_find_next_zero_bit_ 4-5-3 63640 NULL
99999 +proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
100000 +ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
100001 +hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
100002 +vbi_read_63673 vbi_read 3 63673 NULL nohasharray
100003 +xen_register_pirq_63673 xen_register_pirq 1-2 63673 &vbi_read_63673
100004 +alloc_tty_driver_63681 alloc_tty_driver 1 63681 NULL
100005 +mkiss_compat_ioctl_63686 mkiss_compat_ioctl 4 63686 NULL
100006 +arizona_irq_map_63709 arizona_irq_map 2 63709 NULL
100007 +nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
100008 +btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
100009 +selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
100010 +snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
100011 +snd_pcm_link_63772 snd_pcm_link 0 63772 NULL
100012 +snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
100013 +spidev_compat_ioctl_63778 spidev_compat_ioctl 2-3 63778 NULL
100014 +snapshot_compat_ioctl_63792 snapshot_compat_ioctl 3 63792 NULL
100015 +kovaplus_sysfs_write_63795 kovaplus_sysfs_write 6 63795 NULL
100016 +mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
100017 +copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
100018 +sel_write_load_63830 sel_write_load 3 63830 NULL
100019 +proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
100020 +ieee80211_if_fmt_channel_type_63855 ieee80211_if_fmt_channel_type 3 63855 NULL
100021 +init_map_ipmac_63896 init_map_ipmac 4-3 63896 NULL
100022 +divas_write_63901 divas_write 3 63901 NULL
100023 +xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL
100024 +uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-3-2 63922 NULL
100025 +snd_compr_write_63923 snd_compr_write 3 63923 NULL
100026 +acpi_ev_get_gpe_xrupt_block_63924 acpi_ev_get_gpe_xrupt_block 1 63924 NULL
100027 +tipc_send2port_63935 tipc_send2port 5 63935 NULL
100028 +afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
100029 +__team_options_register_63941 __team_options_register 3 63941 NULL
100030 +macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
100031 +ieee80211_if_fmt_rc_rateidx_mcs_mask_2ghz_63968 ieee80211_if_fmt_rc_rateidx_mcs_mask_2ghz 3 63968 NULL
100032 +ieee80211_authentication_req_63973 ieee80211_authentication_req 3 63973 NULL
100033 +diva_xdi_write_63975 diva_xdi_write 4 63975 NULL
100034 +read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
100035 +kmemdup_64015 kmemdup 2 64015 NULL
100036 +tcf_csum_skb_nextlayer_64025 tcf_csum_skb_nextlayer 3 64025 NULL
100037 +dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
100038 +resize_async_buffer_64031 resize_async_buffer 4 64031 NULL
100039 +sep_lli_table_secure_dma_64042 sep_lli_table_secure_dma 2-3 64042 NULL
100040 +get_u8_64076 get_u8 0 64076 NULL
100041 +sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
100042 +lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
100043 +do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
100044 +ol_quota_entries_per_block_64122 ol_quota_entries_per_block 0 64122 NULL
100045 +ext4_prepare_inline_data_64124 ext4_prepare_inline_data 3 64124 NULL
100046 +init_bch_64130 init_bch 1-2 64130 NULL
100047 +uea_idma_write_64139 uea_idma_write 3 64139 NULL
100048 +ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
100049 +dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
100050 +cpumask_scnprintf_64170 cpumask_scnprintf 2 64170 NULL
100051 +read_pulse_64227 read_pulse 0-3 64227 NULL
100052 +header_len_64232 header_len 0 64232 NULL
100053 +redrat3_transmit_ir_64244 redrat3_transmit_ir 3 64244 NULL
100054 +io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
100055 +btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
100056 +sta_current_tx_rate_read_64286 sta_current_tx_rate_read 3 64286 NULL
100057 +event_id_read_64288 event_id_read 3 64288 NULL nohasharray
100058 +xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 &event_id_read_64288
100059 +ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
100060 +error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
100061 +ffz_64324 ffz 0 64324 NULL
100062 +sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
100063 +ts_write_64336 ts_write 3 64336 NULL
100064 +usbtmc_write_64340 usbtmc_write 3 64340 NULL
100065 +do_write_orph_node_64343 do_write_orph_node 2 64343 NULL
100066 +ft1000_read_reg_64352 ft1000_read_reg 0 64352 NULL
100067 +wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
100068 +ilo_write_64378 ilo_write 3 64378 NULL
100069 +btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
100070 +nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
100071 +ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
100072 +pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
100073 +rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
100074 +snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
100075 +keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
100076 +nl80211_send_mgmt_64419 nl80211_send_mgmt 7 64419 NULL
100077 +oom_adj_write_64428 oom_adj_write 3 64428 NULL
100078 +ext4_trim_extent_64431 ext4_trim_extent 4 64431 NULL
100079 +ieee80211_ie_build_ht_cap_64443 ieee80211_ie_build_ht_cap 0 64443 NULL
100080 +cap_capable_64462 cap_capable 0 64462 NULL
100081 +ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
100082 +p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
100083 +msg_data_sz_64503 msg_data_sz 0 64503 NULL
100084 +remove_uuid_64505 remove_uuid 4 64505 NULL
100085 +crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
100086 +opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
100087 +ses_send_diag_64527 ses_send_diag 4 64527 NULL
100088 +prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
100089 +__spi_sync_64561 __spi_sync 0 64561 NULL
100090 +__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
100091 +fanotify_write_64623 fanotify_write 3 64623 NULL
100092 +regmap_read_debugfs_64658 regmap_read_debugfs 5 64658 NULL
100093 +ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL nohasharray
100094 +tlbflush_read_file_64661 tlbflush_read_file 3 64661 &ocfs2_read_xattr_block_64661
100095 +efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
100096 +rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
100097 +nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
100098 +sec_bulk_write_64691 sec_bulk_write 2-3 64691 NULL
100099 +__feat_register_sp_64712 __feat_register_sp 6 64712 NULL
100100 +snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
100101 +dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
100102 +atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
100103 +i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
100104 +squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
100105 +bio_map_kern_64751 bio_map_kern 3 64751 NULL
100106 +rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
100107 +isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
100108 +twl6040_reg_write_64790 twl6040_reg_write 2 64790 NULL
100109 +nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
100110 +megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
100111 +ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
100112 +do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
100113 +altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
100114 +ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
100115 +ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
100116 +ubifs_wbuf_write_nolock_64946 ubifs_wbuf_write_nolock 3 64946 NULL
100117 +snd_rawmidi_ioctl_compat_64954 snd_rawmidi_ioctl_compat 3 64954 NULL
100118 +ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
100119 +acpi_os_install_interrupt_handler_64968 acpi_os_install_interrupt_handler 1 64968 NULL
100120 +traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
100121 +suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
100122 +ext2_group_first_block_no_64972 ext2_group_first_block_no 0-2 64972 NULL
100123 +pskb_pull_65005 pskb_pull 2 65005 NULL
100124 +unifi_write_65012 unifi_write 3 65012 NULL
100125 +crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
100126 +nfs_readdata_alloc_65015 nfs_readdata_alloc 2 65015 NULL
100127 +insert_dent_65034 insert_dent 7 65034 NULL
100128 +compat_put_ushort_65040 compat_put_ushort 1 65040 NULL
100129 +brcmf_sdcard_rwdata_65041 brcmf_sdcard_rwdata 5 65041 NULL
100130 +compat_cmdtest_65064 compat_cmdtest 2 65064 NULL
100131 +make_idx_node_65068 make_idx_node 0 65068 NULL
100132 +count_run_65072 count_run 0-2-4 65072 NULL nohasharray
100133 +bnx2fc_process_l2_frame_compl_65072 bnx2fc_process_l2_frame_compl 3 65072 &count_run_65072
100134 +__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
100135 +ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
100136 +ath9k_dump_mci_btcoex_65090 ath9k_dump_mci_btcoex 0 65090 NULL
100137 +ssb_bus_register_65183 ssb_bus_register 3 65183 NULL
100138 +rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
100139 +print_endpoint_stat_65232 print_endpoint_stat 0-4-3 65232 NULL
100140 +whci_n_caps_65247 whci_n_caps 0 65247 NULL
100141 +kmem_zalloc_greedy_65268 kmem_zalloc_greedy 3-2 65268 NULL
100142 +kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
100143 +compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
100144 +get_unaligned_le16_65293 get_unaligned_le16 0 65293 NULL
100145 +mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
100146 +redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
100147 +get_var_len_65304 get_var_len 0 65304 NULL
100148 +unpack_array_65318 unpack_array 0 65318 NULL
100149 +pci_vpd_find_tag_65325 pci_vpd_find_tag 0-2 65325 NULL
100150 +dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
100151 +dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
100152 +batadv_tt_save_orig_buffer_65361 batadv_tt_save_orig_buffer 4 65361 NULL
100153 +alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
100154 +strchr_65372 strchr 0 65372 NULL
100155 +__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
100156 +trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
100157 +mtd_get_device_size_65400 mtd_get_device_size 0 65400 NULL
100158 +iio_device_add_channel_sysfs_65406 iio_device_add_channel_sysfs 0 65406 NULL
100159 +ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 3-4 65410 NULL
100160 +drm_calloc_large_65421 drm_calloc_large 1-2 65421 NULL
100161 +xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
100162 +pn533_init_target_frame_65438 pn533_init_target_frame 3 65438 NULL
100163 +usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
100164 +regmap_write_65478 regmap_write 2 65478 NULL
100165 +ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
100166 +dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL
100167 +alloc_dr_65495 alloc_dr 2 65495 NULL
100168 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
100169 new file mode 100644
100170 index 0000000..9db0d0e
100171 --- /dev/null
100172 +++ b/tools/gcc/size_overflow_plugin.c
100173 @@ -0,0 +1,2114 @@
100174 +/*
100175 + * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
100176 + * Licensed under the GPL v2, or (at your option) v3
100177 + *
100178 + * Homepage:
100179 + * http://www.grsecurity.net/~ephox/overflow_plugin/
100180 + *
100181 + * Documentation:
100182 + * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
100183 + *
100184 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
100185 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
100186 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
100187 + *
100188 + * Usage:
100189 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -o size_overflow_plugin.so size_overflow_plugin.c
100190 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
100191 + */
100192 +
100193 +#include "gcc-plugin.h"
100194 +#include "config.h"
100195 +#include "system.h"
100196 +#include "coretypes.h"
100197 +#include "tree.h"
100198 +#include "tree-pass.h"
100199 +#include "intl.h"
100200 +#include "plugin-version.h"
100201 +#include "tm.h"
100202 +#include "toplev.h"
100203 +#include "function.h"
100204 +#include "tree-flow.h"
100205 +#include "plugin.h"
100206 +#include "gimple.h"
100207 +#include "diagnostic.h"
100208 +#include "cfgloop.h"
100209 +
100210 +#if BUILDING_GCC_VERSION >= 4008
100211 +#define TODO_dump_func 0
100212 +#endif
100213 +
100214 +struct size_overflow_hash {
100215 + const struct size_overflow_hash * const next;
100216 + const char * const name;
100217 + const unsigned int param;
100218 +};
100219 +
100220 +#include "size_overflow_hash.h"
100221 +
100222 +enum mark {
100223 + MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
100224 +};
100225 +
100226 +enum err_code_conditions {
100227 + CAST_ONLY, FROM_CONST
100228 +};
100229 +
100230 +static unsigned int call_count = 0;
100231 +
100232 +#define __unused __attribute__((__unused__))
100233 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
100234 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
100235 +#define BEFORE_STMT true
100236 +#define AFTER_STMT false
100237 +#define CREATE_NEW_VAR NULL_TREE
100238 +#define CODES_LIMIT 32
100239 +#define MAX_PARAM 31
100240 +#define MY_STMT GF_PLF_1
100241 +#define NO_CAST_CHECK GF_PLF_2
100242 +#define FROM_ARG true
100243 +#define FROM_RET false
100244 +
100245 +#if BUILDING_GCC_VERSION == 4005
100246 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
100247 +#endif
100248 +
100249 +int plugin_is_GPL_compatible;
100250 +void debug_gimple_stmt(gimple gs);
100251 +
100252 +static tree expand(struct pointer_set_t *visited, tree lhs);
100253 +static enum mark pre_expand(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs);
100254 +static tree report_size_overflow_decl;
100255 +static const_tree const_char_ptr_type_node;
100256 +static unsigned int handle_function(void);
100257 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
100258 +static tree get_size_overflow_type(gimple stmt, const_tree node);
100259 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
100260 +static void print_missing_msg(tree func, unsigned int argnum);
100261 +
100262 +static struct plugin_info size_overflow_plugin_info = {
100263 + .version = "20130410beta",
100264 + .help = "no-size-overflow\tturn off size overflow checking\n",
100265 +};
100266 +
100267 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
100268 +{
100269 + unsigned int arg_count;
100270 + enum tree_code code = TREE_CODE(*node);
100271 +
100272 + switch (code) {
100273 + case FUNCTION_DECL:
100274 + arg_count = type_num_arguments(TREE_TYPE(*node));
100275 + break;
100276 + case FUNCTION_TYPE:
100277 + case METHOD_TYPE:
100278 + arg_count = type_num_arguments(*node);
100279 + break;
100280 + default:
100281 + *no_add_attrs = true;
100282 + error("%s: %qE attribute only applies to functions", __func__, name);
100283 + return NULL_TREE;
100284 + }
100285 +
100286 + for (; args; args = TREE_CHAIN(args)) {
100287 + tree position = TREE_VALUE(args);
100288 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
100289 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
100290 + *no_add_attrs = true;
100291 + }
100292 + }
100293 + return NULL_TREE;
100294 +}
100295 +
100296 +static const char* get_asm_name(tree node)
100297 +{
100298 + return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node));
100299 +}
100300 +
100301 +static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
100302 +{
100303 + unsigned int arg_count, arg_num;
100304 + enum tree_code code = TREE_CODE(*node);
100305 +
100306 + switch (code) {
100307 + case FUNCTION_DECL:
100308 + arg_count = type_num_arguments(TREE_TYPE(*node));
100309 + break;
100310 + case FUNCTION_TYPE:
100311 + case METHOD_TYPE:
100312 + arg_count = type_num_arguments(*node);
100313 + break;
100314 + case FIELD_DECL:
100315 + arg_num = TREE_INT_CST_LOW(TREE_VALUE(args));
100316 + if (arg_num != 0) {
100317 + *no_add_attrs = true;
100318 + error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name);
100319 + }
100320 + return NULL_TREE;
100321 + default:
100322 + *no_add_attrs = true;
100323 + error("%qE attribute only applies to functions", name);
100324 + return NULL_TREE;
100325 + }
100326 +
100327 + if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
100328 + return NULL_TREE;
100329 +
100330 + for (; args; args = TREE_CHAIN(args)) {
100331 + tree position = TREE_VALUE(args);
100332 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
100333 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
100334 + *no_add_attrs = true;
100335 + }
100336 + }
100337 + return NULL_TREE;
100338 +}
100339 +
100340 +static struct attribute_spec size_overflow_attr = {
100341 + .name = "size_overflow",
100342 + .min_length = 1,
100343 + .max_length = -1,
100344 + .decl_required = true,
100345 + .type_required = false,
100346 + .function_type_required = false,
100347 + .handler = handle_size_overflow_attribute,
100348 +#if BUILDING_GCC_VERSION >= 4007
100349 + .affects_type_identity = false
100350 +#endif
100351 +};
100352 +
100353 +static struct attribute_spec intentional_overflow_attr = {
100354 + .name = "intentional_overflow",
100355 + .min_length = 1,
100356 + .max_length = -1,
100357 + .decl_required = true,
100358 + .type_required = false,
100359 + .function_type_required = false,
100360 + .handler = handle_intentional_overflow_attribute,
100361 +#if BUILDING_GCC_VERSION >= 4007
100362 + .affects_type_identity = false
100363 +#endif
100364 +};
100365 +
100366 +static void register_attributes(void __unused *event_data, void __unused *data)
100367 +{
100368 + register_attribute(&size_overflow_attr);
100369 + register_attribute(&intentional_overflow_attr);
100370 +}
100371 +
100372 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
100373 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
100374 +{
100375 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
100376 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
100377 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
100378 +
100379 + unsigned int m = 0x57559429;
100380 + unsigned int n = 0x5052acdb;
100381 + const unsigned int *key4 = (const unsigned int *)key;
100382 + unsigned int h = len;
100383 + unsigned int k = len + seed + n;
100384 + unsigned long long p;
100385 +
100386 + while (len >= 8) {
100387 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
100388 + len -= 8;
100389 + }
100390 + if (len >= 4) {
100391 + cwmixb(key4[0]) key4 += 1;
100392 + len -= 4;
100393 + }
100394 + if (len)
100395 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
100396 + cwmixb(h ^ (k + n));
100397 + return k ^ h;
100398 +
100399 +#undef cwfold
100400 +#undef cwmixa
100401 +#undef cwmixb
100402 +}
100403 +
100404 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
100405 +{
100406 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
100407 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
100408 + return fn ^ codes;
100409 +}
100410 +
100411 +static inline tree get_original_function_decl(tree fndecl)
100412 +{
100413 + if (DECL_ABSTRACT_ORIGIN(fndecl))
100414 + return DECL_ABSTRACT_ORIGIN(fndecl);
100415 + return fndecl;
100416 +}
100417 +
100418 +static inline gimple get_def_stmt(const_tree node)
100419 +{
100420 + gcc_assert(node != NULL_TREE);
100421 + if (TREE_CODE(node) != SSA_NAME)
100422 + return NULL;
100423 + return SSA_NAME_DEF_STMT(node);
100424 +}
100425 +
100426 +static unsigned char get_tree_code(const_tree type)
100427 +{
100428 + switch (TREE_CODE(type)) {
100429 + case ARRAY_TYPE:
100430 + return 0;
100431 + case BOOLEAN_TYPE:
100432 + return 1;
100433 + case ENUMERAL_TYPE:
100434 + return 2;
100435 + case FUNCTION_TYPE:
100436 + return 3;
100437 + case INTEGER_TYPE:
100438 + return 4;
100439 + case POINTER_TYPE:
100440 + return 5;
100441 + case RECORD_TYPE:
100442 + return 6;
100443 + case UNION_TYPE:
100444 + return 7;
100445 + case VOID_TYPE:
100446 + return 8;
100447 + case REAL_TYPE:
100448 + return 9;
100449 + case VECTOR_TYPE:
100450 + return 10;
100451 + case REFERENCE_TYPE:
100452 + return 11;
100453 + case OFFSET_TYPE:
100454 + return 12;
100455 + case COMPLEX_TYPE:
100456 + return 13;
100457 + default:
100458 + debug_tree((tree)type);
100459 + gcc_unreachable();
100460 + }
100461 +}
100462 +
100463 +static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len)
100464 +{
100465 + gcc_assert(type != NULL_TREE);
100466 +
100467 + while (type && len < CODES_LIMIT) {
100468 + tree_codes[len] = get_tree_code(type);
100469 + len++;
100470 + type = TREE_TYPE(type);
100471 + }
100472 + return len;
100473 +}
100474 +
100475 +static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes)
100476 +{
100477 + const_tree arg, result, arg_field, type = TREE_TYPE(fndecl);
100478 + enum tree_code code = TREE_CODE(type);
100479 + size_t len = 0;
100480 +
100481 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
100482 +
100483 + arg = TYPE_ARG_TYPES(type);
100484 + // skip builtins __builtin_constant_p
100485 + if (!arg && DECL_BUILT_IN(fndecl))
100486 + return 0;
100487 +
100488 + if (TREE_CODE_CLASS(code) == tcc_type)
100489 + result = type;
100490 + else
100491 + result = DECL_RESULT(fndecl);
100492 +
100493 + gcc_assert(result != NULL_TREE);
100494 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
100495 +
100496 + if (arg == NULL_TREE) {
100497 + gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON));
100498 + arg_field = DECL_ARGUMENT_FLD(fndecl);
100499 + if (arg_field == NULL_TREE)
100500 + return 0;
100501 + arg = TREE_TYPE(arg_field);
100502 + len = add_type_codes(arg, tree_codes, len);
100503 + gcc_assert(len != 0);
100504 + return len;
100505 + }
100506 +
100507 + gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST);
100508 + while (arg && len < CODES_LIMIT) {
100509 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
100510 + arg = TREE_CHAIN(arg);
100511 + }
100512 +
100513 + gcc_assert(len != 0);
100514 + return len;
100515 +}
100516 +
100517 +static const struct size_overflow_hash *get_function_hash(tree fndecl)
100518 +{
100519 + unsigned int hash;
100520 + const struct size_overflow_hash *entry;
100521 + unsigned char tree_codes[CODES_LIMIT];
100522 + size_t len;
100523 + const char *func_name;
100524 +
100525 + fndecl = get_original_function_decl(fndecl);
100526 + len = get_function_decl(fndecl, tree_codes);
100527 + if (len == 0)
100528 + return NULL;
100529 +
100530 + func_name = get_asm_name(fndecl);
100531 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
100532 +
100533 + entry = size_overflow_hash[hash];
100534 + while (entry) {
100535 + if (!strcmp(entry->name, func_name))
100536 + return entry;
100537 + entry = entry->next;
100538 + }
100539 +
100540 + return NULL;
100541 +}
100542 +
100543 +static bool is_bool(const_tree node)
100544 +{
100545 + const_tree type;
100546 +
100547 + if (node == NULL_TREE)
100548 + return false;
100549 +
100550 + type = TREE_TYPE(node);
100551 + if (!INTEGRAL_TYPE_P(type))
100552 + return false;
100553 + if (TREE_CODE(type) == BOOLEAN_TYPE)
100554 + return true;
100555 + if (TYPE_PRECISION(type) == 1)
100556 + return true;
100557 + return false;
100558 +}
100559 +
100560 +static bool skip_types(const_tree var)
100561 +{
100562 + tree type;
100563 +
100564 + if (is_gimple_constant(var))
100565 + return true;
100566 +
100567 + switch (TREE_CODE(var)) {
100568 + case ADDR_EXPR:
100569 +#if BUILDING_GCC_VERSION >= 4006
100570 + case MEM_REF:
100571 +#endif
100572 + case ARRAY_REF:
100573 + case BIT_FIELD_REF:
100574 + case INDIRECT_REF:
100575 + case TARGET_MEM_REF:
100576 + return true;
100577 + case PARM_DECL:
100578 + case VAR_DECL:
100579 + case COMPONENT_REF:
100580 + return false;
100581 + default:
100582 + break;
100583 + }
100584 +
100585 + gcc_assert(TREE_CODE(var) == SSA_NAME);
100586 +
100587 + type = TREE_TYPE(var);
100588 + switch (TREE_CODE(type)) {
100589 + case INTEGER_TYPE:
100590 + case ENUMERAL_TYPE:
100591 + return false;
100592 + case BOOLEAN_TYPE:
100593 + return is_bool(var);
100594 + default:
100595 + break;
100596 + }
100597 +
100598 + gcc_assert(TREE_CODE(type) == POINTER_TYPE);
100599 +
100600 + type = TREE_TYPE(type);
100601 + gcc_assert(type != NULL_TREE);
100602 + switch (TREE_CODE(type)) {
100603 + case RECORD_TYPE:
100604 + case POINTER_TYPE:
100605 + case ARRAY_TYPE:
100606 + return true;
100607 + case VOID_TYPE:
100608 + case INTEGER_TYPE:
100609 + case UNION_TYPE:
100610 + return false;
100611 + default:
100612 + break;
100613 + }
100614 +
100615 + debug_tree((tree)var);
100616 + gcc_unreachable();
100617 +}
100618 +
100619 +static unsigned int find_arg_number(const_tree arg, tree func)
100620 +{
100621 + tree var;
100622 + unsigned int argnum = 1;
100623 +
100624 + if (TREE_CODE(arg) == SSA_NAME)
100625 + arg = SSA_NAME_VAR(arg);
100626 +
100627 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
100628 + if (!operand_equal_p(arg, var, 0) && strcmp(NAME(var), NAME(arg)))
100629 + continue;
100630 + if (!skip_types(var))
100631 + return argnum;
100632 + }
100633 +
100634 + return 0;
100635 +}
100636 +
100637 +static tree create_new_var(tree type)
100638 +{
100639 + tree new_var = create_tmp_var(type, "cicus");
100640 +
100641 +#if BUILDING_GCC_VERSION <= 4007
100642 + add_referenced_var(new_var);
100643 + mark_sym_for_renaming(new_var);
100644 +#endif
100645 + return new_var;
100646 +}
100647 +
100648 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
100649 +{
100650 + gimple assign;
100651 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
100652 + tree type = TREE_TYPE(rhs1);
100653 + tree lhs = create_new_var(type);
100654 +
100655 + gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
100656 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
100657 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
100658 +
100659 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
100660 + update_stmt(assign);
100661 + gimple_set_plf(assign, MY_STMT, true);
100662 + return assign;
100663 +}
100664 +
100665 +static tree cast_a_tree(tree type, tree var)
100666 +{
100667 + gcc_assert(type != NULL_TREE);
100668 + gcc_assert(var != NULL_TREE);
100669 + gcc_assert(fold_convertible_p(type, var));
100670 +
100671 + return fold_convert(type, var);
100672 +}
100673 +
100674 +static tree get_lhs(const_gimple stmt)
100675 +{
100676 + switch (gimple_code(stmt)) {
100677 + case GIMPLE_ASSIGN:
100678 + return gimple_get_lhs(stmt);
100679 + case GIMPLE_PHI:
100680 + return gimple_phi_result(stmt);
100681 + case GIMPLE_CALL:
100682 + return gimple_call_lhs(stmt);
100683 + default:
100684 + return NULL_TREE;
100685 + }
100686 +}
100687 +
100688 +static bool skip_cast(tree dst_type, const_tree rhs, bool force)
100689 +{
100690 + const_gimple def_stmt = get_def_stmt(rhs);
100691 +
100692 + if (force)
100693 + return false;
100694 +
100695 + if (is_gimple_constant(rhs))
100696 + return false;
100697 +
100698 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
100699 + return false;
100700 +
100701 + if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
100702 + return false;
100703 +
100704 + // DI type can be on 32 bit (from create_assign) but overflow type stays DI
100705 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
100706 + return false;
100707 +
100708 + return true;
100709 +}
100710 +
100711 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
100712 +{
100713 + gimple assign, def_stmt;
100714 +
100715 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
100716 + if (gsi_end_p(*gsi) && before == AFTER_STMT)
100717 + gcc_unreachable();
100718 +
100719 + def_stmt = get_def_stmt(rhs);
100720 + if (skip_cast(dst_type, rhs, force) && gimple_plf(def_stmt, MY_STMT))
100721 + return def_stmt;
100722 +
100723 + if (lhs == CREATE_NEW_VAR)
100724 + lhs = create_new_var(dst_type);
100725 +
100726 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
100727 +
100728 + if (!gsi_end_p(*gsi)) {
100729 + location_t loc = gimple_location(gsi_stmt(*gsi));
100730 + gimple_set_location(assign, loc);
100731 + }
100732 +
100733 + gimple_set_lhs(assign, make_ssa_name(lhs, assign));
100734 +
100735 + if (before)
100736 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
100737 + else
100738 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
100739 + update_stmt(assign);
100740 + gimple_set_plf(assign, MY_STMT, true);
100741 +
100742 + return assign;
100743 +}
100744 +
100745 +static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
100746 +{
100747 + gimple_stmt_iterator gsi;
100748 + tree lhs;
100749 + const_gimple new_stmt;
100750 +
100751 + if (rhs == NULL_TREE)
100752 + return NULL_TREE;
100753 +
100754 + gsi = gsi_for_stmt(stmt);
100755 + new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
100756 +
100757 + lhs = get_lhs(new_stmt);
100758 + gcc_assert(lhs != NULL_TREE);
100759 + return lhs;
100760 +}
100761 +
100762 +static tree cast_to_TI_type(gimple stmt, tree node)
100763 +{
100764 + gimple_stmt_iterator gsi;
100765 + gimple cast_stmt;
100766 + tree type = TREE_TYPE(node);
100767 +
100768 + if (types_compatible_p(type, intTI_type_node))
100769 + return node;
100770 +
100771 + gsi = gsi_for_stmt(stmt);
100772 + cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
100773 + return gimple_get_lhs(cast_stmt);
100774 +}
100775 +
100776 +static void check_function_hash(const_gimple stmt)
100777 +{
100778 + tree func;
100779 + const struct size_overflow_hash *hash;
100780 +
100781 + if (gimple_code(stmt) != GIMPLE_CALL)
100782 + return;
100783 +
100784 + func = gimple_call_fndecl(stmt);
100785 + //fs/xattr.c D.34222_15 = D.34219_14 (dentry_3(D), name_7(D), 0B, 0);
100786 + if (func == NULL_TREE)
100787 + return;
100788 +
100789 + hash = get_function_hash(func);
100790 + if (!hash)
100791 + print_missing_msg(func, 0);
100792 +}
100793 +
100794 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
100795 +{
100796 + tree lhs, new_lhs;
100797 + gimple_stmt_iterator gsi;
100798 +
100799 + if (rhs1 == NULL_TREE) {
100800 + debug_gimple_stmt(oldstmt);
100801 + error("%s: rhs1 is NULL_TREE", __func__);
100802 + gcc_unreachable();
100803 + }
100804 +
100805 + switch (gimple_code(oldstmt)) {
100806 + case GIMPLE_ASM:
100807 + lhs = rhs1;
100808 + break;
100809 + case GIMPLE_CALL:
100810 + lhs = gimple_call_lhs(oldstmt);
100811 + break;
100812 + case GIMPLE_ASSIGN:
100813 + lhs = gimple_get_lhs(oldstmt);
100814 + break;
100815 + default:
100816 + debug_gimple_stmt(oldstmt);
100817 + gcc_unreachable();
100818 + }
100819 +
100820 + gsi = gsi_for_stmt(oldstmt);
100821 + pointer_set_insert(visited, oldstmt);
100822 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
100823 + basic_block next_bb, cur_bb;
100824 + const_edge e;
100825 +
100826 + gcc_assert(before == false);
100827 + gcc_assert(stmt_can_throw_internal(oldstmt));
100828 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
100829 + gcc_assert(!gsi_end_p(gsi));
100830 +
100831 + cur_bb = gimple_bb(oldstmt);
100832 + next_bb = cur_bb->next_bb;
100833 + e = find_edge(cur_bb, next_bb);
100834 + gcc_assert(e != NULL);
100835 + gcc_assert(e->flags & EDGE_FALLTHRU);
100836 +
100837 + gsi = gsi_after_labels(next_bb);
100838 + gcc_assert(!gsi_end_p(gsi));
100839 +
100840 + before = true;
100841 + oldstmt = gsi_stmt(gsi);
100842 + }
100843 +
100844 + new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
100845 + return new_lhs;
100846 +}
100847 +
100848 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
100849 +{
100850 + gimple stmt;
100851 + gimple_stmt_iterator gsi;
100852 + tree size_overflow_type, new_var, lhs = gimple_get_lhs(oldstmt);
100853 +
100854 + if (gimple_plf(oldstmt, MY_STMT))
100855 + return lhs;
100856 +
100857 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
100858 + rhs1 = gimple_assign_rhs1(oldstmt);
100859 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
100860 + }
100861 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
100862 + rhs2 = gimple_assign_rhs2(oldstmt);
100863 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
100864 + }
100865 +
100866 + stmt = gimple_copy(oldstmt);
100867 + gimple_set_location(stmt, gimple_location(oldstmt));
100868 + gimple_set_plf(stmt, MY_STMT, true);
100869 +
100870 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
100871 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
100872 +
100873 + size_overflow_type = get_size_overflow_type(oldstmt, node);
100874 +
100875 + new_var = create_new_var(size_overflow_type);
100876 + new_var = make_ssa_name(new_var, stmt);
100877 + gimple_set_lhs(stmt, new_var);
100878 +
100879 + if (rhs1 != NULL_TREE)
100880 + gimple_assign_set_rhs1(stmt, rhs1);
100881 +
100882 + if (rhs2 != NULL_TREE)
100883 + gimple_assign_set_rhs2(stmt, rhs2);
100884 +#if BUILDING_GCC_VERSION >= 4007
100885 + if (rhs3 != NULL_TREE)
100886 + gimple_assign_set_rhs3(stmt, rhs3);
100887 +#endif
100888 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
100889 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
100890 +
100891 + gsi = gsi_for_stmt(oldstmt);
100892 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
100893 + update_stmt(stmt);
100894 + pointer_set_insert(visited, oldstmt);
100895 + return gimple_get_lhs(stmt);
100896 +}
100897 +
100898 +static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type)
100899 +{
100900 + basic_block first_bb;
100901 + gimple assign;
100902 + gimple_stmt_iterator gsi;
100903 +
100904 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
100905 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
100906 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
100907 +
100908 + gsi = gsi_start_bb(first_bb);
100909 + assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
100910 + return gimple_get_lhs(assign);
100911 +}
100912 +
100913 +static tree use_phi_ssa_name(tree phi_ssa_name, tree new_arg)
100914 +{
100915 + gimple_stmt_iterator gsi;
100916 + const_gimple assign;
100917 + gimple def_stmt = get_def_stmt(new_arg);
100918 +
100919 + if (gimple_code(def_stmt) == GIMPLE_PHI) {
100920 + gsi = gsi_after_labels(gimple_bb(def_stmt));
100921 + assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, phi_ssa_name, &gsi, BEFORE_STMT, true);
100922 + } else {
100923 + gsi = gsi_for_stmt(def_stmt);
100924 + assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, phi_ssa_name, &gsi, AFTER_STMT, true);
100925 + }
100926 +
100927 + return gimple_get_lhs(assign);
100928 +}
100929 +
100930 +static tree cast_visited_phi_arg(tree phi_ssa_name, tree arg, tree size_overflow_type)
100931 +{
100932 + basic_block bb;
100933 + gimple_stmt_iterator gsi;
100934 + const_gimple assign, def_stmt;
100935 +
100936 + def_stmt = get_def_stmt(arg);
100937 + bb = gimple_bb(def_stmt);
100938 + gcc_assert(bb->index != 0);
100939 + gsi = gsi_after_labels(bb);
100940 +
100941 + assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
100942 + return gimple_get_lhs(assign);
100943 +}
100944 +
100945 +static tree create_new_phi_arg(tree phi_ssa_name, tree new_arg, tree arg, gimple oldstmt)
100946 +{
100947 + tree size_overflow_type;
100948 + const_gimple def_stmt = get_def_stmt(arg);
100949 +
100950 + if (phi_ssa_name != NULL_TREE)
100951 + phi_ssa_name = SSA_NAME_VAR(phi_ssa_name);
100952 +
100953 + size_overflow_type = get_size_overflow_type(oldstmt, arg);
100954 +
100955 + if (new_arg != NULL_TREE) {
100956 + gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
100957 + return use_phi_ssa_name(phi_ssa_name, new_arg);
100958 + }
100959 +
100960 + switch(gimple_code(def_stmt)) {
100961 + case GIMPLE_PHI:
100962 + return cast_visited_phi_arg(phi_ssa_name, arg, size_overflow_type);
100963 + case GIMPLE_NOP:
100964 + return cast_parm_decl(phi_ssa_name, arg, size_overflow_type);
100965 + default:
100966 + debug_gimple_stmt((gimple)def_stmt);
100967 + gcc_unreachable();
100968 + }
100969 +}
100970 +
100971 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
100972 +{
100973 + basic_block bb;
100974 + gimple phi;
100975 + gimple_seq seq;
100976 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
100977 +
100978 + bb = gsi_bb(gsi);
100979 +
100980 + phi = create_phi_node(result, bb);
100981 + gimple_phi_set_result(phi, make_ssa_name(result, phi));
100982 + seq = phi_nodes(bb);
100983 + gsi = gsi_last(seq);
100984 + gsi_remove(&gsi, false);
100985 +
100986 + gsi = gsi_for_stmt(oldstmt);
100987 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
100988 + gimple_set_bb(phi, bb);
100989 + gimple_set_plf(phi, MY_STMT, true);
100990 + return phi;
100991 +}
100992 +
100993 +static tree handle_phi(struct pointer_set_t *visited, tree orig_result)
100994 +{
100995 + gimple new_phi = NULL;
100996 + gimple oldstmt = get_def_stmt(orig_result);
100997 + tree phi_ssa_name = NULL_TREE;
100998 + unsigned int i;
100999 +
101000 + pointer_set_insert(visited, oldstmt);
101001 + for (i = 0; i < gimple_phi_num_args(oldstmt); i++) {
101002 + tree arg, new_arg;
101003 +
101004 + arg = gimple_phi_arg_def(oldstmt, i);
101005 +
101006 + new_arg = expand(visited, arg);
101007 + new_arg = create_new_phi_arg(phi_ssa_name, new_arg, arg, oldstmt);
101008 + if (i == 0) {
101009 + phi_ssa_name = new_arg;
101010 + new_phi = overflow_create_phi_node(oldstmt, SSA_NAME_VAR(phi_ssa_name));
101011 + }
101012 +
101013 + gcc_assert(new_phi != NULL);
101014 + add_phi_arg(new_phi, new_arg, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
101015 + }
101016 +
101017 + gcc_assert(new_phi != NULL);
101018 + update_stmt(new_phi);
101019 + return gimple_phi_result(new_phi);
101020 +}
101021 +
101022 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
101023 +{
101024 + const_gimple assign;
101025 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
101026 + tree origtype = TREE_TYPE(orig_rhs);
101027 +
101028 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
101029 +
101030 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
101031 + return gimple_get_lhs(assign);
101032 +}
101033 +
101034 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
101035 +{
101036 + const_tree rhs1, lhs, rhs1_type, lhs_type;
101037 + enum machine_mode lhs_mode, rhs_mode;
101038 + gimple def_stmt = get_def_stmt(no_const_rhs);
101039 +
101040 + if (!gimple_assign_cast_p(def_stmt))
101041 + return false;
101042 +
101043 + rhs1 = gimple_assign_rhs1(def_stmt);
101044 + lhs = gimple_get_lhs(def_stmt);
101045 + rhs1_type = TREE_TYPE(rhs1);
101046 + lhs_type = TREE_TYPE(lhs);
101047 + rhs_mode = TYPE_MODE(rhs1_type);
101048 + lhs_mode = TYPE_MODE(lhs_type);
101049 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
101050 + return false;
101051 +
101052 + return true;
101053 +}
101054 +
101055 +static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
101056 +{
101057 + tree rhs1 = gimple_assign_rhs1(stmt);
101058 + tree lhs = gimple_get_lhs(stmt);
101059 + const_tree rhs1_type = TREE_TYPE(rhs1);
101060 + const_tree lhs_type = TREE_TYPE(lhs);
101061 +
101062 + if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
101063 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101064 +
101065 + return create_assign(visited, stmt, rhs1, AFTER_STMT);
101066 +}
101067 +
101068 +static bool no_uses(tree node)
101069 +{
101070 + imm_use_iterator imm_iter;
101071 + use_operand_p use_p;
101072 +
101073 + FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
101074 + const_gimple use_stmt = USE_STMT(use_p);
101075 + if (use_stmt == NULL)
101076 + return true;
101077 + if (is_gimple_debug(use_stmt))
101078 + continue;
101079 + if (!(gimple_bb(use_stmt)->flags & BB_REACHABLE))
101080 + continue;
101081 + return false;
101082 + }
101083 + return true;
101084 +}
101085 +
101086 +// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
101087 +static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
101088 +{
101089 + tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
101090 + gimple def_stmt = get_def_stmt(lhs);
101091 +
101092 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
101093 + return false;
101094 +
101095 + rhs1 = gimple_assign_rhs1(def_stmt);
101096 + rhs_type = TREE_TYPE(rhs1);
101097 + lhs_type = TREE_TYPE(lhs);
101098 + if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
101099 + return false;
101100 + if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
101101 + return false;
101102 +
101103 + def_stmt = get_def_stmt(rhs1);
101104 + if (!def_stmt || gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_num_ops(def_stmt) != 3)
101105 + return false;
101106 +
101107 + if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
101108 + return false;
101109 +
101110 + rhs1 = gimple_assign_rhs1(def_stmt);
101111 + rhs2 = gimple_assign_rhs2(def_stmt);
101112 + if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
101113 + return false;
101114 +
101115 + if (is_gimple_constant(rhs2))
101116 + not_const_rhs = rhs1;
101117 + else
101118 + not_const_rhs = rhs2;
101119 +
101120 + return no_uses(not_const_rhs);
101121 +}
101122 +
101123 +static bool skip_lhs_cast_check(const_gimple stmt)
101124 +{
101125 + const_tree rhs = gimple_assign_rhs1(stmt);
101126 + const_gimple def_stmt = get_def_stmt(rhs);
101127 +
101128 + // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
101129 + if (gimple_code(def_stmt) == GIMPLE_ASM)
101130 + return true;
101131 +
101132 + if (is_const_plus_unsigned_signed_truncation(rhs))
101133 + return true;
101134 +
101135 + return false;
101136 +}
101137 +
101138 +static tree create_cast_overflow_check(struct pointer_set_t *visited, tree new_rhs1, gimple stmt)
101139 +{
101140 + bool cast_lhs, cast_rhs;
101141 + tree lhs = gimple_get_lhs(stmt);
101142 + tree rhs = gimple_assign_rhs1(stmt);
101143 + const_tree lhs_type = TREE_TYPE(lhs);
101144 + const_tree rhs_type = TREE_TYPE(rhs);
101145 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
101146 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
101147 + unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
101148 + unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
101149 +
101150 + static bool check_lhs[3][4] = {
101151 + // ss su us uu
101152 + { false, true, true, false }, // lhs > rhs
101153 + { false, false, false, false }, // lhs = rhs
101154 + { true, true, true, true }, // lhs < rhs
101155 + };
101156 +
101157 + static bool check_rhs[3][4] = {
101158 + // ss su us uu
101159 + { true, false, true, true }, // lhs > rhs
101160 + { true, false, true, true }, // lhs = rhs
101161 + { true, false, true, true }, // lhs < rhs
101162 + };
101163 +
101164 + // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
101165 + if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
101166 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101167 +
101168 + if (lhs_size > rhs_size) {
101169 + cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
101170 + cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
101171 + } else if (lhs_size == rhs_size) {
101172 + cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
101173 + cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
101174 + } else {
101175 + cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
101176 + cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
101177 + }
101178 +
101179 + if (!cast_lhs && !cast_rhs)
101180 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
101181 +
101182 + if (cast_lhs && !skip_lhs_cast_check(stmt))
101183 + check_size_overflow(stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
101184 +
101185 + if (cast_rhs)
101186 + check_size_overflow(stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
101187 +
101188 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
101189 +}
101190 +
101191 +static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt)
101192 +{
101193 + tree rhs1, new_rhs1, lhs = gimple_get_lhs(stmt);
101194 +
101195 + if (gimple_plf(stmt, MY_STMT))
101196 + return lhs;
101197 +
101198 + rhs1 = gimple_assign_rhs1(stmt);
101199 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
101200 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101201 +
101202 + new_rhs1 = expand(visited, rhs1);
101203 +
101204 + if (new_rhs1 == NULL_TREE)
101205 + return create_cast_assign(visited, stmt);
101206 +
101207 + if (gimple_plf(stmt, NO_CAST_CHECK))
101208 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
101209 +
101210 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
101211 + tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
101212 +
101213 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
101214 + check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
101215 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101216 + }
101217 +
101218 + if (!gimple_assign_cast_p(stmt))
101219 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
101220 +
101221 + return create_cast_overflow_check(visited, new_rhs1, stmt);
101222 +}
101223 +
101224 +static tree handle_unary_ops(struct pointer_set_t *visited, gimple stmt)
101225 +{
101226 + tree rhs1, lhs = gimple_get_lhs(stmt);
101227 + gimple def_stmt = get_def_stmt(lhs);
101228 +
101229 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
101230 + rhs1 = gimple_assign_rhs1(def_stmt);
101231 +
101232 + if (is_gimple_constant(rhs1))
101233 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
101234 +
101235 + switch (TREE_CODE(rhs1)) {
101236 + case SSA_NAME:
101237 + return handle_unary_rhs(visited, def_stmt);
101238 + case ARRAY_REF:
101239 + case BIT_FIELD_REF:
101240 + case ADDR_EXPR:
101241 + case COMPONENT_REF:
101242 + case INDIRECT_REF:
101243 +#if BUILDING_GCC_VERSION >= 4006
101244 + case MEM_REF:
101245 +#endif
101246 + case TARGET_MEM_REF:
101247 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
101248 + case PARM_DECL:
101249 + case VAR_DECL:
101250 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101251 +
101252 + default:
101253 + debug_gimple_stmt(def_stmt);
101254 + debug_tree(rhs1);
101255 + gcc_unreachable();
101256 + }
101257 +}
101258 +
101259 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
101260 +{
101261 + gimple cond_stmt;
101262 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
101263 +
101264 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
101265 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
101266 + update_stmt(cond_stmt);
101267 +}
101268 +
101269 +static tree create_string_param(tree string)
101270 +{
101271 + tree i_type, a_type;
101272 + const int length = TREE_STRING_LENGTH(string);
101273 +
101274 + gcc_assert(length > 0);
101275 +
101276 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
101277 + a_type = build_array_type(char_type_node, i_type);
101278 +
101279 + TREE_TYPE(string) = a_type;
101280 + TREE_CONSTANT(string) = 1;
101281 + TREE_READONLY(string) = 1;
101282 +
101283 + return build1(ADDR_EXPR, ptr_type_node, string);
101284 +}
101285 +
101286 +static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
101287 +{
101288 + gimple func_stmt;
101289 + const_gimple def_stmt;
101290 + const_tree loc_line;
101291 + tree loc_file, ssa_name, current_func;
101292 + expanded_location xloc;
101293 + char *ssa_name_buf;
101294 + int len;
101295 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
101296 +
101297 + def_stmt = get_def_stmt(arg);
101298 + xloc = expand_location(gimple_location(def_stmt));
101299 +
101300 + if (!gimple_has_location(def_stmt)) {
101301 + xloc = expand_location(gimple_location(stmt));
101302 + if (!gimple_has_location(stmt))
101303 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
101304 + }
101305 +
101306 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
101307 +
101308 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
101309 + loc_file = create_string_param(loc_file);
101310 +
101311 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
101312 + current_func = create_string_param(current_func);
101313 +
101314 + gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
101315 + call_count++;
101316 + len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
101317 + gcc_assert(len > 0);
101318 + ssa_name = build_string(len + 1, ssa_name_buf);
101319 + free(ssa_name_buf);
101320 + ssa_name = create_string_param(ssa_name);
101321 +
101322 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
101323 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
101324 +
101325 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
101326 +}
101327 +
101328 +static void __unused print_the_code_insertions(const_gimple stmt)
101329 +{
101330 + location_t loc = gimple_location(stmt);
101331 +
101332 + inform(loc, "Integer size_overflow check applied here.");
101333 +}
101334 +
101335 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
101336 +{
101337 + basic_block cond_bb, join_bb, bb_true;
101338 + edge e;
101339 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
101340 +
101341 + cond_bb = gimple_bb(stmt);
101342 + if (before)
101343 + gsi_prev(&gsi);
101344 + if (gsi_end_p(gsi))
101345 + e = split_block_after_labels(cond_bb);
101346 + else
101347 + e = split_block(cond_bb, gsi_stmt(gsi));
101348 + cond_bb = e->src;
101349 + join_bb = e->dest;
101350 + e->flags = EDGE_FALSE_VALUE;
101351 + e->probability = REG_BR_PROB_BASE;
101352 +
101353 + bb_true = create_empty_bb(cond_bb);
101354 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
101355 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
101356 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
101357 +
101358 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
101359 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
101360 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
101361 +
101362 + if (current_loops != NULL) {
101363 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
101364 + add_bb_to_loop(bb_true, cond_bb->loop_father);
101365 + }
101366 +
101367 + insert_cond(cond_bb, arg, cond_code, type_value);
101368 + insert_cond_result(bb_true, stmt, arg, min);
101369 +
101370 +// print_the_code_insertions(stmt);
101371 +}
101372 +
101373 +static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
101374 +{
101375 + const_tree rhs_type = TREE_TYPE(rhs);
101376 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
101377 +
101378 + gcc_assert(rhs_type != NULL_TREE);
101379 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
101380 + return;
101381 +
101382 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
101383 +
101384 + if (is_const_plus_unsigned_signed_truncation(rhs))
101385 + return;
101386 +
101387 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
101388 + // typemax (-1) < typemin (0)
101389 + if (TREE_OVERFLOW(type_max))
101390 + return;
101391 +
101392 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
101393 +
101394 + cast_rhs_type = TREE_TYPE(cast_rhs);
101395 + type_max_type = TREE_TYPE(type_max);
101396 + type_min_type = TREE_TYPE(type_min);
101397 + gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
101398 + gcc_assert(types_compatible_p(type_max_type, type_min_type));
101399 +
101400 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
101401 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
101402 +}
101403 +
101404 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
101405 +{
101406 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
101407 + return false;
101408 + if (!is_gimple_constant(rhs))
101409 + return false;
101410 + return true;
101411 +}
101412 +
101413 +static tree get_def_stmt_rhs(const_tree var)
101414 +{
101415 + tree rhs1, def_stmt_rhs1;
101416 + gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
101417 +
101418 + def_stmt = get_def_stmt(var);
101419 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
101420 +
101421 + rhs1 = gimple_assign_rhs1(def_stmt);
101422 + rhs1_def_stmt = get_def_stmt(rhs1);
101423 + if (!gimple_assign_cast_p(rhs1_def_stmt))
101424 + return rhs1;
101425 +
101426 + def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
101427 + def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
101428 +
101429 + switch (gimple_code(def_stmt_rhs1_def_stmt)) {
101430 + case GIMPLE_CALL:
101431 + case GIMPLE_NOP:
101432 + case GIMPLE_ASM:
101433 + case GIMPLE_PHI:
101434 + return def_stmt_rhs1;
101435 + case GIMPLE_ASSIGN:
101436 + return rhs1;
101437 + default:
101438 + debug_gimple_stmt(def_stmt_rhs1_def_stmt);
101439 + gcc_unreachable();
101440 + }
101441 +}
101442 +
101443 +static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
101444 +{
101445 + tree new_rhs, orig_rhs;
101446 + void (*gimple_assign_set_rhs)(gimple, tree);
101447 + tree rhs1 = gimple_assign_rhs1(stmt);
101448 + tree rhs2 = gimple_assign_rhs2(stmt);
101449 + tree lhs = gimple_get_lhs(stmt);
101450 +
101451 + if (!check_overflow)
101452 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101453 +
101454 + if (change_rhs == NULL_TREE)
101455 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101456 +
101457 + if (new_rhs2 == NULL_TREE) {
101458 + orig_rhs = rhs1;
101459 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
101460 + } else {
101461 + orig_rhs = rhs2;
101462 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
101463 + }
101464 +
101465 + check_size_overflow(stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
101466 +
101467 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
101468 + gimple_assign_set_rhs(stmt, new_rhs);
101469 + update_stmt(stmt);
101470 +
101471 + return create_assign(visited, stmt, lhs, AFTER_STMT);
101472 +}
101473 +
101474 +static bool is_subtraction_special(const_gimple stmt)
101475 +{
101476 + gimple rhs1_def_stmt, rhs2_def_stmt;
101477 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
101478 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
101479 + const_tree rhs1 = gimple_assign_rhs1(stmt);
101480 + const_tree rhs2 = gimple_assign_rhs2(stmt);
101481 +
101482 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
101483 + return false;
101484 +
101485 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
101486 +
101487 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
101488 + return false;
101489 +
101490 + rhs1_def_stmt = get_def_stmt(rhs1);
101491 + rhs2_def_stmt = get_def_stmt(rhs2);
101492 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
101493 + return false;
101494 +
101495 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
101496 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
101497 + rhs1_def_stmt_lhs = gimple_get_lhs(rhs1_def_stmt);
101498 + rhs2_def_stmt_lhs = gimple_get_lhs(rhs2_def_stmt);
101499 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
101500 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
101501 + rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
101502 + rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
101503 + if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
101504 + return false;
101505 + if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
101506 + return false;
101507 +
101508 + gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
101509 + gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
101510 + return true;
101511 +}
101512 +
101513 +static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs)
101514 +{
101515 + tree new_rhs1, new_rhs2;
101516 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
101517 + gimple assign, stmt = get_def_stmt(lhs);
101518 + tree rhs1 = gimple_assign_rhs1(stmt);
101519 + tree rhs2 = gimple_assign_rhs2(stmt);
101520 +
101521 + if (!is_subtraction_special(stmt))
101522 + return NULL_TREE;
101523 +
101524 + new_rhs1 = expand(visited, rhs1);
101525 + new_rhs2 = expand(visited, rhs2);
101526 +
101527 + new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
101528 + new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
101529 +
101530 + if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
101531 + new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
101532 + new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
101533 + }
101534 +
101535 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
101536 + new_lhs = gimple_get_lhs(assign);
101537 + check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
101538 +
101539 + return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
101540 +}
101541 +
101542 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
101543 +{
101544 + const_gimple def_stmt;
101545 +
101546 + if (TREE_CODE(rhs) != SSA_NAME)
101547 + return false;
101548 +
101549 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
101550 + return false;
101551 +
101552 + def_stmt = get_def_stmt(rhs);
101553 + if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
101554 + return false;
101555 +
101556 + return true;
101557 +}
101558 +
101559 +static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs)
101560 +{
101561 + tree rhs1, rhs2, new_lhs;
101562 + gimple def_stmt = get_def_stmt(lhs);
101563 + tree new_rhs1 = NULL_TREE;
101564 + tree new_rhs2 = NULL_TREE;
101565 +
101566 + rhs1 = gimple_assign_rhs1(def_stmt);
101567 + rhs2 = gimple_assign_rhs2(def_stmt);
101568 +
101569 + /* no DImode/TImode division in the 32/64 bit kernel */
101570 + switch (gimple_assign_rhs_code(def_stmt)) {
101571 + case RDIV_EXPR:
101572 + case TRUNC_DIV_EXPR:
101573 + case CEIL_DIV_EXPR:
101574 + case FLOOR_DIV_EXPR:
101575 + case ROUND_DIV_EXPR:
101576 + case TRUNC_MOD_EXPR:
101577 + case CEIL_MOD_EXPR:
101578 + case FLOOR_MOD_EXPR:
101579 + case ROUND_MOD_EXPR:
101580 + case EXACT_DIV_EXPR:
101581 + case POINTER_PLUS_EXPR:
101582 + case BIT_AND_EXPR:
101583 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
101584 + default:
101585 + break;
101586 + }
101587 +
101588 + new_lhs = handle_integer_truncation(visited, lhs);
101589 + if (new_lhs != NULL_TREE)
101590 + return new_lhs;
101591 +
101592 + if (TREE_CODE(rhs1) == SSA_NAME)
101593 + new_rhs1 = expand(visited, rhs1);
101594 + if (TREE_CODE(rhs2) == SSA_NAME)
101595 + new_rhs2 = expand(visited, rhs2);
101596 +
101597 + if (is_a_neg_overflow(def_stmt, rhs2))
101598 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, NULL_TREE);
101599 + if (is_a_neg_overflow(def_stmt, rhs1))
101600 + return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, new_rhs2);
101601 +
101602 +
101603 + if (is_a_constant_overflow(def_stmt, rhs2))
101604 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
101605 + if (is_a_constant_overflow(def_stmt, rhs1))
101606 + return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
101607 +
101608 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
101609 +}
101610 +
101611 +#if BUILDING_GCC_VERSION >= 4007
101612 +static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs)
101613 +{
101614 + if (is_gimple_constant(rhs))
101615 + return cast_a_tree(size_overflow_type, rhs);
101616 + if (TREE_CODE(rhs) != SSA_NAME)
101617 + return NULL_TREE;
101618 + return expand(visited, rhs);
101619 +}
101620 +
101621 +static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs)
101622 +{
101623 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
101624 + gimple def_stmt = get_def_stmt(lhs);
101625 +
101626 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
101627 +
101628 + rhs1 = gimple_assign_rhs1(def_stmt);
101629 + rhs2 = gimple_assign_rhs2(def_stmt);
101630 + rhs3 = gimple_assign_rhs3(def_stmt);
101631 + new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1);
101632 + new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2);
101633 + new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3);
101634 +
101635 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
101636 +}
101637 +#endif
101638 +
101639 +static tree get_size_overflow_type(gimple stmt, const_tree node)
101640 +{
101641 + const_tree type;
101642 + tree new_type;
101643 +
101644 + gcc_assert(node != NULL_TREE);
101645 +
101646 + type = TREE_TYPE(node);
101647 +
101648 + if (gimple_plf(stmt, MY_STMT))
101649 + return TREE_TYPE(node);
101650 +
101651 + switch (TYPE_MODE(type)) {
101652 + case QImode:
101653 + new_type = intHI_type_node;
101654 + break;
101655 + case HImode:
101656 + new_type = intSI_type_node;
101657 + break;
101658 + case SImode:
101659 + new_type = intDI_type_node;
101660 + break;
101661 + case DImode:
101662 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
101663 + new_type = intDI_type_node;
101664 + else
101665 + new_type = intTI_type_node;
101666 + break;
101667 + default:
101668 + debug_tree((tree)node);
101669 + error("%s: unsupported gcc configuration.", __func__);
101670 + gcc_unreachable();
101671 + }
101672 +
101673 + if (TYPE_QUALS(type) != 0)
101674 + return build_qualified_type(new_type, TYPE_QUALS(type));
101675 + return new_type;
101676 +}
101677 +
101678 +static tree expand_visited(gimple def_stmt)
101679 +{
101680 + const_gimple next_stmt;
101681 + gimple_stmt_iterator gsi;
101682 + enum gimple_code code = gimple_code(def_stmt);
101683 +
101684 + if (code == GIMPLE_ASM)
101685 + return NULL_TREE;
101686 +
101687 + gsi = gsi_for_stmt(def_stmt);
101688 + gsi_next(&gsi);
101689 +
101690 + if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
101691 + return NULL_TREE;
101692 + gcc_assert(!gsi_end_p(gsi));
101693 + next_stmt = gsi_stmt(gsi);
101694 +
101695 + if (gimple_code(def_stmt) == GIMPLE_PHI && !gimple_plf((gimple)next_stmt, MY_STMT))
101696 + return NULL_TREE;
101697 + gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
101698 +
101699 + return get_lhs(next_stmt);
101700 +}
101701 +
101702 +static tree expand(struct pointer_set_t *visited, tree lhs)
101703 +{
101704 + gimple def_stmt;
101705 +
101706 + if (skip_types(lhs))
101707 + return NULL_TREE;
101708 +
101709 + def_stmt = get_def_stmt(lhs);
101710 +
101711 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
101712 + return NULL_TREE;
101713 +
101714 + if (gimple_plf(def_stmt, MY_STMT))
101715 + return lhs;
101716 +
101717 + if (pointer_set_contains(visited, def_stmt))
101718 + return expand_visited(def_stmt);
101719 +
101720 + switch (gimple_code(def_stmt)) {
101721 + case GIMPLE_PHI:
101722 + return handle_phi(visited, lhs);
101723 + case GIMPLE_CALL:
101724 + case GIMPLE_ASM:
101725 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
101726 + case GIMPLE_ASSIGN:
101727 + switch (gimple_num_ops(def_stmt)) {
101728 + case 2:
101729 + return handle_unary_ops(visited, def_stmt);
101730 + case 3:
101731 + return handle_binary_ops(visited, lhs);
101732 +#if BUILDING_GCC_VERSION >= 4007
101733 + case 4:
101734 + return handle_ternary_ops(visited, lhs);
101735 +#endif
101736 + }
101737 + default:
101738 + debug_gimple_stmt(def_stmt);
101739 + error("%s: unknown gimple code", __func__);
101740 + gcc_unreachable();
101741 + }
101742 +}
101743 +
101744 +static tree get_new_tree(gimple stmt, const_tree orig_node, tree new_node)
101745 +{
101746 + const_gimple assign;
101747 + tree orig_type = TREE_TYPE(orig_node);
101748 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
101749 +
101750 + assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
101751 + return gimple_get_lhs(assign);
101752 +}
101753 +
101754 +static void change_function_arg(gimple stmt, const_tree orig_arg, unsigned int argnum, tree new_arg)
101755 +{
101756 + gimple_call_set_arg(stmt, argnum, get_new_tree(stmt, orig_arg, new_arg));
101757 + update_stmt(stmt);
101758 +}
101759 +
101760 +static void change_function_return(gimple stmt, const_tree orig_ret, tree new_ret)
101761 +{
101762 + gimple_return_set_retval(stmt, get_new_tree(stmt, orig_ret, new_ret));
101763 + update_stmt(stmt);
101764 +}
101765 +
101766 +static bool get_function_arg(unsigned int* argnum, const_tree fndecl)
101767 +{
101768 + tree arg;
101769 + const_tree origarg;
101770 +
101771 + if (!DECL_ABSTRACT_ORIGIN(fndecl))
101772 + return true;
101773 +
101774 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
101775 + while (origarg && *argnum) {
101776 + (*argnum)--;
101777 + origarg = TREE_CHAIN(origarg);
101778 + }
101779 +
101780 + gcc_assert(*argnum == 0);
101781 +
101782 + gcc_assert(origarg != NULL_TREE);
101783 + *argnum = 0;
101784 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg), (*argnum)++)
101785 + if (operand_equal_p(origarg, arg, 0) || !strcmp(NAME(origarg), NAME(arg)))
101786 + return true;
101787 + return false;
101788 +}
101789 +
101790 +static enum mark walk_phi(struct pointer_set_t *visited, bool *search_err_code, const_tree result)
101791 +{
101792 + gimple phi = get_def_stmt(result);
101793 + unsigned int i, n = gimple_phi_num_args(phi);
101794 +
101795 + if (!phi)
101796 + return MARK_NO;
101797 +
101798 + pointer_set_insert(visited, phi);
101799 + for (i = 0; i < n; i++) {
101800 + enum mark marked;
101801 + const_tree arg = gimple_phi_arg_def(phi, i);
101802 + marked = pre_expand(visited, search_err_code, arg);
101803 + if (marked != MARK_NO)
101804 + return marked;
101805 + }
101806 + return MARK_NO;
101807 +}
101808 +
101809 +static enum mark walk_unary_ops(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs)
101810 +{
101811 + gimple def_stmt = get_def_stmt(lhs);
101812 + const_tree rhs;
101813 +
101814 + if (!def_stmt)
101815 + return MARK_NO;
101816 +
101817 + rhs = gimple_assign_rhs1(def_stmt);
101818 +
101819 + def_stmt = get_def_stmt(rhs);
101820 + if (is_gimple_constant(rhs))
101821 + search_err_code[FROM_CONST] = true;
101822 +
101823 + return pre_expand(visited, search_err_code, rhs);
101824 +}
101825 +
101826 +static enum mark walk_binary_ops(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs)
101827 +{
101828 + gimple def_stmt = get_def_stmt(lhs);
101829 + const_tree rhs1, rhs2;
101830 + enum mark marked;
101831 +
101832 + if (!def_stmt)
101833 + return MARK_NO;
101834 +
101835 + search_err_code[CAST_ONLY] = false;
101836 +
101837 + rhs1 = gimple_assign_rhs1(def_stmt);
101838 + rhs2 = gimple_assign_rhs2(def_stmt);
101839 + marked = pre_expand(visited, search_err_code, rhs1);
101840 + if (marked != MARK_NO)
101841 + return marked;
101842 + return pre_expand(visited, search_err_code, rhs2);
101843 +}
101844 +
101845 +static const_tree search_field_decl(const_tree comp_ref)
101846 +{
101847 + const_tree field = NULL_TREE;
101848 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
101849 +
101850 + for (i = 0; i < len; i++) {
101851 + field = TREE_OPERAND(comp_ref, i);
101852 + if (TREE_CODE(field) == FIELD_DECL)
101853 + break;
101854 + }
101855 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
101856 + return field;
101857 +}
101858 +
101859 +static enum mark mark_status(const_tree fndecl, unsigned int argnum)
101860 +{
101861 + const_tree attr, p;
101862 +
101863 + // mm/filemap.c D.35286_51 = D.35283_46 (file_10(D), mapping_11, pos_1, D.35273_50, D.35285_49, page.14_48, fsdata.15_47);
101864 + if (fndecl == NULL_TREE)
101865 + return MARK_NO;
101866 +
101867 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl));
101868 + if (!attr || !TREE_VALUE(attr))
101869 + return MARK_NO;
101870 +
101871 + p = TREE_VALUE(attr);
101872 + if (TREE_INT_CST_HIGH(TREE_VALUE(p)) == -1)
101873 + return MARK_TURN_OFF;
101874 + if (!TREE_INT_CST_LOW(TREE_VALUE(p)))
101875 + return MARK_NOT_INTENTIONAL;
101876 + if (argnum == 0) {
101877 + gcc_assert(current_function_decl == fndecl);
101878 + return MARK_NO;
101879 + }
101880 +
101881 + do {
101882 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p)))
101883 + return MARK_YES;
101884 + p = TREE_CHAIN(p);
101885 + } while (p);
101886 +
101887 + return MARK_NO;
101888 +}
101889 +
101890 +static void print_missing_msg(tree func, unsigned int argnum)
101891 +{
101892 + unsigned int new_hash;
101893 + size_t len;
101894 + unsigned char tree_codes[CODES_LIMIT];
101895 + location_t loc;
101896 + const char *curfunc;
101897 +
101898 + func = get_original_function_decl(func);
101899 + loc = DECL_SOURCE_LOCATION(func);
101900 + curfunc = get_asm_name(func);
101901 +
101902 + len = get_function_decl(func, tree_codes);
101903 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
101904 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash);
101905 +}
101906 +
101907 +static unsigned int search_missing_attribute(const_tree arg)
101908 +{
101909 + unsigned int argnum;
101910 + const struct size_overflow_hash *hash;
101911 + const_tree type = TREE_TYPE(arg);
101912 + tree func = get_original_function_decl(current_function_decl);
101913 +
101914 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
101915 +
101916 + if (TREE_CODE(type) == POINTER_TYPE)
101917 + return 0;
101918 +
101919 + argnum = find_arg_number(arg, func);
101920 + if (argnum == 0)
101921 + return 0;
101922 +
101923 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
101924 + return argnum;
101925 +
101926 + hash = get_function_hash(func);
101927 + if (!hash || !(hash->param & (1U << argnum))) {
101928 + print_missing_msg(func, argnum);
101929 + return 0;
101930 + }
101931 + return argnum;
101932 +}
101933 +
101934 +static enum mark is_already_marked(const_tree lhs)
101935 +{
101936 + unsigned int argnum;
101937 + const_tree fndecl;
101938 +
101939 + argnum = search_missing_attribute(lhs);
101940 + fndecl = get_original_function_decl(current_function_decl);
101941 + if (argnum && mark_status(fndecl, argnum) == MARK_YES)
101942 + return MARK_YES;
101943 + return MARK_NO;
101944 +}
101945 +
101946 +static enum mark pre_expand(struct pointer_set_t *visited, bool *search_err_code, const_tree lhs)
101947 +{
101948 + const_gimple def_stmt;
101949 +
101950 + if (skip_types(lhs))
101951 + return MARK_NO;
101952 +
101953 + if (TREE_CODE(lhs) == PARM_DECL)
101954 + return is_already_marked(lhs);
101955 +
101956 + if (TREE_CODE(lhs) == COMPONENT_REF) {
101957 + const_tree field, attr;
101958 +
101959 + field = search_field_decl(lhs);
101960 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field));
101961 + if (!attr || !TREE_VALUE(attr))
101962 + return MARK_NO;
101963 + return MARK_YES;
101964 + }
101965 +
101966 + def_stmt = get_def_stmt(lhs);
101967 +
101968 + if (!def_stmt)
101969 + return MARK_NO;
101970 +
101971 + if (pointer_set_contains(visited, def_stmt))
101972 + return MARK_NO;
101973 +
101974 + switch (gimple_code(def_stmt)) {
101975 + case GIMPLE_NOP:
101976 + if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL)
101977 + return is_already_marked(lhs);
101978 + return MARK_NO;
101979 + case GIMPLE_PHI:
101980 + return walk_phi(visited, search_err_code, lhs);
101981 + case GIMPLE_CALL:
101982 + if (mark_status((gimple_call_fndecl(def_stmt)), 0) == MARK_TURN_OFF)
101983 + return MARK_TURN_OFF;
101984 + check_function_hash(def_stmt);
101985 + return MARK_NO;
101986 + case GIMPLE_ASM:
101987 + search_err_code[CAST_ONLY] = false;
101988 + return MARK_NO;
101989 + case GIMPLE_ASSIGN:
101990 + switch (gimple_num_ops(def_stmt)) {
101991 + case 2:
101992 + return walk_unary_ops(visited, search_err_code, lhs);
101993 + case 3:
101994 + return walk_binary_ops(visited, search_err_code, lhs);
101995 + }
101996 + default:
101997 + debug_gimple_stmt((gimple)def_stmt);
101998 + error("%s: unknown gimple code", __func__);
101999 + gcc_unreachable();
102000 + }
102001 +}
102002 +
102003 +// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
102004 +static bool skip_asm(const_tree arg)
102005 +{
102006 + gimple def_stmt = get_def_stmt(arg);
102007 +
102008 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
102009 + return false;
102010 +
102011 + def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
102012 + return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
102013 +}
102014 +
102015 +/*
102016 +0</MARK_YES: no dup, search attributes (so, int)
102017 +0/MARK_NOT_INTENTIONAL: no dup, search attribute (int)
102018 +-1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
102019 +*/
102020 +
102021 +static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum, bool where)
102022 +{
102023 + struct pointer_set_t *visited;
102024 + enum mark is_marked, is_found;
102025 + location_t loc;
102026 + bool search_err_code[2] = {true, false};
102027 +
102028 + is_marked = mark_status(current_function_decl, 0);
102029 + if (is_marked == MARK_TURN_OFF)
102030 + return true;
102031 +
102032 + is_marked = mark_status(fndecl, argnum + 1);
102033 + if (is_marked == MARK_TURN_OFF || is_marked == MARK_NOT_INTENTIONAL)
102034 + return true;
102035 +
102036 + visited = pointer_set_create();
102037 + is_found = pre_expand(visited, search_err_code, arg);
102038 + pointer_set_destroy(visited);
102039 +
102040 + if (where == FROM_RET && search_err_code[CAST_ONLY] && search_err_code[FROM_CONST])
102041 + return true;
102042 +
102043 + if (where == FROM_ARG && skip_asm(arg))
102044 + return true;
102045 +
102046 + if (is_found == MARK_TURN_OFF)
102047 + return true;
102048 +
102049 + if ((is_found == MARK_YES && is_marked == MARK_YES))
102050 + return true;
102051 +
102052 + if (is_found == MARK_YES) {
102053 + loc = DECL_SOURCE_LOCATION(fndecl);
102054 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1);
102055 + return true;
102056 + }
102057 + return false;
102058 +}
102059 +
102060 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
102061 +{
102062 + struct pointer_set_t *visited;
102063 + tree arg, new_arg;
102064 + bool match;
102065 +
102066 + if (argnum == 0)
102067 + return;
102068 +
102069 + argnum--;
102070 +
102071 + match = get_function_arg(&argnum, fndecl);
102072 + if (!match)
102073 + return;
102074 + gcc_assert(gimple_call_num_args(stmt) > argnum);
102075 + arg = gimple_call_arg(stmt, argnum);
102076 + if (arg == NULL_TREE)
102077 + return;
102078 +
102079 + if (skip_types(arg))
102080 + return;
102081 +
102082 + if (search_attributes(fndecl, arg, argnum, FROM_ARG))
102083 + return;
102084 +
102085 + visited = pointer_set_create();
102086 + new_arg = expand(visited, arg);
102087 + pointer_set_destroy(visited);
102088 +
102089 + if (new_arg == NULL_TREE)
102090 + return;
102091 +
102092 + change_function_arg(stmt, arg, argnum, new_arg);
102093 + check_size_overflow(stmt, TREE_TYPE(new_arg), new_arg, arg, BEFORE_STMT);
102094 +}
102095 +
102096 +static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl)
102097 +{
102098 + tree p = TREE_VALUE(attr);
102099 + do {
102100 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p)));
102101 + p = TREE_CHAIN(p);
102102 + } while (p);
102103 +}
102104 +
102105 +static void handle_function_by_hash(gimple stmt, tree fndecl)
102106 +{
102107 + unsigned int num;
102108 + const struct size_overflow_hash *hash;
102109 +
102110 + hash = get_function_hash(fndecl);
102111 + if (!hash)
102112 + return;
102113 +
102114 + for (num = 0; num <= MAX_PARAM; num++)
102115 + if (hash->param & (1U << num))
102116 + handle_function_arg(stmt, fndecl, num);
102117 +}
102118 +
102119 +static bool check_return_value(void)
102120 +{
102121 + const struct size_overflow_hash *hash;
102122 +
102123 + hash = get_function_hash(current_function_decl);
102124 + if (!hash || !(hash->param & 1U << 0))
102125 + return false;
102126 +
102127 + return true;
102128 +}
102129 +
102130 +static void handle_return_value(gimple ret_stmt)
102131 +{
102132 + struct pointer_set_t *visited;
102133 + tree ret, new_ret;
102134 +
102135 + if (gimple_code(ret_stmt) != GIMPLE_RETURN)
102136 + return;
102137 +
102138 + ret = gimple_return_retval(ret_stmt);
102139 +
102140 + if (skip_types(ret))
102141 + return;
102142 +
102143 + if (search_attributes(current_function_decl, ret, 0, FROM_RET))
102144 + return;
102145 +
102146 + visited = pointer_set_create();
102147 + new_ret = expand(visited, ret);
102148 + pointer_set_destroy(visited);
102149 +
102150 + change_function_return(ret_stmt, ret, new_ret);
102151 + check_size_overflow(ret_stmt, TREE_TYPE(new_ret), new_ret, ret, BEFORE_STMT);
102152 +}
102153 +
102154 +static void set_plf_false(void)
102155 +{
102156 + basic_block bb;
102157 +
102158 + FOR_ALL_BB(bb) {
102159 + gimple_stmt_iterator si;
102160 +
102161 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
102162 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
102163 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
102164 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
102165 + }
102166 +}
102167 +
102168 +static unsigned int handle_function(void)
102169 +{
102170 + basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
102171 + bool check_ret;
102172 +
102173 + set_plf_false();
102174 +
102175 + check_ret = check_return_value();
102176 +
102177 + do {
102178 + gimple_stmt_iterator gsi;
102179 + next = bb->next_bb;
102180 +
102181 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
102182 + tree fndecl, attr;
102183 + gimple stmt = gsi_stmt(gsi);
102184 +
102185 + if (check_ret)
102186 + handle_return_value(stmt);
102187 +
102188 + if (!(is_gimple_call(stmt)))
102189 + continue;
102190 + fndecl = gimple_call_fndecl(stmt);
102191 + if (fndecl == NULL_TREE)
102192 + continue;
102193 + if (gimple_call_num_args(stmt) == 0)
102194 + continue;
102195 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
102196 + if (!attr || !TREE_VALUE(attr))
102197 + handle_function_by_hash(stmt, fndecl);
102198 + else
102199 + handle_function_by_attribute(stmt, attr, fndecl);
102200 + gsi = gsi_for_stmt(stmt);
102201 + next = gimple_bb(stmt)->next_bb;
102202 + }
102203 + bb = next;
102204 + } while (bb);
102205 + return 0;
102206 +}
102207 +
102208 +static struct gimple_opt_pass size_overflow_pass = {
102209 + .pass = {
102210 + .type = GIMPLE_PASS,
102211 + .name = "size_overflow",
102212 +#if BUILDING_GCC_VERSION >= 4008
102213 + .optinfo_flags = OPTGROUP_NONE,
102214 +#endif
102215 + .gate = NULL,
102216 + .execute = handle_function,
102217 + .sub = NULL,
102218 + .next = NULL,
102219 + .static_pass_number = 0,
102220 + .tv_id = TV_NONE,
102221 + .properties_required = PROP_cfg,
102222 + .properties_provided = 0,
102223 + .properties_destroyed = 0,
102224 + .todo_flags_start = 0,
102225 + .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
102226 + }
102227 +};
102228 +
102229 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
102230 +{
102231 + tree fntype;
102232 +
102233 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
102234 +
102235 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
102236 + fntype = build_function_type_list(void_type_node,
102237 + const_char_ptr_type_node,
102238 + unsigned_type_node,
102239 + const_char_ptr_type_node,
102240 + const_char_ptr_type_node,
102241 + NULL_TREE);
102242 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
102243 +
102244 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
102245 + TREE_PUBLIC(report_size_overflow_decl) = 1;
102246 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
102247 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
102248 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
102249 +}
102250 +
102251 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
102252 +{
102253 + int i;
102254 + const char * const plugin_name = plugin_info->base_name;
102255 + const int argc = plugin_info->argc;
102256 + const struct plugin_argument * const argv = plugin_info->argv;
102257 + bool enable = true;
102258 +
102259 + struct register_pass_info size_overflow_pass_info = {
102260 + .pass = &size_overflow_pass.pass,
102261 + .reference_pass_name = "ssa",
102262 + .ref_pass_instance_number = 1,
102263 + .pos_op = PASS_POS_INSERT_AFTER
102264 + };
102265 +
102266 + if (!plugin_default_version_check(version, &gcc_version)) {
102267 + error(G_("incompatible gcc/plugin versions"));
102268 + return 1;
102269 + }
102270 +
102271 + for (i = 0; i < argc; ++i) {
102272 + if (!strcmp(argv[i].key, "no-size-overflow")) {
102273 + enable = false;
102274 + continue;
102275 + }
102276 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102277 + }
102278 +
102279 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
102280 + if (enable) {
102281 + register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
102282 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
102283 + }
102284 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
102285 +
102286 + return 0;
102287 +}
102288 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
102289 new file mode 100644
102290 index 0000000..ac2901e
102291 --- /dev/null
102292 +++ b/tools/gcc/stackleak_plugin.c
102293 @@ -0,0 +1,327 @@
102294 +/*
102295 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
102296 + * Licensed under the GPL v2
102297 + *
102298 + * Note: the choice of the license means that the compilation process is
102299 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
102300 + * but for the kernel it doesn't matter since it doesn't link against
102301 + * any of the gcc libraries
102302 + *
102303 + * gcc plugin to help implement various PaX features
102304 + *
102305 + * - track lowest stack pointer
102306 + *
102307 + * TODO:
102308 + * - initialize all local variables
102309 + *
102310 + * BUGS:
102311 + * - none known
102312 + */
102313 +#include "gcc-plugin.h"
102314 +#include "config.h"
102315 +#include "system.h"
102316 +#include "coretypes.h"
102317 +#include "tree.h"
102318 +#include "tree-pass.h"
102319 +#include "flags.h"
102320 +#include "intl.h"
102321 +#include "toplev.h"
102322 +#include "plugin.h"
102323 +//#include "expr.h" where are you...
102324 +#include "diagnostic.h"
102325 +#include "plugin-version.h"
102326 +#include "tm.h"
102327 +#include "function.h"
102328 +#include "basic-block.h"
102329 +#include "gimple.h"
102330 +#include "rtl.h"
102331 +#include "emit-rtl.h"
102332 +
102333 +#if BUILDING_GCC_VERSION >= 4008
102334 +#define TODO_dump_func 0
102335 +#endif
102336 +
102337 +extern void print_gimple_stmt(FILE *, gimple, int, int);
102338 +
102339 +int plugin_is_GPL_compatible;
102340 +
102341 +static int track_frame_size = -1;
102342 +static const char track_function[] = "pax_track_stack";
102343 +static const char check_function[] = "pax_check_alloca";
102344 +static bool init_locals;
102345 +
102346 +static struct plugin_info stackleak_plugin_info = {
102347 + .version = "201302112000",
102348 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
102349 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
102350 +};
102351 +
102352 +static bool gate_stackleak_track_stack(void);
102353 +static unsigned int execute_stackleak_tree_instrument(void);
102354 +static unsigned int execute_stackleak_final(void);
102355 +
102356 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
102357 + .pass = {
102358 + .type = GIMPLE_PASS,
102359 + .name = "stackleak_tree_instrument",
102360 +#if BUILDING_GCC_VERSION >= 4008
102361 + .optinfo_flags = OPTGROUP_NONE,
102362 +#endif
102363 + .gate = gate_stackleak_track_stack,
102364 + .execute = execute_stackleak_tree_instrument,
102365 + .sub = NULL,
102366 + .next = NULL,
102367 + .static_pass_number = 0,
102368 + .tv_id = TV_NONE,
102369 + .properties_required = PROP_gimple_leh | PROP_cfg,
102370 + .properties_provided = 0,
102371 + .properties_destroyed = 0,
102372 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
102373 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
102374 + }
102375 +};
102376 +
102377 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
102378 + .pass = {
102379 + .type = RTL_PASS,
102380 + .name = "stackleak_final",
102381 +#if BUILDING_GCC_VERSION >= 4008
102382 + .optinfo_flags = OPTGROUP_NONE,
102383 +#endif
102384 + .gate = gate_stackleak_track_stack,
102385 + .execute = execute_stackleak_final,
102386 + .sub = NULL,
102387 + .next = NULL,
102388 + .static_pass_number = 0,
102389 + .tv_id = TV_NONE,
102390 + .properties_required = 0,
102391 + .properties_provided = 0,
102392 + .properties_destroyed = 0,
102393 + .todo_flags_start = 0,
102394 + .todo_flags_finish = TODO_dump_func
102395 + }
102396 +};
102397 +
102398 +static bool gate_stackleak_track_stack(void)
102399 +{
102400 + return track_frame_size >= 0;
102401 +}
102402 +
102403 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
102404 +{
102405 + gimple check_alloca;
102406 + tree fntype, fndecl, alloca_size;
102407 +
102408 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
102409 + fndecl = build_fn_decl(check_function, fntype);
102410 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
102411 +
102412 + // insert call to void pax_check_alloca(unsigned long size)
102413 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
102414 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
102415 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
102416 +}
102417 +
102418 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
102419 +{
102420 + gimple track_stack;
102421 + tree fntype, fndecl;
102422 +
102423 + fntype = build_function_type_list(void_type_node, NULL_TREE);
102424 + fndecl = build_fn_decl(track_function, fntype);
102425 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
102426 +
102427 + // insert call to void pax_track_stack(void)
102428 + track_stack = gimple_build_call(fndecl, 0);
102429 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
102430 +}
102431 +
102432 +#if BUILDING_GCC_VERSION == 4005
102433 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
102434 +{
102435 + tree fndecl;
102436 +
102437 + if (!is_gimple_call(stmt))
102438 + return false;
102439 + fndecl = gimple_call_fndecl(stmt);
102440 + if (!fndecl)
102441 + return false;
102442 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
102443 + return false;
102444 +// print_node(stderr, "pax", fndecl, 4);
102445 + return DECL_FUNCTION_CODE(fndecl) == code;
102446 +}
102447 +#endif
102448 +
102449 +static bool is_alloca(gimple stmt)
102450 +{
102451 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
102452 + return true;
102453 +
102454 +#if BUILDING_GCC_VERSION >= 4007
102455 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
102456 + return true;
102457 +#endif
102458 +
102459 + return false;
102460 +}
102461 +
102462 +static unsigned int execute_stackleak_tree_instrument(void)
102463 +{
102464 + basic_block bb, entry_bb;
102465 + bool prologue_instrumented = false, is_leaf = true;
102466 +
102467 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
102468 +
102469 + // 1. loop through BBs and GIMPLE statements
102470 + FOR_EACH_BB(bb) {
102471 + gimple_stmt_iterator gsi;
102472 +
102473 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
102474 + gimple stmt;
102475 +
102476 + stmt = gsi_stmt(gsi);
102477 +
102478 + if (is_gimple_call(stmt))
102479 + is_leaf = false;
102480 +
102481 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
102482 + if (!is_alloca(stmt))
102483 + continue;
102484 +
102485 + // 2. insert stack overflow check before each __builtin_alloca call
102486 + stackleak_check_alloca(&gsi);
102487 +
102488 + // 3. insert track call after each __builtin_alloca call
102489 + stackleak_add_instrumentation(&gsi);
102490 + if (bb == entry_bb)
102491 + prologue_instrumented = true;
102492 + }
102493 + }
102494 +
102495 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
102496 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
102497 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
102498 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
102499 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
102500 + return 0;
102501 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
102502 + return 0;
102503 +
102504 + // 4. insert track call at the beginning
102505 + if (!prologue_instrumented) {
102506 + gimple_stmt_iterator gsi;
102507 +
102508 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
102509 + if (dom_info_available_p(CDI_DOMINATORS))
102510 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
102511 + gsi = gsi_start_bb(bb);
102512 + stackleak_add_instrumentation(&gsi);
102513 + }
102514 +
102515 + return 0;
102516 +}
102517 +
102518 +static unsigned int execute_stackleak_final(void)
102519 +{
102520 + rtx insn, next;
102521 +
102522 + if (cfun->calls_alloca)
102523 + return 0;
102524 +
102525 + // keep calls only if function frame is big enough
102526 + if (get_frame_size() >= track_frame_size)
102527 + return 0;
102528 +
102529 + // 1. find pax_track_stack calls
102530 + for (insn = get_insns(); insn; insn = next) {
102531 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
102532 + rtx body;
102533 +
102534 + next = NEXT_INSN(insn);
102535 + if (!CALL_P(insn))
102536 + continue;
102537 + body = PATTERN(insn);
102538 + if (GET_CODE(body) != CALL)
102539 + continue;
102540 + body = XEXP(body, 0);
102541 + if (GET_CODE(body) != MEM)
102542 + continue;
102543 + body = XEXP(body, 0);
102544 + if (GET_CODE(body) != SYMBOL_REF)
102545 + continue;
102546 + if (strcmp(XSTR(body, 0), track_function))
102547 + continue;
102548 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
102549 + // 2. delete call
102550 + delete_insn_and_edges(insn);
102551 +#if BUILDING_GCC_VERSION >= 4007
102552 + if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
102553 + insn = next;
102554 + next = NEXT_INSN(insn);
102555 + delete_insn_and_edges(insn);
102556 + }
102557 +#endif
102558 + }
102559 +
102560 +// print_simple_rtl(stderr, get_insns());
102561 +// print_rtl(stderr, get_insns());
102562 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
102563 +
102564 + return 0;
102565 +}
102566 +
102567 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
102568 +{
102569 + const char * const plugin_name = plugin_info->base_name;
102570 + const int argc = plugin_info->argc;
102571 + const struct plugin_argument * const argv = plugin_info->argv;
102572 + int i;
102573 + struct register_pass_info stackleak_tree_instrument_pass_info = {
102574 + .pass = &stackleak_tree_instrument_pass.pass,
102575 +// .reference_pass_name = "tree_profile",
102576 + .reference_pass_name = "optimized",
102577 + .ref_pass_instance_number = 1,
102578 + .pos_op = PASS_POS_INSERT_BEFORE
102579 + };
102580 + struct register_pass_info stackleak_final_pass_info = {
102581 + .pass = &stackleak_final_rtl_opt_pass.pass,
102582 + .reference_pass_name = "final",
102583 + .ref_pass_instance_number = 1,
102584 + .pos_op = PASS_POS_INSERT_BEFORE
102585 + };
102586 +
102587 + if (!plugin_default_version_check(version, &gcc_version)) {
102588 + error(G_("incompatible gcc/plugin versions"));
102589 + return 1;
102590 + }
102591 +
102592 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
102593 +
102594 + for (i = 0; i < argc; ++i) {
102595 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
102596 + if (!argv[i].value) {
102597 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102598 + continue;
102599 + }
102600 + track_frame_size = atoi(argv[i].value);
102601 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
102602 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
102603 + continue;
102604 + }
102605 + if (!strcmp(argv[i].key, "initialize-locals")) {
102606 + if (argv[i].value) {
102607 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
102608 + continue;
102609 + }
102610 + init_locals = true;
102611 + continue;
102612 + }
102613 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102614 + }
102615 +
102616 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
102617 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
102618 +
102619 + return 0;
102620 +}
102621 diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c
102622 new file mode 100644
102623 index 0000000..4fae911
102624 --- /dev/null
102625 +++ b/tools/gcc/structleak_plugin.c
102626 @@ -0,0 +1,277 @@
102627 +/*
102628 + * Copyright 2013 by PaX Team <pageexec@freemail.hu>
102629 + * Licensed under the GPL v2
102630 + *
102631 + * Note: the choice of the license means that the compilation process is
102632 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
102633 + * but for the kernel it doesn't matter since it doesn't link against
102634 + * any of the gcc libraries
102635 + *
102636 + * gcc plugin to forcibly initialize certain local variables that could
102637 + * otherwise leak kernel stack to userland if they aren't properly initialized
102638 + * by later code
102639 + *
102640 + * Homepage: http://pax.grsecurity.net/
102641 + *
102642 + * Usage:
102643 + * $ # for 4.5/4.6/C based 4.7
102644 + * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
102645 + * $ # for C++ based 4.7/4.8+
102646 + * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
102647 + * $ gcc -fplugin=./structleak_plugin.so test.c -O2
102648 + *
102649 + * TODO: eliminate redundant initializers
102650 + * increase type coverage
102651 + */
102652 +
102653 +#include "gcc-plugin.h"
102654 +#include "config.h"
102655 +#include "system.h"
102656 +#include "coretypes.h"
102657 +#include "tree.h"
102658 +#include "tree-pass.h"
102659 +#include "intl.h"
102660 +#include "plugin-version.h"
102661 +#include "tm.h"
102662 +#include "toplev.h"
102663 +#include "function.h"
102664 +#include "tree-flow.h"
102665 +#include "plugin.h"
102666 +#include "gimple.h"
102667 +#include "diagnostic.h"
102668 +#include "cfgloop.h"
102669 +#include "langhooks.h"
102670 +
102671 +#if BUILDING_GCC_VERSION >= 4008
102672 +#define TODO_dump_func 0
102673 +#endif
102674 +
102675 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
102676 +
102677 +// unused type flag in all versions 4.5-4.8
102678 +#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE)
102679 +
102680 +int plugin_is_GPL_compatible;
102681 +void debug_gimple_stmt(gimple gs);
102682 +
102683 +static struct plugin_info structleak_plugin_info = {
102684 + .version = "201304082245",
102685 + .help = "disable\tdo not activate plugin\n",
102686 +};
102687 +
102688 +static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
102689 +{
102690 + *no_add_attrs = true;
102691 +
102692 + // check for types? for now accept everything linux has to offer
102693 + if (TREE_CODE(*node) != FIELD_DECL)
102694 + return NULL_TREE;
102695 +
102696 + *no_add_attrs = false;
102697 + return NULL_TREE;
102698 +}
102699 +
102700 +static struct attribute_spec user_attr = {
102701 + .name = "user",
102702 + .min_length = 0,
102703 + .max_length = 0,
102704 + .decl_required = false,
102705 + .type_required = false,
102706 + .function_type_required = false,
102707 + .handler = handle_user_attribute,
102708 +#if BUILDING_GCC_VERSION >= 4007
102709 + .affects_type_identity = true
102710 +#endif
102711 +};
102712 +
102713 +static void register_attributes(void *event_data, void *data)
102714 +{
102715 + register_attribute(&user_attr);
102716 +// register_attribute(&force_attr);
102717 +}
102718 +
102719 +static tree get_field_type(tree field)
102720 +{
102721 + return strip_array_types(TREE_TYPE(field));
102722 +}
102723 +
102724 +static bool is_userspace_type(tree type)
102725 +{
102726 + tree field;
102727 +
102728 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
102729 + tree fieldtype = get_field_type(field);
102730 + enum tree_code code = TREE_CODE(fieldtype);
102731 +
102732 + if (code == RECORD_TYPE || code == UNION_TYPE)
102733 + if (is_userspace_type(fieldtype))
102734 + return true;
102735 +
102736 + if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
102737 + return true;
102738 + }
102739 + return false;
102740 +}
102741 +
102742 +static void finish_type(void *event_data, void *data)
102743 +{
102744 + tree type = (tree)event_data;
102745 +
102746 + if (TYPE_USERSPACE(type))
102747 + return;
102748 +
102749 + if (is_userspace_type(type))
102750 + TYPE_USERSPACE(type) = 1;
102751 +}
102752 +
102753 +static void initialize(tree var)
102754 +{
102755 + basic_block bb;
102756 + gimple_stmt_iterator gsi;
102757 + tree initializer;
102758 + gimple init_stmt;
102759 +
102760 + // this is the original entry bb before the forced split
102761 +	// TODO: check further BBs in case more splits occurred before us
102762 + bb = ENTRY_BLOCK_PTR->next_bb->next_bb;
102763 +
102764 + // first check if the variable is already initialized, warn otherwise
102765 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
102766 + gimple stmt = gsi_stmt(gsi);
102767 + tree rhs1;
102768 +
102769 + // we're looking for an assignment of a single rhs...
102770 + if (!gimple_assign_single_p(stmt))
102771 + continue;
102772 + rhs1 = gimple_assign_rhs1(stmt);
102773 +#if BUILDING_GCC_VERSION >= 4007
102774 + // ... of a non-clobbering expression...
102775 + if (TREE_CLOBBER_P(rhs1))
102776 + continue;
102777 +#endif
102778 + // ... to our variable...
102779 + if (gimple_get_lhs(stmt) != var)
102780 + continue;
102781 + // if it's an initializer then we're good
102782 + if (TREE_CODE(rhs1) == CONSTRUCTOR)
102783 + return;
102784 + }
102785 +
102786 + // these aren't the 0days you're looking for
102787 +// inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
102788 +
102789 + // build the initializer expression
102790 + initializer = build_constructor(TREE_TYPE(var), NULL);
102791 +
102792 + // build the initializer stmt
102793 + init_stmt = gimple_build_assign(var, initializer);
102794 + gsi = gsi_start_bb(ENTRY_BLOCK_PTR->next_bb);
102795 + gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
102796 + update_stmt(init_stmt);
102797 +}
102798 +
102799 +static unsigned int handle_function(void)
102800 +{
102801 + basic_block bb;
102802 + unsigned int ret = 0;
102803 + tree var;
102804 +
102805 +#if BUILDING_GCC_VERSION == 4005
102806 + tree vars;
102807 +#else
102808 + unsigned int i;
102809 +#endif
102810 +
102811 + // split the first bb where we can put the forced initializers
102812 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
102813 + if (dom_info_available_p(CDI_DOMINATORS))
102814 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
102815 +
102816 +	// enumerate all local variables and forcibly initialize our targets
102817 +#if BUILDING_GCC_VERSION == 4005
102818 + for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
102819 + var = TREE_VALUE(vars);
102820 +#else
102821 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
102822 +#endif
102823 + tree type = TREE_TYPE(var);
102824 +
102825 + gcc_assert(DECL_P(var));
102826 + if (!auto_var_in_fn_p(var, current_function_decl))
102827 + continue;
102828 +
102829 + // only care about structure types
102830 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
102831 + continue;
102832 +
102833 + // if the type is of interest, examine the variable
102834 + if (TYPE_USERSPACE(type))
102835 + initialize(var);
102836 + }
102837 +
102838 + return ret;
102839 +}
102840 +
102841 +static struct gimple_opt_pass structleak_pass = {
102842 + .pass = {
102843 + .type = GIMPLE_PASS,
102844 + .name = "structleak",
102845 +#if BUILDING_GCC_VERSION >= 4008
102846 + .optinfo_flags = OPTGROUP_NONE,
102847 +#endif
102848 + .gate = NULL,
102849 + .execute = handle_function,
102850 + .sub = NULL,
102851 + .next = NULL,
102852 + .static_pass_number = 0,
102853 + .tv_id = TV_NONE,
102854 + .properties_required = PROP_cfg,
102855 + .properties_provided = 0,
102856 + .properties_destroyed = 0,
102857 + .todo_flags_start = 0,
102858 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
102859 + }
102860 +};
102861 +
102862 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
102863 +{
102864 + int i;
102865 + const char * const plugin_name = plugin_info->base_name;
102866 + const int argc = plugin_info->argc;
102867 + const struct plugin_argument * const argv = plugin_info->argv;
102868 + bool enable = true;
102869 +
102870 + struct register_pass_info structleak_pass_info = {
102871 + .pass = &structleak_pass.pass,
102872 + .reference_pass_name = "ssa",
102873 + .ref_pass_instance_number = 1,
102874 + .pos_op = PASS_POS_INSERT_AFTER
102875 + };
102876 +
102877 + if (!plugin_default_version_check(version, &gcc_version)) {
102878 + error(G_("incompatible gcc/plugin versions"));
102879 + return 1;
102880 + }
102881 +
102882 + if (strcmp(lang_hooks.name, "GNU C")) {
102883 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
102884 + enable = false;
102885 + }
102886 +
102887 + for (i = 0; i < argc; ++i) {
102888 + if (!strcmp(argv[i].key, "disable")) {
102889 + enable = false;
102890 + continue;
102891 + }
102892 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102893 + }
102894 +
102895 + register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
102896 + if (enable) {
102897 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
102898 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
102899 + }
102900 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
102901 +
102902 + return 0;
102903 +}
102904 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
102905 index 6789d78..4afd019e 100644
102906 --- a/tools/perf/util/include/asm/alternative-asm.h
102907 +++ b/tools/perf/util/include/asm/alternative-asm.h
102908 @@ -5,4 +5,7 @@
102909
102910 #define altinstruction_entry #
102911
102912 + .macro pax_force_retaddr rip=0, reload=0
102913 + .endm
102914 +
102915 #endif
102916 diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
102917 index 96b919d..c49bb74 100644
102918 --- a/tools/perf/util/include/linux/compiler.h
102919 +++ b/tools/perf/util/include/linux/compiler.h
102920 @@ -18,4 +18,12 @@
102921 #define __force
102922 #endif
102923
102924 +#ifndef __size_overflow
102925 +# define __size_overflow(...)
102926 +#endif
102927 +
102928 +#ifndef __intentional_overflow
102929 +# define __intentional_overflow(...)
102930 +#endif
102931 +
102932 #endif
102933 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
102934 index f18013f..90421df 100644
102935 --- a/virt/kvm/kvm_main.c
102936 +++ b/virt/kvm/kvm_main.c
102937 @@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
102938
102939 static cpumask_var_t cpus_hardware_enabled;
102940 static int kvm_usage_count = 0;
102941 -static atomic_t hardware_enable_failed;
102942 +static atomic_unchecked_t hardware_enable_failed;
102943
102944 struct kmem_cache *kvm_vcpu_cache;
102945 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
102946
102947 -static __read_mostly struct preempt_ops kvm_preempt_ops;
102948 +static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
102949 +static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
102950 +static struct preempt_ops kvm_preempt_ops = {
102951 + .sched_in = kvm_sched_in,
102952 + .sched_out = kvm_sched_out,
102953 +};
102954
102955 struct dentry *kvm_debugfs_dir;
102956
102957 @@ -769,7 +774,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
102958 /* We can read the guest memory with __xxx_user() later on. */
102959 if (user_alloc &&
102960 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
102961 - !access_ok(VERIFY_WRITE,
102962 + !__access_ok(VERIFY_WRITE,
102963 (void __user *)(unsigned long)mem->userspace_addr,
102964 mem->memory_size)))
102965 goto out;
102966 @@ -1881,7 +1886,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
102967 return 0;
102968 }
102969
102970 -static struct file_operations kvm_vcpu_fops = {
102971 +static file_operations_no_const kvm_vcpu_fops __read_only = {
102972 .release = kvm_vcpu_release,
102973 .unlocked_ioctl = kvm_vcpu_ioctl,
102974 #ifdef CONFIG_COMPAT
102975 @@ -2402,7 +2407,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
102976 return 0;
102977 }
102978
102979 -static struct file_operations kvm_vm_fops = {
102980 +static file_operations_no_const kvm_vm_fops __read_only = {
102981 .release = kvm_vm_release,
102982 .unlocked_ioctl = kvm_vm_ioctl,
102983 #ifdef CONFIG_COMPAT
102984 @@ -2500,7 +2505,7 @@ out:
102985 return r;
102986 }
102987
102988 -static struct file_operations kvm_chardev_ops = {
102989 +static file_operations_no_const kvm_chardev_ops __read_only = {
102990 .unlocked_ioctl = kvm_dev_ioctl,
102991 .compat_ioctl = kvm_dev_ioctl,
102992 .llseek = noop_llseek,
102993 @@ -2526,7 +2531,7 @@ static void hardware_enable_nolock(void *junk)
102994
102995 if (r) {
102996 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
102997 - atomic_inc(&hardware_enable_failed);
102998 + atomic_inc_unchecked(&hardware_enable_failed);
102999 printk(KERN_INFO "kvm: enabling virtualization on "
103000 "CPU%d failed\n", cpu);
103001 }
103002 @@ -2580,10 +2585,10 @@ static int hardware_enable_all(void)
103003
103004 kvm_usage_count++;
103005 if (kvm_usage_count == 1) {
103006 - atomic_set(&hardware_enable_failed, 0);
103007 + atomic_set_unchecked(&hardware_enable_failed, 0);
103008 on_each_cpu(hardware_enable_nolock, NULL, 1);
103009
103010 - if (atomic_read(&hardware_enable_failed)) {
103011 + if (atomic_read_unchecked(&hardware_enable_failed)) {
103012 hardware_disable_all_nolock();
103013 r = -EBUSY;
103014 }
103015 @@ -2941,7 +2946,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
103016 kvm_arch_vcpu_put(vcpu);
103017 }
103018
103019 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
103020 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
103021 struct module *module)
103022 {
103023 int r;
103024 @@ -2977,7 +2982,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
103025 if (!vcpu_align)
103026 vcpu_align = __alignof__(struct kvm_vcpu);
103027 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
103028 - 0, NULL);
103029 + SLAB_USERCOPY, NULL);
103030 if (!kvm_vcpu_cache) {
103031 r = -ENOMEM;
103032 goto out_free_3;
103033 @@ -2987,9 +2992,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
103034 if (r)
103035 goto out_free;
103036
103037 + pax_open_kernel();
103038 kvm_chardev_ops.owner = module;
103039 kvm_vm_fops.owner = module;
103040 kvm_vcpu_fops.owner = module;
103041 + pax_close_kernel();
103042
103043 r = misc_register(&kvm_dev);
103044 if (r) {
103045 @@ -2999,9 +3006,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
103046
103047 register_syscore_ops(&kvm_syscore_ops);
103048
103049 - kvm_preempt_ops.sched_in = kvm_sched_in;
103050 - kvm_preempt_ops.sched_out = kvm_sched_out;
103051 -
103052 r = kvm_init_debug();
103053 if (r) {
103054 printk(KERN_ERR "kvm: create debugfs files failed\n");